Lines Matching refs:uipc_main
102 static tUIPC_MAIN uipc_main; variable
213 uipc_main.tid = 0; in uipc_main_init()
214 uipc_main.running = 0; in uipc_main_init()
215 memset(&uipc_main.active_set, 0, sizeof(uipc_main.active_set)); in uipc_main_init()
216 memset(&uipc_main.read_set, 0, sizeof(uipc_main.read_set)); in uipc_main_init()
217 uipc_main.max_fd = 0; in uipc_main_init()
218 memset(&uipc_main.signal_fds, 0, sizeof(uipc_main.signal_fds)); in uipc_main_init()
219 memset(&uipc_main.ch, 0, sizeof(uipc_main.ch)); in uipc_main_init()
222 if (socketpair(AF_UNIX, SOCK_STREAM, 0, uipc_main.signal_fds) < 0) { in uipc_main_init()
226 FD_SET(uipc_main.signal_fds[0], &uipc_main.active_set); in uipc_main_init()
227 uipc_main.max_fd = MAX(uipc_main.max_fd, uipc_main.signal_fds[0]); in uipc_main_init()
230 tUIPC_CHAN* p = &uipc_main.ch[i]; in uipc_main_init()
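The uipc_main_init() lines above zero the server state, create a signal socketpair, and add its read end to the select() set. Below is a minimal reconstruction of the state those lines touch, for orientation only: every name, type, and value not visible in the listing (UIPC_CH_NUM, UIPC_DISCONNECTED, the callback typedef, the headers) is an assumption, and the later sketches on this page reuse these definitions.

    #include <pthread.h>
    #include <poll.h>
    #include <sys/select.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdint>
    #include <mutex>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))  // as used in the lines above
    #define UIPC_DISCONNECTED (-1)             // assumed sentinel value
    #define UIPC_CH_NUM 2                      // assumed channel count

    typedef void(tUIPC_RCV_CBACK)(int ch_id, int event);  // assumed shape

    typedef struct {
      int srvfd;                // listening socket for the channel
      int fd;                   // accepted data socket, or UIPC_DISCONNECTED
      int read_poll_tmo_ms;     // timeout used by poll() in UIPC_Read()
      uint32_t task_evt_flags;  // e.g. UIPC_TASK_FLAG_DISCONNECT_CHAN
      tUIPC_RCV_CBACK* cback;   // gets UIPC_OPEN/CLOSE/RX_DATA_READY events
    } tUIPC_CHAN;

    typedef struct {
      pthread_t tid;            // read-task thread id
      int running;              // loop flag checked by uipc_read_task()
      fd_set active_set;        // fds the select() loop should watch
      fd_set read_set;          // per-iteration working copy passed to select()
      int max_fd;               // highest fd, for select(max_fd + 1, ...)
      int signal_fds[2];        // socketpair used to interrupt select()
      tUIPC_CHAN ch[UIPC_CH_NUM];
      std::recursive_mutex mutex;
    } tUIPC_MAIN;

    static tUIPC_MAIN uipc_main;

The socketpair is what lets other threads unblock a select() that is already in progress, which is why its read end is added to active_set during init, before any channel exists.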
245 close(uipc_main.signal_fds[0]); in uipc_main_cleanup()
246 close(uipc_main.signal_fds[1]); in uipc_main_cleanup()
257 if (uipc_main.ch[i].task_evt_flags & UIPC_TASK_FLAG_DISCONNECT_CHAN) { in uipc_check_task_flags_locked()
258 uipc_main.ch[i].task_evt_flags &= ~UIPC_TASK_FLAG_DISCONNECT_CHAN; in uipc_check_task_flags_locked()
272 if (SAFE_FD_ISSET(uipc_main.ch[ch_id].srvfd, &uipc_main.read_set)) { in uipc_check_fd_locked()
276 if (uipc_main.ch[ch_id].fd != UIPC_DISCONNECTED) { in uipc_check_fd_locked()
277 BTIF_TRACE_EVENT("CLOSE CONNECTION (FD %d)", uipc_main.ch[ch_id].fd); in uipc_check_fd_locked()
278 close(uipc_main.ch[ch_id].fd); in uipc_check_fd_locked()
279 FD_CLR(uipc_main.ch[ch_id].fd, &uipc_main.active_set); in uipc_check_fd_locked()
280 uipc_main.ch[ch_id].fd = UIPC_DISCONNECTED; in uipc_check_fd_locked()
283 uipc_main.ch[ch_id].fd = accept_server_socket(uipc_main.ch[ch_id].srvfd); in uipc_check_fd_locked()
285 BTIF_TRACE_EVENT("NEW FD %d", uipc_main.ch[ch_id].fd); in uipc_check_fd_locked()
287 if ((uipc_main.ch[ch_id].fd >= 0) && uipc_main.ch[ch_id].cback) { in uipc_check_fd_locked()
290 BTIF_TRACE_EVENT("ADD FD %d TO ACTIVE SET", uipc_main.ch[ch_id].fd); in uipc_check_fd_locked()
291 FD_SET(uipc_main.ch[ch_id].fd, &uipc_main.active_set); in uipc_check_fd_locked()
292 uipc_main.max_fd = MAX(uipc_main.max_fd, uipc_main.ch[ch_id].fd); in uipc_check_fd_locked()
295 if (uipc_main.ch[ch_id].fd < 0) { in uipc_check_fd_locked()
300 if (uipc_main.ch[ch_id].cback) in uipc_check_fd_locked()
301 uipc_main.ch[ch_id].cback(ch_id, UIPC_OPEN_EVT); in uipc_check_fd_locked()
306 if (SAFE_FD_ISSET(uipc_main.ch[ch_id].fd, &uipc_main.read_set)) { in uipc_check_fd_locked()
309 if (uipc_main.ch[ch_id].cback) in uipc_check_fd_locked()
310 uipc_main.ch[ch_id].cback(ch_id, UIPC_RX_DATA_READY_EVT); in uipc_check_fd_locked()
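The uipc_check_fd_locked() lines above describe the accept path: a readable srvfd means a new client, any existing data fd is closed first, the accepted fd is added to active_set, and the channel callback is told the channel is open; a readable data fd is simply reported as RX-ready. A sketch assembled from those lines, with trace calls omitted and the signature assumed:

    static void uipc_check_fd_locked(int ch_id) {
      // A readable server socket means a peer is connecting.
      if (SAFE_FD_ISSET(uipc_main.ch[ch_id].srvfd, &uipc_main.read_set)) {
        // Only one connection per channel: drop any existing data fd first.
        if (uipc_main.ch[ch_id].fd != UIPC_DISCONNECTED) {
          close(uipc_main.ch[ch_id].fd);
          FD_CLR(uipc_main.ch[ch_id].fd, &uipc_main.active_set);
          uipc_main.ch[ch_id].fd = UIPC_DISCONNECTED;
        }

        uipc_main.ch[ch_id].fd = accept_server_socket(uipc_main.ch[ch_id].srvfd);

        if (uipc_main.ch[ch_id].fd >= 0 && uipc_main.ch[ch_id].cback) {
          // Watch the new data fd and tell the client the channel is open.
          FD_SET(uipc_main.ch[ch_id].fd, &uipc_main.active_set);
          uipc_main.max_fd = MAX(uipc_main.max_fd, uipc_main.ch[ch_id].fd);
          uipc_main.ch[ch_id].cback(ch_id, UIPC_OPEN_EVT);
        }
      }

      // A readable data socket is reported to the client as RX-ready;
      // the callback typically pulls the data via UIPC_Read().
      if (SAFE_FD_ISSET(uipc_main.ch[ch_id].fd, &uipc_main.read_set) &&
          uipc_main.ch[ch_id].cback) {
        uipc_main.ch[ch_id].cback(ch_id, UIPC_RX_DATA_READY_EVT);
      }
    }

SAFE_FD_ISSET and accept_server_socket come from the surrounding file, as the listing shows.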
316 if (SAFE_FD_ISSET(uipc_main.signal_fds[0], &uipc_main.read_set)) { in uipc_check_interrupt_locked()
318 OSI_NO_INTR(recv(uipc_main.signal_fds[0], &sig_recv, sizeof(sig_recv), in uipc_check_interrupt_locked()
327 OSI_NO_INTR(send(uipc_main.signal_fds[1], &sig_on, sizeof(sig_on), 0)); in uipc_wakeup_locked()
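The two references above are the wakeup mechanism built on the signal_fds socketpair: any thread can write one byte to signal_fds[1] to make signal_fds[0] readable, unblocking the select() in the read task, which then drains the byte. A sketch of both sides (OSI_NO_INTR retries the call on EINTR; the recv flags are not visible in the truncated line and are assumed):

    static int uipc_check_interrupt_locked(void) {
      if (SAFE_FD_ISSET(uipc_main.signal_fds[0], &uipc_main.read_set)) {
        char sig_recv = 0;
        // Drain the wakeup byte so select() does not report it again.
        OSI_NO_INTR(recv(uipc_main.signal_fds[0], &sig_recv, sizeof(sig_recv),
                         MSG_WAITALL));
        return 1;
      }
      return 0;
    }

    static void uipc_wakeup_locked(void) {
      char sig_on = 1;
      // One byte on the write end unblocks the select() in uipc_read_task().
      OSI_NO_INTR(send(uipc_main.signal_fds[1], &sig_on, sizeof(sig_on), 0));
    }

This is the usual self-pipe pattern; the later sketches assume uipc_setup_server_locked(), uipc_close_locked(), and uipc_stop_main_server_thread() use it to get the loop to re-evaluate active_set or the running flag.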
338 std::lock_guard<std::recursive_mutex> guard(uipc_main.mutex); in uipc_setup_server_locked()
348 FD_SET(fd, &uipc_main.active_set); in uipc_setup_server_locked()
349 uipc_main.max_fd = MAX(uipc_main.max_fd, fd); in uipc_setup_server_locked()
351 uipc_main.ch[ch_id].srvfd = fd; in uipc_setup_server_locked()
352 uipc_main.ch[ch_id].cback = cback; in uipc_setup_server_locked()
353 uipc_main.ch[ch_id].read_poll_tmo_ms = DEFAULT_READ_POLL_TMO_MS; in uipc_setup_server_locked()
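The uipc_setup_server_locked() lines above show how a new channel server is registered under the main mutex: the listening fd goes into active_set, max_fd is bumped, and the channel records its srvfd, callback, and the default read-poll timeout. A sketch of that flow; create_server_socket() and the wakeup call at the end are assumptions not visible in the listing:

    static int uipc_setup_server_locked(int ch_id, const char* name,
                                        tUIPC_RCV_CBACK* cback) {
      std::lock_guard<std::recursive_mutex> guard(uipc_main.mutex);

      int fd = create_server_socket(name);  // hypothetical helper
      if (fd < 0) return -1;

      // Watch the listening socket and remember the channel configuration.
      FD_SET(fd, &uipc_main.active_set);
      uipc_main.max_fd = MAX(uipc_main.max_fd, fd);

      uipc_main.ch[ch_id].srvfd = fd;
      uipc_main.ch[ch_id].cback = cback;
      uipc_main.ch[ch_id].read_poll_tmo_ms = DEFAULT_READ_POLL_TMO_MS;

      // Kick the select() loop so the new fd is part of the next wait.
      uipc_wakeup_locked();
      return 0;
    }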
366 pfd.fd = uipc_main.ch[ch_id].fd; in uipc_flush_ch_locked()
368 if (uipc_main.ch[ch_id].fd == UIPC_DISCONNECTED) { in uipc_flush_ch_locked()
421 if (uipc_main.ch[ch_id].srvfd != UIPC_DISCONNECTED) { in uipc_close_ch_locked()
422 BTIF_TRACE_EVENT("CLOSE SERVER (FD %d)", uipc_main.ch[ch_id].srvfd); in uipc_close_ch_locked()
423 close(uipc_main.ch[ch_id].srvfd); in uipc_close_ch_locked()
424 FD_CLR(uipc_main.ch[ch_id].srvfd, &uipc_main.active_set); in uipc_close_ch_locked()
425 uipc_main.ch[ch_id].srvfd = UIPC_DISCONNECTED; in uipc_close_ch_locked()
429 if (uipc_main.ch[ch_id].fd != UIPC_DISCONNECTED) { in uipc_close_ch_locked()
430 BTIF_TRACE_EVENT("CLOSE CONNECTION (FD %d)", uipc_main.ch[ch_id].fd); in uipc_close_ch_locked()
431 close(uipc_main.ch[ch_id].fd); in uipc_close_ch_locked()
432 FD_CLR(uipc_main.ch[ch_id].fd, &uipc_main.active_set); in uipc_close_ch_locked()
433 uipc_main.ch[ch_id].fd = UIPC_DISCONNECTED; in uipc_close_ch_locked()
438 if (uipc_main.ch[ch_id].cback) in uipc_close_ch_locked()
439 uipc_main.ch[ch_id].cback(ch_id, UIPC_CLOSE_EVT); in uipc_close_ch_locked()
448 if (uipc_main.ch[ch_id].srvfd == UIPC_DISCONNECTED) { in uipc_close_locked()
454 uipc_main.ch[ch_id].task_evt_flags |= UIPC_TASK_FLAG_DISCONNECT_CHAN; in uipc_close_locked()
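The close path above is split across threads: uipc_close_locked() only marks the channel with UIPC_TASK_FLAG_DISCONNECT_CHAN, and the read task notices the flag (the uipc_check_task_flags_locked() lines earlier) and performs the actual teardown via uipc_close_ch_locked(), which closes srvfd and fd, clears them from active_set, and fires UIPC_CLOSE_EVT. A sketch of that handshake, with signatures assumed:

    static void uipc_check_task_flags_locked(void) {
      for (int i = 0; i < UIPC_CH_NUM; i++) {
        if (uipc_main.ch[i].task_evt_flags & UIPC_TASK_FLAG_DISCONNECT_CHAN) {
          uipc_main.ch[i].task_evt_flags &= ~UIPC_TASK_FLAG_DISCONNECT_CHAN;
          // Closes srvfd/fd, removes them from active_set, fires UIPC_CLOSE_EVT.
          uipc_close_ch_locked(i);
        }
      }
    }

    static void uipc_close_locked(int ch_id) {
      if (uipc_main.ch[ch_id].srvfd == UIPC_DISCONNECTED) return;

      // Defer the teardown to the read task and wake it up.
      uipc_main.ch[ch_id].task_evt_flags |= UIPC_TASK_FLAG_DISCONNECT_CHAN;
      uipc_wakeup_locked();
    }

Deferring the close to the read task keeps the fd and fd_set manipulation on the one thread that is not blocked in select() on those descriptors.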
466 while (uipc_main.running) { in uipc_read_task()
467 uipc_main.read_set = uipc_main.active_set; in uipc_read_task()
470 select(uipc_main.max_fd + 1, &uipc_main.read_set, NULL, NULL, NULL); in uipc_read_task()
484 std::lock_guard<std::recursive_mutex> guard(uipc_main.mutex); in uipc_read_task()
506 uipc_main.tid = 0; in uipc_read_task()
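The uipc_read_task() lines above give the main loop: while running, copy active_set into read_set (select() modifies the set it is given), block in select(max_fd + 1, ...), and then, under the mutex, handle the wakeup byte, any deferred disconnect flags, and every channel's fds. A sketch of the loop with the per-iteration error handling condensed:

    static void* uipc_read_task(void* arg) {
      (void)arg;
      while (uipc_main.running) {
        // select() mutates the set it is handed, so work on a copy.
        uipc_main.read_set = uipc_main.active_set;

        int result = select(uipc_main.max_fd + 1, &uipc_main.read_set,
                            NULL, NULL, NULL);
        if (result <= 0) continue;  // no timeout is set; retry on error

        std::lock_guard<std::recursive_mutex> guard(uipc_main.mutex);
        uipc_check_interrupt_locked();    // drain a wakeup byte, if present
        uipc_check_task_flags_locked();   // apply deferred channel disconnects
        for (int ch_id = 0; ch_id < UIPC_CH_NUM; ch_id++)
          uipc_check_fd_locked(ch_id);    // accepts + RX-ready callbacks
      }

      uipc_main.tid = 0;
      return NULL;
    }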
514 uipc_main.running = 1; in uipc_start_main_server_thread()
516 if (pthread_create(&uipc_main.tid, (const pthread_attr_t*)NULL, in uipc_start_main_server_thread()
529 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in uipc_stop_main_server_thread()
530 uipc_main.running = 0; in uipc_stop_main_server_thread()
538 if (uipc_main.tid) pthread_join(uipc_main.tid, NULL); in uipc_stop_main_server_thread()
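The thread lifecycle above is symmetric: start sets running and spawns uipc_read_task(); stop clears running under the lock, wakes the select() loop so it observes the flag, and joins the thread. A sketch with return conventions assumed:

    int uipc_start_main_server_thread(void) {
      uipc_main.running = 1;

      if (pthread_create(&uipc_main.tid, (const pthread_attr_t*)NULL,
                         uipc_read_task, NULL) != 0) {
        uipc_main.running = 0;
        return -1;
      }
      return 0;
    }

    void uipc_stop_main_server_thread(void) {
      {
        std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
        uipc_main.running = 0;
        uipc_wakeup_locked();  // unblock select() so the loop can exit
      }

      // Join outside the lock; the read task takes the same mutex on its way out.
      if (uipc_main.tid) pthread_join(uipc_main.tid, NULL);
    }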
570 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in UIPC_Open()
576 if (uipc_main.ch[ch_id].srvfd != UIPC_DISCONNECTED) { in UIPC_Open()
609 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in UIPC_Close()
631 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in UIPC_Send()
634 OSI_NO_INTR(ret = write(uipc_main.ch[ch_id].fd, p_buf, msglen)); in UIPC_Send()
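The UIPC_Send() references above show the simplest data-path call: take the main mutex and write the buffer to the channel's data fd in one shot. A sketch with the signature and return convention assumed:

    bool UIPC_Send(int ch_id, uint16_t msg_evt, const uint8_t* p_buf,
                   uint16_t msglen) {
      (void)msg_evt;
      std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);

      ssize_t ret;
      OSI_NO_INTR(ret = write(uipc_main.ch[ch_id].fd, p_buf, msglen));
      return ret == (ssize_t)msglen;  // simplified error handling
    }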
655 int fd = uipc_main.ch[ch_id].fd; in UIPC_Read()
676 OSI_NO_INTR(poll_ret = poll(&pfd, 1, uipc_main.ch[ch_id].read_poll_tmo_ms)); in UIPC_Read()
679 uipc_main.ch[ch_id].read_poll_tmo_ms); in UIPC_Read()
692 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in UIPC_Read()
704 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in UIPC_Read()
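The UIPC_Read() lines above show the per-channel read_poll_tmo_ms being used with poll() on the data fd before reading, so a silent peer produces a bounded timeout instead of a blocked caller; the main mutex shows up only further down the function, which the sketch below reads as error-path teardown. Signature simplified and error handling condensed; both are assumptions:

    uint32_t UIPC_Read(int ch_id, uint8_t* p_buf, uint32_t len) {
      int fd = uipc_main.ch[ch_id].fd;
      if (fd == UIPC_DISCONNECTED) return 0;

      uint32_t n_read = 0;
      while (n_read < len) {
        struct pollfd pfd;
        pfd.fd = fd;
        pfd.events = POLLIN | POLLHUP;

        // Wait at most read_poll_tmo_ms for data before giving up.
        int poll_ret;
        OSI_NO_INTR(poll_ret = poll(&pfd, 1, uipc_main.ch[ch_id].read_poll_tmo_ms));
        if (poll_ret == 0) break;  // timed out; return what we have

        if (poll_ret < 0 || (pfd.revents & (POLLHUP | POLLNVAL))) {
          // Peer is gone or the fd is invalid: close the channel under the lock.
          std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);
          uipc_close_ch_locked(ch_id);
          return 0;
        }

        ssize_t n;
        OSI_NO_INTR(n = recv(fd, p_buf + n_read, len - n_read, 0));
        if (n <= 0) break;  // EOF or error; return what we have
        n_read += (uint32_t)n;
      }
      return n_read;
    }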
733 std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex); in UIPC_Ioctl()
743 uipc_main.ch[ch_id].cback = (tUIPC_RCV_CBACK*)param; in UIPC_Ioctl()
748 if (uipc_main.ch[ch_id].fd != UIPC_DISCONNECTED) { in UIPC_Ioctl()
750 FD_CLR(uipc_main.ch[ch_id].fd, &uipc_main.active_set); in UIPC_Ioctl()
758 uipc_main.ch[ch_id].read_poll_tmo_ms = (intptr_t)param; in UIPC_Ioctl()
760 uipc_main.ch[ch_id].read_poll_tmo_ms); in UIPC_Ioctl()
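The UIPC_Ioctl() lines above show three operations performed under the main mutex: installing a receive callback, removing a channel's data fd from the select() set so the caller can read it directly, and setting the per-channel read-poll timeout. A sketch of that dispatch; the request-code names and the return convention are assumptions:

    bool UIPC_Ioctl(int ch_id, uint32_t request, void* param) {
      std::lock_guard<std::recursive_mutex> lock(uipc_main.mutex);

      switch (request) {
        case UIPC_REG_CBACK:
          // Install (or replace) the event callback for this channel.
          uipc_main.ch[ch_id].cback = (tUIPC_RCV_CBACK*)param;
          break;

        case UIPC_REG_REMOVE_ACTIVE_READSET:
          // Stop the select() loop from watching this channel's data fd;
          // the caller will pull data itself through UIPC_Read().
          if (uipc_main.ch[ch_id].fd != UIPC_DISCONNECTED)
            FD_CLR(uipc_main.ch[ch_id].fd, &uipc_main.active_set);
          uipc_wakeup_locked();
          break;

        case UIPC_SET_READ_POLL_TMO:
          // Timeout consumed by the poll() inside UIPC_Read().
          uipc_main.ch[ch_id].read_poll_tmo_ms = (intptr_t)param;
          break;

        default:
          break;
      }
      return true;
    }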