Lines matching full:poll in the io_uring poll implementation (io_uring/poll.c)
8 #include <linux/poll.h>
22 #include "poll.h"
40 /* output value, set only if arm poll returns >0 */
92 * arming poll and wakeups.
108 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */ in io_poll_get_double()
118 return &req->apoll->poll; in io_poll_get_single()
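Lines 108 and 118 above are fragments of a pair of small accessors that pick the correct struct io_poll for a request: a plain IORING_OP_POLL_ADD ("pure poll") keeps its second entry in ->async_data, while an internally armed async poll uses the apoll fields. A rough reconstruction of the pair, where only the two matched lines are verbatim and the rest is assumed from that split:

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}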
172 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events) in io_init_poll_iocb() argument
174 poll->head = NULL; in io_init_poll_iocb()
177 poll->events = events | IO_POLL_UNMASK; in io_init_poll_iocb()
178 INIT_LIST_HEAD(&poll->wait.entry); in io_init_poll_iocb()
179 init_waitqueue_func_entry(&poll->wait, io_poll_wake); in io_init_poll_iocb()
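Lines 172-179 already show most of the initialiser; only the IO_POLL_UNMASK definition falls outside the matches. A sketch of the whole function, with that definition filled in as an assumption (hangup/error style events a poller always receives):

/* assumed: events that are always reported, regardless of what was asked for */
#define IO_POLL_UNMASK	(EPOLLERR | EPOLLHUP | EPOLLNVAL | EPOLLRDHUP)

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}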
182 static inline void io_poll_remove_entry(struct io_poll *poll) in io_poll_remove_entry() argument
184 struct wait_queue_head *head = smp_load_acquire(&poll->head); in io_poll_remove_entry()
188 list_del_init(&poll->wait.entry); in io_poll_remove_entry()
189 poll->head = NULL; in io_poll_remove_entry()
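Lines 182-189 show the acquire load of poll->head and the de-queue, but not the locking around them. A sketch of the likely shape, with the NULL check and the waitqueue lock added as assumptions:

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	/*
	 * poll->head is cleared with smp_store_release() on the POLLFREE
	 * path (see io_pollfree_wake() below), so a non-NULL head seen via
	 * the acquire load is still safe to lock here.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}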
198 * into the poll/apoll/double cachelines if we can. in io_poll_remove_entries()
255 * All poll tw should go through this. Checks for poll events, manages
262 * poll and that the result is stored in req->cqe.
370 struct io_poll *poll; in io_poll_task_func() local
372 poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_task_func()
373 req->cqe.res = mangle_poll(req->cqe.res & poll->events); in io_poll_task_func()
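mangle_poll() at line 373 converts the kernel's __poll_t event bits into the architecture's userspace poll representation before the result is posted, so consumers read cqe->res with the ordinary POLL* constants. A tiny, hypothetical consumer-side fragment (fd and read_more() are placeholders):

	/* userspace: cqe->res already holds mangled POLL* bits */
	if (cqe->res & (POLLIN | POLLHUP))
		read_more(fd);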
405 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll) in io_pollfree_wake() argument
418 list_del_init(&poll->wait.entry); in io_pollfree_wake()
426 smp_store_release(&poll->head, NULL); in io_pollfree_wake()
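The POLLFREE path (lines 405-426) unhooks the wait entry from a waitqueue that is about to be freed. A condensed sketch of the two matched statements with the ordering requirement spelled out; the real function also has to mark the request cancelled and kick its task_work first, which is omitted here:

	/* the waitqueue is going away: stop touching it via this request */
	list_del_init(&poll->wait.entry);

	/*
	 * Must be last: this release store pairs with the smp_load_acquire()
	 * of poll->head in io_poll_remove_entry(), so once head is NULL'ed
	 * nobody will try to take the soon-to-be-freed waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);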
434 struct io_poll *poll = container_of(wait, struct io_poll, wait); in io_poll_wake() local
438 return io_pollfree_wake(req, poll); in io_poll_wake()
441 if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON))) in io_poll_wake()
446 * If we trigger a multishot poll off our own wakeup path, in io_poll_wake()
451 poll->events |= EPOLLONESHOT; in io_poll_wake()
454 if (mask && poll->events & EPOLLONESHOT) { in io_poll_wake()
455 list_del_init(&poll->wait.entry); in io_poll_wake()
456 poll->head = NULL; in io_poll_wake()
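io_poll_wake() (lines 434-456) is the waitqueue callback. The matched lines give its skeleton: filter out events the request did not ask for, downgrade multishot to one-shot when woken from io_uring's own completion path, and unhook one-shot entries immediately. A condensed sketch; the ownership check and the task_work kick that the real function does around this are paraphrased assumptions:

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode,
			int sync, void *key)
{
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(wqe_to_req(wait), poll);

	/* for instances that support it, check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	/*
	 * If we trigger a multishot poll off our own wakeup path, force
	 * one-shot to break the circular dependency between posting a CQE
	 * and being woken by it (condition assumed from the comment at 446).
	 */
	if (mask & EPOLL_URING_WAKE)
		poll->events |= EPOLLONESHOT;

	/* one-shot: unhook now, saves extra locking in the completion path */
	if (mask && poll->events & EPOLLONESHOT) {
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
	}

	/* ... queue task_work to complete the request (assumed) ... */
	return 1;
}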
471 struct io_poll *poll = io_poll_get_single(req); in io_poll_double_prepare() local
475 head = smp_load_acquire(&poll->head); in io_poll_double_prepare()
477 * poll arm might not hold ownership and so race for req->flags with in io_poll_double_prepare()
478 * io_poll_wake(). There is only one poll entry queued, serialise with in io_poll_double_prepare()
493 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt, in __io_queue_proc() argument
501 * The file being polled uses multiple waitqueues for poll handling in __io_queue_proc()
506 struct io_poll *first = poll; in __io_queue_proc()
519 poll = kmalloc(sizeof(*poll), GFP_ATOMIC); in __io_queue_proc()
520 if (!poll) { in __io_queue_proc()
527 io_init_poll_iocb(poll, first->events); in __io_queue_proc()
530 kfree(poll); in __io_queue_proc()
533 *poll_ptr = poll; in __io_queue_proc()
535 /* fine to modify, there is no poll queued to race with us */ in __io_queue_proc()
540 poll->head = head; in __io_queue_proc()
541 poll->wait.private = (void *) wqe_private; in __io_queue_proc()
543 if (poll->events & EPOLLEXCLUSIVE) { in __io_queue_proc()
544 add_wait_queue_exclusive(head, &poll->wait); in __io_queue_proc()
546 add_wait_queue(head, &poll->wait); in __io_queue_proc()
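__io_queue_proc() (lines 493-546) is the poll_table queue_proc callback handed to vfs_poll(). The matched lines show its main job: if the file registers a second waitqueue, allocate an extra struct io_poll on the fly; either way, publish the head and add the wait entry, exclusively if EPOLLEXCLUSIVE was requested. A condensed reconstruction; the wqe_private encoding and some error cases are simplified assumptions:

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write): set up a second entry.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(pt->req)) {
			/* the request is completing, back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		pt->req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = pt->req;	/* wqe_private, simplified */

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}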
554 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll); in io_poll_queue_proc() local
556 __io_queue_proc(poll, pt, head, in io_poll_queue_proc()
581 struct io_poll *poll, in __io_arm_poll_handler() argument
586 io_init_poll_iocb(poll, mask); in __io_arm_poll_handler()
587 poll->file = req->file; in __io_arm_poll_handler()
588 req->apoll_events = poll->events; in __io_arm_poll_handler()
620 if (poll->events & EPOLLEXCLUSIVE) in __io_arm_poll_handler()
623 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
631 } else if (mask && (poll->events & EPOLLET)) { in __io_arm_poll_handler()
639 ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) { in __io_arm_poll_handler()
652 if (mask && (poll->events & EPOLLET) && in __io_arm_poll_handler()
662 * poll was woken up, queue up a tw, it'll deal with it. in __io_arm_poll_handler()
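__io_arm_poll_handler() (lines 581-662) is where both pure poll and async poll get armed. Stringing the matched fragments together: the io_poll is initialised from the requested mask, the wait entries are queued through the poll_table while vfs_poll() reports the file's current readiness, and the EPOLLET / EPOLLONESHOT bits then decide whether an already-ready file completes inline or waits for the next wakeup. A minimal sketch of just the arming core; everything between the matched lines is assumed glue:

	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	/*
	 * Hook the request onto the file's waitqueue(s) via ipt->pt (this is
	 * what invokes __io_queue_proc()) and read back current readiness,
	 * filtered down to the events we actually asked for.
	 */
	mask = vfs_poll(req->file, &ipt->pt) & poll->events;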
676 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); in io_async_queue_proc()
680 * We can't reliably detect loops in repeated poll triggers and issue
700 apoll->poll.retries = APOLL_MAX_RETRY; in io_req_alloc_apoll()
706 apoll->poll.retries = APOLL_MAX_RETRY; in io_req_alloc_apoll()
710 if (unlikely(!--apoll->poll.retries)) in io_req_alloc_apoll()
757 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
760 trace_io_uring_poll_arm(req, mask, apoll->poll.events); in io_arm_poll_handler()
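io_req_alloc_apoll()/io_arm_poll_handler() (lines 676-760) arm the internal async poll used to retry would-block requests. Per lines 700-710, each apoll carries a bounded retry budget so a file that keeps triggering spurious wakeups cannot re-arm indefinitely. A condensed sketch of that mechanism; the allocation paths and the APOLL_MAX_RETRY value itself are assumptions:

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req)
{
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		/* re-arm: reuse the existing apoll and keep its retry count */
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else {
		/* first arm: fresh allocation gets the full retry budget */
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;

	/* too many spurious wakeups on this request: stop using apoll */
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}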
791 * Returns true if we found and killed one or more poll requests
952 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add_prep() local
963 poll->events = io_poll_parse_events(sqe, flags); in io_poll_add_prep()
969 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add() local
982 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); in io_poll_add()
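For context, the userspace side of the io_poll_add_prep()/io_poll_add() path looks roughly like this with liburing (assuming a reasonably recent liburing; the fd, queue depth and user_data tag are arbitrary choices for the sketch):

#include <liburing.h>
#include <poll.h>

static int arm_multishot_poll(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* multishot: a CQE is posted for every POLLIN readiness event */
	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
	io_uring_sqe_set_data64(sqe, 0x1234);	/* tag, reused by poll update */
	return io_uring_submit(ring);
}

Each completion carries the ready mask in cqe->res and keeps IORING_CQE_F_MORE set while the poll stays armed; without IORING_POLL_ADD_MULTI the kernel ORs in EPOLLONESHOT and the request completes after the first event.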
1029 struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll); in io_poll_remove() local
1031 poll->events &= ~0xffff; in io_poll_remove()
1032 poll->events |= poll_update->events & 0xffff; in io_poll_remove()
1033 poll->events |= IO_POLL_UNMASK; in io_poll_remove()
1039 /* successfully updated, don't complete poll request */ in io_poll_remove()
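Finally, the masking in io_poll_remove() above (only the low 16 event bits are replaced, so mode bits such as one-shot stay intact) is what backs the poll-update operation. A hedged userspace sketch with liburing (the __u64-based io_uring_prep_poll_update() signature is from recent liburing releases and may differ on older ones):

	/* switch the armed poll tagged 0x1234 from POLLIN to POLLOUT;
	 * the user_data tag itself is left unchanged */
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_poll_update(sqe, 0x1234, 0, POLLOUT,
				  IORING_POLL_UPDATE_EVENTS);
	io_uring_submit(ring);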