Lines matching refs: poll

8 #include <linux/poll.h>
22 #include "poll.h"
40 /* output value, set only if arm poll returns >0 */
92 * arming poll and wakeups.
108 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
118 return &req->apoll->poll;
131 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
133 poll->head = NULL;
136 poll->events = events | IO_POLL_UNMASK;
137 INIT_LIST_HEAD(&poll->wait.entry);
138 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
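
The io_init_poll_iocb() matches above cover the whole initializer; a minimal sketch stitching them together (IO_POLL_UNMASK is assumed here to be the always-on error/hangup mask):

    static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
    {
            poll->head = NULL;
            /* always report error/hangup conditions on top of what was asked for */
            poll->events = events | IO_POLL_UNMASK;
            INIT_LIST_HEAD(&poll->wait.entry);
            /* io_poll_wake() is what the waitqueue will invoke on wakeup */
            init_waitqueue_func_entry(&poll->wait, io_poll_wake);
    }
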
141 static inline void io_poll_remove_entry(struct io_poll *poll)
143 struct wait_queue_head *head = smp_load_acquire(&poll->head);
147 list_del_init(&poll->wait.entry);
148 poll->head = NULL;
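
The io_poll_remove_entry() matches sketch out as below; the waitqueue-lock critical section is an assumption about how the entry is detached safely against concurrent wakeups:

    static inline void io_poll_remove_entry(struct io_poll *poll)
    {
            struct wait_queue_head *head = smp_load_acquire(&poll->head);

            if (head) {
                    /* assumed: head->lock serialises us against io_poll_wake() */
                    spin_lock_irq(&head->lock);
                    list_del_init(&poll->wait.entry);
                    poll->head = NULL;
                    spin_unlock_irq(&head->lock);
            }
    }
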
157 * into the poll/apoll/double cachelines if we can.
214 * All poll tw should go through this. Checks for poll events, manages
221 * poll and that the result is stored in req->cqe.
333 struct io_poll *poll;
335 poll = io_kiocb_to_cmd(req, struct io_poll);
336 req->cqe.res = mangle_poll(req->cqe.res & poll->events);
368 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
381 list_del_init(&poll->wait.entry);
389 smp_store_release(&poll->head, NULL);
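
For io_pollfree_wake(), the ordering of the two matched statements is the point: detach the entry first, then publish head == NULL with a release store. A sketch, with the cancellation/task-work steps elided as assumptions:

    static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
    {
            /* (marking the request cancelled and kicking task_work elided) */

            /* pull the entry off the waitqueue that is about to be freed... */
            list_del_init(&poll->wait.entry);

            /*
             * ...then publish head == NULL last: it pairs with the
             * smp_load_acquire() readers, and once observed the request may
             * be completed and freed without touching this waitqueue again.
             */
            smp_store_release(&poll->head, NULL);
            return 1;
    }
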
397 struct io_poll *poll = container_of(wait, struct io_poll, wait);
401 return io_pollfree_wake(req, poll);
404 if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
409 * If we trigger a multishot poll off our own wakeup path,
414 poll->events |= EPOLLONESHOT;
417 if (mask && poll->events & EPOLLONESHOT) {
418 list_del_init(&poll->wait.entry);
419 poll->head = NULL;
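
A condensed sketch of io_poll_wake() built from the matches above; the untagged wait->private pointer, the EPOLL_URING_WAKE check for "our own" wakeups, and the elided ownership/task-work handling are assumptions:

    static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode,
                            int sync, void *key)
    {
            struct io_kiocb *req = wait->private;   /* assumed untagged pointer */
            struct io_poll *poll = container_of(wait, struct io_poll, wait);
            __poll_t mask = key_to_poll(key);

            /* the whole waitqueue is going away, tear the request off it */
            if (unlikely(mask & POLLFREE))
                    return io_pollfree_wake(req, poll);

            /* ignore wakeups for events the submitter did not ask for */
            if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                    return 0;

            /* a wakeup triggered by our own CQ posting must not re-arm multishot */
            if (mask & EPOLL_URING_WAKE)
                    poll->events |= EPOLLONESHOT;

            if (mask && (poll->events & EPOLLONESHOT)) {
                    /* one-shot: detach now and spare the tw handler the locking */
                    list_del_init(&poll->wait.entry);
                    poll->head = NULL;
            }
            /* (ownership check and task_work scheduling elided) */
            return 1;
    }
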
434 struct io_poll *poll = io_poll_get_single(req);
438 head = smp_load_acquire(&poll->head);
440 * poll arm might not hold ownership and so race for req->flags with
441 * io_poll_wake(). There is only one poll entry queued, serialise with
456 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
464 * The file being polled uses multiple waitqueues for poll handling
469 struct io_poll *first = poll;
482 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
483 if (!poll) {
490 io_init_poll_iocb(poll, first->events);
493 kfree(poll);
496 *poll_ptr = poll;
498 /* fine to modify, there is no poll queued to race with us */
503 poll->head = head;
504 poll->wait.private = (void *) wqe_private;
506 if (poll->events & EPOLLEXCLUSIVE) {
507 add_wait_queue_exclusive(head, &poll->wait);
509 add_wait_queue(head, &poll->wait);
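
Stitching the __io_queue_proc() matches together gives roughly the shape below; the pt->nr_entries guard, the pt->error field, and the elided double-add checks are assumptions about the surrounding context:

    static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                                struct wait_queue_head *head,
                                struct io_poll **poll_ptr)
    {
            unsigned long wqe_private = (unsigned long) pt->req;

            if (pt->nr_entries) {
                    /* a second waitqueue (e.g. read vs. write) needs its own entry */
                    struct io_poll *first = poll;

                    /* invoked from the file's ->poll via vfs_poll(): atomic alloc only */
                    poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                    if (!poll) {
                            pt->error = -ENOMEM;
                            return;
                    }
                    io_init_poll_iocb(poll, first->events);
                    /* (double-add and completion-race checks elided) */
                    *poll_ptr = poll;
            }

            pt->nr_entries++;
            poll->head = head;
            poll->wait.private = (void *) wqe_private;

            if (poll->events & EPOLLEXCLUSIVE)
                    /* exclusive waiters: only one is woken per event */
                    add_wait_queue_exclusive(head, &poll->wait);
            else
                    add_wait_queue(head, &poll->wait);
    }
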
517 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
519 __io_queue_proc(poll, pt, head,
545 struct io_poll *poll,
550 io_init_poll_iocb(poll, mask);
551 poll->file = req->file;
552 req->apoll_events = poll->events;
579 if (poll->events & EPOLLEXCLUSIVE)
582 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
590 } else if (mask && (poll->events & EPOLLET)) {
598 ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
611 if (mask && (poll->events & EPOLLET) &&
621 * poll was woken up, queue up a tw, it'll deal with it.
635 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
639 * We can't reliably detect loops in repeated poll triggers and issue
662 apoll->poll.retries = APOLL_MAX_RETRY;
666 if (unlikely(!--apoll->poll.retries))
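
The retry counter in the apoll matches caps how often the same async poll entry can be re-armed, since loops in repeated poll triggers cannot be detected reliably; a sketch of that budget check (the fallback behaviour on return is an assumption):

    /* fresh (or cache-recycled) apoll entries start with a full budget */
    apoll->poll.retries = APOLL_MAX_RETRY;

    /* ... */

    /* each arming attempt spends one retry; give up once the budget is gone */
    if (unlikely(!--apoll->poll.retries))
            return NULL;    /* assumed: caller then stops arming poll for this request */
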
707 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
710 trace_io_uring_poll_arm(req, mask, apoll->poll.events);
715 * Returns true if we found and killed one or more poll requests
869 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
880 poll->events = io_poll_parse_events(sqe, flags);
886 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
892 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
923 struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
925 poll->events &= ~0xffff;
926 poll->events |= poll_update->events & 0xffff;
927 poll->events |= IO_POLL_UNMASK;
933 /* successfully updated, don't complete poll request */
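
Finally, the poll-update matches suggest the merge below: only the user-visible low 16 event bits are replaced, the internal flag bits in the high half survive, and IO_POLL_UNMASK is re-applied. The guard field name is an assumption:

    if (poll_update->update_events) {       /* assumed guard field name */
            struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

            /* replace only the user-visible low 16 event bits */
            poll->events &= ~0xffff;
            poll->events |= poll_update->events & 0xffff;
            /* and keep the error/hangup bits that are always reported */
            poll->events |= IO_POLL_UNMASK;
    }
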