Lines Matching +defs:wait +defs:head
56 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
133 poll->head = NULL;
137 INIT_LIST_HEAD(&poll->wait.entry);
138 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
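
The three lines at 133-138 are the standard way to arm a custom wake callback on a waitqueue entry: clear the head back-pointer, initialise the list linkage, and install the callback with init_waitqueue_func_entry(). A minimal sketch of the same pattern outside io_uring follows; struct my_waiter, my_wake() and my_waiter_init() are invented names for illustration, not part of poll.c, and the later sketches below build on them.

/*
 * Illustrative sketch only: the wait_queue_entry setup pattern from lines
 * 133-138, with hypothetical names.
 */
#include <linux/wait.h>
#include <linux/list.h>

struct my_waiter {
	struct wait_queue_head *head;	/* waitqueue we are queued on, or NULL */
	struct wait_queue_entry wait;	/* our entry on that waitqueue */
};

/* Wake callback; called by __wake_up() with head->lock held, must not sleep. */
static int my_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		   void *key)
{
	struct my_waiter *w = container_of(wait, struct my_waiter, wait);

	/* ... react to the wakeup, e.g. punt work to a deferred context ... */
	(void)w;
	return 1;
}

static void my_waiter_init(struct my_waiter *w)
{
	w->head = NULL;				/* not queued anywhere yet */
	INIT_LIST_HEAD(&w->wait.entry);		/* list_del_init() is safe later */
	init_waitqueue_func_entry(&w->wait, my_wake);
}
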
143 struct wait_queue_head *head = smp_load_acquire(&poll->head);
145 if (head) {
146 spin_lock_irq(&head->lock);
147 list_del_init(&poll->wait.entry);
148 poll->head = NULL;
149 spin_unlock_irq(&head->lock);
164 * wake_up_pollfree() will wait for us. However, taking the waitqueue
378 list_del_init(&poll->wait.entry);
382 * as req->head is NULL'ed out, the request can be
386 smp_store_release(&poll->head, NULL);
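
Lines 143-149 and 378-386 are the two halves of a release/acquire handshake on poll->head: the removal side only dereferences the head it obtained with smp_load_acquire() and unlinks under head->lock, while the pollfree path unlinks first and only then publishes NULL with smp_store_release(), so the waitqueue is never touched through a stale pointer. A hedged sketch of the same pairing, continuing the my_waiter example above:

/* Removal side: only touch the head we observed via the acquire load. */
static void my_waiter_remove(struct my_waiter *w)
{
	/* Pairs with smp_store_release() in my_waiter_pollfree() below. */
	struct wait_queue_head *head = smp_load_acquire(&w->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&w->wait.entry);
		w->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

/* Teardown side: the waitqueue itself is about to go away. */
static void my_waiter_pollfree(struct my_waiter *w)
{
	list_del_init(&w->wait.entry);
	/*
	 * Must be last: once head reads as NULL, my_waiter_remove() no
	 * longer takes the lock, so the waiter may be completed and freed.
	 */
	smp_store_release(&w->head, NULL);
}
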
390 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
393 struct io_kiocb *req = wqe_to_req(wait);
394 struct io_poll *poll = container_of(wait, struct io_poll, wait);
415 list_del_init(&poll->wait.entry);
416 poll->head = NULL;
417 if (wqe_is_double(wait))
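
In the wake callback itself (390-417), wqe_to_req() and wqe_is_double() recover the owning request from wait->private, into which the queueing side packed a flag alongside the pointer (see line 501). A sketch of that tagged-pointer idiom with invented names (struct my_request is hypothetical):

/*
 * Tagged-pointer idiom behind wqe_to_req()/wqe_is_double(): the owner
 * pointer and a "second (double) entry" flag share wait->private, using
 * bit 0 of the (aligned) pointer.
 */
#define MY_WQE_F_DOUBLE		1UL

struct my_request;

static inline struct my_request *wqe_to_my_req(struct wait_queue_entry *wait)
{
	unsigned long priv = (unsigned long)wait->private;

	return (struct my_request *)(priv & ~MY_WQE_F_DOUBLE);
}

static inline bool wqe_is_my_double(struct wait_queue_entry *wait)
{
	return (unsigned long)wait->private & MY_WQE_F_DOUBLE;
}

/* Queueing side: pack the flag into the pointer before storing it. */
static void my_set_wqe_private(struct wait_queue_entry *wait,
			       struct my_request *req, bool is_double)
{
	unsigned long priv = (unsigned long)req;

	if (is_double)
		priv |= MY_WQE_F_DOUBLE;
	wait->private = (void *)priv;
}
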
430 struct wait_queue_head *head;
433 /* head is RCU protected, see io_poll_remove_entries() comments */
435 head = smp_load_acquire(&poll->head);
439 * it by taking its head lock. As we're still arming the tw handler
442 if (head) {
443 spin_lock_irq(&head->lock);
447 spin_unlock_irq(&head->lock);
450 return !!head;
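
Lines 430-450 show the read side of that teardown protocol: the head pointer is sampled with smp_load_acquire() under rcu_read_lock(), because (per the io_poll_remove_entries() comment referenced at line 433) users of wake_up_pollfree() RCU-delay the actual free, and a still-present head may then have its lock taken briefly to serialise with the wake callback. A hedged sketch, continuing the my_waiter example:

#include <linux/rcupdate.h>

static bool my_waiter_sync_with_wake(struct my_waiter *w)
{
	struct wait_queue_head *head;

	rcu_read_lock();
	/* Pairs with smp_store_release(&w->head, NULL) in teardown. */
	head = smp_load_acquire(&w->head);
	if (head) {
		/*
		 * my_wake() runs with this lock held, so holding it briefly
		 * serialises our state update against concurrent wakeups.
		 */
		spin_lock_irq(&head->lock);
		/* ... update state that the wake callback also looks at ... */
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();

	return head != NULL;
}
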
454 struct wait_queue_head *head,
468 /* double add on the same waitqueue head, ignore */
469 if (first->head == head)
473 if ((*poll_ptr)->head == head)
500 poll->head = head;
501 poll->wait.private = (void *) wqe_private;
504 add_wait_queue_exclusive(head, &poll->wait);
506 add_wait_queue(head, &poll->wait);
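
The queue-proc core at 454-506 skips a second registration on a waitqueue head it already has an entry on (468-473), records the head and the tagged private pointer, and then picks add_wait_queue_exclusive() over add_wait_queue() when the waiter should only consume a single wakeup. A sketch of that choice, continuing the my_waiter example; the exclusive flag is a stand-in for whatever policy the caller applies (e.g. EPOLLEXCLUSIVE in poll.c):

static void my_waiter_queue(struct my_waiter *w, struct wait_queue_head *head,
			    bool exclusive)
{
	/* already queued on this head: ignore, cf. lines 468-473 */
	if (w->head == head)
		return;

	w->head = head;
	if (exclusive)
		/* tail of the queue; wakers using nr_exclusive stop after us */
		add_wait_queue_exclusive(head, &w->wait);
	else
		/* normal entry; woken on every wakeup of this head */
		add_wait_queue(head, &w->wait);
}
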
510 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
516 __io_queue_proc(poll, pt, head,
572 * wake if someone does wait(events > 1). Ensure we don't do
626 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
632 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
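
io_poll_queue_proc() (510-516) and io_async_queue_proc() (626-632) are the poll_table callbacks that a file's ->poll() implementation invokes through poll_wait(); both funnel into __io_queue_proc(), differing only in where the second (double) poll entry is stored. A hedged sketch of how such a queue proc can be wired up and driven, continuing the my_waiter example; struct my_poll_table and my_arm_poll() are invented for illustration:

#include <linux/poll.h>

struct my_poll_table {
	struct poll_table_struct pt;	/* handed to vfs_poll() below */
	struct my_waiter *waiter;
};

/* Invoked by the file's ->poll() method via poll_wait(). */
static void my_queue_proc(struct file *file, struct wait_queue_head *head,
			  struct poll_table_struct *p)
{
	struct my_poll_table *mpt = container_of(p, struct my_poll_table, pt);

	my_waiter_queue(mpt->waiter, head, false);
}

static __poll_t my_arm_poll(struct file *file, struct my_waiter *w)
{
	struct my_poll_table mpt = { .waiter = w };

	init_poll_funcptr(&mpt.pt, my_queue_proc);
	mpt.pt._key = EPOLLIN;			/* events of interest */
	return vfs_poll(file, &mpt.pt);		/* may call my_queue_proc() */
}
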