// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

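/*
 * The wait queue entry's ->private stores the owning request pointer, with
 * the low bit (IO_WQE_F_DOUBLE) tagging entries that belong to the second
 * (double) poll entry.
 */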
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We
 * can bump it and acquire ownership. It's disallowed to modify a request
 * while not owning it, which prevents races when enqueueing task_work and
 * between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely((unsigned int)atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

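/* Flag the request as cancelled; the poll task_work acts on the flag. */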
static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

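/* Add the request to the ctx cancellation hash table, keyed by user_data. */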
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

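/* Prepare a poll entry: record the events of interest and hook up io_poll_wake(). */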
static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static void io_poll_remove_waitq(struct io_poll *poll)
{
	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we can.
	 * That means immediately removing the request from its waitqueue and
	 * preventing all further accesses to the waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon as req->head is
	 * NULL'ed out, the request can be completed and freed, since
	 * io_poll_remove_entry() will no longer need to take the waitqueue
	 * lock.
	 */
	smp_store_release(&poll->head, NULL);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		io_poll_remove_waitq(poll);
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,
};

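/*
 * Stash the wakeup mask in cqe.res and queue the poll task_work that will
 * process it. Lazy wake is used unless the request opted out of it.
 */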
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	unsigned flags = 0;

	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);

	if (!(req->flags & REQ_F_POLL_NO_LAZY))
		flags = IOU_F_TWQ_LAZY_WAKE;
	__io_req_task_work_add(req, flags);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, which means either a spurious wakeup or a multishot CQE that has
 * already been served. IOU_POLL_DONE when it's done with the request, then
 * the mask is stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates
 * to remove the multishot poll and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
{
	int v;

	if (unlikely(tw.cancel))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
			v &= IO_POLL_REF_MASK;
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			__poll_t events = req->apoll_events;
			struct poll_table_struct pt = { ._key = events };

			req->cqe.res = vfs_poll(req->file, &pt) & events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret;

			/* multiple refs and HUP, ensure we loop once more */
			if ((req->cqe.res & (POLLHUP | POLLRDHUP)) && v != 1)
				v--;

			ret = io_poll_issue(req, tw);
			if (ret == IOU_COMPLETE)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			else if (ret == IOU_REQUEUE)
				return IOU_POLL_REQUEUE;
			if (ret != IOU_RETRY && ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);

	io_napi_add(req);
	return IOU_POLL_NO_ACTION;
}

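/*
 * Poll task_work handler: processes the wakeup result via
 * io_poll_check_events() and completes, re-submits or re-queues the request
 * accordingly.
 */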
void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;
	int ret;

	ret = io_poll_check_events(req, tw);
	if (ret == IOU_POLL_NO_ACTION) {
		return;
	} else if (ret == IOU_POLL_REQUEUE) {
		__io_poll_execute(req, 0);
		return;
	}
	io_poll_remove_entries(req);
	/* task_work always has ->uring_lock held */
	hash_del(&req->hash_node);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(tw_req, tw);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(tw_req, tw);
	} else {
		io_tw_lock(req->ctx, tw);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(tw_req, tw);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(tw_req, tw);
		else
			io_req_defer_failed(req, ret);
	}
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);
	io_poll_remove_waitq(poll);
	return 1;
}

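/*
 * Waitqueue wakeup callback: filters uninteresting events, then tries to take
 * ownership of the request and, on success, queues the poll task_work with
 * the triggered mask.
 */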
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE) {
			poll->events |= EPOLLONESHOT;
			req->apoll_events |= EPOLLONESHOT;
		}

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			io_poll_remove_waitq(poll);
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

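/*
 * poll_table queueing callback: add a wait queue entry for the request on the
 * head handed to us by vfs_poll(), allocating a second io_poll entry if the
 * file uses more than one waitqueue.
 */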
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc_obj(*poll, GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE) {
		add_wait_queue_exclusive(head, &poll->wait);
	} else {
		add_wait_queue(head, &poll->wait);
	}
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_ring_submit_lock(ctx, issue_flags);
	io_poll_req_insert(req);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/*
	 * Exclusive waits may only wake a limited amount of entries
	 * rather than all of them, this may interfere with lazy
	 * wake if someone does wait(events > 1). Ensure we don't do
	 * lazy wake for those, as we need to process each one as they
	 * come in.
	 */
	if (poll->events & EPOLLEXCLUSIVE)
		req->flags |= REQ_F_POLL_NO_LAZY;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req, issue_flags);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req, issue_flags);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}
	io_napi_add(req);

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

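/* Queueing callback for internally armed (async) poll; entries live in req->apoll. */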
static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll trigger is repeatedly followed
 * by the subsequent issue failing. Rather than fail such requests
 * immediately, allow a certain amount of retries before we give up. Given
 * that this condition should _rarely_ trigger even once, we should be fine
 * with a larger value.
 */
#define APOLL_MAX_RETRY	128

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else {
		if (!(issue_flags & IO_URING_F_UNLOCKED))
			apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
		else
			apoll = kmalloc_obj(*apoll, GFP_ATOMIC);
		if (!apoll)
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

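/*
 * Arm async poll for a request. Returns IO_APOLL_OK when the request has been
 * handed over to poll, IO_APOLL_READY when the mask triggered inline and the
 * issue can be retried right away, or IO_APOLL_ABORTED when poll can't be
 * armed.
 */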
int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
{
	struct async_poll *apoll;
	struct io_poll_table ipt;
	int ret;

	mask |= EPOLLET;
	if (!io_file_can_poll(req))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

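/*
 * Derive the poll mask from the opcode definition (pollin/pollout/exclusive)
 * and arm async poll for it.
 */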
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	__poll_t mask = POLLPRI | POLLERR;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!io_file_can_poll(req))
		return IO_APOLL_ABORTED;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	return io_arm_apoll(req, issue_flags, mask);
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			       bool cancel_all)
{
	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	lockdep_assert_held(&ctx->uring_lock);

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tctx, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
	}
	return found;
}

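/* Look up a hashed poll request by user_data for cancellation or update. */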
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
	struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];

	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (io_cancel_match_sequence(req, cd->seq))
				continue;
		}
		return req;
	}
	return NULL;
}

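/* Walk all hash buckets to find a poll request matching the cancel criteria. */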
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd)
{
	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
	struct io_kiocb *req;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd))
				return req;
		}
	}
	return NULL;
}

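/*
 * Take ownership of a found poll request and detach it from its waitqueues
 * and the cancellation hash, so the caller can update or complete it.
 */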
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd);
	else
		req = io_poll_find(ctx, false, cd);

	if (req) {
		io_poll_cancel_req(req);
		return 0;
	}
	return -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

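/*
 * Convert the user-supplied poll32_events (and the ADD_MULTI/ADD_LEVEL flags)
 * into the internal EPOLL* event mask.
 */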
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

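/*
 * Issue IORING_OP_POLL_ADD: arm the poll handler and either complete inline
 * (the mask is already available) or hand the request over to the waitqueue.
 */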
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_COMPLETE;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

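/*
 * Issue IORING_OP_POLL_REMOVE: find the target poll request, disarm it, then
 * either update its events/user_data and re-arm it, or cancel it.
 */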
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_kiocb *preq;
	int ret2, ret = 0;

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd);
	ret2 = io_poll_disarm(preq);
	if (ret2) {
		ret = ret2;
		goto out;
	}
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event bits, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (ret2 == IOU_ISSUE_SKIP_COMPLETE)
			goto out;
		/* request completed as part of the update, complete it */
		else if (ret2 == IOU_COMPLETE)
			goto complete;
	}

	io_req_set_res(preq, -ECANCELED, 0);
complete:
	if (preq->cqe.res < 0)
		req_set_fail(preq);
	preq->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(preq);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}