// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
        struct file *file;
        u64 old_user_data;
        u64 new_user_data;
        __poll_t events;
        bool update_events;
        bool update_user_data;
};

struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
        int nr_entries;
        int error;
        bool owning;
        /* output value, set only if arm poll returns >0 */
        __poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG BIT(31)
#define IO_POLL_RETRY_FLAG BIT(30)
#define IO_POLL_REF_MASK GENMASK(29, 0)
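/*
 * Layout of ->poll_refs: bits 0-29 (IO_POLL_REF_MASK) hold the ownership
 * reference count, bit 30 tells the owner to re-check events because a
 * wakeup arrived while the count was saturated, and bit 31 marks the
 * request as cancelled.
 */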

/*
 * We usually have 1-2 refs taken; 128 is more than enough and we want to
 * maximise the margin between this amount and the point where the counter
 * overflows.
 */
#define IO_POLL_REF_BIAS 128

#define IO_WQE_F_DOUBLE 1
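/*
 * The owning io_kiocb is stored in wait_queue_entry::private with its low
 * bit reused as IO_WQE_F_DOUBLE, relying on the pointer being at least
 * 2-byte aligned. wqe_to_req() masks the bit off, wqe_is_double() tests it.
 */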

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key);

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
        int v;

        /*
         * poll_refs are already elevated and we don't have much hope of
         * grabbing ownership. Instead of incrementing, set a retry flag
         * to notify the loop that there might have been some change.
         */
        v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
        if (v & IO_POLL_REF_MASK)
                return false;
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Modifying a request while not
 * owning it is disallowed, which prevents races when enqueueing task_work and
 * between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
                return io_poll_get_ownership_slowpath(req);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req, struct io_poll);
        return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        lockdep_assert_held(&req->ctx->uring_lock);

        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
        poll->head = NULL;
#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                poll->head = NULL;
                spin_unlock_irq(&head->lock);
        }
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags is set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}

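/*
 * Return codes of io_poll_check_events():
 *
 * IOU_POLL_DONE                the request is done, the mask is in cqe.res
 * IOU_POLL_NO_ACTION           spurious wakeup, or a multishot CQE was
 *                              already posted; keep waiting
 * IOU_POLL_REMOVE_POLL_USE_RES remove the poll, the result is already stored
 *                              in req->cqe
 * IOU_POLL_REISSUE             reissue the request
 * IOU_POLL_REQUEUE             requeue the poll task_work
 */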
enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
        IOU_POLL_REISSUE = 3,
        IOU_POLL_REQUEUE = 4,
};

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
        unsigned flags = 0;

        io_req_set_res(req, mask, 0);
        req->io_task_work.func = io_poll_task_func;

        trace_io_uring_task_add(req, mask);

        if (!(req->flags & REQ_F_POLL_NO_LAZY))
                flags = IOU_F_TWQ_LAZY_WAKE;
        __io_req_task_work_add(req, flags);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res);
}

/*
 * All poll task_work should go through this. It checks for poll events,
 * manages references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. a spurious wakeup or a multishot CQE was already served.
 * IOU_POLL_DONE when it's done with the request, with the mask stored in
 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates that the multishot
 * poll should be removed and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
{
        int v;

        if (unlikely(io_should_terminate_tw()))
                return -ECANCELED;

        do {
                v = atomic_read(&req->poll_refs);

                if (unlikely(v != 1)) {
                        /* tw should be the owner and so have some refs */
                        if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                                return IOU_POLL_NO_ACTION;
                        if (v & IO_POLL_CANCEL_FLAG)
                                return -ECANCELED;
                        /*
                         * cqe.res contains only the events from the first
                         * wake up; all others are lost. Redo vfs_poll() to
                         * get the up to date state.
                         */
                        if ((v & IO_POLL_REF_MASK) != 1)
                                req->cqe.res = 0;

                        if (v & IO_POLL_RETRY_FLAG) {
                                req->cqe.res = 0;
                                /*
                                 * We won't find new events that came in between
                                 * vfs_poll and the ref put unless we clear the
                                 * flag in advance.
                                 */
                                atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
                                v &= ~IO_POLL_RETRY_FLAG;
                        }
                }

                /* the mask was stashed in __io_poll_execute */
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                        /*
                         * We got woken with a mask, but someone else got to
                         * it first. The above vfs_poll() doesn't add us back
                         * to the waitqueue, so if we get nothing back, we
                         * should be safe and attempt a reissue.
                         */
                        if (unlikely(!req->cqe.res)) {
                                /* Multishot armed need not reissue */
                                if (!(req->apoll_events & EPOLLONESHOT))
                                        continue;
                                return IOU_POLL_REISSUE;
                        }
                }
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;

                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);

                        if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
                                io_req_set_res(req, mask, 0);
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
                        int ret = io_poll_issue(req, tw);

                        if (ret == IOU_COMPLETE)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        else if (ret == IOU_REQUEUE)
                                return IOU_POLL_REQUEUE;
                        if (ret != IOU_RETRY && ret < 0)
                                return ret;
                }

                /* force the next iteration to vfs_poll() */
                req->cqe.res = 0;

                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
                v &= IO_POLL_REF_MASK;
        } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);

        io_napi_add(req);
        return IOU_POLL_NO_ACTION;
}

void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
{
        int ret;

        ret = io_poll_check_events(req, tw);
        if (ret == IOU_POLL_NO_ACTION) {
                io_kbuf_recycle(req, 0);
                return;
        } else if (ret == IOU_POLL_REQUEUE) {
                io_kbuf_recycle(req, 0);
                __io_poll_execute(req, 0);
                return;
        }
        io_poll_remove_entries(req);
        /* task_work always has ->uring_lock held */
        hash_del(&req->hash_node);

        if (req->opcode == IORING_OP_POLL_ADD) {
                if (ret == IOU_POLL_DONE) {
                        struct io_poll *poll;

                        poll = io_kiocb_to_cmd(req, struct io_poll);
                        req->cqe.res = mangle_poll(req->cqe.res & poll->events);
                } else if (ret == IOU_POLL_REISSUE) {
                        io_req_task_submit(req, tw);
                        return;
                } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                        req->cqe.res = ret;
                        req_set_fail(req);
                }

                io_req_set_res(req, req->cqe.res, 0);
                io_req_task_complete(req, tw);
        } else {
                io_tw_lock(req->ctx, tw);

                if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                        io_req_task_complete(req, tw);
                else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                        io_req_task_submit(req, tw);
                else
                        io_req_defer_failed(req, ret);
        }
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON (EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
        io_poll_mark_cancelled(req);
        /* we have to kick tw in case it's not already */
        io_poll_execute(req, 0);

        /*
         * If the waitqueue is being freed early but someone already
         * holds ownership over it, we have to tear down the request as
         * best we can. That means immediately removing the request from
         * its waitqueue and preventing all further accesses to the
         * waitqueue via the request.
         */
        list_del_init(&poll->wait.entry);

        /*
         * Careful: this *must* be the last step, since as soon
         * as poll->head is NULL'ed out, the request can be
         * completed and freed, since io_poll_remove_entries()
         * will no longer need to take the waitqueue lock.
         */
        smp_store_release(&poll->head, NULL);
        return 1;
}

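/*
 * Waitqueue wake callback, invoked from the wake-up path with the waitqueue
 * head lock held. Checks the event mask, then tries to take ownership and
 * queue task_work to process the wakeup.
 */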
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE))
                return io_pollfree_wake(req, poll);

        /* for instances that support it check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /*
                 * If we trigger a multishot poll off our own wakeup path,
                 * disable multishot as there is a circular dependency between
                 * CQ posting and triggering the event.
                 */
                if (mask & EPOLL_URING_WAKE)
                        poll->events |= EPOLLONESHOT;

                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask);
        }
        return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
        struct wait_queue_head *head;
        struct io_poll *poll = io_poll_get_single(req);

        /* head is RCU protected, see io_poll_remove_entries() comments */
        rcu_read_lock();
        head = smp_load_acquire(&poll->head);
        /*
         * poll arm might not hold ownership and so race for req->flags with
         * io_poll_wake(). There is only one poll entry queued, serialise with
         * it by taking its head lock. As we're still arming, the tw handler
         * is not going to be run, so there are no races with it.
         */
        if (head) {
                spin_lock_irq(&head->lock);
                req->flags |= REQ_F_DOUBLE_POLL;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
                spin_unlock_irq(&head->lock);
        }
        rcu_read_unlock();
        return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
{
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Set up a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                        return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        if ((*poll_ptr)->head == head)
                                return;
                        pt->error = -EINVAL;
                        return;
                }

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }

                /* mark as double wq entry */
                wqe_private |= IO_WQE_F_DOUBLE;
                io_init_poll_iocb(poll, first->events);
                if (!io_poll_double_prepare(req)) {
                        /* the request is completing, just back off */
                        kfree(poll);
                        return;
                }
                *poll_ptr = poll;
        } else {
                /* fine to modify, there is no poll queued to race with us */
                req->flags |= REQ_F_SINGLE_POLL;
        }

        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE) {
                add_wait_queue_exclusive(head, &poll->wait);
        } else {
                add_wait_queue(head, &poll->wait);
        }
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
                                      struct io_poll_table *pt)
{
        return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;

        io_ring_submit_lock(ctx, issue_flags);
        io_poll_req_insert(req);
        io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask,
                                 unsigned issue_flags)
{
        INIT_HLIST_NODE(&req->hash_node);
        io_init_poll_iocb(poll, mask);
        poll->file = req->file;
        req->apoll_events = poll->events;

        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
        ipt->nr_entries = 0;
        /*
         * Polling is either completed here or via task_work, so if we're in the
         * task context we're naturally serialised with tw by merit of running
         * the same task. When it's io-wq, take ownership to prevent tw
         * from running. However, when we're in the task context, skip taking
         * it as an optimisation.
         *
         * Note: even though the request won't be completed/freed, without
         * ownership we can still race with io_poll_wake().
         * io_poll_can_finish_inline() tries to deal with that.
         */
        ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
        atomic_set(&req->poll_refs, (int)ipt->owning);

        /*
         * Exclusive waits may only wake a limited number of entries
         * rather than all of them, which may interfere with lazy
         * wake if someone does wait(events > 1). Ensure we don't do
         * lazy wake for those, as we need to process each one as they
         * come in.
         */
        if (poll->events & EPOLLEXCLUSIVE)
                req->flags |= REQ_F_POLL_NO_LAZY;

        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);

                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_mark_cancelled(req);
                        return 0;
                } else if (mask && (poll->events & EPOLLET)) {
                        ipt->result_mask = mask;
                        return 1;
                }
                return ipt->error ?: -EINVAL;
        }

        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_add_hash(req, issue_flags);
                        return 0;
                }
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }

        io_poll_add_hash(req, issue_flags);

        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
                __io_poll_execute(req, mask);
                return 0;
        }
        io_napi_add(req);

        if (ipt->owning) {
                /*
                 * Try to release ownership. If we see a change of state, e.g.
                 * the poll was woken up, queue up a tw; it'll deal with it.
                 */
                if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
                        __io_poll_execute(req, 0);
        }
        return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                                struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a repeated poll trigger leads to an
 * issue attempt that subsequently fails again. Rather than failing these
 * immediately, allow a certain number of retries before we give up. Given
 * that this condition should _rarely_ trigger even once, we should be fine
 * with a larger value.
 */
#define APOLL_MAX_RETRY 128

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
                                             unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct async_poll *apoll;

        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else {
                if (!(issue_flags & IO_URING_F_UNLOCKED))
                        apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
                else
                        apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (!apoll)
                        return NULL;
                apoll->poll.retries = APOLL_MAX_RETRY;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        if (unlikely(!--apoll->poll.retries))
                return NULL;
        return apoll;
}

int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
{
        struct async_poll *apoll;
        struct io_poll_table ipt;
        int ret;

        mask |= EPOLLET;
        if (!io_file_can_poll(req))
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        apoll = io_req_alloc_apoll(req, issue_flags);
        if (!apoll)
                return IO_APOLL_ABORTED;
        req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
        if (ret)
                return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
        trace_io_uring_poll_arm(req, mask, apoll->poll.events);
        return IO_APOLL_OK;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
        __poll_t mask = POLLPRI | POLLERR;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!io_file_can_poll(req))
                return IO_APOLL_ABORTED;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;

        return io_arm_apoll(req, issue_flags, mask);
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                               bool cancel_all)
{
        unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        lockdep_assert_held(&ctx->uring_lock);

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
                        if (io_match_task_safe(req, tctx, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
        }
        return found;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd)
{
        struct io_kiocb *req;
        u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
        struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];

        hlist_for_each_entry(req, &hb->list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (io_cancel_match_sequence(req, cd->seq))
                                continue;
                }
                return req;
        }
        return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd)
{
        unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
        struct io_kiocb *req;
        int i;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

                hlist_for_each_entry(req, &hb->list, hash_node) {
                        if (io_cancel_req_match(req, cd))
                                return req;
                }
        }
        return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
        if (!req)
                return -ENOENT;
        if (!io_poll_get_ownership(req))
                return -EALREADY;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
                         IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd);
        else
                req = io_poll_find(ctx, false, cd);

        if (req) {
                io_poll_cancel_req(req);
                return 0;
        }
        return -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags)
{
        int ret;

        io_ring_submit_lock(ctx, issue_flags);
        ret = __io_poll_cancel(ctx, cd);
        io_ring_submit_unlock(ctx, issue_flags);
        return ret;
}

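/*
 * sqe->poll32_events overlays the legacy 16-bit poll_events field and is
 * stored word-reversed on big-endian, hence the swahw32(). The behaviour
 * flags (oneshot, edge-triggered) are folded into the returned mask here.
 */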
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        if (!(flags & IORING_POLL_ADD_LEVEL))
                events |= EPOLLET;
        return demangle_poll(events) |
                (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
        u32 flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;

        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        struct io_poll_table ipt;
        int ret;

        ipt.pt._qproc = io_poll_queue_proc;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
        if (ret > 0) {
                io_req_set_res(req, ipt.result_mask, 0);
                return IOU_COMPLETE;
        }
        return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
        struct io_kiocb *preq;
        int ret2, ret = 0;

        io_ring_submit_lock(ctx, issue_flags);
        preq = io_poll_find(ctx, true, &cd);
        ret2 = io_poll_disarm(preq);
        if (ret2) {
                ret = ret2;
                goto out;
        }
        if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
                ret = -EFAULT;
                goto out;
        }

        if (poll_update->update_events || poll_update->update_user_data) {
                /* only replace the low event bits, keep the behaviour flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                }
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)
                        goto out;
        }

        req_set_fail(preq);
        io_req_set_res(preq, -ECANCELED, 0);
        preq->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(preq);
out:
        io_ring_submit_unlock(ctx, issue_flags);
        if (ret < 0) {
                req_set_fail(req);
                return ret;
        }
        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
        return IOU_COMPLETE;
}
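
/*
 * Illustrative userspace sketch (not part of this file): arming the poll
 * machinery above via IORING_OP_POLL_ADD with liburing. The helpers
 * io_uring_prep_poll_add() and io_uring_prep_poll_multishot() are assumed
 * from liburing; error handling is omitted for brevity.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *
 *	// one-shot: a single CQE once fd becomes readable
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = 1;
 *
 *	// multishot: keeps posting CQEs (IORING_CQE_F_MORE set) until it is
 *	// cancelled or terminated by an error
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	sqe->user_data = 2;
 *
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 */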