xref: /linux/io_uring/poll.c (revision 91928e0d3cc29789f4483bffee5f36218f23942b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/poll.h>
9 #include <linux/hashtable.h>
10 #include <linux/io_uring.h>
11 
12 #include <trace/events/io_uring.h>
13 
14 #include <uapi/linux/io_uring.h>
15 
16 #include "io_uring.h"
17 #include "alloc_cache.h"
18 #include "refs.h"
19 #include "napi.h"
20 #include "opdef.h"
21 #include "kbuf.h"
22 #include "poll.h"
23 #include "cancel.h"
24 
25 struct io_poll_update {
26 	struct file			*file;
27 	u64				old_user_data;
28 	u64				new_user_data;
29 	__poll_t			events;
30 	bool				update_events;
31 	bool				update_user_data;
32 };
33 
34 struct io_poll_table {
35 	struct poll_table_struct pt;
36 	struct io_kiocb *req;
37 	int nr_entries;
38 	int error;
39 	bool owning;
40 	/* output value, set only if arm poll returns >0 */
41 	__poll_t result_mask;
42 };
43 
44 #define IO_POLL_CANCEL_FLAG	BIT(31)
45 #define IO_POLL_RETRY_FLAG	BIT(30)
46 #define IO_POLL_REF_MASK	GENMASK(29, 0)
47 
48 /*
49  * We usually have 1-2 refs taken; 128 is more than enough, and we want to
50  * maximise the margin between it and the point where the count overflows.
51  */
52 #define IO_POLL_REF_BIAS	128
53 
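/*
 * Illustrative sketch, not part of the kernel source: decoding the
 * poll_refs word under the layout above. Ownership belongs to whichever
 * path bumps the refcount from zero; everybody else merely elevates the
 * count or sets flag bits.
 *
 *	int v = atomic_read(&req->poll_refs);
 *	bool cancelled = v & IO_POLL_CANCEL_FLAG;	// bit 31
 *	bool retry = v & IO_POLL_RETRY_FLAG;		// bit 30
 *	int refs = v & IO_POLL_REF_MASK;		// bits 29:0
 */
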
54 #define IO_WQE_F_DOUBLE		1
55 
56 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
57 			void *key);
58 
59 static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
60 {
61 	unsigned long priv = (unsigned long)wqe->private;
62 
63 	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
64 }
65 
66 static inline bool wqe_is_double(struct wait_queue_entry *wqe)
67 {
68 	unsigned long priv = (unsigned long)wqe->private;
69 
70 	return priv & IO_WQE_F_DOUBLE;
71 }
72 
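/*
 * Illustrative sketch, not part of the kernel source: wqe->private holds
 * a tagged pointer. An io_kiocb is pointer-aligned, so bit 0 is free to
 * mark wait entries that belong to the second (double) poll entry:
 *
 *	wqe->private = (void *)((unsigned long)req | IO_WQE_F_DOUBLE);
 *	...
 *	req = wqe_to_req(wqe);		// strips the tag bit
 *	if (wqe_is_double(wqe))		// reads the tag bit
 *		...
 */
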
73 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
74 {
75 	int v;
76 
77 	/*
78 	 * poll_refs are already elevated and we don't have much hope for
79 	 * grabbing the ownership. Instead of incrementing, set a retry flag
80 	 * to notify the loop that there might have been some change.
81 	 */
82 	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
83 	if (v & IO_POLL_REF_MASK)
84 		return false;
85 	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
86 }
87 
88 /*
89  * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
90  * bump it and acquire ownership. It's disallowed to modify requests while not
91  * owning them, which prevents races when enqueueing task_work and between
92  * arming poll and wakeups.
93  */
94 static inline bool io_poll_get_ownership(struct io_kiocb *req)
95 {
96 	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
97 		return io_poll_get_ownership_slowpath(req);
98 	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
99 }
100 
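/*
 * Illustrative sketch, not part of the kernel source: the typical
 * ownership lifecycle. A wakeup takes ownership via the 0 -> 1 ref
 * transition and queues task_work; the handler later drops all refs
 * accumulated in the meantime and reruns its loop if any were added
 * (see io_poll_check_events() below):
 *
 *	if (io_poll_get_ownership(req))
 *		__io_poll_execute(req, mask);	// we own req, queue tw
 *	...
 *	// in tw: drop refs, loop again if more wakeups came in
 *	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
 */
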
101 static void io_poll_mark_cancelled(struct io_kiocb *req)
102 {
103 	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
104 }
105 
106 static struct io_poll *io_poll_get_double(struct io_kiocb *req)
107 {
108 	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
109 	if (req->opcode == IORING_OP_POLL_ADD)
110 		return req->async_data;
111 	return req->apoll->double_poll;
112 }
113 
114 static struct io_poll *io_poll_get_single(struct io_kiocb *req)
115 {
116 	if (req->opcode == IORING_OP_POLL_ADD)
117 		return io_kiocb_to_cmd(req, struct io_poll);
118 	return &req->apoll->poll;
119 }
120 
121 static void io_poll_req_insert(struct io_kiocb *req)
122 {
123 	struct io_hash_table *table = &req->ctx->cancel_table;
124 	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
125 
126 	lockdep_assert_held(&req->ctx->uring_lock);
127 
128 	hlist_add_head(&req->hash_node, &table->hbs[index].list);
129 }
130 
131 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
132 {
133 	poll->head = NULL;
134 #define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
135 	/* mask in events that we always want/need */
136 	poll->events = events | IO_POLL_UNMASK;
137 	INIT_LIST_HEAD(&poll->wait.entry);
138 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
139 }
140 
141 static inline void io_poll_remove_entry(struct io_poll *poll)
142 {
143 	struct wait_queue_head *head = smp_load_acquire(&poll->head);
144 
145 	if (head) {
146 		spin_lock_irq(&head->lock);
147 		list_del_init(&poll->wait.entry);
148 		poll->head = NULL;
149 		spin_unlock_irq(&head->lock);
150 	}
151 }
152 
153 static void io_poll_remove_entries(struct io_kiocb *req)
154 {
155 	/*
156 	 * Nothing to do if neither of those flags are set. Avoid dipping
157 	 * into the poll/apoll/double cachelines if we can.
158 	 */
159 	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
160 		return;
161 
162 	/*
163 	 * While we hold the waitqueue lock and the waitqueue is nonempty,
164 	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
165 	 * lock in the first place can race with the waitqueue being freed.
166 	 *
167 	 * We solve this as eventpoll does: by taking advantage of the fact that
168 	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
169 	 * we enter rcu_read_lock() and see that the pointer to the queue is
170 	 * non-NULL, we can then lock it without the memory being freed out from
171 	 * under us.
172 	 *
173 	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
174 	 * case the caller deletes the entry from the queue, leaving it empty.
175 	 * In that case, only RCU prevents the queue memory from being freed.
176 	 */
177 	rcu_read_lock();
178 	if (req->flags & REQ_F_SINGLE_POLL)
179 		io_poll_remove_entry(io_poll_get_single(req));
180 	if (req->flags & REQ_F_DOUBLE_POLL)
181 		io_poll_remove_entry(io_poll_get_double(req));
182 	rcu_read_unlock();
183 }
184 
185 enum {
186 	IOU_POLL_DONE = 0,
187 	IOU_POLL_NO_ACTION = 1,
188 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
189 	IOU_POLL_REISSUE = 3,
190 	IOU_POLL_REQUEUE = 4,
191 };
192 
193 static void __io_poll_execute(struct io_kiocb *req, int mask)
194 {
195 	unsigned flags = 0;
196 
197 	io_req_set_res(req, mask, 0);
198 	req->io_task_work.func = io_poll_task_func;
199 
200 	trace_io_uring_task_add(req, mask);
201 
202 	if (!(req->flags & REQ_F_POLL_NO_LAZY))
203 		flags = IOU_F_TWQ_LAZY_WAKE;
204 	__io_req_task_work_add(req, flags);
205 }
206 
207 static inline void io_poll_execute(struct io_kiocb *req, int res)
208 {
209 	if (io_poll_get_ownership(req))
210 		__io_poll_execute(req, res);
211 }
212 
213 /*
214  * All poll tw should go through this. Checks for poll events, manages
215  * references, does rewait, etc.
216  *
217  * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
218  * required, i.e. on a spurious wakeup or when a multishot CQE was posted.
219  * IOU_POLL_DONE when it's done with the request, then the mask is stored in
220  * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
221  * poll and that the result is stored in req->cqe.
222  */
223 static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
224 {
225 	int v;
226 
227 	if (unlikely(io_should_terminate_tw()))
228 		return -ECANCELED;
229 
230 	do {
231 		v = atomic_read(&req->poll_refs);
232 
233 		if (unlikely(v != 1)) {
234 			/* tw should be the owner and so have some refs */
235 			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
236 				return IOU_POLL_NO_ACTION;
237 			if (v & IO_POLL_CANCEL_FLAG)
238 				return -ECANCELED;
239 			/*
240 			 * cqe.res contains only the events of the first wake up;
241 			 * all others would be lost. Redo vfs_poll() to get
242 			 * up to date state.
243 			 */
244 			if ((v & IO_POLL_REF_MASK) != 1)
245 				req->cqe.res = 0;
246 
247 			if (v & IO_POLL_RETRY_FLAG) {
248 				req->cqe.res = 0;
249 				/*
250 				 * We won't find new events that came in between
251 				 * vfs_poll and the ref put unless we clear the
252 				 * flag in advance.
253 				 */
254 				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
255 				v &= ~IO_POLL_RETRY_FLAG;
256 			}
257 		}
258 
259 		/* the mask was stashed in __io_poll_execute */
260 		if (!req->cqe.res) {
261 			struct poll_table_struct pt = { ._key = req->apoll_events };
262 			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
263 			/*
264 			 * We got woken with a mask, but someone else got to
265 			 * it first. The above vfs_poll() doesn't add us back
266 			 * to the waitqueue, so if we get nothing back, we
267 			 * should be safe and attempt a reissue.
268 			 */
269 			if (unlikely(!req->cqe.res)) {
270 				/* Multishot armed need not reissue */
271 				if (!(req->apoll_events & EPOLLONESHOT))
272 					continue;
273 				return IOU_POLL_REISSUE;
274 			}
275 		}
276 		if (unlikely(req->cqe.res & EPOLLERR))
277 			req_set_fail(req);
278 		if (req->apoll_events & EPOLLONESHOT)
279 			return IOU_POLL_DONE;
280 
281 		/* multishot, just fill a CQE and proceed */
282 		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
283 			__poll_t mask = mangle_poll(req->cqe.res &
284 						    req->apoll_events);
285 
286 			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
287 				io_req_set_res(req, mask, 0);
288 				return IOU_POLL_REMOVE_POLL_USE_RES;
289 			}
290 		} else {
291 			int ret = io_poll_issue(req, tw);
292 			if (ret == IOU_STOP_MULTISHOT)
293 				return IOU_POLL_REMOVE_POLL_USE_RES;
294 			else if (ret == IOU_REQUEUE)
295 				return IOU_POLL_REQUEUE;
296 			if (ret < 0)
297 				return ret;
298 		}
299 
300 		/* force the next iteration to vfs_poll() */
301 		req->cqe.res = 0;
302 
303 		/*
304 		 * Release all references, retry if someone tried to restart
305 		 * task_work while we were executing it.
306 		 */
307 		v &= IO_POLL_REF_MASK;
308 	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
309 
310 	io_napi_add(req);
311 	return IOU_POLL_NO_ACTION;
312 }
313 
314 void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
315 {
316 	int ret;
317 
318 	ret = io_poll_check_events(req, tw);
319 	if (ret == IOU_POLL_NO_ACTION) {
320 		io_kbuf_recycle(req, 0);
321 		return;
322 	} else if (ret == IOU_POLL_REQUEUE) {
323 		io_kbuf_recycle(req, 0);
324 		__io_poll_execute(req, 0);
325 		return;
326 	}
327 	io_poll_remove_entries(req);
328 	/* task_work always has ->uring_lock held */
329 	hash_del(&req->hash_node);
330 
331 	if (req->opcode == IORING_OP_POLL_ADD) {
332 		if (ret == IOU_POLL_DONE) {
333 			struct io_poll *poll;
334 
335 			poll = io_kiocb_to_cmd(req, struct io_poll);
336 			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
337 		} else if (ret == IOU_POLL_REISSUE) {
338 			io_req_task_submit(req, tw);
339 			return;
340 		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
341 			req->cqe.res = ret;
342 			req_set_fail(req);
343 		}
344 
345 		io_req_set_res(req, req->cqe.res, 0);
346 		io_req_task_complete(req, tw);
347 	} else {
348 		io_tw_lock(req->ctx, tw);
349 
350 		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
351 			io_req_task_complete(req, tw);
352 		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
353 			io_req_task_submit(req, tw);
354 		else
355 			io_req_defer_failed(req, ret);
356 	}
357 }
358 
359 static void io_poll_cancel_req(struct io_kiocb *req)
360 {
361 	io_poll_mark_cancelled(req);
362 	/* kick tw, which should complete the request */
363 	io_poll_execute(req, 0);
364 }
365 
366 #define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)
367 
368 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
369 {
370 	io_poll_mark_cancelled(req);
371 	/* we have to kick tw in case it's not already */
372 	io_poll_execute(req, 0);
373 
374 	/*
375 	 * If the waitqueue is being freed early but someone already
376 	 * holds ownership over it, we have to tear down the request as
377 	 * best we can. That means immediately removing the request from
378 	 * its waitqueue and preventing all further accesses to the
379 	 * waitqueue via the request.
380 	 */
381 	list_del_init(&poll->wait.entry);
382 
383 	/*
384 	 * Careful: this *must* be the last step, since as soon
385 	 * as req->head is NULL'ed out, the request can be
386 	 * completed and freed, since aio_poll_complete_work()
387 	 * will no longer need to take the waitqueue lock.
388 	 */
389 	smp_store_release(&poll->head, NULL);
390 	return 1;
391 }
392 
393 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
394 			void *key)
395 {
396 	struct io_kiocb *req = wqe_to_req(wait);
397 	struct io_poll *poll = container_of(wait, struct io_poll, wait);
398 	__poll_t mask = key_to_poll(key);
399 
400 	if (unlikely(mask & POLLFREE))
401 		return io_pollfree_wake(req, poll);
402 
403 	/* for instances that support it check for an event match first */
404 	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
405 		return 0;
406 
407 	if (io_poll_get_ownership(req)) {
408 		/*
409 		 * If we trigger a multishot poll off our own wakeup path,
410 		 * disable multishot as there is a circular dependency between
411 		 * CQ posting and triggering the event.
412 		 */
413 		if (mask & EPOLL_URING_WAKE)
414 			poll->events |= EPOLLONESHOT;
415 
416 		/* optional, saves extra locking for removal in tw handler */
417 		if (mask && poll->events & EPOLLONESHOT) {
418 			list_del_init(&poll->wait.entry);
419 			poll->head = NULL;
420 			if (wqe_is_double(wait))
421 				req->flags &= ~REQ_F_DOUBLE_POLL;
422 			else
423 				req->flags &= ~REQ_F_SINGLE_POLL;
424 		}
425 		__io_poll_execute(req, mask);
426 	}
427 	return 1;
428 }
429 
430 /* fails only when polling is already being completed by the first entry */
431 static bool io_poll_double_prepare(struct io_kiocb *req)
432 {
433 	struct wait_queue_head *head;
434 	struct io_poll *poll = io_poll_get_single(req);
435 
436 	/* head is RCU protected, see io_poll_remove_entries() comments */
437 	rcu_read_lock();
438 	head = smp_load_acquire(&poll->head);
439 	/*
440 	 * poll arm might not hold ownership and so race for req->flags with
441 	 * io_poll_wake(). There is only one poll entry queued, serialise with
442 	 * it by taking its head lock. As we're still arming poll, the tw
443 	 * handler is not going to be run, so there are no races with it.
444 	 */
445 	if (head) {
446 		spin_lock_irq(&head->lock);
447 		req->flags |= REQ_F_DOUBLE_POLL;
448 		if (req->opcode == IORING_OP_POLL_ADD)
449 			req->flags |= REQ_F_ASYNC_DATA;
450 		spin_unlock_irq(&head->lock);
451 	}
452 	rcu_read_unlock();
453 	return !!head;
454 }
455 
456 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
457 			    struct wait_queue_head *head,
458 			    struct io_poll **poll_ptr)
459 {
460 	struct io_kiocb *req = pt->req;
461 	unsigned long wqe_private = (unsigned long) req;
462 
463 	/*
464 	 * The file being polled uses multiple waitqueues for poll handling
465 	 * (e.g. one for read, one for write). Setup a separate io_poll
466 	 * if this happens.
467 	 */
468 	if (unlikely(pt->nr_entries)) {
469 		struct io_poll *first = poll;
470 
471 		/* double add on the same waitqueue head, ignore */
472 		if (first->head == head)
473 			return;
474 		/* already have a 2nd entry, fail a third attempt */
475 		if (*poll_ptr) {
476 			if ((*poll_ptr)->head == head)
477 				return;
478 			pt->error = -EINVAL;
479 			return;
480 		}
481 
482 		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
483 		if (!poll) {
484 			pt->error = -ENOMEM;
485 			return;
486 		}
487 
488 		/* mark as double wq entry */
489 		wqe_private |= IO_WQE_F_DOUBLE;
490 		io_init_poll_iocb(poll, first->events);
491 		if (!io_poll_double_prepare(req)) {
492 			/* the request is completing, just back off */
493 			kfree(poll);
494 			return;
495 		}
496 		*poll_ptr = poll;
497 	} else {
498 		/* fine to modify, there is no poll queued to race with us */
499 		req->flags |= REQ_F_SINGLE_POLL;
500 	}
501 
502 	pt->nr_entries++;
503 	poll->head = head;
504 	poll->wait.private = (void *) wqe_private;
505 
506 	if (poll->events & EPOLLEXCLUSIVE) {
507 		add_wait_queue_exclusive(head, &poll->wait);
508 	} else {
509 		add_wait_queue(head, &poll->wait);
510 	}
511 }
512 
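/*
 * Illustrative sketch, not part of the kernel source: why a second entry
 * can appear at all. A driver's ->poll() may register on more than one
 * waitqueue, e.g. separate read and write queues (hypothetical names):
 *
 *	static __poll_t demo_poll(struct file *file, poll_table *pt)
 *	{
 *		poll_wait(file, &dev->rd_wait, pt);	// 1st: REQ_F_SINGLE_POLL
 *		poll_wait(file, &dev->wr_wait, pt);	// 2nd: REQ_F_DOUBLE_POLL
 *		return demo_events(dev);
 *	}
 */
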
513 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
514 			       struct poll_table_struct *p)
515 {
516 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
517 	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
518 
519 	__io_queue_proc(poll, pt, head,
520 			(struct io_poll **) &pt->req->async_data);
521 }
522 
523 static bool io_poll_can_finish_inline(struct io_kiocb *req,
524 				      struct io_poll_table *pt)
525 {
526 	return pt->owning || io_poll_get_ownership(req);
527 }
528 
529 static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
530 {
531 	struct io_ring_ctx *ctx = req->ctx;
532 
533 	io_ring_submit_lock(ctx, issue_flags);
534 	io_poll_req_insert(req);
535 	io_ring_submit_unlock(ctx, issue_flags);
536 }
537 
538 /*
539  * Returns 0 when it's handed over for polling. The caller owns the request if
540  * it returns non-zero, but otherwise should not touch it. Negative values
541  * contain an error code. When the result is >0, the polling has completed
542  * inline and ipt.result_mask is set to the mask.
543  */
544 static int __io_arm_poll_handler(struct io_kiocb *req,
545 				 struct io_poll *poll,
546 				 struct io_poll_table *ipt, __poll_t mask,
547 				 unsigned issue_flags)
548 {
549 	INIT_HLIST_NODE(&req->hash_node);
550 	io_init_poll_iocb(poll, mask);
551 	poll->file = req->file;
552 	req->apoll_events = poll->events;
553 
554 	ipt->pt._key = mask;
555 	ipt->req = req;
556 	ipt->error = 0;
557 	ipt->nr_entries = 0;
558 	/*
559 	 * Polling is either completed here or via task_work, so if we're in the
560 	 * task context we're naturally serialised with tw by merit of running
561 	 * the same task. When it's io-wq, take the ownership to prevent tw
562 	 * from running. However, when we're in the task context, skip taking
563 	 * it as an optimisation.
564 	 *
565 	 * Note: even though the request won't be completed/freed, without
566 	 * ownership we still can race with io_poll_wake().
567 	 * io_poll_can_finish_inline() tries to deal with that.
568 	 */
569 	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
570 	atomic_set(&req->poll_refs, (int)ipt->owning);
571 
572 	/*
573 	 * Exclusive waits may only wake a limited number of entries
574 	 * rather than all of them; this may interfere with lazy
575 	 * wake if someone does wait(events > 1). Ensure we don't do
576 	 * lazy wake for those, as we need to process each one as they
577 	 * come in.
578 	 */
579 	if (poll->events & EPOLLEXCLUSIVE)
580 		req->flags |= REQ_F_POLL_NO_LAZY;
581 
582 	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
583 
584 	if (unlikely(ipt->error || !ipt->nr_entries)) {
585 		io_poll_remove_entries(req);
586 
587 		if (!io_poll_can_finish_inline(req, ipt)) {
588 			io_poll_mark_cancelled(req);
589 			return 0;
590 		} else if (mask && (poll->events & EPOLLET)) {
591 			ipt->result_mask = mask;
592 			return 1;
593 		}
594 		return ipt->error ?: -EINVAL;
595 	}
596 
597 	if (mask &&
598 	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
599 		if (!io_poll_can_finish_inline(req, ipt)) {
600 			io_poll_add_hash(req, issue_flags);
601 			return 0;
602 		}
603 		io_poll_remove_entries(req);
604 		ipt->result_mask = mask;
605 		/* no one else has access to the req, forget about the ref */
606 		return 1;
607 	}
608 
609 	io_poll_add_hash(req, issue_flags);
610 
611 	if (mask && (poll->events & EPOLLET) &&
612 	    io_poll_can_finish_inline(req, ipt)) {
613 		__io_poll_execute(req, mask);
614 		return 0;
615 	}
616 	io_napi_add(req);
617 
618 	if (ipt->owning) {
619 		/*
620 		 * Try to release ownership. If we see a change of state, e.g.
621 		 * poll was woken up, queue up a tw, it'll deal with it.
622 		 */
623 		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
624 			__io_poll_execute(req, 0);
625 	}
626 	return 0;
627 }
628 
629 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
630 			       struct poll_table_struct *p)
631 {
632 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
633 	struct async_poll *apoll = pt->req->apoll;
634 
635 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
636 }
637 
638 /*
639  * We can't reliably detect loops where a poll triggers repeatedly and the
640  * subsequent issue keeps failing. Rather than fail these immediately, allow
641  * a certain number of retries before we give up. Given that this condition
642  * should _rarely_ trigger even once, we should be fine with a larger value.
643  */
644 #define APOLL_MAX_RETRY		128
645 
646 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
647 					     unsigned issue_flags)
648 {
649 	struct io_ring_ctx *ctx = req->ctx;
650 	struct async_poll *apoll;
651 
652 	if (req->flags & REQ_F_POLLED) {
653 		apoll = req->apoll;
654 		kfree(apoll->double_poll);
655 	} else {
656 		if (!(issue_flags & IO_URING_F_UNLOCKED))
657 			apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
658 		else
659 			apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
660 		if (!apoll)
661 			return NULL;
662 		apoll->poll.retries = APOLL_MAX_RETRY;
663 	}
664 	apoll->double_poll = NULL;
665 	req->apoll = apoll;
666 	if (unlikely(!--apoll->poll.retries))
667 		return NULL;
668 	return apoll;
669 }
670 
671 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
672 {
673 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
674 	struct async_poll *apoll;
675 	struct io_poll_table ipt;
676 	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
677 	int ret;
678 
679 	if (!def->pollin && !def->pollout)
680 		return IO_APOLL_ABORTED;
681 	if (!io_file_can_poll(req))
682 		return IO_APOLL_ABORTED;
683 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
684 		mask |= EPOLLONESHOT;
685 
686 	if (def->pollin) {
687 		mask |= EPOLLIN | EPOLLRDNORM;
688 
689 		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
690 		if (req->flags & REQ_F_CLEAR_POLLIN)
691 			mask &= ~EPOLLIN;
692 	} else {
693 		mask |= EPOLLOUT | EPOLLWRNORM;
694 	}
695 	if (def->poll_exclusive)
696 		mask |= EPOLLEXCLUSIVE;
697 
698 	apoll = io_req_alloc_apoll(req, issue_flags);
699 	if (!apoll)
700 		return IO_APOLL_ABORTED;
701 	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
702 	req->flags |= REQ_F_POLLED;
703 	ipt.pt._qproc = io_async_queue_proc;
704 
705 	io_kbuf_recycle(req, issue_flags);
706 
707 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
708 	if (ret)
709 		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
710 	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
711 	return IO_APOLL_OK;
712 }
713 
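/*
 * Illustrative sketch, not part of the kernel source: roughly how the
 * issue path consumes the three results. IO_APOLL_OK parks the request
 * until poll retriggers it, IO_APOLL_READY means the file is already
 * ready so the issue is retried, and IO_APOLL_ABORTED falls back to
 * blocking execution in io-wq:
 *
 *	switch (io_arm_poll_handler(req, issue_flags)) {
 *	case IO_APOLL_READY:
 *		// reissue from task context
 *		break;
 *	case IO_APOLL_ABORTED:
 *		// punt to io-wq instead
 *		break;
 *	case IO_APOLL_OK:
 *		break;	// armed, nothing more to do
 *	}
 */
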
714 /*
715  * Returns true if we found and killed one or more poll requests
716  */
717 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
718 			       bool cancel_all)
719 {
720 	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
721 	struct hlist_node *tmp;
722 	struct io_kiocb *req;
723 	bool found = false;
724 	int i;
725 
726 	lockdep_assert_held(&ctx->uring_lock);
727 
728 	for (i = 0; i < nr_buckets; i++) {
729 		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
730 
731 		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
732 			if (io_match_task_safe(req, tctx, cancel_all)) {
733 				hlist_del_init(&req->hash_node);
734 				io_poll_cancel_req(req);
735 				found = true;
736 			}
737 		}
738 	}
739 	return found;
740 }
741 
742 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
743 				     struct io_cancel_data *cd)
744 {
745 	struct io_kiocb *req;
746 	u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
747 	struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];
748 
749 	hlist_for_each_entry(req, &hb->list, hash_node) {
750 		if (cd->data != req->cqe.user_data)
751 			continue;
752 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
753 			continue;
754 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
755 			if (io_cancel_match_sequence(req, cd->seq))
756 				continue;
757 		}
758 		return req;
759 	}
760 	return NULL;
761 }
762 
763 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
764 					  struct io_cancel_data *cd)
765 {
766 	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
767 	struct io_kiocb *req;
768 	int i;
769 
770 	for (i = 0; i < nr_buckets; i++) {
771 		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
772 
773 		hlist_for_each_entry(req, &hb->list, hash_node) {
774 			if (io_cancel_req_match(req, cd))
775 				return req;
776 		}
777 	}
778 	return NULL;
779 }
780 
781 static int io_poll_disarm(struct io_kiocb *req)
782 {
783 	if (!req)
784 		return -ENOENT;
785 	if (!io_poll_get_ownership(req))
786 		return -EALREADY;
787 	io_poll_remove_entries(req);
788 	hash_del(&req->hash_node);
789 	return 0;
790 }
791 
792 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
793 {
794 	struct io_kiocb *req;
795 
796 	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
797 			 IORING_ASYNC_CANCEL_ANY))
798 		req = io_poll_file_find(ctx, cd);
799 	else
800 		req = io_poll_find(ctx, false, cd);
801 
802 	if (req) {
803 		io_poll_cancel_req(req);
804 		return 0;
805 	}
806 	return -ENOENT;
807 }
808 
809 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
810 		   unsigned issue_flags)
811 {
812 	int ret;
813 
814 	io_ring_submit_lock(ctx, issue_flags);
815 	ret = __io_poll_cancel(ctx, cd);
816 	io_ring_submit_unlock(ctx, issue_flags);
817 	return ret;
818 }
819 
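/*
 * Illustrative userspace sketch, not part of the kernel source: driving
 * this path with liburing's async cancel, matching by user_data or by fd:
 *
 *	io_uring_prep_cancel64(sqe, user_data, 0);
 *	// or cancel every request on a given fd:
 *	io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
 */
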
820 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
821 				     unsigned int flags)
822 {
823 	u32 events;
824 
825 	events = READ_ONCE(sqe->poll32_events);
826 #ifdef __BIG_ENDIAN
827 	events = swahw32(events);
828 #endif
829 	if (!(flags & IORING_POLL_ADD_MULTI))
830 		events |= EPOLLONESHOT;
831 	if (!(flags & IORING_POLL_ADD_LEVEL))
832 		events |= EPOLLET;
833 	return demangle_poll(events) |
834 		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
835 }
836 
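/*
 * Illustrative userspace sketch, not part of the kernel source: liburing
 * fills the fields parsed above, putting the poll mask in
 * sqe->poll32_events and IORING_POLL_ADD_MULTI in sqe->len:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	// or a classic one-shot poll:
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 */
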
837 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
838 {
839 	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
840 	u32 flags;
841 
842 	if (sqe->buf_index || sqe->splice_fd_in)
843 		return -EINVAL;
844 	flags = READ_ONCE(sqe->len);
845 	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
846 		      IORING_POLL_ADD_MULTI))
847 		return -EINVAL;
848 	/* meaningless without update */
849 	if (flags == IORING_POLL_ADD_MULTI)
850 		return -EINVAL;
851 
852 	upd->old_user_data = READ_ONCE(sqe->addr);
853 	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
854 	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
855 
856 	upd->new_user_data = READ_ONCE(sqe->off);
857 	if (!upd->update_user_data && upd->new_user_data)
858 		return -EINVAL;
859 	if (upd->update_events)
860 		upd->events = io_poll_parse_events(sqe, flags);
861 	else if (sqe->poll32_events)
862 		return -EINVAL;
863 
864 	return 0;
865 }
866 
867 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
868 {
869 	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
870 	u32 flags;
871 
872 	if (sqe->buf_index || sqe->off || sqe->addr)
873 		return -EINVAL;
874 	flags = READ_ONCE(sqe->len);
875 	if (flags & ~IORING_POLL_ADD_MULTI)
876 		return -EINVAL;
877 	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
878 		return -EINVAL;
879 
880 	poll->events = io_poll_parse_events(sqe, flags);
881 	return 0;
882 }
883 
884 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
885 {
886 	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
887 	struct io_poll_table ipt;
888 	int ret;
889 
890 	ipt.pt._qproc = io_poll_queue_proc;
891 
892 	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
893 	if (ret > 0) {
894 		io_req_set_res(req, ipt.result_mask, 0);
895 		return IOU_OK;
896 	}
897 	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
898 }
899 
900 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
901 {
902 	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
903 	struct io_ring_ctx *ctx = req->ctx;
904 	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
905 	struct io_kiocb *preq;
906 	int ret2, ret = 0;
907 
908 	io_ring_submit_lock(ctx, issue_flags);
909 	preq = io_poll_find(ctx, true, &cd);
910 	ret2 = io_poll_disarm(preq);
911 	if (ret2) {
912 		ret = ret2;
913 		goto out;
914 	}
915 	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
916 		ret = -EFAULT;
917 		goto out;
918 	}
919 
920 	if (poll_update->update_events || poll_update->update_user_data) {
921 		/* only replace the event-mask bits, keep the behavior flags */
922 		if (poll_update->update_events) {
923 			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
924 
925 			poll->events &= ~0xffff;
926 			poll->events |= poll_update->events & 0xffff;
927 			poll->events |= IO_POLL_UNMASK;
928 		}
929 		if (poll_update->update_user_data)
930 			preq->cqe.user_data = poll_update->new_user_data;
931 
932 		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
933 		/* successfully updated, don't complete poll request */
934 		if (!ret2 || ret2 == -EIOCBQUEUED)
935 			goto out;
936 	}
937 
938 	req_set_fail(preq);
939 	io_req_set_res(preq, -ECANCELED, 0);
940 	preq->io_task_work.func = io_req_task_complete;
941 	io_req_task_work_add(preq);
942 out:
943 	io_ring_submit_unlock(ctx, issue_flags);
944 	if (ret < 0) {
945 		req_set_fail(req);
946 		return ret;
947 	}
948 	/* complete update request, we're done with it */
949 	io_req_set_res(req, ret, 0);
950 	return IOU_OK;
951 }
952
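/*
 * Illustrative userspace sketch, not part of the kernel source: issuing
 * the update handled above via liburing. old_user_data selects the armed
 * poll request; the new user_data and mask apply per the flags:
 *
 *	io_uring_prep_poll_update(sqe, old_user_data, new_user_data, POLLIN,
 *				  IORING_POLL_UPDATE_EVENTS |
 *				  IORING_POLL_UPDATE_USER_DATA);
 */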