// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

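/*
 * The request pointer is stashed in wqe->private, with IO_WQE_F_DOUBLE set in
 * the low bit when the wait entry belongs to the double poll waitqueue.
 */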
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Requests must not be modified
 * without owning them; that prevents races when enqueueing task_work and
 * between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

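/* Hash the request by its CQE user_data into the ctx cancellation table. */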
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static void io_poll_remove_waitq(struct io_poll *poll)
{
	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we can.
	 * That means immediately removing the request from its waitqueue and
	 * preventing all further accesses to the waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon as req->head is
	 * NULL'ed out, the request can be completed and freed, since
	 * io_poll_remove_entry() will no longer need to take the waitqueue
	 * lock.
	 */
	smp_store_release(&poll->head, NULL);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		io_poll_remove_waitq(poll);
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,
};

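/*
 * Stash the wake-up mask in cqe.res and punt the rest of the processing to
 * task_work; the lazy-wake hint is used unless the request opted out of it.
 */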
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	unsigned flags = 0;

	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);

	if (!(req->flags & REQ_F_POLL_NO_LAZY))
		flags = IOU_F_TWQ_LAZY_WAKE;
	__io_req_task_work_add(req, flags);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, which means either a spurious wakeup or a multishot CQE was
 * served. IOU_POLL_DONE when it's done with the request, then the mask is
 * stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove
 * multishot poll and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
{
	int v;

	if (unlikely(tw.cancel))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, tw);

			if (ret == IOU_COMPLETE)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			else if (ret == IOU_REQUEUE)
				return IOU_POLL_REQUEUE;
			if (ret != IOU_RETRY && ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
		v &= IO_POLL_REF_MASK;
	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);

	io_napi_add(req);
	return IOU_POLL_NO_ACTION;
}

void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;
	int ret;

	ret = io_poll_check_events(req, tw);
	if (ret == IOU_POLL_NO_ACTION) {
		return;
	} else if (ret == IOU_POLL_REQUEUE) {
		__io_poll_execute(req, 0);
		return;
	}
	io_poll_remove_entries(req);
	/* task_work always has ->uring_lock held */
	hash_del(&req->hash_node);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(tw_req, tw);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(tw_req, tw);
	} else {
		io_tw_lock(req->ctx, tw);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(tw_req, tw);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(tw_req, tw);
		else
			io_req_defer_failed(req, ret);
	}
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

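/*
 * The waitqueue is being torn down (POLLFREE): cancel the request, kick
 * task_work to complete it, and detach from the queue before it goes away.
 */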
static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);
	io_poll_remove_waitq(poll);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			io_poll_remove_waitq(poll);
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE) {
		add_wait_queue_exclusive(head, &poll->wait);
	} else {
		add_wait_queue(head, &poll->wait);
	}
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

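/*
 * Inline completion is only safe while owning the request: either ownership
 * was taken up front when arming from io-wq, or we try to grab it here.
 */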
static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_ring_submit_lock(ctx, issue_flags);
	io_poll_req_insert(req);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/*
	 * Exclusive waits may only wake a limited amount of entries
	 * rather than all of them, this may interfere with lazy
	 * wake if someone does wait(events > 1). Ensure we don't do
	 * lazy wake for those, as we need to process each one as they
	 * come in.
	 */
	if (poll->events & EPOLLEXCLUSIVE)
		req->flags |= REQ_F_POLL_NO_LAZY;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req, issue_flags);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req, issue_flags);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}
	io_napi_add(req);

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll trigger is repeatedly followed
 * by a failing issue attempt. Rather than fail these immediately, allow a
 * certain number of retries before we give up. Given that this condition
 * should _rarely_ trigger even once, we should be fine with a larger value.
 */
#define APOLL_MAX_RETRY		128

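/*
 * Reuse the apoll from a previous arming of this request if there is one,
 * otherwise allocate from the ctx cache or the slab. Returns NULL on
 * allocation failure or once the retry budget is exhausted.
 */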
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else {
		if (!(issue_flags & IO_URING_F_UNLOCKED))
			apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
		else
			apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (!apoll)
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
{
	struct async_poll *apoll;
	struct io_poll_table ipt;
	int ret;

	mask |= EPOLLET;
	if (!io_file_can_poll(req))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

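/*
 * Build the poll mask from the opcode definition (pollin/pollout and the
 * exclusive-wait hint) and hand the request off to io_arm_apoll().
 */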
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	__poll_t mask = POLLPRI | POLLERR;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!io_file_can_poll(req))
		return IO_APOLL_ABORTED;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	return io_arm_apoll(req, issue_flags, mask);
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			       bool cancel_all)
{
	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	lockdep_assert_held(&ctx->uring_lock);

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tctx, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
	}
	return found;
}

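/* Look up a pending poll request in the cancel hash by its user_data. */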
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
	struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];

	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (io_cancel_match_sequence(req, cd->seq))
				continue;
		}
		return req;
	}
	return NULL;
}

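/* Scan every hash bucket for a request matching the cancellation criteria. */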
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd)
{
	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
	struct io_kiocb *req;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd))
				return req;
		}
	}
	return NULL;
}

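/*
 * Take ownership of a found poll request and unhook it from its waitqueues
 * and the cancellation hash, leaving completion to the caller.
 */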
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd);
	else
		req = io_poll_find(ctx, false, cd);

	if (req) {
		io_poll_cancel_req(req);
		return 0;
	}
	return -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

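/*
 * Translate the user-supplied poll mask: fix up endianness, apply the oneshot
 * and edge-triggered defaults, and convert to kernel __poll_t bits.
 */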
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_COMPLETE;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_kiocb *preq;
	int ret2, ret = 0;

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd);
	ret2 = io_poll_disarm(preq);
	if (ret2) {
		ret = ret2;
		goto out;
	}
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask bits, keep behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (ret2 == IOU_ISSUE_SKIP_COMPLETE)
			goto out;
		/* request completed as part of the update, complete it */
		else if (ret2 == IOU_COMPLETE)
			goto complete;
	}

	io_req_set_res(preq, -ECANCELED, 0);
complete:
	if (preq->cqe.res < 0)
		req_set_fail(preq);
	preq->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(preq);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}