xref: /linux/io_uring/cancel.c (revision 7255fcc80d4b525cc10cfaaf7f485830d4ed2000)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

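/*
 * Parsed state for an IORING_OP_ASYNC_CANCEL request, filled in from the
 * SQE by io_async_cancel_prep() and consumed by io_async_cancel().
 */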
struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}

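/*
 * io-wq matching callback: a thin wrapper that lets io_wq_cancel_cb() apply
 * io_cancel_req_match() to each pending work item.
 */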
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}

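/*
 * Try to cancel matching requests on a single task's io-wq. Maps the io-wq
 * result to 0 (cancelled), -EALREADY (already running) or -ENOENT (no
 * matching request found).
 */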
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

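/*
 * Try the task's io-wq first, then fall through to poll, waitid, futex and
 * (unless matching by fd) timeout cancelation. Any result other than
 * -ENOENT from one of those steps ends the search.
 */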
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

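/*
 * Validate and copy the cancel fields out of the SQE. IORING_ASYNC_CANCEL_ANY
 * is mutually exclusive with both fd and opcode matching.
 *
 * A minimal userspace sketch of issuing this opcode, assuming a liburing
 * recent enough to provide io_uring_prep_cancel64():
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// cancel every request submitted with the given user_data
 *	io_uring_prep_cancel64(sqe, user_data, IORING_ASYNC_CANCEL_ALL);
 *	io_uring_submit(&ring);
 */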
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}

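/*
 * Core cancel loop: repeatedly cancel through the issuing task's context
 * until nothing more matches, then fall back to scanning the io-wq of every
 * task attached to the ring. Returns the number of requests cancelled when
 * ALL/ANY semantics were requested, else the first non -ENOENT result (or
 * -ENOENT if nothing matched anywhere).
 */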
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

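/*
 * Issue an IORING_OP_ASYNC_CANCEL request. For fd matching, the target file
 * is resolved here, from the fixed file table or the normal one, before
 * handing off to __io_async_cancel().
 *
 * Userspace sketch of fd-based matching, assuming liburing's
 * io_uring_prep_cancel_fd() helper (which ORs in IORING_ASYNC_CANCEL_FD
 * itself):
 *
 *	// cancel all pending requests that target this fd
 *	io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
 */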
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

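/* Initialize the lock and request list of every bucket in a hash table. */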
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

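/*
 * Synchronous cancel helper. A fixed file must be re-resolved on every pass,
 * since io_sync_cancel() drops the uring_lock while it waits and the file
 * table may have changed underneath us.
 */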
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

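/*
 * Implements IORING_REGISTER_SYNC_CANCEL: cancel matching requests and,
 * unlike the async opcode, wait (optionally with a timeout) until they have
 * actually completed before returning.
 *
 * A minimal userspace sketch, assuming a liburing recent enough to provide
 * io_uring_register_sync_cancel():
 *
 *	struct io_uring_sync_cancel_reg reg = { };
 *
 *	reg.addr = user_data;
 *	reg.timeout.tv_sec = -1;	// -1/-1 means wait forever
 *	reg.timeout.tv_nsec = -1;
 *	ret = io_uring_register_sync_cancel(&ring, &reg);
 */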
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}
354