xref: /linux/io_uring/openclose.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/pipe_fs_i.h>
#include <linux/watch_queue.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "filetable.h"
#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct delayed_filename		filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

struct io_fixed_install {
	struct file			*file;
	unsigned int			o_flags;
};

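/*
 * Per-opcode request state for the open/openat2, close, and fixed-fd install
 * requests handled in this file (struct io_pipe follows further down). Each
 * struct is accessed through io_kiocb_to_cmd(), so it must fit in the
 * request's command area.
 */
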
static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother attempting a nonblocking open for O_TRUNC, O_CREAT, or
	 * O_TMPFILE, it will always return -EAGAIN. Note that we test for
	 * __O_TMPFILE because O_TMPFILE includes O_DIRECTORY, which isn't a
	 * flag we need to force async for.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
}

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	ret = delayed_getname(&open->filename, fname);
	if (unlikely(ret))
		return ret;
	req->flags |= REQ_F_NEED_CLEANUP;

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

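/*
 * Expose the decoded open_how (flags, mode, resolve) to the io_uring BPF
 * issue context for this request.
 */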
void io_openat_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	bctx->open.flags = open->how.flags;
	bctx->open.mode = open->how.mode;
	bctx->open.resolve = open->how.resolve;
}

int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

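/*
 * Userspace view (illustrative sketch, not part of this file): io_openat_prep()
 * above consumes the raw SQE fields directly, so a plain IORING_OP_OPENAT
 * submission looks roughly like the snippet below; liburing's
 * io_uring_prep_openat() helper fills the same fields.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	= IORING_OP_OPENAT;
 *	sqe->fd		= AT_FDCWD;			// dfd
 *	sqe->addr	= (unsigned long)"/tmp/file";	// pathname
 *	sqe->open_flags	= O_RDWR | O_CREAT;		// O_CREAT forces async, see io_openat_force_async()
 *	sqe->len	= 0600;				// mode
 *	// sqe->buf_index must stay 0; sqe->file_index optionally selects a
 *	// fixed file slot (0 means "install a normal fd").
 */
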
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

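/*
 * Issue path shared by IORING_OP_OPENAT and IORING_OP_OPENAT2. On the initial
 * nonblocking attempt (IO_URING_F_NONBLOCK) the lookup is restricted to
 * LOOKUP_CACHED/O_NONBLOCK; if that fails with -EAGAIN and the application did
 * not itself ask for RESOLVE_CACHED, the filename is stashed back and the
 * request is retried from a context where blocking is allowed.
 */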
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	CLASS(filename_complete_delayed, name)(&open->filename);
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_file_open(open->dfd, name, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN && !resolve_nonblock &&
		    (issue_flags & IO_URING_F_NONBLOCK)) {
			ret = putname_to_delayed(&open->filename,
						 no_free_ptr(name));
			if (likely(!ret))
				return -EAGAIN;
		}
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
						open->file_slot);
err:
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	dismiss_delayed_filename(&open->filename);
}

int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

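/*
 * IORING_OP_CLOSE prep/issue. The SQE carries either a regular descriptor in
 * sqe->fd or a fixed file slot in sqe->file_index, never both. Illustrative
 * userspace sketch (assuming liburing helpers, not part of this file):
 *
 *	io_uring_prep_close(sqe, fd);		// close a normal fd
 *	io_uring_prep_close_direct(sqe, slot);	// drop a fixed file slot
 */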
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	file = files_lookup_fd_locked(files, close->fd);
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = file_close_fd_locked(files, close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

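/*
 * IORING_OP_FIXED_FD_INSTALL: take a file that is only registered as a fixed
 * file in the ring and install it into the task's normal fd table via
 * receive_fd(). Illustrative raw-SQE sketch (not part of this file); the SQE
 * must reference the fixed slot through IOSQE_FIXED_FILE:
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode		= IORING_OP_FIXED_FD_INSTALL;
 *	sqe->fd			= slot;			// fixed file slot
 *	sqe->flags		= IOSQE_FIXED_FILE;
 *	sqe->install_fd_flags	= 0;			// or IORING_FIXED_FD_NO_CLOEXEC
 *
 * The CQE result is the newly installed fd (O_CLOEXEC by default).
 */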
int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_fixed_install *ifi;
	unsigned int flags;

	if (sqe->off || sqe->addr || sqe->len || sqe->buf_index ||
	    sqe->splice_fd_in || sqe->addr3)
		return -EINVAL;

	/* must be a fixed file */
	if (!(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	flags = READ_ONCE(sqe->install_fd_flags);
	if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
		return -EINVAL;

	/* ensure the task's creds are used when installing/receiving fds */
	if (req->flags & REQ_F_CREDS)
		return -EPERM;

	/* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
	ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
	ifi->o_flags = O_CLOEXEC;
	if (flags & IORING_FIXED_FD_NO_CLOEXEC)
		ifi->o_flags = 0;

	return 0;
}

int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fixed_install *ifi;
	int ret;

	ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
	ret = receive_fd(req->file, NULL, ifi->o_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

struct io_pipe {
	struct file *file;
	int __user *fds;
	int flags;
	int file_slot;
	unsigned long nofile;
};

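/*
 * IORING_OP_PIPE: create a pipe pair. The prep below decodes sqe->addr as a
 * user pointer to int fds[2] and sqe->pipe_flags as pipe2()-style flags, with
 * sqe->file_index optionally selecting fixed file slots instead of normal fds.
 * Illustrative raw-SQE sketch (not part of this file):
 *
 *	int fds[2];
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	= IORING_OP_PIPE;
 *	sqe->addr	= (unsigned long)fds;		// receives both fds
 *	sqe->pipe_flags	= O_CLOEXEC | O_NONBLOCK;	// subset accepted below
 */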
int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);

	if (sqe->fd || sqe->off || sqe->addr3)
		return -EINVAL;

	p->fds = u64_to_user_ptr(READ_ONCE(sqe->addr));
	p->flags = READ_ONCE(sqe->pipe_flags);
	if (p->flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	p->file_slot = READ_ONCE(sqe->file_index);
	p->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

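/*
 * Install both pipe ends as fixed files. With IORING_FILE_INDEX_ALLOC the ring
 * picks two free slots; with an explicit slot the read side goes into that
 * slot and the write side into the next one. The resulting slot indexes are
 * copied back to the user's fds array, and O_CLOEXEC is rejected since fixed
 * files are not part of the normal fd table.
 */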
static int io_pipe_fixed(struct io_kiocb *req, struct file **files,
			 unsigned int issue_flags)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
	struct io_ring_ctx *ctx = req->ctx;
	bool alloc_slot;
	int ret, fds[2] = { -1, -1 };
	int slot = p->file_slot;

	if (p->flags & O_CLOEXEC)
		return -EINVAL;

	alloc_slot = slot == IORING_FILE_INDEX_ALLOC;

	io_ring_submit_lock(ctx, issue_flags);

	ret = __io_fixed_fd_install(ctx, files[0], slot);
	if (ret < 0)
		goto err;
	fds[0] = alloc_slot ? ret : slot - 1;
	files[0] = NULL;

	/*
	 * If a specific slot is given, the next one is used for the
	 * write side.
	 */
	if (!alloc_slot)
		slot++;

	ret = __io_fixed_fd_install(ctx, files[1], slot);
	if (ret < 0)
		goto err;
	fds[1] = alloc_slot ? ret : slot - 1;
	files[1] = NULL;

	io_ring_submit_unlock(ctx, issue_flags);

	if (!copy_to_user(p->fds, fds, sizeof(fds)))
		return 0;

	ret = -EFAULT;
	io_ring_submit_lock(ctx, issue_flags);
err:
	if (fds[0] != -1)
		io_fixed_fd_remove(ctx, fds[0]);
	if (fds[1] != -1)
		io_fixed_fd_remove(ctx, fds[1]);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static int io_pipe_fd(struct io_kiocb *req, struct file **files)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
	int ret, fds[2] = { -1, -1 };

	ret = __get_unused_fd_flags(p->flags, p->nofile);
	if (ret < 0)
		goto err;
	fds[0] = ret;

	ret = __get_unused_fd_flags(p->flags, p->nofile);
	if (ret < 0)
		goto err;
	fds[1] = ret;

	if (!copy_to_user(p->fds, fds, sizeof(fds))) {
		fd_install(fds[0], files[0]);
		fd_install(fds[1], files[1]);
		return 0;
	}
	ret = -EFAULT;
err:
	if (fds[0] != -1)
		put_unused_fd(fds[0]);
	if (fds[1] != -1)
		put_unused_fd(fds[1]);
	return ret;
}

int io_pipe(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
	struct file *files[2];
	int ret;

	ret = create_pipe_files(files, p->flags);
	if (ret)
		return ret;

	if (p->file_slot)
		ret = io_pipe_fixed(req, files, issue_flags);
	else
		ret = io_pipe_fd(req, files);

	io_req_set_res(req, ret, 0);
	if (!ret)
		return IOU_COMPLETE;

	req_set_fail(req);
	if (files[0])
		fput(files[0]);
	if (files[1])
		fput(files[1]);
	return ret;
}
449