// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>
#include <linux/fsnotify.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sync.h"

/* Request state shared by the fsync, sync_file_range and fallocate opcodes */
struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	sync->flags = READ_ONCE(sqe->sync_range_flags);
	/* this op always blocks, so punt it straight to an io-wq worker */
	req->flags |= REQ_F_FORCE_ASYNC;

	return 0;
}

int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* sync_file_range always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	if (sync->off < 0)
		return -EINVAL;
	sync->len = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	loff_t end = sync->off + sync->len;
	int ret;

	/* fsync always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	/* off == len == 0 means sync the entire file */
	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
			      sync->flags & IORING_FSYNC_DATASYNC);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	/* fallocate packs its arguments differently: len in ->addr, mode in ->len */
	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->addr);
	sync->mode = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* fallocate always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
	if (ret >= 0)
		fsnotify_modify(req->file);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}
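
/*
 * Illustrative userspace sketch (not part of this kernel file): a minimal
 * liburing program that exercises the IORING_OP_FSYNC path handled above by
 * io_fsync_prep()/io_fsync(). It assumes liburing is installed; the file name
 * "testfile" and the queue depth of 8 are arbitrary choices for the example.
 */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	fd = open("testfile", O_WRONLY | O_CREAT, 0644);
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Queue a datasync; the kernel forces this request onto an io-wq worker. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == 0) {
		/* cqe->res carries the vfs_fsync_range() return value (0 or -errno) */
		printf("fsync result: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}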