1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/namei.h>
9 #include <linux/io_uring.h>
10 #include <linux/xattr.h>
11
12 #include <uapi/linux/io_uring.h>
13
14 #include "../fs/internal.h"
15
16 #include "io_uring.h"
17 #include "xattr.h"
18
/* Per-request state for the xattr family of opcodes. */
struct io_xattr {
	struct file			*file;
	/* name/value/flags handed to the VFS xattr helpers */
	struct kernel_xattr_ctx		ctx;
	/* user path captured via delayed_getname() at prep time (path-based variants only) */
	struct delayed_filename		filename;
};
24
io_xattr_cleanup(struct io_kiocb * req)25 void io_xattr_cleanup(struct io_kiocb *req)
26 {
27 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
28
29 dismiss_delayed_filename(&ix->filename);
30 kfree(ix->ctx.kname);
31 kvfree(ix->ctx.kvalue);
32 }
33
io_xattr_finish(struct io_kiocb * req,int ret)34 static void io_xattr_finish(struct io_kiocb *req, int ret)
35 {
36 req->flags &= ~REQ_F_NEED_CLEANUP;
37
38 io_xattr_cleanup(req);
39 io_req_set_res(req, ret, 0);
40 }
41
__io_getxattr_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)42 static int __io_getxattr_prep(struct io_kiocb *req,
43 const struct io_uring_sqe *sqe)
44 {
45 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
46 const char __user *name;
47 int ret;
48
49 INIT_DELAYED_FILENAME(&ix->filename);
50 ix->ctx.kvalue = NULL;
51 name = u64_to_user_ptr(READ_ONCE(sqe->addr));
52 ix->ctx.value = u64_to_user_ptr(READ_ONCE(sqe->addr2));
53 ix->ctx.size = READ_ONCE(sqe->len);
54 ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
55
56 if (ix->ctx.flags)
57 return -EINVAL;
58
59 ix->ctx.kname = kmalloc_obj(*ix->ctx.kname);
60 if (!ix->ctx.kname)
61 return -ENOMEM;
62
63 ret = import_xattr_name(ix->ctx.kname, name);
64 if (ret) {
65 kfree(ix->ctx.kname);
66 return ret;
67 }
68
69 req->flags |= REQ_F_NEED_CLEANUP;
70 req->flags |= REQ_F_FORCE_ASYNC;
71 return 0;
72 }
73
/* Prep for IORING_OP_FGETXATTR: fd-relative, no path to resolve. */
int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_getxattr_prep(req, sqe);
}
78
io_getxattr_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)79 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
80 {
81 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
82 const char __user *path;
83 int ret;
84
85 if (unlikely(req->flags & REQ_F_FIXED_FILE))
86 return -EBADF;
87
88 ret = __io_getxattr_prep(req, sqe);
89 if (ret)
90 return ret;
91
92 path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
93
94 return delayed_getname(&ix->filename, path);
95 }
96
io_fgetxattr(struct io_kiocb * req,unsigned int issue_flags)97 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
98 {
99 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
100 int ret;
101
102 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
103
104 ret = file_getxattr(req->file, &ix->ctx);
105 io_xattr_finish(req, ret);
106 return IOU_COMPLETE;
107 }
108
io_getxattr(struct io_kiocb * req,unsigned int issue_flags)109 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
110 {
111 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
112 CLASS(filename_complete_delayed, name)(&ix->filename);
113 int ret;
114
115 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
116
117 ret = filename_getxattr(AT_FDCWD, name, LOOKUP_FOLLOW, &ix->ctx);
118 io_xattr_finish(req, ret);
119 return IOU_COMPLETE;
120 }
121
__io_setxattr_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)122 static int __io_setxattr_prep(struct io_kiocb *req,
123 const struct io_uring_sqe *sqe)
124 {
125 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
126 const char __user *name;
127 int ret;
128
129 INIT_DELAYED_FILENAME(&ix->filename);
130 name = u64_to_user_ptr(READ_ONCE(sqe->addr));
131 ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
132 ix->ctx.kvalue = NULL;
133 ix->ctx.size = READ_ONCE(sqe->len);
134 ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
135
136 ix->ctx.kname = kmalloc_obj(*ix->ctx.kname);
137 if (!ix->ctx.kname)
138 return -ENOMEM;
139
140 ret = setxattr_copy(name, &ix->ctx);
141 if (ret) {
142 kfree(ix->ctx.kname);
143 return ret;
144 }
145
146 req->flags |= REQ_F_NEED_CLEANUP;
147 req->flags |= REQ_F_FORCE_ASYNC;
148 return 0;
149 }
150
io_setxattr_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)151 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
152 {
153 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
154 const char __user *path;
155 int ret;
156
157 if (unlikely(req->flags & REQ_F_FIXED_FILE))
158 return -EBADF;
159
160 ret = __io_setxattr_prep(req, sqe);
161 if (ret)
162 return ret;
163
164 path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
165
166 return delayed_getname(&ix->filename, path);
167 }
168
/* Prep for IORING_OP_FSETXATTR: fd-relative, no path to resolve. */
int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_setxattr_prep(req, sqe);
}
173
io_fsetxattr(struct io_kiocb * req,unsigned int issue_flags)174 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
175 {
176 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
177 int ret;
178
179 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
180
181 ret = file_setxattr(req->file, &ix->ctx);
182 io_xattr_finish(req, ret);
183 return IOU_COMPLETE;
184 }
185
io_setxattr(struct io_kiocb * req,unsigned int issue_flags)186 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
187 {
188 struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
189 CLASS(filename_complete_delayed, name)(&ix->filename);
190 int ret;
191
192 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
193
194 ret = filename_setxattr(AT_FDCWD, name, LOOKUP_FOLLOW, &ix->ctx);
195 io_xattr_finish(req, ret);
196 return IOU_COMPLETE;
197 }
198