xref: /linux/fs/backing-file.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Common helpers for stackable filesystems and backing files.
4  *
5  * Forked from fs/overlayfs/file.c.
6  *
7  * Copyright (C) 2017 Red Hat, Inc.
8  * Copyright (C) 2023 CTERA Networks.
9  */
10 
11 #include <linux/fs.h>
12 #include <linux/backing-file.h>
13 #include <linux/splice.h>
14 #include <linux/mm.h>
15 
16 #include "internal.h"
17 
/**
 * backing_file_open - open a backing file for kernel internal use
 * @user_path:	path that the user requested to open
 * @flags:	open flags
 * @real_path:	path of the backing file
 * @cred:	credentials for open
 *
 * Open a backing file for a stackable filesystem (e.g., overlayfs).
 * @user_path may be on the stackable filesystem and @real_path on the
 * underlying filesystem.  In this case, we want to be able to return the
 * @user_path of the stackable filesystem. This is done by embedding the
 * returned file into a container structure that also stores the stacked
 * file's path, which can be retrieved using backing_file_user_path().
 */
struct file *backing_file_open(const struct path *user_path, int flags,
			       const struct path *real_path,
			       const struct cred *cred)
{
	struct file *file;
	int err;

	file = alloc_empty_backing_file(flags, cred);
	if (IS_ERR(file))
		return file;

	/* Stash the user facing path; the reference is dropped on fput(). */
	path_get(user_path);
	backing_file_set_user_path(file, user_path);

	err = vfs_open(real_path, file);
	if (!err)
		return file;

	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(backing_file_open);
54 
55 struct file *backing_tmpfile_open(const struct path *user_path, int flags,
56 				  const struct path *real_parentpath,
57 				  umode_t mode, const struct cred *cred)
58 {
59 	struct mnt_idmap *real_idmap = mnt_idmap(real_parentpath->mnt);
60 	struct file *f;
61 	int error;
62 
63 	f = alloc_empty_backing_file(flags, cred);
64 	if (IS_ERR(f))
65 		return f;
66 
67 	path_get(user_path);
68 	backing_file_set_user_path(f, user_path);
69 	error = vfs_tmpfile(real_idmap, real_parentpath, f, mode);
70 	if (error) {
71 		fput(f);
72 		f = ERR_PTR(error);
73 	}
74 	return f;
75 }
76 EXPORT_SYMBOL(backing_tmpfile_open);
77 
/*
 * Per-request context for an async read/write submitted to the backing
 * file on behalf of an original (stacked) kiocb.
 */
struct backing_aio {
	struct kiocb iocb;		/* kiocb cloned from orig_iocb, issued on backing file */
	refcount_t ref;			/* 2 at submission: submitter + completion path */
	struct kiocb *orig_iocb;	/* the stacked fs kiocb to complete */
	/* used for aio completion */
	void (*end_write)(struct kiocb *iocb, ssize_t);
	struct work_struct work;	/* deferred completion (writes only) */
	long res;			/* result carried into the work item */
};
87 
/* Slab cache for struct backing_aio; created in backing_aio_init(). */
static struct kmem_cache *backing_aio_cachep;

/* IOCB flags that are forwarded from the original kiocb as RWF_* flags. */
#define BACKING_IOCB_MASK \
	(IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
92 
/*
 * Translate IOCB_* kiocb flags to RWF_* flags for the sync vfs_iter_*
 * paths.  Relies on the masked IOCB_* bits having the same values as
 * their RWF_* counterparts, hence the plain cast.
 */
static rwf_t iocb_to_rw_flags(int flags)
{
	return (__force rwf_t)(flags & BACKING_IOCB_MASK);
}
97 
/*
 * Drop one reference on @aio; the last reference releases the backing
 * file reference taken at submission and frees the context.
 */
static void backing_aio_put(struct backing_aio *aio)
{
	if (refcount_dec_and_test(&aio->ref)) {
		fput(aio->iocb.ki_filp);
		kmem_cache_free(backing_aio_cachep, aio);
	}
}
105 
/*
 * Propagate the backing kiocb's file position back to the original kiocb,
 * run the optional end_write callback with the result, and drop the
 * completion reference.  @aio may be freed by backing_aio_put(), so it
 * must not be touched afterwards.
 */
static void backing_aio_cleanup(struct backing_aio *aio, long res)
{
	struct kiocb *iocb = &aio->iocb;
	struct kiocb *orig_iocb = aio->orig_iocb;

	orig_iocb->ki_pos = iocb->ki_pos;
	if (aio->end_write)
		aio->end_write(orig_iocb, res);

	backing_aio_put(aio);
}
117 
/*
 * ki_complete callback for the backing kiocb (reads complete here
 * directly; writes arrive via the completion work item).
 *
 * orig_iocb is saved before backing_aio_cleanup(), which may free @aio;
 * only then is the original kiocb completed.
 */
static void backing_aio_rw_complete(struct kiocb *iocb, long res)
{
	struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
	struct kiocb *orig_iocb = aio->orig_iocb;

	/* Balance the kiocb_start_write() done for async writes */
	if (iocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(iocb);

	backing_aio_cleanup(aio, res);
	orig_iocb->ki_complete(orig_iocb, res);
}
129 
/* Work item: finish an async write in process context. */
static void backing_aio_complete_work(struct work_struct *work)
{
	struct backing_aio *aio = container_of(work, struct backing_aio, work);

	backing_aio_rw_complete(&aio->iocb, aio->res);
}
136 
/*
 * ki_complete callback for async writes: defer the actual completion to
 * the superblock's dio-done workqueue (guaranteed to exist by
 * backing_aio_init_wq() at submission time).
 */
static void backing_aio_queue_completion(struct kiocb *iocb, long res)
{
	struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);

	/*
	 * Punt to a work queue to serialize updates of mtime/size.
	 */
	aio->res = res;
	INIT_WORK(&aio->work, backing_aio_complete_work);
	queue_work(file_inode(aio->orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
		   &aio->work);
}
149 
150 static int backing_aio_init_wq(struct kiocb *iocb)
151 {
152 	struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
153 
154 	if (sb->s_dio_done_wq)
155 		return 0;
156 
157 	return sb_init_dio_done_wq(sb);
158 }
159 
/*
 * Issue a read on the backing file.
 *
 * Sync kiocbs go straight to vfs_iter_read() with the allowed IOCB flags
 * translated to RWF_* flags.
 *
 * For aio, the original kiocb is cloned into a backing_aio context that
 * holds an extra reference on @file (handed to kiocb_clone() via
 * get_file(); dropped by the final backing_aio_put()).  The context
 * starts with two references: the submitter's (dropped below) and the
 * completion path's.  If the request was not queued
 * (ret != -EIOCBQUEUED), it is completed in-line via
 * backing_aio_cleanup().
 */
static int do_backing_file_read_iter(struct file *file, struct iov_iter *iter,
				     struct kiocb *iocb, int flags)
{
	struct backing_aio *aio = NULL;
	int ret;

	if (is_sync_kiocb(iocb)) {
		rwf_t rwf = iocb_to_rw_flags(flags);

		return vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
	}

	aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
	if (!aio)
		return -ENOMEM;

	aio->orig_iocb = iocb;
	kiocb_clone(&aio->iocb, iocb, get_file(file));
	aio->iocb.ki_complete = backing_aio_rw_complete;
	/* One ref for the submitter, one for the completion path */
	refcount_set(&aio->ref, 2);
	ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
	backing_aio_put(aio);
	if (ret != -EIOCBQUEUED)
		backing_aio_cleanup(aio, ret);
	return ret;
}
186 
/*
 * Read from a backing file on behalf of a stacked fs file (iocb->ki_filp).
 *
 * @file must have been opened by backing_file_open() (FMODE_BACKING).
 * The actual I/O runs with the backing credentials from @ctx; afterwards
 * the optional ctx->accessed callback is invoked on the user file
 * (e.g. to update atime on the stacked inode).
 */
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
			       struct kiocb *iocb, int flags,
			       struct backing_file_ctx *ctx)
{
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
		return -EIO;

	if (!iov_iter_count(iter))
		return 0;

	/* Refuse O_DIRECT if the backing file cannot do it */
	if (iocb->ki_flags & IOCB_DIRECT &&
	    !(file->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	scoped_with_creds(ctx->cred)
		ret = do_backing_file_read_iter(file, iter, iocb, flags);

	if (ctx->accessed)
		ctx->accessed(iocb->ki_filp);

	return ret;
}
EXPORT_SYMBOL_GPL(backing_file_read_iter);
212 
/*
 * Issue a write on the backing file.
 *
 * Sync kiocbs go straight to vfs_iter_write() with translated RWF_*
 * flags, with @end_write called in-line on the result.
 *
 * For aio, completion is punted to the sb's dio-done workqueue (see
 * backing_aio_queue_completion()), so that workqueue is set up before
 * cloning the kiocb into a backing_aio context.  Reference counting and
 * the non-queued (ret != -EIOCBQUEUED) in-line completion mirror
 * do_backing_file_read_iter().
 */
static int do_backing_file_write_iter(struct file *file, struct iov_iter *iter,
				      struct kiocb *iocb, int flags,
				      void (*end_write)(struct kiocb *, ssize_t))
{
	struct backing_aio *aio;
	int ret;

	if (is_sync_kiocb(iocb)) {
		rwf_t rwf = iocb_to_rw_flags(flags);

		ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
		if (end_write)
			end_write(iocb, ret);
		return ret;
	}

	ret = backing_aio_init_wq(iocb);
	if (ret)
		return ret;

	aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
	if (!aio)
		return -ENOMEM;

	aio->orig_iocb = iocb;
	aio->end_write = end_write;
	kiocb_clone(&aio->iocb, iocb, get_file(file));
	aio->iocb.ki_flags = flags;
	aio->iocb.ki_complete = backing_aio_queue_completion;
	/* One ref for the submitter, one for the completion path */
	refcount_set(&aio->ref, 2);
	ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
	backing_aio_put(aio);
	if (ret != -EIOCBQUEUED)
		backing_aio_cleanup(aio, ret);
	return ret;
}
249 
/*
 * Write to a backing file on behalf of a stacked fs file (iocb->ki_filp).
 *
 * @file must have been opened by backing_file_open() (FMODE_BACKING).
 * Privileges (suid/sgid/caps) are stripped from the user-facing file
 * before writing; the actual I/O runs with the backing credentials from
 * @ctx, and ctx->end_write (if set) is called with the result.
 */
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
				struct kiocb *iocb, int flags,
				struct backing_file_ctx *ctx)
{
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
		return -EIO;

	if (!iov_iter_count(iter))
		return 0;

	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		return ret;

	/* Refuse O_DIRECT if the backing file cannot do it */
	if (iocb->ki_flags & IOCB_DIRECT &&
	    !(file->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	/* Note: returns from within the creds scope */
	scoped_with_creds(ctx->cred)
		return do_backing_file_write_iter(file, iter, iocb, flags, ctx->end_write);
}
EXPORT_SYMBOL_GPL(backing_file_write_iter);
274 
/*
 * Splice data from a backing file into @pipe on behalf of a stacked fs
 * file (iocb->ki_filp).  The read runs with the backing credentials from
 * @ctx; the optional ctx->accessed callback is then invoked on the user
 * file.
 */
ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags,
				 struct backing_file_ctx *ctx)
{
	ssize_t ret;

	if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING)))
		return -EIO;

	scoped_with_creds(ctx->cred)
		ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags);

	if (ctx->accessed)
		ctx->accessed(iocb->ki_filp);

	return ret;
}
EXPORT_SYMBOL_GPL(backing_file_splice_read);
294 
/*
 * Splice data from @pipe to a backing file on behalf of a stacked fs
 * file (iocb->ki_filp).
 *
 * Privileges are stripped from the user-facing file first.  The backing
 * file's ->splice_write() is called with the backing credentials from
 * @ctx, bracketed by file_{start,end}_write() for sb write protection.
 * ctx->end_write (if set) is called with the result.
 */
ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
				  struct file *out, struct kiocb *iocb,
				  size_t len, unsigned int flags,
				  struct backing_file_ctx *ctx)
{
	ssize_t ret;

	if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
		return -EIO;

	if (!out->f_op->splice_write)
		return -EINVAL;

	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		return ret;

	scoped_with_creds(ctx->cred) {
		file_start_write(out);
		ret = out->f_op->splice_write(pipe, out, &iocb->ki_pos, len, flags);
		file_end_write(out);
	}

	if (ctx->end_write)
		ctx->end_write(iocb, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(backing_file_splice_write);
324 
/*
 * Map a backing file for a VMA that was set up against a stacked fs file.
 *
 * On entry vma->vm_file is the user-facing file; it is swapped for the
 * backing @file via vma_set_file() (which manages the file references)
 * before calling vfs_mmap() with the backing credentials from @ctx.  The
 * optional ctx->accessed callback then runs on the saved user file.
 */
int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
		      struct backing_file_ctx *ctx)
{
	/* Save the user-facing file before vma_set_file() replaces it */
	struct file *user_file = vma->vm_file;
	int ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
		return -EIO;

	if (!can_mmap_file(file))
		return -ENODEV;

	vma_set_file(vma, file);

	scoped_with_creds(ctx->cred)
		ret = vfs_mmap(vma->vm_file, vma);

	if (ctx->accessed)
		ctx->accessed(user_file);

	return ret;
}
EXPORT_SYMBOL_GPL(backing_file_mmap);
348 
349 static int __init backing_aio_init(void)
350 {
351 	backing_aio_cachep = KMEM_CACHE(backing_aio, SLAB_HWCACHE_ALIGN);
352 	if (!backing_aio_cachep)
353 		return -ENOMEM;
354 
355 	return 0;
356 }
357 fs_initcall(backing_aio_init);
358