// SPDX-License-Identifier: GPL-2.0-only
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

/*
 * Performs necessary checks before doing a clone.
 *
 * Can adjust the number of bytes to clone via the @req_count argument.
 * Returns the error code that the caller should return, or zero if the
 * clone should be allowed.
 */
static int generic_remap_checks(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				loff_t *req_count, unsigned int remap_flags)
{
	struct inode *inode_in = file_in->f_mapping->host;
	struct inode *inode_out = file_out->f_mapping->host;
	uint64_t count = *req_count;
	uint64_t bcount;
	loff_t size_in, size_out;
	loff_t bs = inode_out->i_sb->s_blocksize;
	int ret;

	/* The start of both ranges must be aligned to an fs block. */
	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
		return -EINVAL;

	/* Ensure offsets don't wrap. */
	if (pos_in + count < pos_in || pos_out + count < pos_out)
		return -EINVAL;

	size_in = i_size_read(inode_in);
	size_out = i_size_read(inode_out);

	/* Dedupe requires both ranges to be within EOF. */
	if ((remap_flags & REMAP_FILE_DEDUP) &&
	    (pos_in >= size_in || pos_in + count > size_in ||
	     pos_out >= size_out || pos_out + count > size_out))
		return -EINVAL;

	/* Ensure the infile range is within the infile. */
	if (pos_in >= size_in)
		return -EINVAL;
	count = min(count, size_in - (uint64_t)pos_in);

	ret = generic_write_check_limits(file_out, pos_out, &count);
	if (ret)
		return ret;

	/*
	 * If the user wanted us to link to the infile's EOF, round up to the
	 * next block boundary for this check.
	 *
	 * Otherwise, make sure the count is also block-aligned, having
	 * already confirmed the starting offsets' block alignment.
	 */
	if (pos_in + count == size_in) {
		bcount = ALIGN(size_in, bs) - pos_in;
	} else {
		if (!IS_ALIGNED(count, bs))
			count = ALIGN_DOWN(count, bs);
		bcount = count;
	}
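	/*
	 * Illustrative numbers: with bs = 4096, size_in = 10000 and
	 * pos_in = 8192, a request reaching EOF was clamped to count = 1808
	 * above, and bcount = ALIGN(10000, 4096) - 8192 = 4096, so the whole
	 * partial EOF block participates in the overlap check below.
	 */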

	/* Don't allow overlapped cloning within the same file. */
	if (inode_in == inode_out &&
	    pos_out + bcount > pos_in &&
	    pos_out < pos_in + bcount)
		return -EINVAL;

	/*
	 * We shortened the request but the caller can't deal with that, so
	 * bounce the request back to userspace.
	 */
	if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
		return -EINVAL;

	*req_count = count;
	return 0;
}

static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
			     bool write)
{
	if (unlikely(pos < 0 || len < 0))
		return -EINVAL;

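	/*
	 * pos and len are individually non-negative here, but their sum can
	 * still exceed LLONG_MAX and wrap negative, which the cast below
	 * catches.
	 */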
	if (unlikely((loff_t) (pos + len) < 0))
		return -EINVAL;

	return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
}

/*
 * Ensure that we don't remap a partial EOF block in the middle of something
 * else.  Assume that the offsets have already been checked for block
 * alignment.
 *
 * For clone we only link a partial EOF block above or at the destination file's
 * EOF.  For deduplication we accept a partial EOF block only if it ends at the
 * destination file's EOF (cannot link it into the middle of a file).
 *
 * Shorten the request if possible.
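 *
 * For example (illustrative numbers): with a 4096-byte block size, a 6000
 * byte request whose destination range ends before the destination's EOF is
 * trimmed to 4096 bytes if REMAP_FILE_CAN_SHORTEN is set, and otherwise
 * fails with -EBADE (dedupe) or -EINVAL (clone).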
 */
static int generic_remap_check_len(struct inode *inode_in,
				   struct inode *inode_out,
				   loff_t pos_out,
				   loff_t *len,
				   unsigned int remap_flags)
{
	u64 blkmask = i_blocksize(inode_in) - 1;
	loff_t new_len = *len;

	if ((*len & blkmask) == 0)
		return 0;

	if (pos_out + *len < i_size_read(inode_out))
		new_len &= ~blkmask;

	if (new_len == *len)
		return 0;

	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
		*len = new_len;
		return 0;
	}

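	/*
	 * vfs_dedupe_file_range() translates -EBADE into
	 * FILE_DEDUPE_RANGE_DIFFERS for the affected destination.
	 */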
	return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}

/* Read a page's worth of file data into the page cache. */
static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
{
	struct folio *folio;

	folio = read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
	if (IS_ERR(folio))
		return folio;
	if (!folio_test_uptodate(folio)) {
		folio_put(folio);
		return ERR_PTR(-EIO);
	}
	return folio;
}

/*
 * Lock two folios, ensuring that we lock in offset order if the folios
 * are from the same file.
 */
static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
{
	/* Always lock in order of increasing index. */
	if (folio1->index > folio2->index)
		swap(folio1, folio2);

	folio_lock(folio1);
	if (folio1 != folio2)
		folio_lock(folio2);
}

/* Unlock two folios, being careful not to unlock the same folio twice. */
static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
	folio_unlock(folio1);
	if (folio1 != folio2)
		folio_unlock(folio2);
}

/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 */
static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff,
					 struct file *dest, loff_t dstoff,
					 loff_t len, bool *is_same)
{
	bool same = true;
	int error = -EINVAL;

	while (len) {
		struct folio *src_folio, *dst_folio;
		void *src_addr, *dst_addr;
		loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
				     PAGE_SIZE - offset_in_page(dstoff));

		cmp_len = min(cmp_len, len);
		if (cmp_len <= 0)
			goto out_error;

		src_folio = vfs_dedupe_get_folio(src, srcoff);
		if (IS_ERR(src_folio)) {
			error = PTR_ERR(src_folio);
			goto out_error;
		}
		dst_folio = vfs_dedupe_get_folio(dest, dstoff);
		if (IS_ERR(dst_folio)) {
			error = PTR_ERR(dst_folio);
			folio_put(src_folio);
			goto out_error;
		}

		vfs_lock_two_folios(src_folio, dst_folio);

		/*
		 * Now that we've locked both folios, make sure they're still
		 * mapped to the file data we're interested in.  If not,
		 * someone is invalidating pages on us and we lose.
		 */
		if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
		    src_folio->mapping != src->f_mapping ||
		    dst_folio->mapping != dest->f_mapping) {
			same = false;
			goto unlock;
		}

		src_addr = kmap_local_folio(src_folio,
					offset_in_folio(src_folio, srcoff));
		dst_addr = kmap_local_folio(dst_folio,
					offset_in_folio(dst_folio, dstoff));

		flush_dcache_folio(src_folio);
		flush_dcache_folio(dst_folio);

		if (memcmp(src_addr, dst_addr, cmp_len))
			same = false;

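		/* kmap_local mappings must be released in reverse (LIFO) order. */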
		kunmap_local(dst_addr);
		kunmap_local(src_addr);
unlock:
		vfs_unlock_two_folios(src_folio, dst_folio);
		folio_put(dst_folio);
		folio_put(src_folio);

		if (!same)
			break;

		srcoff += cmp_len;
		dstoff += cmp_len;
		len -= cmp_len;
	}

	*is_same = same;
	return 0;

out_error:
	return error;
}

/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * If there's an error, then the usual negative error code is returned.
 * Otherwise returns 0 with *len set to the request length.
 */
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				  struct file *file_out, loff_t pos_out,
				  loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	bool same_inode = (inode_in == inode_out);
	int ret;

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Don't reflink dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/* Zero length dedupe exits immediately; reflink goes to EOF. */
	if (*len == 0) {
		loff_t isize = i_size_read(inode_in);

		if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
			return 0;
		if (pos_in > isize)
			return -EINVAL;
		*len = isize - pos_in;
		if (*len == 0)
			return 0;
	}

	/* Check that we don't violate system file offset limits. */
	ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
			remap_flags);
	if (ret)
		return ret;

	/* Wait for the completion of any pending direct I/O on both files. */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	ret = filemap_write_and_wait_range(inode_in->i_mapping,
			pos_in, pos_in + *len - 1);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(inode_out->i_mapping,
			pos_out, pos_out + *len - 1);
	if (ret)
		return ret;

	/*
	 * Check that the extents are the same.
	 */
	if (remap_flags & REMAP_FILE_DEDUP) {
		bool		is_same = false;

		ret = vfs_dedupe_file_range_compare(file_in, pos_in,
				file_out, pos_out, *len, &is_same);
		if (ret)
			return ret;
		if (!is_same)
			return -EBADE;
	}

	ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
			remap_flags);
	if (ret)
		return ret;

	/* If we can't alter the file contents, we're done. */
	if (!(remap_flags & REMAP_FILE_DEDUP))
		ret = file_modified(file_out);

	return ret;
}
EXPORT_SYMBOL(generic_remap_file_range_prep);
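
/*
 * Illustrative sketch (not part of this file; the helper name is
 * hypothetical): a filesystem's ->remap_file_range() implementation
 * typically locks both inodes, calls generic_remap_file_range_prep(), and
 * only then remaps extents, roughly:
 *
 *	lock_two_nondirectories(inode_in, inode_out);
 *	ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *					    pos_out, &len, remap_flags);
 *	if (!ret && len > 0)
 *		ret = myfs_remap_extents(inode_in, pos_in, inode_out,
 *					 pos_out, len);
 *	unlock_two_nondirectories(inode_in, inode_out);
 *
 * See the XFS and btrfs remap implementations for real-world callers.
 */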

loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out,
			   loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	ret = generic_file_rw_checks(file_in, file_out);
	if (ret < 0)
		return ret;

	if (!file_in->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = remap_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	ret = file_in->f_op->remap_file_range(file_in, pos_in,
			file_out, pos_out, len, remap_flags);
	if (ret < 0)
		return ret;

	fsnotify_access(file_in);
	fsnotify_modify(file_out);
	return ret;
}
EXPORT_SYMBOL(do_clone_file_range);

loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	file_start_write(file_out);
	ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
				  remap_flags);
	file_end_write(file_out);

	return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);
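
/*
 * Note: the FICLONE and FICLONERANGE ioctls are the usual callers of
 * vfs_clone_file_range(); see ioctl_file_clone() in fs/ioctl.c.
 */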

/* Check whether we are allowed to dedupe the destination file */
static bool allow_file_dedupe(struct file *file)
{
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
	struct inode *inode = file_inode(file);

	if (capable(CAP_SYS_ADMIN))
		return true;
	if (file->f_mode & FMODE_WRITE)
		return true;
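	/*
	 * For a read-only fd, still allow dedupe if the caller owns the
	 * inode or could have opened it for writing.
	 */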
	if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
		return true;
	if (!inode_permission(mnt_userns, inode, MAY_WRITE))
		return true;
	return false;
}

loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
				 struct file *dst_file, loff_t dst_pos,
				 loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
				     REMAP_FILE_CAN_SHORTEN));

	ret = mnt_want_write_file(dst_file);
	if (ret)
		return ret;

	/*
	 * This is redundant if called from vfs_dedupe_file_range(), but other
	 * callers need it and it's not performance sensitive...
	 */
	ret = remap_verify_area(src_file, src_pos, len, false);
	if (ret)
		goto out_drop_write;

	ret = remap_verify_area(dst_file, dst_pos, len, true);
	if (ret)
		goto out_drop_write;

	ret = -EPERM;
	if (!allow_file_dedupe(dst_file))
		goto out_drop_write;

	ret = -EXDEV;
	if (file_inode(src_file)->i_sb != file_inode(dst_file)->i_sb)
		goto out_drop_write;

	ret = -EISDIR;
	if (S_ISDIR(file_inode(dst_file)->i_mode))
		goto out_drop_write;

	ret = -EINVAL;
	if (!dst_file->f_op->remap_file_range)
		goto out_drop_write;

	if (len == 0) {
		ret = 0;
		goto out_drop_write;
	}

	ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
			dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
	mnt_drop_write_file(dst_file);

	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range_one);

int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
	struct file_dedupe_range_info *info;
	struct inode *src = file_inode(file);
	u64 off;
	u64 len;
	int i;
	int ret;
	u16 count = same->dest_count;
	loff_t deduped;

	if (!(file->f_mode & FMODE_READ))
		return -EINVAL;

	if (same->reserved1 || same->reserved2)
		return -EINVAL;

	off = same->src_offset;
	len = same->src_length;

	if (S_ISDIR(src->i_mode))
		return -EISDIR;

	if (!S_ISREG(src->i_mode))
		return -EINVAL;

	if (!file->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file, off, len, false);
	if (ret < 0)
		return ret;
	ret = 0;

	if (off + len > i_size_read(src))
		return -EINVAL;

	/* Arbitrary 1G limit on a single dedupe request, can be raised. */
	len = min_t(u64, len, 1 << 30);

	/* pre-format output fields to sane values */
	for (i = 0; i < count; i++) {
		same->info[i].bytes_deduped = 0ULL;
		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
	}

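	/*
	 * Per-destination failures are reported via info->status below; the
	 * call itself still returns 0 once the source checks above passed.
	 */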
	for (i = 0, info = same->info; i < count; i++, info++) {
		struct fd dst_fd = fdget(info->dest_fd);
		struct file *dst_file = dst_fd.file;

		if (!dst_file) {
			info->status = -EBADF;
			goto next_loop;
		}

		if (info->reserved) {
			info->status = -EINVAL;
			goto next_fdput;
		}

		deduped = vfs_dedupe_file_range_one(file, off, dst_file,
						    info->dest_offset, len,
						    REMAP_FILE_CAN_SHORTEN);
		if (deduped == -EBADE)
			info->status = FILE_DEDUPE_RANGE_DIFFERS;
		else if (deduped < 0)
			info->status = deduped;
		else
			info->bytes_deduped = len;

next_fdput:
		fdput(dst_fd);
next_loop:
		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);
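
/*
 * Note: the FIDEDUPERANGE ioctl is the usual caller of this function; see
 * ioctl_fideduperange() in fs/ioctl.c.
 */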