xref: /linux/fs/overlayfs/copy_up.c (revision 45d986d11313ff2d8ed2cf6a34e2aefdc4639a99)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright (C) 2011 Novell Inc.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/slab.h>
10 #include <linux/file.h>
11 #include <linux/fileattr.h>
12 #include <linux/splice.h>
13 #include <linux/xattr.h>
14 #include <linux/security.h>
15 #include <linux/uaccess.h>
16 #include <linux/sched/signal.h>
17 #include <linux/cred.h>
18 #include <linux/namei.h>
19 #include <linux/fdtable.h>
20 #include <linux/ratelimit.h>
21 #include <linux/exportfs.h>
22 #include "overlayfs.h"
23 
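/* Chunk size used by the do_splice_direct() copy loop in ovl_copy_up_file() */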
24 #define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
25 
26 static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
27 {
28 	pr_warn("\"check_copy_up\" module option is obsolete\n");
29 	return 0;
30 }
31 
32 static int ovl_ccup_get(char *buf, const struct kernel_param *param)
33 {
34 	return sprintf(buf, "N\n");
35 }
36 
37 module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644);
38 MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing");
39 
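/*
 * xattrs that may not be silently dropped during copy up: POSIX ACLs and
 * anything in the security.* namespace.
 */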
40 static bool ovl_must_copy_xattr(const char *name)
41 {
42 	return !strcmp(name, XATTR_POSIX_ACL_ACCESS) ||
43 	       !strcmp(name, XATTR_POSIX_ACL_DEFAULT) ||
44 	       !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
45 }
46 
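/*
 * Copy one POSIX ACL from the lower path to the new upper dentry.  A missing
 * ACL, or a lower fs without ACL support, is not an error.
 */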
47 static int ovl_copy_acl(struct ovl_fs *ofs, const struct path *path,
48 			struct dentry *dentry, const char *acl_name)
49 {
50 	int err;
51 	struct posix_acl *clone, *real_acl = NULL;
52 
53 	real_acl = ovl_get_acl_path(path, acl_name, false);
54 	if (!real_acl)
55 		return 0;
56 
57 	if (IS_ERR(real_acl)) {
58 		err = PTR_ERR(real_acl);
59 		if (err == -ENODATA || err == -EOPNOTSUPP)
60 			return 0;
61 		return err;
62 	}
63 
64 	clone = posix_acl_clone(real_acl, GFP_KERNEL);
65 	posix_acl_release(real_acl); /* release original acl */
66 	if (!clone)
67 		return -ENOMEM;
68 
69 	err = ovl_do_set_acl(ofs, dentry, acl_name, clone);
70 
71 	/* release cloned acl */
72 	posix_acl_release(clone);
73 	return err;
74 }
75 
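/*
 * Copy the xattrs of the lower inode to the new upper dentry, skipping
 * overlay private xattrs.  Failure to copy POSIX ACLs or security.* xattrs
 * is fatal; other xattrs not supported by the upper fs are silently dropped.
 */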
76 int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct dentry *new)
77 {
78 	struct dentry *old = oldpath->dentry;
79 	ssize_t list_size, size, value_size = 0;
80 	char *buf, *name, *value = NULL;
81 	int error = 0;
82 	size_t slen;
83 
84 	if (!old->d_inode->i_op->listxattr || !new->d_inode->i_op->listxattr)
85 		return 0;
86 
87 	list_size = vfs_listxattr(old, NULL, 0);
88 	if (list_size <= 0) {
89 		if (list_size == -EOPNOTSUPP)
90 			return 0;
91 		return list_size;
92 	}
93 
94 	buf = kvzalloc(list_size, GFP_KERNEL);
95 	if (!buf)
96 		return -ENOMEM;
97 
98 	list_size = vfs_listxattr(old, buf, list_size);
99 	if (list_size <= 0) {
100 		error = list_size;
101 		goto out;
102 	}
103 
104 	for (name = buf; list_size; name += slen) {
105 		slen = strnlen(name, list_size) + 1;
106 
107 		/* underlying fs providing us with a broken xattr list? */
108 		if (WARN_ON(slen > list_size)) {
109 			error = -EIO;
110 			break;
111 		}
112 		list_size -= slen;
113 
114 		if (ovl_is_private_xattr(sb, name))
115 			continue;
116 
117 		error = security_inode_copy_up_xattr(old, name);
118 		if (error == -ECANCELED) {
119 			error = 0;
120 			continue; /* Discard */
121 		}
122 		if (error < 0 && error != -EOPNOTSUPP)
123 			break;
124 
125 		if (is_posix_acl_xattr(name)) {
126 			error = ovl_copy_acl(OVL_FS(sb), oldpath, new, name);
127 			if (!error)
128 				continue;
129 			/* POSIX ACLs must be copied. */
130 			break;
131 		}
132 
133 retry:
134 		size = ovl_do_getxattr(oldpath, name, value, value_size);
135 		if (size == -ERANGE)
136 			size = ovl_do_getxattr(oldpath, name, NULL, 0);
137 
138 		if (size < 0) {
139 			error = size;
140 			break;
141 		}
142 
143 		if (size > value_size) {
144 			void *new;
145 
146 			new = kvmalloc(size, GFP_KERNEL);
147 			if (!new) {
148 				error = -ENOMEM;
149 				break;
150 			}
151 			kvfree(value);
152 			value = new;
153 			value_size = size;
154 			goto retry;
155 		}
156 
157 		error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0);
158 		if (error) {
159 			if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
160 				break;
161 
162 			/* Ignore failure to copy unknown xattrs */
163 			error = 0;
164 		}
165 	}
166 	kvfree(value);
167 out:
168 	kvfree(buf);
169 	return error;
170 }
171 
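/*
 * Copy fileattr flags from the lower inode to the upper inode.  Flags that
 * cannot be set directly on the upper inode (immutable, append-only) are
 * recorded in the overlay.protattr xattr instead.
 */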
172 static int ovl_copy_fileattr(struct inode *inode, const struct path *old,
173 			     const struct path *new)
174 {
175 	struct fileattr oldfa = { .flags_valid = true };
176 	struct fileattr newfa = { .flags_valid = true };
177 	int err;
178 
179 	err = ovl_real_fileattr_get(old, &oldfa);
180 	if (err) {
181 		/* Ntfs-3g returns -EINVAL for "no fileattr support" */
182 		if (err == -ENOTTY || err == -EINVAL)
183 			return 0;
184 		pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
185 			old->dentry, err);
186 		return err;
187 	}
188 
189 	/*
190 	 * We cannot set immutable and append-only flags on upper inode,
191 	 * because we would not be able to link the upper inode to the upper dir
192 	 * nor set the overlay private xattr on the upper inode.
193 	 * Store these flags in overlay.protattr xattr instead.
194 	 */
195 	if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
196 		err = ovl_set_protattr(inode, new->dentry, &oldfa);
197 		if (err == -EPERM)
198 			pr_warn_once("copying fileattr: no xattr on upper\n");
199 		else if (err)
200 			return err;
201 	}
202 
203 	/* Don't bother copying flags if none are set */
204 	if (!(oldfa.flags & OVL_COPY_FS_FLAGS_MASK))
205 		return 0;
206 
207 	err = ovl_real_fileattr_get(new, &newfa);
208 	if (err) {
209 		/*
210 		 * Returning an error if upper doesn't support fileattr will
211 		 * result in a regression, so revert to the old behavior.
212 		 */
213 		if (err == -ENOTTY || err == -EINVAL) {
214 			pr_warn_once("copying fileattr: no support on upper\n");
215 			return 0;
216 		}
217 		pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
218 			new->dentry, err);
219 		return err;
220 	}
221 
222 	BUILD_BUG_ON(OVL_COPY_FS_FLAGS_MASK & ~FS_COMMON_FL);
223 	newfa.flags &= ~OVL_COPY_FS_FLAGS_MASK;
224 	newfa.flags |= (oldfa.flags & OVL_COPY_FS_FLAGS_MASK);
225 
226 	BUILD_BUG_ON(OVL_COPY_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);
227 	newfa.fsx_xflags &= ~OVL_COPY_FSX_FLAGS_MASK;
228 	newfa.fsx_xflags |= (oldfa.fsx_xflags & OVL_COPY_FSX_FLAGS_MASK);
229 
230 	return ovl_real_fileattr_set(new, &newfa);
231 }
232 
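/* Sanity check the offsets and remaining length before copying a chunk */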
233 static int ovl_verify_area(loff_t pos, loff_t pos2, loff_t len, loff_t totlen)
234 {
235 	loff_t tmp;
236 
237 	if (pos != pos2)
238 		return -EIO;
239 	if (pos < 0 || len < 0 || totlen < 0)
240 		return -EIO;
241 	if (check_add_overflow(pos, len, &tmp))
242 		return -EIO;
243 	return 0;
244 }
245 
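/* Open the file at @path read-only and fsync it */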
246 static int ovl_sync_file(struct path *path)
247 {
248 	struct file *new_file;
249 	int err;
250 
251 	new_file = ovl_path_open(path, O_LARGEFILE | O_RDONLY);
252 	if (IS_ERR(new_file))
253 		return PTR_ERR(new_file);
254 
255 	err = vfs_fsync(new_file, 0);
256 	fput(new_file);
257 
258 	return err;
259 }
260 
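/*
 * Copy @len bytes from the lowerdata file of @dentry into @new_file.  Try a
 * single clone (reflink) first and fall back to a chunked splice loop that
 * skips holes when the lower fs supports SEEK_DATA.
 */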
261 static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
262 			    struct file *new_file, loff_t len,
263 			    bool datasync)
264 {
265 	struct path datapath;
266 	struct file *old_file;
267 	loff_t old_pos = 0;
268 	loff_t new_pos = 0;
269 	loff_t cloned;
270 	loff_t data_pos = -1;
271 	loff_t hole_len;
272 	bool skip_hole = false;
273 	int error = 0;
274 
275 	ovl_path_lowerdata(dentry, &datapath);
276 	if (WARN_ON_ONCE(datapath.dentry == NULL) ||
277 	    WARN_ON_ONCE(len < 0))
278 		return -EIO;
279 
280 	old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY);
281 	if (IS_ERR(old_file))
282 		return PTR_ERR(old_file);
283 
284 	/* Try to use clone_file_range to clone up within the same fs */
285 	cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0);
286 	if (cloned == len)
287 		goto out_fput;
288 
289 	/* Couldn't clone, so now we try to copy the data */
290 	error = rw_verify_area(READ, old_file, &old_pos, len);
291 	if (!error)
292 		error = rw_verify_area(WRITE, new_file, &new_pos, len);
293 	if (error)
294 		goto out_fput;
295 
296 	/* Check if lower fs supports seek operation */
297 	if (old_file->f_mode & FMODE_LSEEK)
298 		skip_hole = true;
299 
300 	while (len) {
301 		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
302 		ssize_t bytes;
303 
304 		if (len < this_len)
305 			this_len = len;
306 
307 		if (signal_pending_state(TASK_KILLABLE, current)) {
308 			error = -EINTR;
309 			break;
310 		}
311 
312 		/*
313 		 * Filling holes with zeroes would waste disk space and
314 		 * slow down copy-up, so holes are skipped during
315 		 * copy-up.  This relies on the SEEK_DATA implementation
316 		 * of the lower fs; if the lower fs does not support it,
317 		 * copy-up behaves as before.
318 		 *
319 		 * Hole detection works as follows: when the next data
320 		 * position is larger than the current position, we skip
321 		 * that hole, otherwise we copy data in chunks of
322 		 * OVL_COPY_UP_CHUNK_SIZE.  This may not recognize every
323 		 * kind of hole and sometimes only skips part of a hole
324 		 * area, but it should be good enough for most of the
325 		 * use cases.
326 		 *
327 		 * We do not hold upper sb_writers throughout the loop to avert
328 		 * lockdep warning with llseek of lower file in nested overlay:
329 		 * - upper sb_writers
330 		 * -- lower ovl_inode_lock (ovl_llseek)
331 		 */
332 		if (skip_hole && data_pos < old_pos) {
333 			data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
334 			if (data_pos > old_pos) {
335 				hole_len = data_pos - old_pos;
336 				len -= hole_len;
337 				old_pos = new_pos = data_pos;
338 				continue;
339 			} else if (data_pos == -ENXIO) {
340 				break;
341 			} else if (data_pos < 0) {
342 				skip_hole = false;
343 			}
344 		}
345 
346 		error = ovl_verify_area(old_pos, new_pos, this_len, len);
347 		if (error)
348 			break;
349 
350 		bytes = do_splice_direct(old_file, &old_pos,
351 					 new_file, &new_pos,
352 					 this_len, SPLICE_F_MOVE);
353 		if (bytes <= 0) {
354 			error = bytes;
355 			break;
356 		}
357 		WARN_ON(old_pos != new_pos);
358 
359 		len -= bytes;
360 	}
361 	/* call fsync once, either now or later along with metadata */
362 	if (!error && ovl_should_sync(ofs) && datasync)
363 		error = vfs_fsync(new_file, 0);
364 out_fput:
365 	fput(old_file);
366 	return error;
367 }
368 
369 static int ovl_set_size(struct ovl_fs *ofs,
370 			struct dentry *upperdentry, struct kstat *stat)
371 {
372 	struct iattr attr = {
373 		.ia_valid = ATTR_SIZE,
374 		.ia_size = stat->size,
375 	};
376 
377 	return ovl_do_notify_change(ofs, upperdentry, &attr);
378 }
379 
380 static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
381 			      struct kstat *stat)
382 {
383 	struct iattr attr = {
384 		.ia_valid =
385 		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
386 		.ia_atime = stat->atime,
387 		.ia_mtime = stat->mtime,
388 	};
389 
390 	return ovl_do_notify_change(ofs, upperdentry, &attr);
391 }
392 
393 int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
394 		 struct kstat *stat)
395 {
396 	int err = 0;
397 
398 	if (!S_ISLNK(stat->mode)) {
399 		struct iattr attr = {
400 			.ia_valid = ATTR_MODE,
401 			.ia_mode = stat->mode,
402 		};
403 		err = ovl_do_notify_change(ofs, upperdentry, &attr);
404 	}
405 	if (!err) {
406 		struct iattr attr = {
407 			.ia_valid = ATTR_UID | ATTR_GID,
408 			.ia_vfsuid = VFSUIDT_INIT(stat->uid),
409 			.ia_vfsgid = VFSGIDT_INIT(stat->gid),
410 		};
411 		err = ovl_do_notify_change(ofs, upperdentry, &attr);
412 	}
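	/* Restore timestamps last (best effort, failure is not fatal) */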
413 	if (!err)
414 		ovl_set_timestamps(ofs, upperdentry, stat);
415 
416 	return err;
417 }
418 
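/*
 * Encode a file handle for @real to be stored in overlay xattrs, such as
 * overlay.origin.  @is_upper marks a handle that refers to an upper layer
 * dentry.
 */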
419 struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
420 				  bool is_upper)
421 {
422 	struct ovl_fh *fh;
423 	int fh_type, dwords;
424 	int buflen = MAX_HANDLE_SZ;
425 	uuid_t *uuid = &real->d_sb->s_uuid;
426 	int err;
427 
428 	/* Make sure the real fid stays 32bit aligned */
429 	BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
430 	BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);
431 
432 	fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
433 	if (!fh)
434 		return ERR_PTR(-ENOMEM);
435 
436 	/*
437 	 * We encode a non-connectable file handle for non-dir, because we
438 	 * only need to find the lower inode number and we don't want to pay
439 	 * the price of reconnecting the dentry.
440 	 */
441 	dwords = buflen >> 2;
442 	fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
443 	buflen = (dwords << 2);
444 
445 	err = -EIO;
446 	if (WARN_ON(fh_type < 0) ||
447 	    WARN_ON(buflen > MAX_HANDLE_SZ) ||
448 	    WARN_ON(fh_type == FILEID_INVALID))
449 		goto out_err;
450 
451 	fh->fb.version = OVL_FH_VERSION;
452 	fh->fb.magic = OVL_FH_MAGIC;
453 	fh->fb.type = fh_type;
454 	fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
455 	/*
456 	 * When we later want to decode an overlay dentry from this handle
457 	 * and all layers are on the same fs, if we get a disconnected real
458 	 * dentry when we decode the fid, the only way to tell if we should assign
459 	 * it to upperdentry or to lowerstack is by checking this flag.
460 	 */
461 	if (is_upper)
462 		fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
463 	fh->fb.len = sizeof(fh->fb) + buflen;
464 	if (ovl_origin_uuid(ofs))
465 		fh->fb.uuid = *uuid;
466 
467 	return fh;
468 
469 out_err:
470 	kfree(fh);
471 	return ERR_PTR(err);
472 }
473 
474 struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
475 {
476 	/*
477 	 * When the lower layer doesn't support export operations, store a 'null' fh,
478 	 * so we can use the overlay.origin xattr to distinguish between a copy
479 	 * up and a pure upper inode.
480 	 */
481 	if (!ovl_can_decode_fh(origin->d_sb))
482 		return NULL;
483 
484 	return ovl_encode_real_fh(ofs, origin, false);
485 }
486 
487 int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
488 		      struct dentry *upper)
489 {
490 	int err;
491 
492 	/*
493 	 * Do not fail when upper doesn't support xattrs.
494 	 */
495 	err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh->buf,
496 				 fh ? fh->fb.len : 0, 0);
497 
498 	/* Ignore -EPERM from setting "user.*" on symlink/special */
499 	return err == -EPERM ? 0 : err;
500 }
501 
502 /* Store file handle of @upper dir in @index dir entry */
503 static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
504 			    struct dentry *index)
505 {
506 	const struct ovl_fh *fh;
507 	int err;
508 
509 	fh = ovl_encode_real_fh(ofs, upper, true);
510 	if (IS_ERR(fh))
511 		return PTR_ERR(fh);
512 
513 	err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);
514 
515 	kfree(fh);
516 	return err;
517 }
518 
519 /*
520  * Create and install index entry.
521  *
522  * Caller must hold i_mutex on indexdir.
523  */
524 static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh,
525 			    struct dentry *upper)
526 {
527 	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
528 	struct dentry *indexdir = ovl_indexdir(dentry->d_sb);
529 	struct inode *dir = d_inode(indexdir);
530 	struct dentry *index = NULL;
531 	struct dentry *temp = NULL;
532 	struct qstr name = { };
533 	int err;
534 
535 	/*
536 	 * For now this is only used for creating index entry for directories,
537 	 * because non-dirs are copied up directly to the index and then hardlinked
538 	 * to upper dir.
539 	 *
540 	 * TODO: implement create index for non-dir, so we can call it when
541 	 * encoding file handle for non-dir in case index does not exist.
542 	 */
543 	if (WARN_ON(!d_is_dir(dentry)))
544 		return -EIO;
545 
546 	/* Directory not expected to be indexed before copy up */
547 	if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry))))
548 		return -EIO;
549 
550 	err = ovl_get_index_name_fh(fh, &name);
551 	if (err)
552 		return err;
553 
554 	temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0));
555 	err = PTR_ERR(temp);
556 	if (IS_ERR(temp))
557 		goto free_name;
558 
559 	err = ovl_set_upper_fh(ofs, upper, temp);
560 	if (err)
561 		goto out;
562 
563 	index = ovl_lookup_upper(ofs, name.name, indexdir, name.len);
564 	if (IS_ERR(index)) {
565 		err = PTR_ERR(index);
566 	} else {
567 		err = ovl_do_rename(ofs, dir, temp, dir, index, 0);
568 		dput(index);
569 	}
570 out:
571 	if (err)
572 		ovl_cleanup(ofs, dir, temp);
573 	dput(temp);
574 free_name:
575 	kfree(name.name);
576 	return err;
577 }
578 
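/* State shared by the helpers of a single copy up operation */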
579 struct ovl_copy_up_ctx {
580 	struct dentry *parent;
581 	struct dentry *dentry;
582 	struct path lowerpath;
583 	struct kstat stat;
584 	struct kstat pstat;
585 	const char *link;
586 	struct dentry *destdir;
587 	struct qstr destname;
588 	struct dentry *workdir;
589 	const struct ovl_fh *origin_fh;
590 	bool origin;
591 	bool indexed;
592 	bool metacopy;
593 	bool metacopy_digest;
594 	bool metadata_fsync;
595 };
596 
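/*
 * Link a dentry that was already copied up without an upper alias (e.g. one
 * copied up to the index dir) into its upper parent dir and fix up the
 * overlay nlink accounting.
 */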
597 static int ovl_link_up(struct ovl_copy_up_ctx *c)
598 {
599 	int err;
600 	struct dentry *upper;
601 	struct dentry *upperdir = ovl_dentry_upper(c->parent);
602 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
603 	struct inode *udir = d_inode(upperdir);
604 
605 	ovl_start_write(c->dentry);
606 
607 	/* Mark parent "impure" because it may now contain non-pure upper */
608 	err = ovl_set_impure(c->parent, upperdir);
609 	if (err)
610 		goto out;
611 
612 	err = ovl_set_nlink_lower(c->dentry);
613 	if (err)
614 		goto out;
615 
616 	inode_lock_nested(udir, I_MUTEX_PARENT);
617 	upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir,
618 				 c->dentry->d_name.len);
619 	err = PTR_ERR(upper);
620 	if (!IS_ERR(upper)) {
621 		err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
622 		dput(upper);
623 
624 		if (!err) {
625 			/* Restore timestamps on parent (best effort) */
626 			ovl_set_timestamps(ofs, upperdir, &c->pstat);
627 			ovl_dentry_set_upper_alias(c->dentry);
628 			ovl_dentry_update_reval(c->dentry, upper);
629 		}
630 	}
631 	inode_unlock(udir);
632 	if (err)
633 		goto out;
634 
635 	err = ovl_set_nlink_upper(c->dentry);
636 
637 out:
638 	ovl_end_write(c->dentry);
639 	return err;
640 }
641 
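/*
 * Copy lower file data into the temp upper file at @temp.  A no-op for
 * non-regular files, metacopy and empty files.
 */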
642 static int ovl_copy_up_data(struct ovl_copy_up_ctx *c, const struct path *temp)
643 {
644 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
645 	struct file *new_file;
646 	int err;
647 
648 	if (!S_ISREG(c->stat.mode) || c->metacopy || !c->stat.size)
649 		return 0;
650 
651 	new_file = ovl_path_open(temp, O_LARGEFILE | O_WRONLY);
652 	if (IS_ERR(new_file))
653 		return PTR_ERR(new_file);
654 
655 	err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size,
656 			       !c->metadata_fsync);
657 	fput(new_file);
658 
659 	return err;
660 }
661 
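/*
 * Copy xattrs, fileattr flags, the origin file handle and the metacopy
 * xattr (when applicable) to the temp upper dentry, then set size, owner,
 * mode and timestamps.
 */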
662 static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
663 {
664 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
665 	struct inode *inode = d_inode(c->dentry);
666 	struct path upperpath = { .mnt = ovl_upper_mnt(ofs), .dentry = temp };
667 	int err;
668 
669 	err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp);
670 	if (err)
671 		return err;
672 
673 	if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
674 	    (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
675 		/*
676 		 * Copy the fileattr inode flags that are the source of already
677 		 * copied i_flags
678 		 */
679 		err = ovl_copy_fileattr(inode, &c->lowerpath, &upperpath);
680 		if (err)
681 			return err;
682 	}
683 
684 	/*
685 	 * Store identifier of lower inode in upper inode xattr to
686 	 * allow lookup of the copy up origin inode.
687 	 *
688 	 * Don't set origin when we are breaking the association with a lower
689 	 * hard link.
690 	 */
691 	if (c->origin) {
692 		err = ovl_set_origin_fh(ofs, c->origin_fh, temp);
693 		if (err)
694 			return err;
695 	}
696 
697 	if (c->metacopy) {
698 		struct path lowerdatapath;
699 		struct ovl_metacopy metacopy_data = OVL_METACOPY_INIT;
700 
701 		ovl_path_lowerdata(c->dentry, &lowerdatapath);
702 		if (WARN_ON_ONCE(lowerdatapath.dentry == NULL))
703 			return -EIO;
704 		err = ovl_get_verity_digest(ofs, &lowerdatapath, &metacopy_data);
705 		if (err)
706 			return err;
707 
708 		if (metacopy_data.digest_algo)
709 			c->metacopy_digest = true;
710 
711 		err = ovl_set_metacopy_xattr(ofs, temp, &metacopy_data);
712 		if (err)
713 			return err;
714 	}
715 
716 	inode_lock(temp->d_inode);
717 	if (S_ISREG(c->stat.mode))
718 		err = ovl_set_size(ofs, temp, &c->stat);
719 	if (!err)
720 		err = ovl_set_attr(ofs, temp, &c->stat);
721 	inode_unlock(temp->d_inode);
722 
723 	/* fsync metadata before moving it into upper dir */
724 	if (!err && ovl_should_sync(ofs) && c->metadata_fsync)
725 		err = ovl_sync_file(&upperpath);
726 
727 	return err;
728 }
729 
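/* Credentials prepared by security_inode_copy_up() for use during copy up */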
730 struct ovl_cu_creds {
731 	const struct cred *old;
732 	struct cred *new;
733 };
734 
735 static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc)
736 {
737 	int err;
738 
739 	cc->old = cc->new = NULL;
740 	err = security_inode_copy_up(dentry, &cc->new);
741 	if (err < 0)
742 		return err;
743 
744 	if (cc->new)
745 		cc->old = override_creds(cc->new);
746 
747 	return 0;
748 }
749 
750 static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
751 {
752 	if (cc->new) {
753 		revert_creds(cc->old);
754 		put_cred(cc->new);
755 	}
756 }
757 
758 /*
759  * Copyup using workdir to prepare temp file.  Used when copying up directories,
760  * special files or when upper fs doesn't support O_TMPFILE.
761  */
762 static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
763 {
764 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
765 	struct inode *inode;
766 	struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
767 	struct path path = { .mnt = ovl_upper_mnt(ofs) };
768 	struct dentry *temp, *upper, *trap;
769 	struct ovl_cu_creds cc;
770 	int err;
771 	struct ovl_cattr cattr = {
772 		/* Can't properly set mode on creation because of the umask */
773 		.mode = c->stat.mode & S_IFMT,
774 		.rdev = c->stat.rdev,
775 		.link = c->link
776 	};
777 
778 	err = ovl_prep_cu_creds(c->dentry, &cc);
779 	if (err)
780 		return err;
781 
782 	ovl_start_write(c->dentry);
783 	inode_lock(wdir);
784 	temp = ovl_create_temp(ofs, c->workdir, &cattr);
785 	inode_unlock(wdir);
786 	ovl_end_write(c->dentry);
787 	ovl_revert_cu_creds(&cc);
788 
789 	if (IS_ERR(temp))
790 		return PTR_ERR(temp);
791 
792 	/*
793 	 * Copy up data first and then xattrs. Writing data after
794 	 * xattrs will remove security.capability xattr automatically.
795 	 */
796 	path.dentry = temp;
797 	err = ovl_copy_up_data(c, &path);
798 	/*
799 	 * We cannot hold lock_rename() throughout this helper, because of
800 	 * lock ordering with sb_writers, which shouldn't be held when calling
801 	 * ovl_copy_up_data(), so lock workdir and destdir and make sure that
802 	 * temp wasn't moved before copy up completion or cleanup.
803 	 */
804 	ovl_start_write(c->dentry);
805 	trap = lock_rename(c->workdir, c->destdir);
806 	if (trap || temp->d_parent != c->workdir) {
807 		/* temp or workdir moved underneath us? abort without cleanup */
808 		dput(temp);
809 		err = -EIO;
810 		if (IS_ERR(trap))
811 			goto out;
812 		goto unlock;
813 	} else if (err) {
814 		goto cleanup;
815 	}
816 
817 	err = ovl_copy_up_metadata(c, temp);
818 	if (err)
819 		goto cleanup;
820 
821 	if (S_ISDIR(c->stat.mode) && c->indexed) {
822 		err = ovl_create_index(c->dentry, c->origin_fh, temp);
823 		if (err)
824 			goto cleanup;
825 	}
826 
827 	upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
828 				 c->destname.len);
829 	err = PTR_ERR(upper);
830 	if (IS_ERR(upper))
831 		goto cleanup;
832 
833 	err = ovl_do_rename(ofs, wdir, temp, udir, upper, 0);
834 	dput(upper);
835 	if (err)
836 		goto cleanup;
837 
838 	inode = d_inode(c->dentry);
839 	if (c->metacopy_digest)
840 		ovl_set_flag(OVL_HAS_DIGEST, inode);
841 	else
842 		ovl_clear_flag(OVL_HAS_DIGEST, inode);
843 	ovl_clear_flag(OVL_VERIFIED_DIGEST, inode);
844 
845 	if (!c->metacopy)
846 		ovl_set_upperdata(inode);
847 	ovl_inode_update(inode, temp);
848 	if (S_ISDIR(inode->i_mode))
849 		ovl_set_flag(OVL_WHITEOUTS, inode);
850 unlock:
851 	unlock_rename(c->workdir, c->destdir);
852 out:
853 	ovl_end_write(c->dentry);
854 
855 	return err;
856 
857 cleanup:
858 	ovl_cleanup(ofs, wdir, temp);
859 	dput(temp);
860 	goto unlock;
861 }
862 
863 /* Copyup using O_TMPFILE which does not require cross dir locking */
864 static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
865 {
866 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
867 	struct inode *udir = d_inode(c->destdir);
868 	struct dentry *temp, *upper;
869 	struct file *tmpfile;
870 	struct ovl_cu_creds cc;
871 	int err;
872 
873 	err = ovl_prep_cu_creds(c->dentry, &cc);
874 	if (err)
875 		return err;
876 
877 	ovl_start_write(c->dentry);
878 	tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode);
879 	ovl_end_write(c->dentry);
880 	ovl_revert_cu_creds(&cc);
881 	if (IS_ERR(tmpfile))
882 		return PTR_ERR(tmpfile);
883 
884 	temp = tmpfile->f_path.dentry;
885 	if (!c->metacopy && c->stat.size) {
886 		err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size,
887 				       !c->metadata_fsync);
888 		if (err)
889 			goto out_fput;
890 	}
891 
892 	ovl_start_write(c->dentry);
893 
894 	err = ovl_copy_up_metadata(c, temp);
895 	if (err)
896 		goto out;
897 
898 	inode_lock_nested(udir, I_MUTEX_PARENT);
899 
900 	upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
901 				 c->destname.len);
902 	err = PTR_ERR(upper);
903 	if (!IS_ERR(upper)) {
904 		err = ovl_do_link(ofs, temp, udir, upper);
905 		dput(upper);
906 	}
907 	inode_unlock(udir);
908 
909 	if (err)
910 		goto out;
911 
912 	if (c->metacopy_digest)
913 		ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
914 	else
915 		ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
916 	ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry));
917 
918 	if (!c->metacopy)
919 		ovl_set_upperdata(d_inode(c->dentry));
920 	ovl_inode_update(d_inode(c->dentry), dget(temp));
921 
922 out:
923 	ovl_end_write(c->dentry);
924 out_fput:
925 	fput(tmpfile);
926 	return err;
927 }
928 
929 /*
930  * Copy up a single dentry
931  *
932  * All renames start with copy up of source if necessary.  The actual
933  * rename will only proceed once the copy up was successful.  Copy up uses
934  * upper parent i_mutex for exclusion.  Since rename can change d_parent it
935  * is possible that the copy up will lock the old parent.  At that point
936  * the file will have already been copied up anyway.
937  */
938 static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
939 {
940 	int err;
941 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
942 	struct dentry *origin = c->lowerpath.dentry;
943 	struct ovl_fh *fh = NULL;
944 	bool to_index = false;
945 
946 	/*
947 	 * Indexed non-dir is copied up directly to the index entry and then
948 	 * hardlinked to upper dir. Indexed dir is copied up to indexdir,
949 	 * then the index entry is created and the copied up dir is installed.
950 	 * Copying dir up to indexdir instead of workdir simplifies locking.
951 	 */
952 	if (ovl_need_index(c->dentry)) {
953 		c->indexed = true;
954 		if (S_ISDIR(c->stat.mode))
955 			c->workdir = ovl_indexdir(c->dentry->d_sb);
956 		else
957 			to_index = true;
958 	}
959 
960 	if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) {
961 		fh = ovl_get_origin_fh(ofs, origin);
962 		if (IS_ERR(fh))
963 			return PTR_ERR(fh);
964 
965 		/* origin_fh may be NULL */
966 		c->origin_fh = fh;
967 		c->origin = true;
968 	}
969 
970 	if (to_index) {
971 		c->destdir = ovl_indexdir(c->dentry->d_sb);
972 		err = ovl_get_index_name(ofs, origin, &c->destname);
973 		if (err)
974 			goto out_free_fh;
975 	} else if (WARN_ON(!c->parent)) {
976 		/* Disconnected dentry must be copied up to index dir */
977 		err = -EIO;
978 		goto out_free_fh;
979 	} else {
980 		/*
981 		 * c->dentry->d_name is stabilized by ovl_copy_up_start(),
982 		 * because if we got here, it means that c->dentry has no upper
983 		 * alias and changing ->d_name means going through ovl_rename()
984 		 * that will call ovl_copy_up() on source and target dentry.
985 		 */
986 		c->destname = c->dentry->d_name;
987 		/*
988 		 * Mark parent "impure" because it may now contain non-pure
989 		 * upper
990 		 */
991 		ovl_start_write(c->dentry);
992 		err = ovl_set_impure(c->parent, c->destdir);
993 		ovl_end_write(c->dentry);
994 		if (err)
995 			goto out_free_fh;
996 	}
997 
998 	/* Should we copyup with O_TMPFILE or with workdir? */
999 	if (S_ISREG(c->stat.mode) && ofs->tmpfile)
1000 		err = ovl_copy_up_tmpfile(c);
1001 	else
1002 		err = ovl_copy_up_workdir(c);
1003 	if (err)
1004 		goto out;
1005 
1006 	if (c->indexed)
1007 		ovl_set_flag(OVL_INDEX, d_inode(c->dentry));
1008 
1009 	ovl_start_write(c->dentry);
1010 	if (to_index) {
1011 		/* Initialize nlink for copy up of disconnected dentry */
1012 		err = ovl_set_nlink_upper(c->dentry);
1013 	} else {
1014 		struct inode *udir = d_inode(c->destdir);
1015 
1016 		/* Restore timestamps on parent (best effort) */
1017 		inode_lock(udir);
1018 		ovl_set_timestamps(ofs, c->destdir, &c->pstat);
1019 		inode_unlock(udir);
1020 
1021 		ovl_dentry_set_upper_alias(c->dentry);
1022 		ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
1023 	}
1024 	ovl_end_write(c->dentry);
1025 
1026 out:
1027 	if (to_index)
1028 		kfree(c->destname.name);
1029 out_free_fh:
1030 	kfree(fh);
1031 	return err;
1032 }
1033 
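/*
 * Decide whether only metadata should be copied up for now: metacopy must be
 * enabled, the file must be regular and not opened for write or truncate,
 * and if verity is required, the lower data must have fsverity enabled.
 */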
1034 static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
1035 				  int flags)
1036 {
1037 	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
1038 
1039 	if (!ofs->config.metacopy)
1040 		return false;
1041 
1042 	if (!S_ISREG(mode))
1043 		return false;
1044 
1045 	if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)))
1046 		return false;
1047 
1048 	/* Fall back to full copy if no fsverity on source data and we require verity */
1049 	if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) {
1050 		struct path lowerdata;
1051 
1052 		ovl_path_lowerdata(dentry, &lowerdata);
1053 
1054 		if (WARN_ON_ONCE(lowerdata.dentry == NULL) ||
1055 		    ovl_ensure_verity_loaded(&lowerdata) ||
1056 		    !fsverity_active(d_inode(lowerdata.dentry))) {
1057 			return false;
1058 		}
1059 	}
1060 
1061 	return true;
1062 }
1063 
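/*
 * Get the value of xattr @name into a newly allocated buffer.  Returns the
 * value size, 0 if the xattr is absent or unsupported, or a negative error.
 */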
1064 static ssize_t ovl_getxattr_value(const struct path *path, char *name, char **value)
1065 {
1066 	ssize_t res;
1067 	char *buf;
1068 
1069 	res = ovl_do_getxattr(path, name, NULL, 0);
1070 	if (res == -ENODATA || res == -EOPNOTSUPP)
1071 		res = 0;
1072 
1073 	if (res > 0) {
1074 		buf = kzalloc(res, GFP_KERNEL);
1075 		if (!buf)
1076 			return -ENOMEM;
1077 
1078 		res = ovl_do_getxattr(path, name, buf, res);
1079 		if (res < 0)
1080 			kfree(buf);
1081 		else
1082 			*value = buf;
1083 	}
1084 	return res;
1085 }
1086 
1087 /* Copy up data of an inode which was copied up metadata only in the past. */
1088 static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
1089 {
1090 	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
1091 	struct path upperpath;
1092 	int err;
1093 	char *capability = NULL;
1094 	ssize_t cap_size;
1095 
1096 	ovl_path_upper(c->dentry, &upperpath);
1097 	if (WARN_ON(upperpath.dentry == NULL))
1098 		return -EIO;
1099 
1100 	if (c->stat.size) {
1101 		err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS,
1102 						    &capability);
1103 		if (cap_size < 0)
1104 			goto out;
1105 	}
1106 
1107 	err = ovl_copy_up_data(c, &upperpath);
1108 	if (err)
1109 		goto out_free;
1110 
1111 	/*
1112 	 * Writing to upper file will clear security.capability xattr. We
1113 	 * don't want that to happen for normal copy-up operation.
1114 	 */
1115 	ovl_start_write(c->dentry);
1116 	if (capability) {
1117 		err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
1118 				      capability, cap_size, 0);
1119 	}
1120 	if (!err) {
1121 		err = ovl_removexattr(ofs, upperpath.dentry,
1122 				      OVL_XATTR_METACOPY);
1123 	}
1124 	ovl_end_write(c->dentry);
1125 	if (err)
1126 		goto out_free;
1127 
1128 	ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
1129 	ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry));
1130 	ovl_set_upperdata(d_inode(c->dentry));
1131 out_free:
1132 	kfree(capability);
1133 out:
1134 	return err;
1135 }
1136 
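/*
 * Copy up @dentry below an already copied up @parent: gather lower stat and
 * link target, then do the actual copy up serialized by ovl_copy_up_start()
 * and ovl_copy_up_end().
 */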
1137 static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
1138 			   int flags)
1139 {
1140 	int err;
1141 	DEFINE_DELAYED_CALL(done);
1142 	struct path parentpath;
1143 	struct ovl_copy_up_ctx ctx = {
1144 		.parent = parent,
1145 		.dentry = dentry,
1146 		.workdir = ovl_workdir(dentry),
1147 	};
1148 
1149 	if (WARN_ON(!ctx.workdir))
1150 		return -EROFS;
1151 
1152 	ovl_path_lower(dentry, &ctx.lowerpath);
1153 	err = vfs_getattr(&ctx.lowerpath, &ctx.stat,
1154 			  STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
1155 	if (err)
1156 		return err;
1157 
1158 	if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
1159 	    !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
1160 		return -EOVERFLOW;
1161 
1162 	/*
1163 	 * With metacopy disabled, we fsync after final metadata copyup, for
1164 	 * both regular files and directories to get atomic copyup semantics
1165 	 * on filesystems that do not use strict metadata ordering (e.g. ubifs).
1166 	 *
1167 	 * With metacopy enabled we want to avoid an fsync for every metadata
1168 	 * copyup, which would hurt performance of workloads such as chown -R,
1169 	 * so we only fsync on data copyup, preserving the legacy behavior.
1170 	 */
1171 	ctx.metadata_fsync = !OVL_FS(dentry->d_sb)->config.metacopy &&
1172 			     (S_ISREG(ctx.stat.mode) || S_ISDIR(ctx.stat.mode));
1173 	ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
1174 
1175 	if (parent) {
1176 		ovl_path_upper(parent, &parentpath);
1177 		ctx.destdir = parentpath.dentry;
1178 
1179 		err = vfs_getattr(&parentpath, &ctx.pstat,
1180 				  STATX_ATIME | STATX_MTIME,
1181 				  AT_STATX_SYNC_AS_STAT);
1182 		if (err)
1183 			return err;
1184 	}
1185 
1186 	/* maybe truncate regular file. this has no effect on dirs */
1187 	if (flags & O_TRUNC)
1188 		ctx.stat.size = 0;
1189 
1190 	if (S_ISLNK(ctx.stat.mode)) {
1191 		ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done);
1192 		if (IS_ERR(ctx.link))
1193 			return PTR_ERR(ctx.link);
1194 	}
1195 
1196 	err = ovl_copy_up_start(dentry, flags);
1197 	/* err < 0: interrupted, err > 0: raced with another copy-up */
1198 	if (unlikely(err)) {
1199 		if (err > 0)
1200 			err = 0;
1201 	} else {
1202 		if (!ovl_dentry_upper(dentry))
1203 			err = ovl_do_copy_up(&ctx);
1204 		if (!err && parent && !ovl_dentry_has_upper_alias(dentry))
1205 			err = ovl_link_up(&ctx);
1206 		if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags))
1207 			err = ovl_copy_up_meta_inode_data(&ctx);
1208 		ovl_copy_up_end(dentry);
1209 	}
1210 	do_delayed_call(&done);
1211 
1212 	return err;
1213 }
1214 
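/*
 * Copy up @dentry, first copying up any ancestor that does not yet have an
 * upper dentry, topmost first.
 */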
1215 static int ovl_copy_up_flags(struct dentry *dentry, int flags)
1216 {
1217 	int err = 0;
1218 	const struct cred *old_cred;
1219 	bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
1220 
1221 	/*
1222 	 * With NFS export, copy up can get called for a disconnected non-dir.
1223 	 * In this case, we will copy up lower inode to index dir without
1224 	 * linking it to upper dir.
1225 	 */
1226 	if (WARN_ON(disconnected && d_is_dir(dentry)))
1227 		return -EIO;
1228 
1229 	/*
1230 	 * We may not need lowerdata if we are only doing metacopy up, but it is
1231 	 * not very important to optimize this case, so do lazy lowerdata lookup
1232 	 * before any copy up, so we can do it before taking ovl_inode_lock().
1233 	 */
1234 	err = ovl_verify_lowerdata(dentry);
1235 	if (err)
1236 		return err;
1237 
1238 	old_cred = ovl_override_creds(dentry->d_sb);
1239 	while (!err) {
1240 		struct dentry *next;
1241 		struct dentry *parent = NULL;
1242 
1243 		if (ovl_already_copied_up(dentry, flags))
1244 			break;
1245 
1246 		next = dget(dentry);
1247 		/* find the topmost dentry not yet copied up */
1248 		for (; !disconnected;) {
1249 			parent = dget_parent(next);
1250 
1251 			if (ovl_dentry_upper(parent))
1252 				break;
1253 
1254 			dput(next);
1255 			next = parent;
1256 		}
1257 
1258 		err = ovl_copy_up_one(parent, next, flags);
1259 
1260 		dput(parent);
1261 		dput(next);
1262 	}
1263 	revert_creds(old_cred);
1264 
1265 	return err;
1266 }
1267 
1268 static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
1269 {
1270 	/* Copy up of disconnected dentry does not set upper alias */
1271 	if (ovl_already_copied_up(dentry, flags))
1272 		return false;
1273 
1274 	if (special_file(d_inode(dentry)->i_mode))
1275 		return false;
1276 
1277 	if (!ovl_open_flags_need_copy_up(flags))
1278 		return false;
1279 
1280 	return true;
1281 }
1282 
1283 int ovl_maybe_copy_up(struct dentry *dentry, int flags)
1284 {
1285 	if (!ovl_open_need_copy_up(dentry, flags))
1286 		return 0;
1287 
1288 	return ovl_copy_up_flags(dentry, flags);
1289 }
1290 
1291 int ovl_copy_up_with_data(struct dentry *dentry)
1292 {
1293 	return ovl_copy_up_flags(dentry, O_WRONLY);
1294 }
1295 
1296 int ovl_copy_up(struct dentry *dentry)
1297 {
1298 	return ovl_copy_up_flags(dentry, 0);
1299 }
1300