// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @mnt_userns:	user namespace of the mount the inode was found from
 * @inode:	Inode to use as the source
 * @stat:	Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode, simply pass init_user_ns.
 */
void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
		      struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = i_uid_into_mnt(mnt_userns, inode);
	stat->gid = i_gid_into_mnt(mnt_userns, inode);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode:	Inode to use as the source
 * @stat:	Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

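/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal filesystem ->getattr() implementation would typically start
 * from generic_fillattr() and generic_fill_statx_attr() and then layer any
 * filesystem-specific information on top.  The names prefixed with
 * "example_" are hypothetical.
 */
#if 0
static int example_getattr(struct user_namespace *mnt_userns,
			   const struct path *path, struct kstat *stat,
			   u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* Basic attributes, with uid/gid mapped through the idmapped mount. */
	generic_fillattr(mnt_userns, inode, stat);
	/* Publish the VFS-enforced inode flags (immutable, append-only). */
	generic_fill_statx_attr(inode, stat);

	/* A filesystem that records a creation time could report it here. */
	if (request_mask & STATX_BTIME) {
		stat->btime = example_inode_btime(inode);	/* hypothetical */
		stat->result_mask |= STATX_BTIME;
	}
	return 0;
}
#endif
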
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct user_namespace *mnt_userns;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	mnt_userns = mnt_user_ns(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(mnt_userns, path, stat,
					    request_mask, query_flags);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must use
 * request_mask and query_flags to indicate what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Attributes that were not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

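/*
 * Illustrative sketch (editor's addition): how an in-kernel caller might ask
 * for an up-to-date size and birth time through vfs_getattr().  The wrapper
 * name example_query_btime() is hypothetical; the flag semantics follow the
 * comment above.
 */
#if 0
static int example_query_btime(const struct path *path,
			       struct timespec64 *btime)
{
	struct kstat stat;
	int error;

	/* Force a remote filesystem to revalidate before answering. */
	error = vfs_getattr(path, &stat, STATX_BTIME | STATX_SIZE,
			    AT_STATX_FORCE_SYNC);
	if (error)
		return error;

	/* The filesystem may not support a birth time at all. */
	if (!(stat.result_mask & STATX_BTIME))
		return -EOPNOTSUPP;

	*btime = stat.btime;
	return 0;
}
#endif
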
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

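/*
 * Illustrative sketch (editor's addition): a caller that only needs one
 * field, such as the file size, can still go through vfs_fstat() and pick it
 * out of the kstat.  example_fd_size() is a hypothetical helper.
 */
#if 0
static int example_fd_size(int fd, loff_t *sizep)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (error)
		return error;
	*sizep = stat.size;
	return 0;
}
#endif
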
/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned lookup_flags = 0;
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	stat->mnt_id = real_mount(path.mnt)->mnt_id;
	stat->result_mask |= STATX_MNT_ID;
	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
			      struct kstat *stat, int flags)
{
	return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
			 stat, STATX_BASIC_STATS);
}

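/*
 * Illustrative sketch (editor's addition): an lstat()-style query, i.e. one
 * that reports on a symlink itself rather than its target, maps onto
 * vfs_fstatat() with AT_SYMLINK_NOFOLLOW.  example_lstat_mode() is a
 * hypothetical helper.
 */
#if 0
static int example_lstat_mode(const char __user *filename, umode_t *modep)
{
	struct kstat stat;
	int error = vfs_fstatat(AT_FDCWD, filename, &stat,
				AT_SYMLINK_NOFOLLOW);

	if (error)
		return error;
	*modep = stat.mode;	/* S_ISLNK(*modep) is true for a symlink */
	return 0;
}
#endif
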
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

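/*
 * Illustrative sketch (editor's addition): the pattern used by cp_old_stat()
 * and cp_new_stat() above for catching truncation.  Assigning into the
 * narrower userspace field and comparing back detects any loss of bits, and
 * the sizeof() test lets the compiler discard the comparison entirely when
 * the destination is wide enough.  example_fit_u16() is hypothetical.
 */
#if 0
static int example_fit_u16(u64 value, u16 *out)
{
	u16 narrow = value;

	if (sizeof(narrow) < sizeof(value) && narrow != value)
		return -EOVERFLOW;	/* e.g. value = 0x12345 does not fit */
	*out = narrow;
	return 0;
}
#endif
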
SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, const char __user *filename, unsigned flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	return do_statx(dfd, filename, flags, mask, buffer);
}

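/*
 * Illustrative sketch (editor's addition): the fstat()-style usage described
 * in the comment above, as seen from userspace through the statx(2) wrapper.
 * This is not kernel code; it assumes the glibc statx() wrapper is available.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	struct statx stx;
 *
 *	// Stat the open descriptor itself: empty path plus AT_EMPTY_PATH.
 *	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS | STATX_BTIME,
 *		  &stx) == 0 && (stx.stx_mask & STATX_BTIME))
 *		printf("born at %lld\n", (long long)stx.stx_btime.tv_sec);
 */
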
#ifdef CONFIG_COMPAT
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* The caller is responsible for sufficient locking (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

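/*
 * Worked example (editor's addition): i_blocks counts 512-byte units and
 * i_bytes holds the remainder (0..511).  Starting from i_blocks = 2 and
 * i_bytes = 300, a call to __inode_add_bytes(inode, 1000) adds 1000 >> 9 = 1
 * block, leaves bytes &= 511 = 488, and since 300 + 488 = 788 >= 512 it
 * carries once more: the result is i_blocks = 4, i_bytes = 276.
 */
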
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* The caller is responsible for sufficient locking
	 * (i.e. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);