// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @mnt_userns:	user namespace of the mount the inode was found from
 * @inode:	Inode to use as the source
 * @stat:	Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass init_user_ns.
 */
void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
		      struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = i_uid_into_mnt(mnt_userns, inode);
	stat->gid = i_gid_into_mnt(mnt_userns, inode);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);
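
/*
 * Illustrative sketch (not part of this file): a filesystem that needs to
 * report, say, a synthetic block count would typically let generic_fillattr()
 * do the bulk of the work from its ->getattr() method and then override the
 * fields it cares about.  The "foofs" names below are hypothetical.
 */
#if 0	/* example only */
static int foofs_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* Fill in the basic attributes from the VFS inode first. */
	generic_fillattr(mnt_userns, inode, stat);
	/* Then adjust whatever this filesystem computes differently. */
	stat->blocks = 0;	/* e.g. a purely virtual file */
	return 0;
}
#endif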

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct user_namespace *mnt_userns;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	mnt_userns = mnt_user_ns(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(mnt_userns, path, stat,
					    request_mask, query_flags);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what it wants.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Any attribute that was not requested may be
 * returned anyway, but the value may be approximate, and, if remote, may not
 * have been synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
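
/*
 * Illustrative sketch (not part of this file): an in-kernel caller that wants
 * the birth time and is willing to force a round trip to the server on a
 * remote filesystem could call vfs_getattr() like this.  The function name
 * and the -EOPNOTSUPP policy are hypothetical; only the vfs_getattr() call
 * itself is real.
 */
#if 0	/* example only */
static int example_query_btime(const struct path *path,
			       struct timespec64 *btime)
{
	struct kstat stat;
	int error;

	error = vfs_getattr(path, &stat, STATX_BTIME, AT_STATX_FORCE_SYNC);
	if (error)
		return error;
	if (!(stat.result_mask & STATX_BTIME))
		return -EOPNOTSUPP;	/* filesystem could not supply it */
	*btime = stat.btime;
	return 0;
}
#endif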

/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}
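
/*
 * Illustrative sketch (not part of this file): in-kernel users that only have
 * a file descriptor can fetch the basic attributes through vfs_fstat().  The
 * helper name below is hypothetical.
 */
#if 0	/* example only */
static loff_t example_fd_size(int fd)
{
	struct kstat stat;
	int error;

	error = vfs_fstat(fd, &stat);
	if (error)
		return error;
	return stat.size;
}
#endif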

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned lookup_flags = 0;
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	stat->mnt_id = real_mount(path.mnt)->mnt_id;
	stat->result_mask |= STATX_MNT_ID;
	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
			      struct kstat *stat, int flags)
{
	return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
			 stat, STATX_BASIC_STATS);
}

#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
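
/*
 * For illustration: on a 32-bit architecture valid_dev(x) expands to
 * old_valid_dev(x) and encode_dev(x) to old_encode_dev(x), so a device
 * number that does not fit the legacy 16-bit encoding makes cp_new_stat()
 * fail with -EOVERFLOW; on 64-bit, valid_dev() is always true and the wider
 * new_encode_dev() encoding is used.
 */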

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
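
/*
 * Userspace example (illustrative, not part of this file): readlink(2) and
 * readlinkat(2) return the number of bytes written to the buffer and do not
 * NUL-terminate it, so callers must terminate the string themselves:
 *
 *	char target[PATH_MAX];
 *	ssize_t len = readlinkat(AT_FDCWD, "/proc/self/exe", target,
 *				 sizeof(target) - 1);
 *	if (len >= 0)
 *		target[len] = '\0';
 */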


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, const char __user *filename, unsigned flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	return do_statx(dfd, filename, flags, mask, buffer);
}
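
/*
 * Userspace example (illustrative, not part of this file): emulating fstat()
 * as described above, by combining the fd of interest with an empty path:
 *
 *	struct statx stx;
 *
 *	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) == 0)
 *		printf("size=%llu\n", (unsigned long long)stx.stx_size);
 */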

#ifdef CONFIG_COMPAT
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is responsible for sufficient locking (i.e. holding inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
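
/*
 * Illustrative note (not part of this file): i_blocks counts 512-byte units
 * and i_bytes holds the sub-512-byte remainder, so adding e.g. 600 bytes
 * increments i_blocks by 1 (600 >> 9) and i_bytes by 88, with any carry
 * folded back into i_blocks once i_bytes reaches 512.  A filesystem that
 * just allocated a 4096-byte block would typically account for it with:
 *
 *	inode_add_bytes(inode, 4096);
 */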

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is responsible for sufficient locking
	 * (i.e. holding inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);