xref: /linux/fs/stat.c (revision d9afbb3509900a953f5cf90bc57e793ee80c1108)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/stat.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/export.h>
9 #include <linux/mm.h>
10 #include <linux/errno.h>
11 #include <linux/file.h>
12 #include <linux/highuid.h>
13 #include <linux/fs.h>
14 #include <linux/namei.h>
15 #include <linux/security.h>
16 #include <linux/cred.h>
17 #include <linux/syscalls.h>
18 #include <linux/pagemap.h>
19 #include <linux/compat.h>
20 
21 #include <linux/uaccess.h>
22 #include <asm/unistd.h>
23 
24 #include "internal.h"
25 #include "mount.h"
26 
27 /**
28  * generic_fillattr - Fill in the basic attributes from the inode struct
29  * @inode: Inode to use as the source
30  * @stat: Where to fill in the attributes
31  *
32  * Fill in the basic attributes in the kstat structure from data that's to be
33  * found on the VFS inode structure.  This is the default if no getattr inode
34  * operation is supplied.
35  */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	/* Basic attributes come straight from the inode/superblock fields. */
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	/* i_size is sampled through i_size_read() rather than read directly */
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);
53 
54 /**
55  * vfs_getattr_nosec - getattr without security checks
56  * @path: file to get attributes from
57  * @stat: structure to return attributes in
58  * @request_mask: STATX_xxx flags indicating what the caller wants
59  * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
60  *
61  * Get attributes without calling security_inode_getattr.
62  *
63  * Currently the only caller other than vfs_getattr is internal to the
64  * filehandle lookup code, which uses only the inode number and returns no
65  * attributes to any user.  Any other code probably wants vfs_getattr.
66  */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_backing_inode(path->dentry);

	/* Start from a clean slate; the basic stats are always provided. */
	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= KSTAT_QUERY_FLAGS;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	/* A filesystem ->getattr() takes over filling the kstat entirely. */
	if (inode->i_op->getattr)
		return inode->i_op->getattr(path, stat, request_mask,
					    query_flags);

	/* No ->getattr(): fall back to the generic inode attributes. */
	generic_fillattr(inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
91 
92 /*
93  * vfs_getattr - Get the enhanced basic attributes of a file
94  * @path: The file of interest
95  * @stat: Where to return the statistics
96  * @request_mask: STATX_xxx flags indicating what the caller wants
97  * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
98  *
99  * Ask the filesystem for a file's attributes.  The caller must indicate in
100  * request_mask and query_flags to indicate what they want.
101  *
102  * If the file is remote, the filesystem can be forced to update the attributes
103  * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
104  * suppress the update by passing AT_STATX_DONT_SYNC.
105  *
106  * Bits must have been set in request_mask to indicate which attributes the
107  * caller wants retrieving.  Any such attribute not requested may be returned
108  * anyway, but the value may be approximate, and, if remote, may not have been
109  * synchronised with the server.
110  *
111  * 0 will be returned on success, and a -ve error code if unsuccessful.
112  */
113 int vfs_getattr(const struct path *path, struct kstat *stat,
114 		u32 request_mask, unsigned int query_flags)
115 {
116 	int retval;
117 
118 	retval = security_inode_getattr(path);
119 	if (retval)
120 		return retval;
121 	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
122 }
123 EXPORT_SYMBOL(vfs_getattr);
124 
125 /**
126  * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
127  * @fd: The file descriptor referring to the file of interest
128  * @stat: The result structure to fill in.
129  * @request_mask: STATX_xxx flags indicating what the caller wants
130  * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
131  *
132  * This function is a wrapper around vfs_getattr().  The main difference is
133  * that it uses a file descriptor to determine the file location.
134  *
135  * 0 will be returned on success, and a -ve error code if unsuccessful.
136  */
137 int vfs_statx_fd(unsigned int fd, struct kstat *stat,
138 		 u32 request_mask, unsigned int query_flags)
139 {
140 	struct fd f;
141 	int error = -EBADF;
142 
143 	if (query_flags & ~KSTAT_QUERY_FLAGS)
144 		return -EINVAL;
145 
146 	f = fdget_raw(fd);
147 	if (f.file) {
148 		error = vfs_getattr(&f.file->f_path, stat,
149 				    request_mask, query_flags);
150 		fdput(f);
151 	}
152 	return error;
153 }
154 EXPORT_SYMBOL(vfs_statx_fd);
155 
156 static inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags,
157 						 int flags)
158 {
159 	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
160 		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
161 		return -EINVAL;
162 
163 	*lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
164 	if (flags & AT_SYMLINK_NOFOLLOW)
165 		*lookup_flags &= ~LOOKUP_FOLLOW;
166 	if (flags & AT_NO_AUTOMOUNT)
167 		*lookup_flags &= ~LOOKUP_AUTOMOUNT;
168 	if (flags & AT_EMPTY_PATH)
169 		*lookup_flags |= LOOKUP_EMPTY;
170 
171 	return 0;
172 }
173 
174 /**
175  * vfs_statx - Get basic and extra attributes by filename
176  * @dfd: A file descriptor representing the base dir for a relative filename
177  * @filename: The name of the file of interest
178  * @flags: Flags to control the query
179  * @stat: The result structure to fill in.
180  * @request_mask: STATX_xxx flags indicating what the caller wants
181  *
182  * This function is a wrapper around vfs_getattr().  The main difference is
183  * that it uses a filename and base directory to determine the file location.
184  * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
185  * at the given name from being referenced.
186  *
187  * 0 will be returned on success, and a -ve error code if unsuccessful.
188  */
int vfs_statx(int dfd, const char __user *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	int error = -EINVAL;
	unsigned lookup_flags;

	if (vfs_stat_set_lookup_flags(&lookup_flags, flags))
		return -EINVAL;
retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	/* VFS-level information the filesystem's ->getattr cannot supply. */
	stat->mnt_id = real_mount(path.mnt)->mnt_id;
	stat->result_mask |= STATX_MNT_ID;
	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
	path_put(&path);
	/* One retry with revalidation if the cached dentry went stale. */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_statx);
218 
219 
220 #ifdef __ARCH_WANT_OLD_STAT
221 
222 /*
223  * For backward compatibility?  Maybe this should be moved
224  * into arch/i386 instead?
225  */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	/* Nag the first few users of the ancient syscall, then stay quiet. */
	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Inode numbers may be wider than the legacy field; never truncate. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	/* Non-LFS interface: reject sizes that need more than 32 bits. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
262 
263 SYSCALL_DEFINE2(stat, const char __user *, filename,
264 		struct __old_kernel_stat __user *, statbuf)
265 {
266 	struct kstat stat;
267 	int error;
268 
269 	error = vfs_stat(filename, &stat);
270 	if (error)
271 		return error;
272 
273 	return cp_old_stat(&stat, statbuf);
274 }
275 
276 SYSCALL_DEFINE2(lstat, const char __user *, filename,
277 		struct __old_kernel_stat __user *, statbuf)
278 {
279 	struct kstat stat;
280 	int error;
281 
282 	error = vfs_lstat(filename, &stat);
283 	if (error)
284 		return error;
285 
286 	return cp_old_stat(&stat, statbuf);
287 }
288 
289 SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
290 {
291 	struct kstat stat;
292 	int error = vfs_fstat(fd, &stat);
293 
294 	if (!error)
295 		error = cp_old_stat(&stat, statbuf);
296 
297 	return error;
298 }
299 
300 #endif /* __ARCH_WANT_OLD_STAT */
301 
302 #ifdef __ARCH_WANT_NEW_STAT
303 
/*
 * choose_32_64(a,b) expands to @a on 32-bit kernels and @b on 64-bit ones,
 * selected at compile time.
 */
#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

/* On 32-bit, device numbers must fit the old (small) dev_t encoding. */
#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

/* Overridable (see the #ifndef guard) per-arch struct stat padding init. */
#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
316 
/*
 * Convert a kstat to the native 'struct stat' layout and copy it out to
 * userspace.  Returns 0, -EOVERFLOW if a value cannot be represented, or
 * -EFAULT on a bad user buffer.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	/* Non-LFS interface: reject sizes that need more than 32 bits. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Inode numbers may be wider than st_ino; never truncate silently. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
353 
354 SYSCALL_DEFINE2(newstat, const char __user *, filename,
355 		struct stat __user *, statbuf)
356 {
357 	struct kstat stat;
358 	int error = vfs_stat(filename, &stat);
359 
360 	if (error)
361 		return error;
362 	return cp_new_stat(&stat, statbuf);
363 }
364 
365 SYSCALL_DEFINE2(newlstat, const char __user *, filename,
366 		struct stat __user *, statbuf)
367 {
368 	struct kstat stat;
369 	int error;
370 
371 	error = vfs_lstat(filename, &stat);
372 	if (error)
373 		return error;
374 
375 	return cp_new_stat(&stat, statbuf);
376 }
377 
378 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
379 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
380 		struct stat __user *, statbuf, int, flag)
381 {
382 	struct kstat stat;
383 	int error;
384 
385 	error = vfs_fstatat(dfd, filename, &stat, flag);
386 	if (error)
387 		return error;
388 	return cp_new_stat(&stat, statbuf);
389 }
390 #endif
391 
392 SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
393 {
394 	struct kstat stat;
395 	int error = vfs_fstat(fd, &stat);
396 
397 	if (!error)
398 		error = cp_new_stat(&stat, statbuf);
399 
400 	return error;
401 }
402 #endif
403 
/*
 * Common implementation of readlink(2)/readlinkat(2): look up @pathname
 * relative to @dfd without following the final symlink and copy the link
 * target into @buf, at most @bufsiz bytes.
 */
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		/* Default errors if the target turns out not to be a link. */
		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		/* One retry with revalidation if the dentry went stale. */
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}
439 
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	/* All the work happens in do_readlinkat(). */
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}
445 
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	/* readlink(2) is readlinkat(2) relative to the current directory. */
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
451 
452 
453 /* ---------- LFS-64 ----------- */
454 #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
455 
456 #ifndef INIT_STRUCT_STAT64_PADDING
457 #  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
458 #endif
459 
/*
 * Convert a kstat to the LFS 'struct stat64' layout and copy it out to
 * userspace.  Returns 0, -EOVERFLOW (inode number too wide for st_ino),
 * or -EFAULT on a bad user buffer.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	/* Some ABIs look at __st_ino instead of st_ino; fill both. */
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
494 
495 SYSCALL_DEFINE2(stat64, const char __user *, filename,
496 		struct stat64 __user *, statbuf)
497 {
498 	struct kstat stat;
499 	int error = vfs_stat(filename, &stat);
500 
501 	if (!error)
502 		error = cp_new_stat64(&stat, statbuf);
503 
504 	return error;
505 }
506 
507 SYSCALL_DEFINE2(lstat64, const char __user *, filename,
508 		struct stat64 __user *, statbuf)
509 {
510 	struct kstat stat;
511 	int error = vfs_lstat(filename, &stat);
512 
513 	if (!error)
514 		error = cp_new_stat64(&stat, statbuf);
515 
516 	return error;
517 }
518 
519 SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
520 {
521 	struct kstat stat;
522 	int error = vfs_fstat(fd, &stat);
523 
524 	if (!error)
525 		error = cp_new_stat64(&stat, statbuf);
526 
527 	return error;
528 }
529 
530 SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
531 		struct stat64 __user *, statbuf, int, flag)
532 {
533 	struct kstat stat;
534 	int error;
535 
536 	error = vfs_fstatat(dfd, filename, &stat, flag);
537 	if (error)
538 		return error;
539 	return cp_new_stat64(&stat, statbuf);
540 }
541 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
542 
/*
 * Copy a kstat out to userspace as a 'struct statx'.  The temporary is
 * fully zeroed first so no uninitialized stack bytes (padding or unset
 * fields) ever reach userspace; noinline_for_stack keeps the sizeable
 * temporary out of the caller's stack frame.
 */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	/* UIDs/GIDs are translated into the caller's user namespace. */
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	/* Device numbers are split into major/minor halves for statx. */
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
577 
578 int do_statx(int dfd, const char __user *filename, unsigned flags,
579 	     unsigned int mask, struct statx __user *buffer)
580 {
581 	struct kstat stat;
582 	int error;
583 
584 	if (mask & STATX__RESERVED)
585 		return -EINVAL;
586 	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
587 		return -EINVAL;
588 
589 	error = vfs_statx(dfd, filename, flags, &stat, mask);
590 	if (error)
591 		return error;
592 
593 	return cp_statx(&stat, buffer);
594 }
595 
596 /**
597  * sys_statx - System call to get enhanced stats
598  * @dfd: Base directory to pathwalk from *or* fd to stat.
599  * @filename: File to stat or "" with AT_EMPTY_PATH
600  * @flags: AT_* flags to control pathwalk.
601  * @mask: Parts of statx struct actually required.
602  * @buffer: Result buffer.
603  *
604  * Note that fstat() can be emulated by setting dfd to the fd of interest,
605  * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
606  */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	/* All the work happens in do_statx(). */
	return do_statx(dfd, filename, flags, mask, buffer);
}
614 
615 #ifdef CONFIG_COMPAT
/*
 * Convert a kstat to the 32-bit 'struct compat_stat' layout and copy it
 * out to userspace.  Returns 0, -EOVERFLOW if a value cannot be
 * represented in the compat fields, or -EFAULT on a bad user buffer.
 */
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	/* Compat ABI uses the old (small) device-number encoding. */
	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	/* Non-LFS compat interface: reject sizes beyond 32-bit off_t. */
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
648 
649 COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
650 		       struct compat_stat __user *, statbuf)
651 {
652 	struct kstat stat;
653 	int error;
654 
655 	error = vfs_stat(filename, &stat);
656 	if (error)
657 		return error;
658 	return cp_compat_stat(&stat, statbuf);
659 }
660 
661 COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
662 		       struct compat_stat __user *, statbuf)
663 {
664 	struct kstat stat;
665 	int error;
666 
667 	error = vfs_lstat(filename, &stat);
668 	if (error)
669 		return error;
670 	return cp_compat_stat(&stat, statbuf);
671 }
672 
673 #ifndef __ARCH_WANT_STAT64
674 COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
675 		       const char __user *, filename,
676 		       struct compat_stat __user *, statbuf, int, flag)
677 {
678 	struct kstat stat;
679 	int error;
680 
681 	error = vfs_fstatat(dfd, filename, &stat, flag);
682 	if (error)
683 		return error;
684 	return cp_compat_stat(&stat, statbuf);
685 }
686 #endif
687 
688 COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
689 		       struct compat_stat __user *, statbuf)
690 {
691 	struct kstat stat;
692 	int error = vfs_fstat(fd, &stat);
693 
694 	if (!error)
695 		error = cp_compat_stat(&stat, statbuf);
696 	return error;
697 }
698 #endif
699 
700 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	/* i_blocks counts 512-byte units; i_bytes holds the sub-512 rest. */
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	/* Carry into i_blocks if the byte remainder reached a full block. */
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);
712 
/* Locked wrapper around __inode_add_bytes(). */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
721 
/* Caller is responsible for locking (ie. inode->i_lock), as above. */
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	/* Borrow one 512-byte block if the remainder would go negative. */
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);
734 
/* Locked wrapper around __inode_sub_bytes(). */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);
743 
/* Read the inode's byte count under i_lock for a consistent snapshot. */
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);
755 
/* Split @bytes into whole 512-byte blocks (i_blocks) plus rest (i_bytes). */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);
765