// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);
	stat->mtime = inode_get_mtime(inode);
	stat->ctime = inode_get_ctime(inode);
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);
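
/*
 * Illustrative sketch (not part of this file): a filesystem that needs more
 * than the defaults typically implements ->getattr() by filling in the common
 * fields with the helpers above and then adding its own extras.  "examplefs"
 * and its creation-time field are hypothetical; the helper calls themselves
 * match the functions defined above.
 *
 *	static int examplefs_getattr(struct mnt_idmap *idmap,
 *				     const struct path *path, struct kstat *stat,
 *				     u32 request_mask, unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(idmap, request_mask, inode, stat);
 *		generic_fill_statx_attr(inode, stat);
 *		if (request_mask & STATX_BTIME) {
 *			stat->btime = EXAMPLEFS_I(inode)->i_crtime; // hypothetical
 *			stat->result_mask |= STATX_BTIME;
 *		}
 *		return 0;
 *	}
 */
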
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask,
					    query_flags | AT_GETATTR_NOSEC);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/**
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Attributes that were not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
		return -EPERM;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
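
/*
 * Illustrative sketch (not part of this file): an in-kernel caller that wants
 * attributes refreshed from a remote server before relying on them could do
 * something like the following; only the bits set in stat.result_mask on
 * return are guaranteed to be valid.  use_size() is hypothetical.
 *
 *	struct kstat stat;
 *	int err;
 *
 *	err = vfs_getattr(&file->f_path, &stat,
 *			  STATX_SIZE | STATX_MTIME, AT_STATX_FORCE_SYNC);
 *	if (err)
 *		return err;
 *	if (stat.result_mask & STATX_SIZE)
 *		use_size(stat.size);
 */
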
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);

	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path.mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/* Handle STATX_DIOALIGN for block devices. */
	if (request_mask & STATX_DIOALIGN) {
		struct inode *inode = d_backing_inode(path.dentry);

		if (S_ISBLK(inode->i_mode))
			bdev_statx_dioalign(inode, stat);
	}

	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	/*
	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
	 *
	 * If AT_EMPTY_PATH is set, we expect the common case to be that
	 * empty path, and avoid doing all the extra pathname work.
	 */
	if (dfd >= 0 && flags == AT_EMPTY_PATH) {
		char c;

		ret = get_user(c, filename);
		if (unlikely(ret))
			return ret;

		if (likely(!c))
			return vfs_fstat(dfd, stat);
	}

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}
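
/*
 * Illustrative note on the AT_EMPTY_PATH fast path above: some glibc versions
 * implement fstat() in terms of fstatat(), so a plain userspace
 *
 *	fstat(fd, &st);
 *
 * arrives in the kernel as the equivalent of
 *
 *	fstatat(fd, "", &st, AT_EMPTY_PATH);
 *
 * Checking for an empty path first lets vfs_fstatat() skip getname() and the
 * full path walk for that common case and go straight to vfs_fstat().
 */
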
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */
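
/*
 * Note on the device number encodings used by the cp_*_stat() helpers below
 * (a hedged recap of <linux/kdev_t.h>, not new behaviour): old_encode_dev()
 * packs major and minor into 8 bits each, which is why old_valid_dev() is
 * checked before it is used where st_dev/st_rdev are narrow; new_encode_dev()
 * keeps the low 8 bits of the minor in place, puts a 12-bit major above them
 * and the remaining minor bits above that; huge_encode_dev() is the same
 * encoding widened to 64 bits.  For example, MKDEV(259, 5) cannot be
 * represented in the old format (major >= 256) but new_encode_dev() yields
 * 0x10305.
 */
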
#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
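
/*
 * Illustrative sketch (not part of this file) of the userspace contract for
 * the readlink calls above: the link body is not NUL-terminated and the
 * return value is the number of bytes placed in the buffer, so callers
 * terminate it themselves:
 *
 *	char buf[PATH_MAX];
 *	ssize_t n = readlinkat(AT_FDCWD, "/proc/self/exe", buf, sizeof(buf) - 1);
 *
 *	if (n >= 0)
 *		buf[n] = '\0';
 */
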
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;
	tmp.stx_subvol = stat->subvol;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}
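
/*
 * Illustrative sketch (not part of this file): from userspace, the fstat()
 * emulation mentioned in the kernel-doc above looks like this (the statx()
 * wrapper is exposed by glibc >= 2.28):
 *
 *	struct statx stx;
 *
 *	if (statx(fd, "", AT_EMPTY_PATH,
 *		  STATX_BASIC_STATS | STATX_BTIME, &stx) == 0) {
 *		if (stx.stx_mask & STATX_BTIME)
 *			printf("born %lld\n", (long long)stx.stx_btime.tv_sec);
 *	}
 */
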
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is responsible for sufficient locking (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}
EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is responsible for sufficient locking
	 * (i.e. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}
EXPORT_SYMBOL(inode_set_bytes);
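
/*
 * Worked example for the i_blocks/i_bytes accounting above (a recap, not new
 * behaviour): i_blocks counts 512-byte units and i_bytes holds the remainder
 * below 512.  Adding 1300 bytes to an inode whose i_bytes is currently 400
 * gives 1300 >> 9 = 2 whole blocks plus a remainder of 1300 & 511 = 276;
 * 400 + 276 = 676 overflows 512, so one more block is carried and i_bytes
 * ends up at 164.  Filesystems typically call inode_add_bytes(inode, n)
 * (which takes i_lock itself) when they allocate n bytes of on-disk space,
 * and the total is later reported through stat->blocks via generic_fillattr().
 */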