// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include <trace/events/timestamp.h>

#include "internal.h"
#include "mount.h"

/**
 * fill_mg_cmtime - Fill in the mtime and ctime and flag ctime as QUERIED
 * @stat: where to store the resulting values
 * @request_mask: STATX_* values requested
 * @inode: inode from which to grab the c/mtime
 *
 * Given @inode, grab the ctime and mtime out of it and store the result
 * in @stat. When fetching the value, flag it as QUERIED (if not already)
 * so the next write will record a distinct timestamp.
 *
 * NB: The QUERIED flag is tracked in the ctime, but we set it there even
 * if only the mtime was requested, as that ensures that the next mtime
 * change will be distinct.
 */
void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode)
{
	atomic_t *pcn = (atomic_t *)&inode->i_ctime_nsec;

	/* If neither time was requested, then don't report them */
	if (!(request_mask & (STATX_CTIME|STATX_MTIME))) {
		stat->result_mask &= ~(STATX_CTIME|STATX_MTIME);
		return;
	}

	stat->mtime = inode_get_mtime(inode);
	stat->ctime.tv_sec = inode->i_ctime_sec;
	stat->ctime.tv_nsec = (u32)atomic_read(pcn);
	if (!(stat->ctime.tv_nsec & I_CTIME_QUERIED))
		stat->ctime.tv_nsec = ((u32)atomic_fetch_or(I_CTIME_QUERIED, pcn));
	stat->ctime.tv_nsec &= ~I_CTIME_QUERIED;
	trace_fill_mg_cmtime(inode, &stat->ctime, &stat->mtime);
}
EXPORT_SYMBOL(fill_mg_cmtime);
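/*
 * Illustrative sketch (not built): how a multigrain filesystem's
 * ->getattr picks up the QUERIED handshake. mgfs_example_getattr() is a
 * hypothetical name; in-tree filesystems normally reach fill_mg_cmtime()
 * indirectly through generic_fillattr() below.
 */
#if 0
static int mgfs_example_getattr(struct mnt_idmap *idmap,
				const struct path *path, struct kstat *stat,
				u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* Flags the ctime QUERIED via fill_mg_cmtime() when is_mgtime() */
	generic_fillattr(idmap, request_mask, inode, stat);
	/*
	 * From here on, the next write to @inode must choose a ctime
	 * distinct from the value just reported to userspace.
	 */
	return 0;
}
#endif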
/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);

	if (is_mgtime(inode)) {
		fill_mg_cmtime(stat, request_mask, inode);
	} else {
		stat->ctime = inode_get_ctime(inode);
		stat->mtime = inode_get_mtime(inode);
	}

	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

/**
 * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes
 * @stat: Where to fill in the attribute flags
 * @unit_min: Minimum supported atomic write length in bytes
 * @unit_max: Maximum supported atomic write length in bytes
 * @unit_max_opt: Optimised maximum supported atomic write length in bytes
 *
 * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
 * atomic write unit_min and unit_max values.
 */
void generic_fill_statx_atomic_writes(struct kstat *stat,
				      unsigned int unit_min,
				      unsigned int unit_max,
				      unsigned int unit_max_opt)
{
	/* Confirm that the request type is known */
	stat->result_mask |= STATX_WRITE_ATOMIC;

	/* Confirm that the file attribute type is known */
	stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC;

	if (unit_min) {
		stat->atomic_write_unit_min = unit_min;
		stat->atomic_write_unit_max = unit_max;
		stat->atomic_write_unit_max_opt = unit_max_opt;
		/* Initially only allow 1x segment */
		stat->atomic_write_segments_max = 1;

		/* Confirm atomic writes are actually supported */
		stat->attributes |= STATX_ATTR_WRITE_ATOMIC;
	}
}
EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes);
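/*
 * Illustrative sketch (not built): a filesystem whose backing device can
 * commit 4 KiB..64 KiB writes atomically might advertise that from its
 * ->getattr. examplefs_getattr() and the limits are hypothetical; only
 * the helper call itself comes from this file.
 */
#if 0
static int examplefs_getattr(struct mnt_idmap *idmap,
			     const struct path *path, struct kstat *stat,
			     u32 request_mask, unsigned int query_flags)
{
	generic_fillattr(idmap, request_mask, d_inode(path->dentry), stat);
	if (request_mask & STATX_WRITE_ATOMIC)
		generic_fill_statx_atomic_writes(stat, SZ_4K, SZ_64K, SZ_64K);
	return 0;
}
#endif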
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	if (inode->i_op->getattr) {
		int ret;

		ret = inode->i_op->getattr(idmap, path, stat, request_mask,
					   query_flags);
		if (ret)
			return ret;
	} else {
		generic_fillattr(idmap, request_mask, inode, stat);
	}

	/*
	 * If this is a block device inode, override the filesystem attributes
	 * with the block device specific parameters that need to be obtained
	 * from the bdev backing inode.
	 */
	if (S_ISBLK(stat->mode))
		bdev_statx(path, stat, request_mask);

	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/*
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (unlikely(retval))
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
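/*
 * Illustrative sketch (not built): a typical in-kernel caller asking only
 * for the size, forcing remote filesystems to revalidate first.
 * example_query_size() is hypothetical; vfs_getattr(), STATX_SIZE and
 * AT_STATX_FORCE_SYNC are real.
 */
#if 0
static loff_t example_query_size(const struct path *path)
{
	struct kstat stat;
	int error = vfs_getattr(path, &stat, STATX_SIZE,
				AT_STATX_FORCE_SYNC);

	/* A negative return is the error; otherwise report the size. */
	return error ? error : (loff_t)stat.size;
}
#endif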
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	CLASS(fd_raw, f)(fd);
	if (fd_empty(f))
		return -EBADF;
	return vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
}

static int statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;

	return lookup_flags;
}

static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
			  u32 request_mask)
{
	int error = vfs_getattr(path, stat, request_mask, flags);
	if (error)
		return error;

	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path->mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path_mounted(path))
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
	return 0;
}

static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
			u32 request_mask)
{
	CLASS(fd_raw, f)(fd);
	if (fd_empty(f))
		return -EBADF;
	return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		return error;
	error = vfs_statx_path(&path, flags, stat, request_mask);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name = getname_maybe_null(filename, flags);

	if (!name && dfd >= 0)
		return vfs_fstat(dfd, stat);

	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}
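/*
 * Userspace illustration (not kernel code): thanks to the
 * getname_maybe_null() fast path in vfs_fstatat() above, the two calls
 * below take the same route and return the same result:
 *
 *	struct stat st;
 *
 *	fstatat(fd, "", &st, AT_EMPTY_PATH);	// empty path + AT_EMPTY_PATH
 *	fstat(fd, &st);				// both end up in vfs_fstat(fd, ...)
 */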
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (unlikely(error))
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (unlikely(error))
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_fstat(fd, &stat);
	if (unlikely(error))
		return error;

	return cp_old_stat(&stat, statbuf);
}

#endif /* __ARCH_WANT_OLD_STAT */
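/*
 * Worked example of the st_ino write-back check in cp_old_stat() above:
 * on i386, __old_kernel_stat.st_ino is an unsigned short, so a modern
 * 64-bit inode number such as 0x10001 would be silently truncated to
 * 0x0001 by the assignment. The subsequent "tmp.st_ino != stat->ino"
 * comparison catches that, and the syscall fails with -EOVERFLOW rather
 * than returning a wrong inode number.
 */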
#ifdef __ARCH_WANT_NEW_STAT

/*
 * Zeroing the padding matters here: struct stat may contain holes, and
 * any bytes left uninitialised would leak kernel stack to userspace.
 */
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (unlikely(error))
		return error;

	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (unlikely(error))
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (unlikely(error))
		return error;

	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_fstat(fd, &stat);
	if (unlikely(error))
		return error;

	return cp_new_stat(&stat, statbuf);
}
#endif /* __ARCH_WANT_NEW_STAT */
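/*
 * Worked example for the size check in cp_new_stat() above: on a 32-bit
 * kernel MAX_NON_LFS is (1UL << 31) - 1, i.e. 2 GiB - 1 bytes. Statting
 * a 3 GiB file therefore returns -EOVERFLOW, and the caller must switch
 * to the stat64()/statx() family to see the real size.
 */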
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	struct filename *name;
	int error;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	name = getname_flags(pathname, lookup_flags);
	error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
	if (unlikely(error)) {
		putname(name);
		return error;
	}

	/*
	 * AFS mountpoints allow readlink(2) but are not symlinks
	 */
	if (d_is_symlink(path.dentry) ||
	    d_backing_inode(path.dentry)->i_op->readlink) {
		error = security_inode_readlink(path.dentry);
		if (!error) {
			touch_atime(&path);
			error = vfs_readlink(path.dentry, buf, bufsiz);
		}
	} else {
		error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
	}
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
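/*
 * Userspace illustration (not kernel code): because do_readlinkat()
 * starts the lookup with LOOKUP_EMPTY, an empty pathname is accepted
 * when dfd itself names the symlink:
 *
 *	int fd = open("/tmp/link", O_PATH | O_NOFOLLOW);
 *	char buf[PATH_MAX];
 *	ssize_t n = readlinkat(fd, "", buf, sizeof(buf));
 */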
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;
	tmp.stx_dio_read_offset_align = stat->dio_read_offset_align;
	tmp.stx_subvol = stat->subvol;
	tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
	tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
	tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;
	tmp.stx_atomic_write_unit_max_opt = stat->atomic_write_unit_max_opt;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
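/*
 * Note on the noinline_for_stack above: struct statx is a 256-byte UAPI
 * structure, so keeping cp_statx() out of line confines that temporary
 * to this function's frame instead of enlarging the stack of every
 * statx caller.
 */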
int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
		struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx_fd(fd, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat, or NULL or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
 * in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name = getname_maybe_null(filename, flags);

	if (!name && dfd >= 0)
		return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);

	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}
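/*
 * Userspace illustration (not kernel code): request only what is needed
 * so remote filesystems can skip work, and note the fstat()-style form
 * with a NULL path described above:
 *
 *	struct statx stx;
 *
 *	statx(AT_FDCWD, "file", AT_SYMLINK_NOFOLLOW,
 *	      STATX_BTIME | STATX_SIZE, &stx);
 *	statx(fd, NULL, AT_EMPTY_PATH, STATX_BASIC_STATS, &stx); // ~fstat(fd)
 *
 * Callers should test stx.stx_mask before trusting a field; STATX_BTIME,
 * for instance, is not supplied by every filesystem.
 */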
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif /* CONFIG_COMPAT && __ARCH_WANT_COMPAT_STAT */

/* Caller is here responsible for sufficient locking (i.e. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}
EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/*
	 * Caller is here responsible for sufficient locking
	 * (i.e. inode->i_lock)
	 */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}
EXPORT_SYMBOL(inode_set_bytes);
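/*
 * Worked example for the 512-byte split in __inode_add_bytes() above:
 * adding 1300 bytes to an inode with i_bytes == 300 first adds
 * 1300 >> 9 = 2 blocks; the remaining 1300 & 511 = 276 bytes push
 * i_bytes to 576, which carries one more block and leaves i_bytes at 64.
 * Net effect: +3 blocks and 64 bytes, i.e. exactly
 * 3 * 512 + 64 - 300 = 1300 bytes accounted.
 */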