// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include <trace/events/timestamp.h>

#include "internal.h"
#include "mount.h"

/**
 * fill_mg_cmtime - Fill in the mtime and ctime and flag ctime as QUERIED
 * @stat: where to store the resulting values
 * @request_mask: STATX_* values requested
 * @inode: inode from which to grab the c/mtime
 *
 * Given @inode, grab the ctime and mtime out of it and store the result
 * in @stat. When fetching the value, flag it as QUERIED (if not already)
 * so the next write will record a distinct timestamp.
 *
 * NB: The QUERIED flag is tracked in the ctime, but we set it there even
 * if only the mtime was requested, as that ensures that the next mtime
 * change will be distinct.
 */
void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode)
{
	atomic_t *pcn = (atomic_t *)&inode->i_ctime_nsec;

	/* If neither time was requested, then don't report them */
	if (!(request_mask & (STATX_CTIME|STATX_MTIME))) {
		stat->result_mask &= ~(STATX_CTIME|STATX_MTIME);
		return;
	}

	stat->mtime = inode_get_mtime(inode);
	stat->ctime.tv_sec = inode->i_ctime_sec;
	stat->ctime.tv_nsec = (u32)atomic_read(pcn);
	if (!(stat->ctime.tv_nsec & I_CTIME_QUERIED))
		stat->ctime.tv_nsec = ((u32)atomic_fetch_or(I_CTIME_QUERIED, pcn));
	stat->ctime.tv_nsec &= ~I_CTIME_QUERIED;
	trace_fill_mg_cmtime(inode, &stat->ctime, &stat->mtime);
}
EXPORT_SYMBOL(fill_mg_cmtime);

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure. This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);

	if (is_mgtime(inode)) {
		fill_mg_cmtime(stat, request_mask, inode);
	} else {
		stat->ctime = inode_get_ctime(inode);
		stat->mtime = inode_get_mtime(inode);
	}

	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}

}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

/**
 * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes
 * @stat: Where to fill in the attribute flags
 * @unit_min: Minimum supported atomic write length in bytes
 * @unit_max: Maximum supported atomic write length in bytes
 *
 * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
 * atomic write unit_min and unit_max values.
 */
void generic_fill_statx_atomic_writes(struct kstat *stat,
				      unsigned int unit_min,
				      unsigned int unit_max)
{
	/* Confirm that the request type is known */
	stat->result_mask |= STATX_WRITE_ATOMIC;

	/* Confirm that the file attribute type is known */
	stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC;

	if (unit_min) {
		stat->atomic_write_unit_min = unit_min;
		stat->atomic_write_unit_max = unit_max;
		/* Initially only allow 1x segment */
		stat->atomic_write_segments_max = 1;

		/* Confirm atomic writes are actually supported */
		stat->attributes |= STATX_ATTR_WRITE_ATOMIC;
	}
}
EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes);

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user. Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask,
					    query_flags | AT_GETATTR_NOSEC);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/*
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes. The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieving. Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
		return -EPERM;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!fd_file(f))
		return -EBADF;
	error = vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}

static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
			  u32 request_mask)
{
	int error = vfs_getattr(path, stat, request_mask, flags);

	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path->mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path_mounted(path))
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/*
	 * If this is a block device inode, override the filesystem
	 * attributes with the block device specific parameters that need to be
	 * obtained from the bdev backing inode.
	 */
	if (S_ISBLK(stat->mode))
		bdev_statx(path, stat, request_mask);

	return error;
}

static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
			u32 request_mask)
{
	CLASS(fd_raw, f)(fd);
	if (!fd_file(f))
		return -EBADF;
	return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		return error;
	error = vfs_statx_path(&path, flags, stat, request_mask);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	/*
	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
	 *
	 * If AT_EMPTY_PATH is set, we expect the common case to be an empty
	 * path, and avoid doing all the extra pathname work.
	 */
	if (flags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
		return vfs_fstat(dfd, stat);

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags));
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}

#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility? Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	struct filename *name;
	int error;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	name = getname_flags(pathname, lookup_flags);
	error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
	if (unlikely(error)) {
		putname(name);
		return error;
	}

	/*
	 * AFS mountpoints allow readlink(2) but are not symlinks
	 */
	if (d_is_symlink(path.dentry) ||
	    d_backing_inode(path.dentry)->i_op->readlink) {
		error = security_inode_readlink(path.dentry);
		if (!error) {
			touch_atime(&path);
			error = vfs_readlink(path.dentry, buf, bufsiz);
		}
	} else {
		error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
	}
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;
	tmp.stx_subvol = stat->subvol;
	tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
	tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
	tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
		struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx_fd(fd, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or either NULL or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
 * in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	unsigned lflags;
	struct filename *name;

	/*
	 * Short-circuit handling of NULL and "" paths.
	 *
	 * For a NULL path we require and accept only the AT_EMPTY_PATH flag
	 * (possibly |'d with AT_STATX flags).
	 *
	 * However, glibc on 32-bit architectures implements fstatat as statx
	 * with the "" pathname and AT_NO_AUTOMOUNT | AT_EMPTY_PATH flags.
	 * Supporting this results in the uglification below.
	 */
	lflags = flags & ~(AT_NO_AUTOMOUNT | AT_STATX_SYNC_TYPE);
	if (lflags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
		return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);

	name = getname_flags(filename, getname_statx_lookup_flags(flags));
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	/* i_blocks counts 512-byte units; i_bytes holds the sub-512 remainder */
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	/* Borrow a full 512-byte block if the byte remainder would go negative */
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);