/*
 * linux/fs/namei.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * Some corrections by tytso.
 */

/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
 * lookup logic.
 */
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <asm/uaccess.h>

#include "internal.h"
#include "mount.h"

/* [Feb-1997 T. Schoebel-Theuer]
 * Fundamental changes in the pathname lookup mechanisms (namei)
 * were necessary because of omirr. The reason is that omirr needs
 * to know the _real_ pathname, not the user-supplied one, in case
 * of symlinks (and also when transname replacements occur).
 *
 * The new code replaces the old recursive symlink resolution with
 * an iterative one (in case of non-nested symlink chains). It does
 * this with calls to <fs>_follow_link().
 * As a side effect, dir_namei(), _namei() and follow_link() are now
 * replaced with a single function lookup_dentry() that can handle all
 * the special cases of the former code.
 *
 * With the new dcache, the pathname is stored at each inode, at least as
 * long as the refcount of the inode is positive. As a side effect, the
 * size of the dcache depends on the inode cache and thus is dynamic.
 *
 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
 * resolution to correspond with current state of the code.
 *
 * Note that the symlink resolution is not *completely* iterative.
 * There is still a significant amount of tail- and mid- recursion in
 * the algorithm. Also, note that <fs>_readlink() is not used in
 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
 * may return different results than <fs>_follow_link(). Many virtual
 * filesystems (including /proc) exhibit this behavior.
 */

/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
 * and the name already exists in the form of a symlink, try to create the new
 * name indicated by the symlink. The old code always complained that the
 * name already exists, due to not following the symlink even if its target
 * is nonexistent. The new semantics also affects mknod() and link() when
 * the name is a symlink pointing to a non-existent name.
 *
 * I don't know which semantics is the right one, since I have no access
 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
 * "old" one. Personally, I think the new semantics is much more logical.
 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
 * file does succeed in both HP-UX and SunOS, but not in Solaris
 * or in the old Linux semantics.
 */

/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
 * semantics.
See the comments in "open_namei" and "do_link" below. 88 * 89 * [10-Sep-98 Alan Modra] Another symlink change. 90 */ 91 92 /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: 93 * inside the path - always follow. 94 * in the last component in creation/removal/renaming - never follow. 95 * if LOOKUP_FOLLOW passed - follow. 96 * if the pathname has trailing slashes - follow. 97 * otherwise - don't follow. 98 * (applied in that order). 99 * 100 * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT 101 * restored for 2.4. This is the last surviving part of old 4.2BSD bug. 102 * During the 2.4 we need to fix the userland stuff depending on it - 103 * hopefully we will be able to get rid of that wart in 2.5. So far only 104 * XEmacs seems to be relying on it... 105 */ 106 /* 107 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) 108 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives 109 * any extra contention... 110 */ 111 112 /* In order to reduce some races, while at the same time doing additional 113 * checking and hopefully speeding things up, we copy filenames to the 114 * kernel data space before using them.. 115 * 116 * POSIX.1 2.4: an empty pathname is invalid (ENOENT). 117 * PATH_MAX includes the nul terminator --RR. 118 */ 119 static int do_getname(const char __user *filename, char *page) 120 { 121 int retval; 122 unsigned long len = PATH_MAX; 123 124 if (!segment_eq(get_fs(), KERNEL_DS)) { 125 if ((unsigned long) filename >= TASK_SIZE) 126 return -EFAULT; 127 if (TASK_SIZE - (unsigned long) filename < PATH_MAX) 128 len = TASK_SIZE - (unsigned long) filename; 129 } 130 131 retval = strncpy_from_user(page, filename, len); 132 if (retval > 0) { 133 if (retval < len) 134 return 0; 135 return -ENAMETOOLONG; 136 } else if (!retval) 137 retval = -ENOENT; 138 return retval; 139 } 140 141 static char *getname_flags(const char __user *filename, int flags, int *empty) 142 { 143 char *result = __getname(); 144 int retval; 145 146 if (!result) 147 return ERR_PTR(-ENOMEM); 148 149 retval = do_getname(filename, result); 150 if (retval < 0) { 151 if (retval == -ENOENT && empty) 152 *empty = 1; 153 if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) { 154 __putname(result); 155 return ERR_PTR(retval); 156 } 157 } 158 audit_getname(result); 159 return result; 160 } 161 162 char *getname(const char __user * filename) 163 { 164 return getname_flags(filename, 0, NULL); 165 } 166 167 #ifdef CONFIG_AUDITSYSCALL 168 void putname(const char *name) 169 { 170 if (unlikely(!audit_dummy_context())) 171 audit_putname(name); 172 else 173 __putname(name); 174 } 175 EXPORT_SYMBOL(putname); 176 #endif 177 178 static int check_acl(struct inode *inode, int mask) 179 { 180 #ifdef CONFIG_FS_POSIX_ACL 181 struct posix_acl *acl; 182 183 if (mask & MAY_NOT_BLOCK) { 184 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); 185 if (!acl) 186 return -EAGAIN; 187 /* no ->get_acl() calls in RCU mode... */ 188 if (acl == ACL_NOT_CACHED) 189 return -ECHILD; 190 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); 191 } 192 193 acl = get_cached_acl(inode, ACL_TYPE_ACCESS); 194 195 /* 196 * A filesystem can force a ACL callback by just never filling the 197 * ACL cache. But normally you'd fill the cache either at inode 198 * instantiation time, or on the first ->get_acl call. 199 * 200 * If the filesystem doesn't have a get_acl() function at all, we'll 201 * just create the negative cache entry. 
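 *
 * Sketch of the intended usage (hedged; my_fs_read_acl() is a hypothetical
 * helper, not part of this file): an ACL-aware filesystem would normally
 * populate the cache itself, e.g.
 *
 *	acl = my_fs_read_acl(inode);
 *	set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
 *
 * either at inode instantiation time or from its ->get_acl() method, so
 * that later permission checks hit get_cached_acl() without blocking.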
202 */ 203 if (acl == ACL_NOT_CACHED) { 204 if (inode->i_op->get_acl) { 205 acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS); 206 if (IS_ERR(acl)) 207 return PTR_ERR(acl); 208 } else { 209 set_cached_acl(inode, ACL_TYPE_ACCESS, NULL); 210 return -EAGAIN; 211 } 212 } 213 214 if (acl) { 215 int error = posix_acl_permission(inode, acl, mask); 216 posix_acl_release(acl); 217 return error; 218 } 219 #endif 220 221 return -EAGAIN; 222 } 223 224 /* 225 * This does the basic permission checking 226 */ 227 static int acl_permission_check(struct inode *inode, int mask) 228 { 229 unsigned int mode = inode->i_mode; 230 231 if (current_user_ns() != inode_userns(inode)) 232 goto other_perms; 233 234 if (likely(current_fsuid() == inode->i_uid)) 235 mode >>= 6; 236 else { 237 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { 238 int error = check_acl(inode, mask); 239 if (error != -EAGAIN) 240 return error; 241 } 242 243 if (in_group_p(inode->i_gid)) 244 mode >>= 3; 245 } 246 247 other_perms: 248 /* 249 * If the DACs are ok we don't need any capability check. 250 */ 251 if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) 252 return 0; 253 return -EACCES; 254 } 255 256 /** 257 * generic_permission - check for access rights on a Posix-like filesystem 258 * @inode: inode to check access rights for 259 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) 260 * 261 * Used to check for read/write/execute permissions on a file. 262 * We use "fsuid" for this, letting us set arbitrary permissions 263 * for filesystem access without changing the "normal" uids which 264 * are used for other things. 265 * 266 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk 267 * request cannot be satisfied (eg. requires blocking or too much complexity). 268 * It would then be called again in ref-walk mode. 269 */ 270 int generic_permission(struct inode *inode, int mask) 271 { 272 int ret; 273 274 /* 275 * Do the basic permission checks. 276 */ 277 ret = acl_permission_check(inode, mask); 278 if (ret != -EACCES) 279 return ret; 280 281 if (S_ISDIR(inode->i_mode)) { 282 /* DACs are overridable for directories */ 283 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 284 return 0; 285 if (!(mask & MAY_WRITE)) 286 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) 287 return 0; 288 return -EACCES; 289 } 290 /* 291 * Read/write DACs are always overridable. 292 * Executable DACs are overridable when there is 293 * at least one exec bit set. 294 */ 295 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) 296 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 297 return 0; 298 299 /* 300 * Searching includes executable on directories, else just read. 301 */ 302 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 303 if (mask == MAY_READ) 304 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) 305 return 0; 306 307 return -EACCES; 308 } 309 310 /* 311 * We _really_ want to just do "generic_permission()" without 312 * even looking at the inode->i_op values. So we keep a cache 313 * flag in inode->i_opflags, that says "this has not special 314 * permission function, use the fast case". 
 */
static inline int do_inode_permission(struct inode *inode, int mask)
{
        if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
                if (likely(inode->i_op->permission))
                        return inode->i_op->permission(inode, mask);

                /* This gets set once for the inode lifetime */
                spin_lock(&inode->i_lock);
                inode->i_opflags |= IOP_FASTPERM;
                spin_unlock(&inode->i_lock);
        }
        return generic_permission(inode, mask);
}

/**
 * inode_permission - check for access rights to a given inode
 * @inode: inode to check permission on
 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
 *
 * Used to check for read/write/execute permissions on an inode.
 * We use "fsuid" for this, letting us set arbitrary permissions
 * for filesystem access without changing the "normal" uids which
 * are used for other things.
 *
 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
 */
int inode_permission(struct inode *inode, int mask)
{
        int retval;

        if (unlikely(mask & MAY_WRITE)) {
                umode_t mode = inode->i_mode;

                /*
                 * Nobody gets write access to a read-only fs.
                 */
                if (IS_RDONLY(inode) &&
                    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
                        return -EROFS;

                /*
                 * Nobody gets write access to an immutable file.
                 */
                if (IS_IMMUTABLE(inode))
                        return -EACCES;
        }

        retval = do_inode_permission(inode, mask);
        if (retval)
                return retval;

        retval = devcgroup_inode_permission(inode, mask);
        if (retval)
                return retval;

        return security_inode_permission(inode, mask);
}

/**
 * path_get - get a reference to a path
 * @path: path to get the reference to
 *
 * Given a path increment the reference count to the dentry and the vfsmount.
 */
void path_get(struct path *path)
{
        mntget(path->mnt);
        dget(path->dentry);
}
EXPORT_SYMBOL(path_get);

/**
 * path_put - put a reference to a path
 * @path: path to put the reference to
 *
 * Given a path decrement the reference count to the dentry and the vfsmount.
 */
void path_put(struct path *path)
{
        dput(path->dentry);
        mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);

/*
 * Path walking has 2 modes, rcu-walk and ref-walk (see
 * Documentation/filesystems/path-lookup.txt). In situations when we can't
 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
 * normal reference counts on dentries and vfsmounts to transition to ref-walk
 * mode. Refcounts are grabbed at the last known good point before rcu-walk
 * got stuck, so ref-walk may continue from there. If this is not successful
 * (eg. a seqcount has changed), then failure is returned and it's up to caller
 * to restart the path walk from the beginning in ref-walk mode.
 */

/**
 * unlazy_walk - try to switch to ref-walk mode.
 * @nd: nameidata pathwalk data
 * @dentry: child of nd->path.dentry or NULL
 * Returns: 0 on success, -ECHILD on failure
 *
 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
 * @nd or NULL. Must be called from rcu-walk context.
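 *
 * Typical caller pattern (compare may_lookup() and do_lookup() below):
 * an rcu-walk path that hits something it cannot handle locklessly does
 *
 *	if (nd->flags & LOOKUP_RCU) {
 *		if (unlazy_walk(nd, dentry))
 *			return -ECHILD;
 *	}
 *
 * and then retries the operation with real references held.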
420 */ 421 static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) 422 { 423 struct fs_struct *fs = current->fs; 424 struct dentry *parent = nd->path.dentry; 425 int want_root = 0; 426 427 BUG_ON(!(nd->flags & LOOKUP_RCU)); 428 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { 429 want_root = 1; 430 spin_lock(&fs->lock); 431 if (nd->root.mnt != fs->root.mnt || 432 nd->root.dentry != fs->root.dentry) 433 goto err_root; 434 } 435 spin_lock(&parent->d_lock); 436 if (!dentry) { 437 if (!__d_rcu_to_refcount(parent, nd->seq)) 438 goto err_parent; 439 BUG_ON(nd->inode != parent->d_inode); 440 } else { 441 if (dentry->d_parent != parent) 442 goto err_parent; 443 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 444 if (!__d_rcu_to_refcount(dentry, nd->seq)) 445 goto err_child; 446 /* 447 * If the sequence check on the child dentry passed, then 448 * the child has not been removed from its parent. This 449 * means the parent dentry must be valid and able to take 450 * a reference at this point. 451 */ 452 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); 453 BUG_ON(!parent->d_count); 454 parent->d_count++; 455 spin_unlock(&dentry->d_lock); 456 } 457 spin_unlock(&parent->d_lock); 458 if (want_root) { 459 path_get(&nd->root); 460 spin_unlock(&fs->lock); 461 } 462 mntget(nd->path.mnt); 463 464 rcu_read_unlock(); 465 br_read_unlock(vfsmount_lock); 466 nd->flags &= ~LOOKUP_RCU; 467 return 0; 468 469 err_child: 470 spin_unlock(&dentry->d_lock); 471 err_parent: 472 spin_unlock(&parent->d_lock); 473 err_root: 474 if (want_root) 475 spin_unlock(&fs->lock); 476 return -ECHILD; 477 } 478 479 /** 480 * release_open_intent - free up open intent resources 481 * @nd: pointer to nameidata 482 */ 483 void release_open_intent(struct nameidata *nd) 484 { 485 struct file *file = nd->intent.open.file; 486 487 if (file && !IS_ERR(file)) { 488 if (file->f_path.dentry == NULL) 489 put_filp(file); 490 else 491 fput(file); 492 } 493 } 494 495 static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) 496 { 497 return dentry->d_op->d_revalidate(dentry, nd); 498 } 499 500 /** 501 * complete_walk - successful completion of path walk 502 * @nd: pointer nameidata 503 * 504 * If we had been in RCU mode, drop out of it and legitimize nd->path. 505 * Revalidate the final result, unless we'd already done that during 506 * the path walk or the filesystem doesn't ask for it. Return 0 on 507 * success, -error on failure. In case of failure caller does not 508 * need to drop nd->path. 
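 *
 * Note: LOOKUP_JUMPED is set whenever the walk did not reach the final
 * dentry purely by parent-to-child steps (e.g. after an absolute symlink,
 * a "..", or a mountpoint crossing), which is why filesystems that set
 * FS_REVAL_DOT get one final d_revalidate() here.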
509 */ 510 static int complete_walk(struct nameidata *nd) 511 { 512 struct dentry *dentry = nd->path.dentry; 513 int status; 514 515 if (nd->flags & LOOKUP_RCU) { 516 nd->flags &= ~LOOKUP_RCU; 517 if (!(nd->flags & LOOKUP_ROOT)) 518 nd->root.mnt = NULL; 519 spin_lock(&dentry->d_lock); 520 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { 521 spin_unlock(&dentry->d_lock); 522 rcu_read_unlock(); 523 br_read_unlock(vfsmount_lock); 524 return -ECHILD; 525 } 526 BUG_ON(nd->inode != dentry->d_inode); 527 spin_unlock(&dentry->d_lock); 528 mntget(nd->path.mnt); 529 rcu_read_unlock(); 530 br_read_unlock(vfsmount_lock); 531 } 532 533 if (likely(!(nd->flags & LOOKUP_JUMPED))) 534 return 0; 535 536 if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) 537 return 0; 538 539 if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT))) 540 return 0; 541 542 /* Note: we do not d_invalidate() */ 543 status = d_revalidate(dentry, nd); 544 if (status > 0) 545 return 0; 546 547 if (!status) 548 status = -ESTALE; 549 550 path_put(&nd->path); 551 return status; 552 } 553 554 static __always_inline void set_root(struct nameidata *nd) 555 { 556 if (!nd->root.mnt) 557 get_fs_root(current->fs, &nd->root); 558 } 559 560 static int link_path_walk(const char *, struct nameidata *); 561 562 static __always_inline void set_root_rcu(struct nameidata *nd) 563 { 564 if (!nd->root.mnt) { 565 struct fs_struct *fs = current->fs; 566 unsigned seq; 567 568 do { 569 seq = read_seqcount_begin(&fs->seq); 570 nd->root = fs->root; 571 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); 572 } while (read_seqcount_retry(&fs->seq, seq)); 573 } 574 } 575 576 static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) 577 { 578 int ret; 579 580 if (IS_ERR(link)) 581 goto fail; 582 583 if (*link == '/') { 584 set_root(nd); 585 path_put(&nd->path); 586 nd->path = nd->root; 587 path_get(&nd->root); 588 nd->flags |= LOOKUP_JUMPED; 589 } 590 nd->inode = nd->path.dentry->d_inode; 591 592 ret = link_path_walk(link, nd); 593 return ret; 594 fail: 595 path_put(&nd->path); 596 return PTR_ERR(link); 597 } 598 599 static void path_put_conditional(struct path *path, struct nameidata *nd) 600 { 601 dput(path->dentry); 602 if (path->mnt != nd->path.mnt) 603 mntput(path->mnt); 604 } 605 606 static inline void path_to_nameidata(const struct path *path, 607 struct nameidata *nd) 608 { 609 if (!(nd->flags & LOOKUP_RCU)) { 610 dput(nd->path.dentry); 611 if (nd->path.mnt != path->mnt) 612 mntput(nd->path.mnt); 613 } 614 nd->path.mnt = path->mnt; 615 nd->path.dentry = path->dentry; 616 } 617 618 static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) 619 { 620 struct inode *inode = link->dentry->d_inode; 621 if (!IS_ERR(cookie) && inode->i_op->put_link) 622 inode->i_op->put_link(link->dentry, nd, cookie); 623 path_put(link); 624 } 625 626 static __always_inline int 627 follow_link(struct path *link, struct nameidata *nd, void **p) 628 { 629 int error; 630 struct dentry *dentry = link->dentry; 631 632 BUG_ON(nd->flags & LOOKUP_RCU); 633 634 if (link->mnt == nd->path.mnt) 635 mntget(link->mnt); 636 637 if (unlikely(current->total_link_count >= 40)) { 638 *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */ 639 path_put(&nd->path); 640 return -ELOOP; 641 } 642 cond_resched(); 643 current->total_link_count++; 644 645 touch_atime(link); 646 nd_set_link(nd, NULL); 647 648 error = security_inode_follow_link(link->dentry, nd); 649 if (error) { 650 *p = ERR_PTR(error); /* no ->put_link(), please */ 651 
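                /*
                 * Storing an ERR_PTR in *p ensures that put_link() sees
                 * IS_ERR(cookie) and skips ->put_link(): ->follow_link()
                 * was never called on this error path, so there is no
                 * filesystem cookie to release.
                 */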
path_put(&nd->path); 652 return error; 653 } 654 655 nd->last_type = LAST_BIND; 656 *p = dentry->d_inode->i_op->follow_link(dentry, nd); 657 error = PTR_ERR(*p); 658 if (!IS_ERR(*p)) { 659 char *s = nd_get_link(nd); 660 error = 0; 661 if (s) 662 error = __vfs_follow_link(nd, s); 663 else if (nd->last_type == LAST_BIND) { 664 nd->flags |= LOOKUP_JUMPED; 665 nd->inode = nd->path.dentry->d_inode; 666 if (nd->inode->i_op->follow_link) { 667 /* stepped on a _really_ weird one */ 668 path_put(&nd->path); 669 error = -ELOOP; 670 } 671 } 672 } 673 return error; 674 } 675 676 static int follow_up_rcu(struct path *path) 677 { 678 struct mount *mnt = real_mount(path->mnt); 679 struct mount *parent; 680 struct dentry *mountpoint; 681 682 parent = mnt->mnt_parent; 683 if (&parent->mnt == path->mnt) 684 return 0; 685 mountpoint = mnt->mnt_mountpoint; 686 path->dentry = mountpoint; 687 path->mnt = &parent->mnt; 688 return 1; 689 } 690 691 int follow_up(struct path *path) 692 { 693 struct mount *mnt = real_mount(path->mnt); 694 struct mount *parent; 695 struct dentry *mountpoint; 696 697 br_read_lock(vfsmount_lock); 698 parent = mnt->mnt_parent; 699 if (&parent->mnt == path->mnt) { 700 br_read_unlock(vfsmount_lock); 701 return 0; 702 } 703 mntget(&parent->mnt); 704 mountpoint = dget(mnt->mnt_mountpoint); 705 br_read_unlock(vfsmount_lock); 706 dput(path->dentry); 707 path->dentry = mountpoint; 708 mntput(path->mnt); 709 path->mnt = &parent->mnt; 710 return 1; 711 } 712 713 /* 714 * Perform an automount 715 * - return -EISDIR to tell follow_managed() to stop and return the path we 716 * were called with. 717 */ 718 static int follow_automount(struct path *path, unsigned flags, 719 bool *need_mntput) 720 { 721 struct vfsmount *mnt; 722 int err; 723 724 if (!path->dentry->d_op || !path->dentry->d_op->d_automount) 725 return -EREMOTE; 726 727 /* We don't want to mount if someone's just doing a stat - 728 * unless they're stat'ing a directory and appended a '/' to 729 * the name. 730 * 731 * We do, however, want to mount if someone wants to open or 732 * create a file of any type under the mountpoint, wants to 733 * traverse through the mountpoint or wants to open the 734 * mounted directory. Also, autofs may mark negative dentries 735 * as being automount points. These will need the attentions 736 * of the daemon to instantiate them before they can be used. 737 */ 738 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | 739 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && 740 path->dentry->d_inode) 741 return -EISDIR; 742 743 current->total_link_count++; 744 if (current->total_link_count >= 40) 745 return -ELOOP; 746 747 mnt = path->dentry->d_op->d_automount(path); 748 if (IS_ERR(mnt)) { 749 /* 750 * The filesystem is allowed to return -EISDIR here to indicate 751 * it doesn't want to automount. For instance, autofs would do 752 * this so that its userspace daemon can mount on this dentry. 753 * 754 * However, we can only permit this if it's a terminal point in 755 * the path being looked up; if it wasn't then the remainder of 756 * the path is inaccessible and we should say so. 
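 *
 * Illustration (hedged, assuming an autofs-style automount on /net): a
 * lookup that terminates at /net/host may be allowed to end on the bare
 * automount point, while a lookup of /net/host/share needs the mount to
 * exist before the walk can continue past "host".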
757 */ 758 if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT)) 759 return -EREMOTE; 760 return PTR_ERR(mnt); 761 } 762 763 if (!mnt) /* mount collision */ 764 return 0; 765 766 if (!*need_mntput) { 767 /* lock_mount() may release path->mnt on error */ 768 mntget(path->mnt); 769 *need_mntput = true; 770 } 771 err = finish_automount(mnt, path); 772 773 switch (err) { 774 case -EBUSY: 775 /* Someone else made a mount here whilst we were busy */ 776 return 0; 777 case 0: 778 path_put(path); 779 path->mnt = mnt; 780 path->dentry = dget(mnt->mnt_root); 781 return 0; 782 default: 783 return err; 784 } 785 786 } 787 788 /* 789 * Handle a dentry that is managed in some way. 790 * - Flagged for transit management (autofs) 791 * - Flagged as mountpoint 792 * - Flagged as automount point 793 * 794 * This may only be called in refwalk mode. 795 * 796 * Serialization is taken care of in namespace.c 797 */ 798 static int follow_managed(struct path *path, unsigned flags) 799 { 800 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ 801 unsigned managed; 802 bool need_mntput = false; 803 int ret = 0; 804 805 /* Given that we're not holding a lock here, we retain the value in a 806 * local variable for each dentry as we look at it so that we don't see 807 * the components of that value change under us */ 808 while (managed = ACCESS_ONCE(path->dentry->d_flags), 809 managed &= DCACHE_MANAGED_DENTRY, 810 unlikely(managed != 0)) { 811 /* Allow the filesystem to manage the transit without i_mutex 812 * being held. */ 813 if (managed & DCACHE_MANAGE_TRANSIT) { 814 BUG_ON(!path->dentry->d_op); 815 BUG_ON(!path->dentry->d_op->d_manage); 816 ret = path->dentry->d_op->d_manage(path->dentry, false); 817 if (ret < 0) 818 break; 819 } 820 821 /* Transit to a mounted filesystem. */ 822 if (managed & DCACHE_MOUNTED) { 823 struct vfsmount *mounted = lookup_mnt(path); 824 if (mounted) { 825 dput(path->dentry); 826 if (need_mntput) 827 mntput(path->mnt); 828 path->mnt = mounted; 829 path->dentry = dget(mounted->mnt_root); 830 need_mntput = true; 831 continue; 832 } 833 834 /* Something is mounted on this dentry in another 835 * namespace and/or whatever was mounted there in this 836 * namespace got unmounted before we managed to get the 837 * vfsmount_lock */ 838 } 839 840 /* Handle an automount point */ 841 if (managed & DCACHE_NEED_AUTOMOUNT) { 842 ret = follow_automount(path, flags, &need_mntput); 843 if (ret < 0) 844 break; 845 continue; 846 } 847 848 /* We didn't change the current path point */ 849 break; 850 } 851 852 if (need_mntput && path->mnt == mnt) 853 mntput(path->mnt); 854 if (ret == -EISDIR) 855 ret = 0; 856 return ret < 0 ? ret : need_mntput; 857 } 858 859 int follow_down_one(struct path *path) 860 { 861 struct vfsmount *mounted; 862 863 mounted = lookup_mnt(path); 864 if (mounted) { 865 dput(path->dentry); 866 mntput(path->mnt); 867 path->mnt = mounted; 868 path->dentry = dget(mounted->mnt_root); 869 return 1; 870 } 871 return 0; 872 } 873 874 static inline bool managed_dentry_might_block(struct dentry *dentry) 875 { 876 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT && 877 dentry->d_op->d_manage(dentry, true) < 0); 878 } 879 880 /* 881 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if 882 * we meet a managed dentry that would need blocking. 
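 *
 * "Mountpoint pile" refers to stacked mounts: if B was mounted over the
 * root of A, which was itself mounted on the dentry we just reached, the
 * walk must land in B's root. The loop below keeps hopping while the
 * current dentry is still a mountpoint.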
883 */ 884 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, 885 struct inode **inode) 886 { 887 for (;;) { 888 struct mount *mounted; 889 /* 890 * Don't forget we might have a non-mountpoint managed dentry 891 * that wants to block transit. 892 */ 893 if (unlikely(managed_dentry_might_block(path->dentry))) 894 return false; 895 896 if (!d_mountpoint(path->dentry)) 897 break; 898 899 mounted = __lookup_mnt(path->mnt, path->dentry, 1); 900 if (!mounted) 901 break; 902 path->mnt = &mounted->mnt; 903 path->dentry = mounted->mnt.mnt_root; 904 nd->flags |= LOOKUP_JUMPED; 905 nd->seq = read_seqcount_begin(&path->dentry->d_seq); 906 /* 907 * Update the inode too. We don't need to re-check the 908 * dentry sequence number here after this d_inode read, 909 * because a mount-point is always pinned. 910 */ 911 *inode = path->dentry->d_inode; 912 } 913 return true; 914 } 915 916 static void follow_mount_rcu(struct nameidata *nd) 917 { 918 while (d_mountpoint(nd->path.dentry)) { 919 struct mount *mounted; 920 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1); 921 if (!mounted) 922 break; 923 nd->path.mnt = &mounted->mnt; 924 nd->path.dentry = mounted->mnt.mnt_root; 925 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); 926 } 927 } 928 929 static int follow_dotdot_rcu(struct nameidata *nd) 930 { 931 set_root_rcu(nd); 932 933 while (1) { 934 if (nd->path.dentry == nd->root.dentry && 935 nd->path.mnt == nd->root.mnt) { 936 break; 937 } 938 if (nd->path.dentry != nd->path.mnt->mnt_root) { 939 struct dentry *old = nd->path.dentry; 940 struct dentry *parent = old->d_parent; 941 unsigned seq; 942 943 seq = read_seqcount_begin(&parent->d_seq); 944 if (read_seqcount_retry(&old->d_seq, nd->seq)) 945 goto failed; 946 nd->path.dentry = parent; 947 nd->seq = seq; 948 break; 949 } 950 if (!follow_up_rcu(&nd->path)) 951 break; 952 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); 953 } 954 follow_mount_rcu(nd); 955 nd->inode = nd->path.dentry->d_inode; 956 return 0; 957 958 failed: 959 nd->flags &= ~LOOKUP_RCU; 960 if (!(nd->flags & LOOKUP_ROOT)) 961 nd->root.mnt = NULL; 962 rcu_read_unlock(); 963 br_read_unlock(vfsmount_lock); 964 return -ECHILD; 965 } 966 967 /* 968 * Follow down to the covering mount currently visible to userspace. At each 969 * point, the filesystem owning that dentry may be queried as to whether the 970 * caller is permitted to proceed or not. 971 */ 972 int follow_down(struct path *path) 973 { 974 unsigned managed; 975 int ret; 976 977 while (managed = ACCESS_ONCE(path->dentry->d_flags), 978 unlikely(managed & DCACHE_MANAGED_DENTRY)) { 979 /* Allow the filesystem to manage the transit without i_mutex 980 * being held. 981 * 982 * We indicate to the filesystem if someone is trying to mount 983 * something here. This gives autofs the chance to deny anyone 984 * other than its daemon the right to mount on its 985 * superstructure. 986 * 987 * The filesystem may sleep at this point. 988 */ 989 if (managed & DCACHE_MANAGE_TRANSIT) { 990 BUG_ON(!path->dentry->d_op); 991 BUG_ON(!path->dentry->d_op->d_manage); 992 ret = path->dentry->d_op->d_manage( 993 path->dentry, false); 994 if (ret < 0) 995 return ret == -EISDIR ? 0 : ret; 996 } 997 998 /* Transit to a mounted filesystem. 
*/ 999 if (managed & DCACHE_MOUNTED) { 1000 struct vfsmount *mounted = lookup_mnt(path); 1001 if (!mounted) 1002 break; 1003 dput(path->dentry); 1004 mntput(path->mnt); 1005 path->mnt = mounted; 1006 path->dentry = dget(mounted->mnt_root); 1007 continue; 1008 } 1009 1010 /* Don't handle automount points here */ 1011 break; 1012 } 1013 return 0; 1014 } 1015 1016 /* 1017 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() 1018 */ 1019 static void follow_mount(struct path *path) 1020 { 1021 while (d_mountpoint(path->dentry)) { 1022 struct vfsmount *mounted = lookup_mnt(path); 1023 if (!mounted) 1024 break; 1025 dput(path->dentry); 1026 mntput(path->mnt); 1027 path->mnt = mounted; 1028 path->dentry = dget(mounted->mnt_root); 1029 } 1030 } 1031 1032 static void follow_dotdot(struct nameidata *nd) 1033 { 1034 set_root(nd); 1035 1036 while(1) { 1037 struct dentry *old = nd->path.dentry; 1038 1039 if (nd->path.dentry == nd->root.dentry && 1040 nd->path.mnt == nd->root.mnt) { 1041 break; 1042 } 1043 if (nd->path.dentry != nd->path.mnt->mnt_root) { 1044 /* rare case of legitimate dget_parent()... */ 1045 nd->path.dentry = dget_parent(nd->path.dentry); 1046 dput(old); 1047 break; 1048 } 1049 if (!follow_up(&nd->path)) 1050 break; 1051 } 1052 follow_mount(&nd->path); 1053 nd->inode = nd->path.dentry->d_inode; 1054 } 1055 1056 /* 1057 * Allocate a dentry with name and parent, and perform a parent 1058 * directory ->lookup on it. Returns the new dentry, or ERR_PTR 1059 * on error. parent->d_inode->i_mutex must be held. d_lookup must 1060 * have verified that no child exists while under i_mutex. 1061 */ 1062 static struct dentry *d_alloc_and_lookup(struct dentry *parent, 1063 struct qstr *name, struct nameidata *nd) 1064 { 1065 struct inode *inode = parent->d_inode; 1066 struct dentry *dentry; 1067 struct dentry *old; 1068 1069 /* Don't create child dentry for a dead directory. */ 1070 if (unlikely(IS_DEADDIR(inode))) 1071 return ERR_PTR(-ENOENT); 1072 1073 dentry = d_alloc(parent, name); 1074 if (unlikely(!dentry)) 1075 return ERR_PTR(-ENOMEM); 1076 1077 old = inode->i_op->lookup(inode, dentry, nd); 1078 if (unlikely(old)) { 1079 dput(dentry); 1080 dentry = old; 1081 } 1082 return dentry; 1083 } 1084 1085 /* 1086 * We already have a dentry, but require a lookup to be performed on the parent 1087 * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error. 1088 * parent->d_inode->i_mutex must be held. d_lookup must have verified that no 1089 * child exists while under i_mutex. 1090 */ 1091 static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry, 1092 struct nameidata *nd) 1093 { 1094 struct inode *inode = parent->d_inode; 1095 struct dentry *old; 1096 1097 /* Don't create child dentry for a dead directory. */ 1098 if (unlikely(IS_DEADDIR(inode))) { 1099 dput(dentry); 1100 return ERR_PTR(-ENOENT); 1101 } 1102 1103 old = inode->i_op->lookup(inode, dentry, nd); 1104 if (unlikely(old)) { 1105 dput(dentry); 1106 dentry = old; 1107 } 1108 return dentry; 1109 } 1110 1111 /* 1112 * It's more convoluted than I'd like it to be, but... it's still fairly 1113 * small and for now I'd prefer to have fast path as straight as possible. 1114 * It _is_ time-critical. 
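 *
 * Overview of the three outcomes below: the rcu-walk fast path uses
 * __d_lookup_rcu() and sequence counts only; a cached ref-walk hit uses
 * __d_lookup(); and only a cache miss takes the parent's i_mutex and
 * calls into the filesystem via d_alloc_and_lookup().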
1115 */ 1116 static int do_lookup(struct nameidata *nd, struct qstr *name, 1117 struct path *path, struct inode **inode) 1118 { 1119 struct vfsmount *mnt = nd->path.mnt; 1120 struct dentry *dentry, *parent = nd->path.dentry; 1121 int need_reval = 1; 1122 int status = 1; 1123 int err; 1124 1125 /* 1126 * Rename seqlock is not required here because in the off chance 1127 * of a false negative due to a concurrent rename, we're going to 1128 * do the non-racy lookup, below. 1129 */ 1130 if (nd->flags & LOOKUP_RCU) { 1131 unsigned seq; 1132 *inode = nd->inode; 1133 dentry = __d_lookup_rcu(parent, name, &seq, inode); 1134 if (!dentry) 1135 goto unlazy; 1136 1137 /* Memory barrier in read_seqcount_begin of child is enough */ 1138 if (__read_seqcount_retry(&parent->d_seq, nd->seq)) 1139 return -ECHILD; 1140 nd->seq = seq; 1141 1142 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { 1143 status = d_revalidate(dentry, nd); 1144 if (unlikely(status <= 0)) { 1145 if (status != -ECHILD) 1146 need_reval = 0; 1147 goto unlazy; 1148 } 1149 } 1150 if (unlikely(d_need_lookup(dentry))) 1151 goto unlazy; 1152 path->mnt = mnt; 1153 path->dentry = dentry; 1154 if (unlikely(!__follow_mount_rcu(nd, path, inode))) 1155 goto unlazy; 1156 if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT)) 1157 goto unlazy; 1158 return 0; 1159 unlazy: 1160 if (unlazy_walk(nd, dentry)) 1161 return -ECHILD; 1162 } else { 1163 dentry = __d_lookup(parent, name); 1164 } 1165 1166 if (dentry && unlikely(d_need_lookup(dentry))) { 1167 dput(dentry); 1168 dentry = NULL; 1169 } 1170 retry: 1171 if (unlikely(!dentry)) { 1172 struct inode *dir = parent->d_inode; 1173 BUG_ON(nd->inode != dir); 1174 1175 mutex_lock(&dir->i_mutex); 1176 dentry = d_lookup(parent, name); 1177 if (likely(!dentry)) { 1178 dentry = d_alloc_and_lookup(parent, name, nd); 1179 if (IS_ERR(dentry)) { 1180 mutex_unlock(&dir->i_mutex); 1181 return PTR_ERR(dentry); 1182 } 1183 /* known good */ 1184 need_reval = 0; 1185 status = 1; 1186 } else if (unlikely(d_need_lookup(dentry))) { 1187 dentry = d_inode_lookup(parent, dentry, nd); 1188 if (IS_ERR(dentry)) { 1189 mutex_unlock(&dir->i_mutex); 1190 return PTR_ERR(dentry); 1191 } 1192 /* known good */ 1193 need_reval = 0; 1194 status = 1; 1195 } 1196 mutex_unlock(&dir->i_mutex); 1197 } 1198 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval) 1199 status = d_revalidate(dentry, nd); 1200 if (unlikely(status <= 0)) { 1201 if (status < 0) { 1202 dput(dentry); 1203 return status; 1204 } 1205 if (!d_invalidate(dentry)) { 1206 dput(dentry); 1207 dentry = NULL; 1208 need_reval = 1; 1209 goto retry; 1210 } 1211 } 1212 1213 path->mnt = mnt; 1214 path->dentry = dentry; 1215 err = follow_managed(path, nd->flags); 1216 if (unlikely(err < 0)) { 1217 path_put_conditional(path, nd); 1218 return err; 1219 } 1220 if (err) 1221 nd->flags |= LOOKUP_JUMPED; 1222 *inode = path->dentry->d_inode; 1223 return 0; 1224 } 1225 1226 static inline int may_lookup(struct nameidata *nd) 1227 { 1228 if (nd->flags & LOOKUP_RCU) { 1229 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); 1230 if (err != -ECHILD) 1231 return err; 1232 if (unlazy_walk(nd, NULL)) 1233 return -ECHILD; 1234 } 1235 return inode_permission(nd->inode, MAY_EXEC); 1236 } 1237 1238 static inline int handle_dots(struct nameidata *nd, int type) 1239 { 1240 if (type == LAST_DOTDOT) { 1241 if (nd->flags & LOOKUP_RCU) { 1242 if (follow_dotdot_rcu(nd)) 1243 return -ECHILD; 1244 } else 1245 follow_dotdot(nd); 1246 } 1247 return 0; 1248 } 1249 1250 static void 
terminate_walk(struct nameidata *nd) 1251 { 1252 if (!(nd->flags & LOOKUP_RCU)) { 1253 path_put(&nd->path); 1254 } else { 1255 nd->flags &= ~LOOKUP_RCU; 1256 if (!(nd->flags & LOOKUP_ROOT)) 1257 nd->root.mnt = NULL; 1258 rcu_read_unlock(); 1259 br_read_unlock(vfsmount_lock); 1260 } 1261 } 1262 1263 /* 1264 * Do we need to follow links? We _really_ want to be able 1265 * to do this check without having to look at inode->i_op, 1266 * so we keep a cache of "no, this doesn't need follow_link" 1267 * for the common case. 1268 */ 1269 static inline int should_follow_link(struct inode *inode, int follow) 1270 { 1271 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { 1272 if (likely(inode->i_op->follow_link)) 1273 return follow; 1274 1275 /* This gets set once for the inode lifetime */ 1276 spin_lock(&inode->i_lock); 1277 inode->i_opflags |= IOP_NOFOLLOW; 1278 spin_unlock(&inode->i_lock); 1279 } 1280 return 0; 1281 } 1282 1283 static inline int walk_component(struct nameidata *nd, struct path *path, 1284 struct qstr *name, int type, int follow) 1285 { 1286 struct inode *inode; 1287 int err; 1288 /* 1289 * "." and ".." are special - ".." especially so because it has 1290 * to be able to know about the current root directory and 1291 * parent relationships. 1292 */ 1293 if (unlikely(type != LAST_NORM)) 1294 return handle_dots(nd, type); 1295 err = do_lookup(nd, name, path, &inode); 1296 if (unlikely(err)) { 1297 terminate_walk(nd); 1298 return err; 1299 } 1300 if (!inode) { 1301 path_to_nameidata(path, nd); 1302 terminate_walk(nd); 1303 return -ENOENT; 1304 } 1305 if (should_follow_link(inode, follow)) { 1306 if (nd->flags & LOOKUP_RCU) { 1307 if (unlikely(unlazy_walk(nd, path->dentry))) { 1308 terminate_walk(nd); 1309 return -ECHILD; 1310 } 1311 } 1312 BUG_ON(inode != path->dentry->d_inode); 1313 return 1; 1314 } 1315 path_to_nameidata(path, nd); 1316 nd->inode = inode; 1317 return 0; 1318 } 1319 1320 /* 1321 * This limits recursive symlink follows to 8, while 1322 * limiting consecutive symlinks to 40. 1323 * 1324 * Without that kind of total limit, nasty chains of consecutive 1325 * symlinks can cause almost arbitrarily long lookups. 1326 */ 1327 static inline int nested_symlink(struct path *path, struct nameidata *nd) 1328 { 1329 int res; 1330 1331 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) { 1332 path_put_conditional(path, nd); 1333 path_put(&nd->path); 1334 return -ELOOP; 1335 } 1336 BUG_ON(nd->depth >= MAX_NESTED_LINKS); 1337 1338 nd->depth++; 1339 current->link_count++; 1340 1341 do { 1342 struct path link = *path; 1343 void *cookie; 1344 1345 res = follow_link(&link, nd, &cookie); 1346 if (!res) 1347 res = walk_component(nd, path, &nd->last, 1348 nd->last_type, LOOKUP_FOLLOW); 1349 put_link(nd, &link, cookie); 1350 } while (res > 0); 1351 1352 current->link_count--; 1353 nd->depth--; 1354 return res; 1355 } 1356 1357 /* 1358 * We really don't want to look at inode->i_op->lookup 1359 * when we don't have to. So we keep a cache bit in 1360 * the inode ->i_opflags field that says "yes, we can 1361 * do lookup on this inode". 
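 *
 * This is the same trick as IOP_FASTPERM and IOP_NOFOLLOW above: the flag
 * is only ever set, never cleared, so a racy read at worst misses the
 * shortcut once and repeats the slow test.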
1362 */ 1363 static inline int can_lookup(struct inode *inode) 1364 { 1365 if (likely(inode->i_opflags & IOP_LOOKUP)) 1366 return 1; 1367 if (likely(!inode->i_op->lookup)) 1368 return 0; 1369 1370 /* We do this once for the lifetime of the inode */ 1371 spin_lock(&inode->i_lock); 1372 inode->i_opflags |= IOP_LOOKUP; 1373 spin_unlock(&inode->i_lock); 1374 return 1; 1375 } 1376 1377 /* 1378 * We can do the critical dentry name comparison and hashing 1379 * operations one word at a time, but we are limited to: 1380 * 1381 * - Architectures with fast unaligned word accesses. We could 1382 * do a "get_unaligned()" if this helps and is sufficiently 1383 * fast. 1384 * 1385 * - Little-endian machines (so that we can generate the mask 1386 * of low bytes efficiently). Again, we *could* do a byte 1387 * swapping load on big-endian architectures if that is not 1388 * expensive enough to make the optimization worthless. 1389 * 1390 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we 1391 * do not trap on the (extremely unlikely) case of a page 1392 * crossing operation. 1393 * 1394 * - Furthermore, we need an efficient 64-bit compile for the 1395 * 64-bit case in order to generate the "number of bytes in 1396 * the final mask". Again, that could be replaced with a 1397 * efficient population count instruction or similar. 1398 */ 1399 #ifdef CONFIG_DCACHE_WORD_ACCESS 1400 1401 #ifdef CONFIG_64BIT 1402 1403 /* 1404 * Jan Achrenius on G+: microoptimized version of 1405 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" 1406 * that works for the bytemasks without having to 1407 * mask them first. 1408 */ 1409 static inline long count_masked_bytes(unsigned long mask) 1410 { 1411 return mask*0x0001020304050608ul >> 56; 1412 } 1413 1414 static inline unsigned int fold_hash(unsigned long hash) 1415 { 1416 hash += hash >> (8*sizeof(int)); 1417 return hash; 1418 } 1419 1420 #else /* 32-bit case */ 1421 1422 /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ 1423 static inline long count_masked_bytes(long mask) 1424 { 1425 /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ 1426 long a = (0x0ff0001+mask) >> 23; 1427 /* Fix the 1 for 00 case */ 1428 return a & mask; 1429 } 1430 1431 #define fold_hash(x) (x) 1432 1433 #endif 1434 1435 unsigned int full_name_hash(const unsigned char *name, unsigned int len) 1436 { 1437 unsigned long a, mask; 1438 unsigned long hash = 0; 1439 1440 for (;;) { 1441 a = *(unsigned long *)name; 1442 if (len < sizeof(unsigned long)) 1443 break; 1444 hash += a; 1445 hash *= 9; 1446 name += sizeof(unsigned long); 1447 len -= sizeof(unsigned long); 1448 if (!len) 1449 goto done; 1450 } 1451 mask = ~(~0ul << len*8); 1452 hash += mask & a; 1453 done: 1454 return fold_hash(hash); 1455 } 1456 EXPORT_SYMBOL(full_name_hash); 1457 1458 #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) 1459 #define ONEBYTES REPEAT_BYTE(0x01) 1460 #define SLASHBYTES REPEAT_BYTE('/') 1461 #define HIGHBITS REPEAT_BYTE(0x80) 1462 1463 /* Return the high bit set in the first byte that is a zero */ 1464 static inline unsigned long has_zero(unsigned long a) 1465 { 1466 return ((a - ONEBYTES) & ~a) & HIGHBITS; 1467 } 1468 1469 /* 1470 * Calculate the length and hash of the path component, and 1471 * return the length of the component; 1472 */ 1473 static inline unsigned long hash_name(const char *name, unsigned int *hashp) 1474 { 1475 unsigned long a, mask, hash, len; 1476 1477 hash = a = 0; 1478 len = -sizeof(unsigned long); 1479 do { 1480 hash = (hash + a) * 9; 1481 len += sizeof(unsigned long); 1482 a = 
*(unsigned long *)(name+len); 1483 /* Do we have any NUL or '/' bytes in this word? */ 1484 mask = has_zero(a) | has_zero(a ^ SLASHBYTES); 1485 } while (!mask); 1486 1487 /* The mask *below* the first high bit set */ 1488 mask = (mask - 1) & ~mask; 1489 mask >>= 7; 1490 hash += a & mask; 1491 *hashp = fold_hash(hash); 1492 1493 return len + count_masked_bytes(mask); 1494 } 1495 1496 #else 1497 1498 unsigned int full_name_hash(const unsigned char *name, unsigned int len) 1499 { 1500 unsigned long hash = init_name_hash(); 1501 while (len--) 1502 hash = partial_name_hash(*name++, hash); 1503 return end_name_hash(hash); 1504 } 1505 EXPORT_SYMBOL(full_name_hash); 1506 1507 /* 1508 * We know there's a real path component here of at least 1509 * one character. 1510 */ 1511 static inline unsigned long hash_name(const char *name, unsigned int *hashp) 1512 { 1513 unsigned long hash = init_name_hash(); 1514 unsigned long len = 0, c; 1515 1516 c = (unsigned char)*name; 1517 do { 1518 len++; 1519 hash = partial_name_hash(c, hash); 1520 c = (unsigned char)name[len]; 1521 } while (c && c != '/'); 1522 *hashp = end_name_hash(hash); 1523 return len; 1524 } 1525 1526 #endif 1527 1528 /* 1529 * Name resolution. 1530 * This is the basic name resolution function, turning a pathname into 1531 * the final dentry. We expect 'base' to be positive and a directory. 1532 * 1533 * Returns 0 and nd will have valid dentry and mnt on success. 1534 * Returns error and drops reference to input namei data on failure. 1535 */ 1536 static int link_path_walk(const char *name, struct nameidata *nd) 1537 { 1538 struct path next; 1539 int err; 1540 1541 while (*name=='/') 1542 name++; 1543 if (!*name) 1544 return 0; 1545 1546 /* At this point we know we have a real path component. */ 1547 for(;;) { 1548 struct qstr this; 1549 long len; 1550 int type; 1551 1552 err = may_lookup(nd); 1553 if (err) 1554 break; 1555 1556 len = hash_name(name, &this.hash); 1557 this.name = name; 1558 this.len = len; 1559 1560 type = LAST_NORM; 1561 if (name[0] == '.') switch (len) { 1562 case 2: 1563 if (name[1] == '.') { 1564 type = LAST_DOTDOT; 1565 nd->flags |= LOOKUP_JUMPED; 1566 } 1567 break; 1568 case 1: 1569 type = LAST_DOT; 1570 } 1571 if (likely(type == LAST_NORM)) { 1572 struct dentry *parent = nd->path.dentry; 1573 nd->flags &= ~LOOKUP_JUMPED; 1574 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { 1575 err = parent->d_op->d_hash(parent, nd->inode, 1576 &this); 1577 if (err < 0) 1578 break; 1579 } 1580 } 1581 1582 if (!name[len]) 1583 goto last_component; 1584 /* 1585 * If it wasn't NUL, we know it was '/'. Skip that 1586 * slash, and continue until no more slashes. 1587 */ 1588 do { 1589 len++; 1590 } while (unlikely(name[len] == '/')); 1591 if (!name[len]) 1592 goto last_component; 1593 name += len; 1594 1595 err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW); 1596 if (err < 0) 1597 return err; 1598 1599 if (err) { 1600 err = nested_symlink(&next, nd); 1601 if (err) 1602 return err; 1603 } 1604 if (can_lookup(nd->inode)) 1605 continue; 1606 err = -ENOTDIR; 1607 break; 1608 /* here ends the main loop */ 1609 1610 last_component: 1611 nd->last = this; 1612 nd->last_type = type; 1613 return 0; 1614 } 1615 terminate_walk(nd); 1616 return err; 1617 } 1618 1619 static int path_init(int dfd, const char *name, unsigned int flags, 1620 struct nameidata *nd, struct file **fp) 1621 { 1622 int retval = 0; 1623 int fput_needed; 1624 struct file *file; 1625 1626 nd->last_type = LAST_ROOT; /* if there are only slashes... 
 */
        nd->flags = flags | LOOKUP_JUMPED;
        nd->depth = 0;
        if (flags & LOOKUP_ROOT) {
                struct inode *inode = nd->root.dentry->d_inode;
                if (*name) {
                        if (!inode->i_op->lookup)
                                return -ENOTDIR;
                        retval = inode_permission(inode, MAY_EXEC);
                        if (retval)
                                return retval;
                }
                nd->path = nd->root;
                nd->inode = inode;
                if (flags & LOOKUP_RCU) {
                        br_read_lock(vfsmount_lock);
                        rcu_read_lock();
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                } else {
                        path_get(&nd->path);
                }
                return 0;
        }

        nd->root.mnt = NULL;

        if (*name == '/') {
                if (flags & LOOKUP_RCU) {
                        br_read_lock(vfsmount_lock);
                        rcu_read_lock();
                        set_root_rcu(nd);
                } else {
                        set_root(nd);
                        path_get(&nd->root);
                }
                nd->path = nd->root;
        } else if (dfd == AT_FDCWD) {
                if (flags & LOOKUP_RCU) {
                        struct fs_struct *fs = current->fs;
                        unsigned seq;

                        br_read_lock(vfsmount_lock);
                        rcu_read_lock();

                        do {
                                seq = read_seqcount_begin(&fs->seq);
                                nd->path = fs->pwd;
                                nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                        } while (read_seqcount_retry(&fs->seq, seq));
                } else {
                        get_fs_pwd(current->fs, &nd->path);
                }
        } else {
                struct dentry *dentry;

                file = fget_raw_light(dfd, &fput_needed);
                retval = -EBADF;
                if (!file)
                        goto out_fail;

                dentry = file->f_path.dentry;

                if (*name) {
                        retval = -ENOTDIR;
                        if (!S_ISDIR(dentry->d_inode->i_mode))
                                goto fput_fail;

                        retval = inode_permission(dentry->d_inode, MAY_EXEC);
                        if (retval)
                                goto fput_fail;
                }

                nd->path = file->f_path;
                if (flags & LOOKUP_RCU) {
                        if (fput_needed)
                                *fp = file;
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                        br_read_lock(vfsmount_lock);
                        rcu_read_lock();
                } else {
                        path_get(&file->f_path);
                        fput_light(file, fput_needed);
                }
        }

        nd->inode = nd->path.dentry->d_inode;
        return 0;

fput_fail:
        fput_light(file, fput_needed);
out_fail:
        return retval;
}

static inline int lookup_last(struct nameidata *nd, struct path *path)
{
        if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
                nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

        nd->flags &= ~LOOKUP_PARENT;
        return walk_component(nd, path, &nd->last, nd->last_type,
                                        nd->flags & LOOKUP_FOLLOW);
}

/* Returns 0 and nd will be valid on success; returns an error otherwise. */
static int path_lookupat(int dfd, const char *name,
                                unsigned int flags, struct nameidata *nd)
{
        struct file *base = NULL;
        struct path path;
        int err;

        /*
         * Path walking is largely split up into 2 different synchronisation
         * schemes, rcu-walk and ref-walk (explained in
         * Documentation/filesystems/path-lookup.txt). These share much of the
         * path walk code, but some things, particularly setup, cleanup, and
         * following mounts, are sufficiently divergent that functions are
         * duplicated. Typically there is a function foo(), and its RCU
         * analogue, foo_rcu().
         *
         * -ECHILD is the error number of choice (just to avoid clashes) that
         * is returned if some aspect of an rcu-walk fails. Such an error must
         * be handled by restarting a traditional ref-walk (which will always
         * be able to complete).
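         *
         * Illustration: do_path_lookup() below implements exactly this
         * retry, roughly
         *
         *	retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
         *	if (retval == -ECHILD)
         *		retval = path_lookupat(dfd, name, flags, nd);
         *
         * so rcu-walk failures are never visible to callers.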
1751 */ 1752 err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base); 1753 1754 if (unlikely(err)) 1755 return err; 1756 1757 current->total_link_count = 0; 1758 err = link_path_walk(name, nd); 1759 1760 if (!err && !(flags & LOOKUP_PARENT)) { 1761 err = lookup_last(nd, &path); 1762 while (err > 0) { 1763 void *cookie; 1764 struct path link = path; 1765 nd->flags |= LOOKUP_PARENT; 1766 err = follow_link(&link, nd, &cookie); 1767 if (!err) 1768 err = lookup_last(nd, &path); 1769 put_link(nd, &link, cookie); 1770 } 1771 } 1772 1773 if (!err) 1774 err = complete_walk(nd); 1775 1776 if (!err && nd->flags & LOOKUP_DIRECTORY) { 1777 if (!nd->inode->i_op->lookup) { 1778 path_put(&nd->path); 1779 err = -ENOTDIR; 1780 } 1781 } 1782 1783 if (base) 1784 fput(base); 1785 1786 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { 1787 path_put(&nd->root); 1788 nd->root.mnt = NULL; 1789 } 1790 return err; 1791 } 1792 1793 static int do_path_lookup(int dfd, const char *name, 1794 unsigned int flags, struct nameidata *nd) 1795 { 1796 int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd); 1797 if (unlikely(retval == -ECHILD)) 1798 retval = path_lookupat(dfd, name, flags, nd); 1799 if (unlikely(retval == -ESTALE)) 1800 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd); 1801 1802 if (likely(!retval)) { 1803 if (unlikely(!audit_dummy_context())) { 1804 if (nd->path.dentry && nd->inode) 1805 audit_inode(name, nd->path.dentry); 1806 } 1807 } 1808 return retval; 1809 } 1810 1811 int kern_path_parent(const char *name, struct nameidata *nd) 1812 { 1813 return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd); 1814 } 1815 1816 int kern_path(const char *name, unsigned int flags, struct path *path) 1817 { 1818 struct nameidata nd; 1819 int res = do_path_lookup(AT_FDCWD, name, flags, &nd); 1820 if (!res) 1821 *path = nd.path; 1822 return res; 1823 } 1824 1825 /** 1826 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair 1827 * @dentry: pointer to dentry of the base directory 1828 * @mnt: pointer to vfs mount of the base directory 1829 * @name: pointer to file name 1830 * @flags: lookup flags 1831 * @path: pointer to struct path to fill 1832 */ 1833 int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, 1834 const char *name, unsigned int flags, 1835 struct path *path) 1836 { 1837 struct nameidata nd; 1838 int err; 1839 nd.root.dentry = dentry; 1840 nd.root.mnt = mnt; 1841 BUG_ON(flags & LOOKUP_PARENT); 1842 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */ 1843 err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd); 1844 if (!err) 1845 *path = nd.path; 1846 return err; 1847 } 1848 1849 static struct dentry *__lookup_hash(struct qstr *name, 1850 struct dentry *base, struct nameidata *nd) 1851 { 1852 struct inode *inode = base->d_inode; 1853 struct dentry *dentry; 1854 int err; 1855 1856 err = inode_permission(inode, MAY_EXEC); 1857 if (err) 1858 return ERR_PTR(err); 1859 1860 /* 1861 * Don't bother with __d_lookup: callers are for creat as 1862 * well as unlink, so a lot of the time it would cost 1863 * a double lookup. 1864 */ 1865 dentry = d_lookup(base, name); 1866 1867 if (dentry && d_need_lookup(dentry)) { 1868 /* 1869 * __lookup_hash is called with the parent dir's i_mutex already 1870 * held, so we are good to go here. 
1871 */ 1872 dentry = d_inode_lookup(base, dentry, nd); 1873 if (IS_ERR(dentry)) 1874 return dentry; 1875 } 1876 1877 if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) { 1878 int status = d_revalidate(dentry, nd); 1879 if (unlikely(status <= 0)) { 1880 /* 1881 * The dentry failed validation. 1882 * If d_revalidate returned 0 attempt to invalidate 1883 * the dentry otherwise d_revalidate is asking us 1884 * to return a fail status. 1885 */ 1886 if (status < 0) { 1887 dput(dentry); 1888 return ERR_PTR(status); 1889 } else if (!d_invalidate(dentry)) { 1890 dput(dentry); 1891 dentry = NULL; 1892 } 1893 } 1894 } 1895 1896 if (!dentry) 1897 dentry = d_alloc_and_lookup(base, name, nd); 1898 1899 return dentry; 1900 } 1901 1902 /* 1903 * Restricted form of lookup. Doesn't follow links, single-component only, 1904 * needs parent already locked. Doesn't follow mounts. 1905 * SMP-safe. 1906 */ 1907 static struct dentry *lookup_hash(struct nameidata *nd) 1908 { 1909 return __lookup_hash(&nd->last, nd->path.dentry, nd); 1910 } 1911 1912 /** 1913 * lookup_one_len - filesystem helper to lookup single pathname component 1914 * @name: pathname component to lookup 1915 * @base: base directory to lookup from 1916 * @len: maximum length @len should be interpreted to 1917 * 1918 * Note that this routine is purely a helper for filesystem usage and should 1919 * not be called by generic code. Also note that by using this function the 1920 * nameidata argument is passed to the filesystem methods and a filesystem 1921 * using this helper needs to be prepared for that. 1922 */ 1923 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) 1924 { 1925 struct qstr this; 1926 unsigned int c; 1927 1928 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); 1929 1930 this.name = name; 1931 this.len = len; 1932 this.hash = full_name_hash(name, len); 1933 if (!len) 1934 return ERR_PTR(-EACCES); 1935 1936 while (len--) { 1937 c = *(const unsigned char *)name++; 1938 if (c == '/' || c == '\0') 1939 return ERR_PTR(-EACCES); 1940 } 1941 /* 1942 * See if the low-level filesystem might want 1943 * to use its own hash.. 1944 */ 1945 if (base->d_flags & DCACHE_OP_HASH) { 1946 int err = base->d_op->d_hash(base, base->d_inode, &this); 1947 if (err < 0) 1948 return ERR_PTR(err); 1949 } 1950 1951 return __lookup_hash(&this, base, NULL); 1952 } 1953 1954 int user_path_at_empty(int dfd, const char __user *name, unsigned flags, 1955 struct path *path, int *empty) 1956 { 1957 struct nameidata nd; 1958 char *tmp = getname_flags(name, flags, empty); 1959 int err = PTR_ERR(tmp); 1960 if (!IS_ERR(tmp)) { 1961 1962 BUG_ON(flags & LOOKUP_PARENT); 1963 1964 err = do_path_lookup(dfd, tmp, flags, &nd); 1965 putname(tmp); 1966 if (!err) 1967 *path = nd.path; 1968 } 1969 return err; 1970 } 1971 1972 int user_path_at(int dfd, const char __user *name, unsigned flags, 1973 struct path *path) 1974 { 1975 return user_path_at_empty(dfd, name, flags, path, NULL); 1976 } 1977 1978 static int user_path_parent(int dfd, const char __user *path, 1979 struct nameidata *nd, char **name) 1980 { 1981 char *s = getname(path); 1982 int error; 1983 1984 if (IS_ERR(s)) 1985 return PTR_ERR(s); 1986 1987 error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd); 1988 if (error) 1989 putname(s); 1990 else 1991 *name = s; 1992 1993 return error; 1994 } 1995 1996 /* 1997 * It's inline, so penalty for filesystems that don't use sticky bit is 1998 * minimal. 
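 *
 * The classic case is a world-writable, sticky /tmp: anyone may create
 * files there, but only the file's owner, the directory's owner, or a
 * CAP_FOWNER holder may unlink or rename somebody else's entry.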
 */
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
        uid_t fsuid = current_fsuid();

        if (!(dir->i_mode & S_ISVTX))
                return 0;
        if (current_user_ns() != inode_userns(inode))
                goto other_userns;
        if (inode->i_uid == fsuid)
                return 0;
        if (dir->i_uid == fsuid)
                return 0;

other_userns:
        return !ns_capable(inode_userns(inode), CAP_FOWNER);
}

/*
 * Check whether we can remove a link victim from directory dir, check
 * whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
        int error;

        if (!victim->d_inode)
                return -ENOENT;

        BUG_ON(victim->d_parent->d_inode != dir);
        audit_inode_child(victim, dir);

        error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
        if (error)
                return error;
        if (IS_APPEND(dir))
                return -EPERM;
        if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) ||
            IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
                return -EPERM;
        if (isdir) {
                if (!S_ISDIR(victim->d_inode->i_mode))
                        return -ENOTDIR;
                if (IS_ROOT(victim))
                        return -EBUSY;
        } else if (S_ISDIR(victim->d_inode->i_mode))
                return -EISDIR;
        if (IS_DEADDIR(dir))
                return -ENOENT;
        if (victim->d_flags & DCACHE_NFSFS_RENAMED)
                return -EBUSY;
        return 0;
}

/*
 * Check whether we can create an object with dentry child in directory
 * dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We should have write and exec permissions on dir
 *  4. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
        if (child->d_inode)
                return -EEXIST;
        if (IS_DEADDIR(dir))
                return -ENOENT;
        return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
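 *
 * Locking order used below: if one directory is an ancestor of the other,
 * the ancestor's i_mutex is taken first (I_MUTEX_PARENT) and the
 * descendant's second (I_MUTEX_CHILD); otherwise p1 is locked before p2.
 * ->s_vfs_rename_mutex serialises cross-directory renames on the
 * filesystem, so the ancestor relationship cannot change while we decide
 * the order.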
2087 */ 2088 struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) 2089 { 2090 struct dentry *p; 2091 2092 if (p1 == p2) { 2093 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2094 return NULL; 2095 } 2096 2097 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); 2098 2099 p = d_ancestor(p2, p1); 2100 if (p) { 2101 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT); 2102 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD); 2103 return p; 2104 } 2105 2106 p = d_ancestor(p1, p2); 2107 if (p) { 2108 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2109 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); 2110 return p; 2111 } 2112 2113 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2114 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); 2115 return NULL; 2116 } 2117 2118 void unlock_rename(struct dentry *p1, struct dentry *p2) 2119 { 2120 mutex_unlock(&p1->d_inode->i_mutex); 2121 if (p1 != p2) { 2122 mutex_unlock(&p2->d_inode->i_mutex); 2123 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); 2124 } 2125 } 2126 2127 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2128 struct nameidata *nd) 2129 { 2130 int error = may_create(dir, dentry); 2131 2132 if (error) 2133 return error; 2134 2135 if (!dir->i_op->create) 2136 return -EACCES; /* shouldn't it be ENOSYS? */ 2137 mode &= S_IALLUGO; 2138 mode |= S_IFREG; 2139 error = security_inode_create(dir, dentry, mode); 2140 if (error) 2141 return error; 2142 error = dir->i_op->create(dir, dentry, mode, nd); 2143 if (!error) 2144 fsnotify_create(dir, dentry); 2145 return error; 2146 } 2147 2148 static int may_open(struct path *path, int acc_mode, int flag) 2149 { 2150 struct dentry *dentry = path->dentry; 2151 struct inode *inode = dentry->d_inode; 2152 int error; 2153 2154 /* O_PATH? */ 2155 if (!acc_mode) 2156 return 0; 2157 2158 if (!inode) 2159 return -ENOENT; 2160 2161 switch (inode->i_mode & S_IFMT) { 2162 case S_IFLNK: 2163 return -ELOOP; 2164 case S_IFDIR: 2165 if (acc_mode & MAY_WRITE) 2166 return -EISDIR; 2167 break; 2168 case S_IFBLK: 2169 case S_IFCHR: 2170 if (path->mnt->mnt_flags & MNT_NODEV) 2171 return -EACCES; 2172 /*FALLTHRU*/ 2173 case S_IFIFO: 2174 case S_IFSOCK: 2175 flag &= ~O_TRUNC; 2176 break; 2177 } 2178 2179 error = inode_permission(inode, acc_mode); 2180 if (error) 2181 return error; 2182 2183 /* 2184 * An append-only file must be opened in append mode for writing. 2185 */ 2186 if (IS_APPEND(inode)) { 2187 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND)) 2188 return -EPERM; 2189 if (flag & O_TRUNC) 2190 return -EPERM; 2191 } 2192 2193 /* O_NOATIME can only be set by the owner or superuser */ 2194 if (flag & O_NOATIME && !inode_owner_or_capable(inode)) 2195 return -EPERM; 2196 2197 return 0; 2198 } 2199 2200 static int handle_truncate(struct file *filp) 2201 { 2202 struct path *path = &filp->f_path; 2203 struct inode *inode = path->dentry->d_inode; 2204 int error = get_write_access(inode); 2205 if (error) 2206 return error; 2207 /* 2208 * Refuse to truncate files with mandatory locks held on them. 
2209 */ 2210 error = locks_verify_locked(inode); 2211 if (!error) 2212 error = security_path_truncate(path); 2213 if (!error) { 2214 error = do_truncate(path->dentry, 0, 2215 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, 2216 filp); 2217 } 2218 put_write_access(inode); 2219 return error; 2220 } 2221 2222 static inline int open_to_namei_flags(int flag) 2223 { 2224 if ((flag & O_ACCMODE) == 3) 2225 flag--; 2226 return flag; 2227 } 2228 2229 /* 2230 * Handle the last step of open() 2231 */ 2232 static struct file *do_last(struct nameidata *nd, struct path *path, 2233 const struct open_flags *op, const char *pathname) 2234 { 2235 struct dentry *dir = nd->path.dentry; 2236 struct dentry *dentry; 2237 int open_flag = op->open_flag; 2238 int will_truncate = open_flag & O_TRUNC; 2239 int want_write = 0; 2240 int acc_mode = op->acc_mode; 2241 struct file *filp; 2242 int error; 2243 2244 nd->flags &= ~LOOKUP_PARENT; 2245 nd->flags |= op->intent; 2246 2247 switch (nd->last_type) { 2248 case LAST_DOTDOT: 2249 case LAST_DOT: 2250 error = handle_dots(nd, nd->last_type); 2251 if (error) 2252 return ERR_PTR(error); 2253 /* fallthrough */ 2254 case LAST_ROOT: 2255 error = complete_walk(nd); 2256 if (error) 2257 return ERR_PTR(error); 2258 audit_inode(pathname, nd->path.dentry); 2259 if (open_flag & O_CREAT) { 2260 error = -EISDIR; 2261 goto exit; 2262 } 2263 goto ok; 2264 case LAST_BIND: 2265 error = complete_walk(nd); 2266 if (error) 2267 return ERR_PTR(error); 2268 audit_inode(pathname, dir); 2269 goto ok; 2270 } 2271 2272 if (!(open_flag & O_CREAT)) { 2273 int symlink_ok = 0; 2274 if (nd->last.name[nd->last.len]) 2275 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 2276 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW)) 2277 symlink_ok = 1; 2278 /* we _can_ be in RCU mode here */ 2279 error = walk_component(nd, path, &nd->last, LAST_NORM, 2280 !symlink_ok); 2281 if (error < 0) 2282 return ERR_PTR(error); 2283 if (error) /* symlink */ 2284 return NULL; 2285 /* sayonara */ 2286 error = complete_walk(nd); 2287 if (error) 2288 return ERR_PTR(error); 2289 2290 error = -ENOTDIR; 2291 if (nd->flags & LOOKUP_DIRECTORY) { 2292 if (!nd->inode->i_op->lookup) 2293 goto exit; 2294 } 2295 audit_inode(pathname, nd->path.dentry); 2296 goto ok; 2297 } 2298 2299 /* create side of things */ 2300 /* 2301 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been 2302 * cleared when we got to the last component we are about to look up 2303 */ 2304 error = complete_walk(nd); 2305 if (error) 2306 return ERR_PTR(error); 2307 2308 audit_inode(pathname, dir); 2309 error = -EISDIR; 2310 /* trailing slashes? */ 2311 if (nd->last.name[nd->last.len]) 2312 goto exit; 2313 2314 mutex_lock(&dir->d_inode->i_mutex); 2315 2316 dentry = lookup_hash(nd); 2317 error = PTR_ERR(dentry); 2318 if (IS_ERR(dentry)) { 2319 mutex_unlock(&dir->d_inode->i_mutex); 2320 goto exit; 2321 } 2322 2323 path->dentry = dentry; 2324 path->mnt = nd->path.mnt; 2325 2326 /* Negative dentry, just create the file */ 2327 if (!dentry->d_inode) { 2328 umode_t mode = op->mode; 2329 if (!IS_POSIXACL(dir->d_inode)) 2330 mode &= ~current_umask(); 2331 /* 2332 * This write is needed to ensure that a 2333 * rw->ro transition does not occur between 2334 * the time when the file is created and when 2335 * a permanent write count is taken through 2336 * the 'struct file' in nameidata_to_filp(). 
2337 */ 2338 error = mnt_want_write(nd->path.mnt); 2339 if (error) 2340 goto exit_mutex_unlock; 2341 want_write = 1; 2342 /* Don't check for write permission, don't truncate */ 2343 open_flag &= ~O_TRUNC; 2344 will_truncate = 0; 2345 acc_mode = MAY_OPEN; 2346 error = security_path_mknod(&nd->path, dentry, mode, 0); 2347 if (error) 2348 goto exit_mutex_unlock; 2349 error = vfs_create(dir->d_inode, dentry, mode, nd); 2350 if (error) 2351 goto exit_mutex_unlock; 2352 mutex_unlock(&dir->d_inode->i_mutex); 2353 dput(nd->path.dentry); 2354 nd->path.dentry = dentry; 2355 goto common; 2356 } 2357 2358 /* 2359 * It already exists. 2360 */ 2361 mutex_unlock(&dir->d_inode->i_mutex); 2362 audit_inode(pathname, path->dentry); 2363 2364 error = -EEXIST; 2365 if (open_flag & O_EXCL) 2366 goto exit_dput; 2367 2368 error = follow_managed(path, nd->flags); 2369 if (error < 0) 2370 goto exit_dput; 2371 2372 if (error) 2373 nd->flags |= LOOKUP_JUMPED; 2374 2375 error = -ENOENT; 2376 if (!path->dentry->d_inode) 2377 goto exit_dput; 2378 2379 if (path->dentry->d_inode->i_op->follow_link) 2380 return NULL; 2381 2382 path_to_nameidata(path, nd); 2383 nd->inode = path->dentry->d_inode; 2384 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ 2385 error = complete_walk(nd); 2386 if (error) 2387 return ERR_PTR(error); 2388 error = -EISDIR; 2389 if (S_ISDIR(nd->inode->i_mode)) 2390 goto exit; 2391 ok: 2392 if (!S_ISREG(nd->inode->i_mode)) 2393 will_truncate = 0; 2394 2395 if (will_truncate) { 2396 error = mnt_want_write(nd->path.mnt); 2397 if (error) 2398 goto exit; 2399 want_write = 1; 2400 } 2401 common: 2402 error = may_open(&nd->path, acc_mode, open_flag); 2403 if (error) 2404 goto exit; 2405 filp = nameidata_to_filp(nd); 2406 if (!IS_ERR(filp)) { 2407 error = ima_file_check(filp, op->acc_mode); 2408 if (error) { 2409 fput(filp); 2410 filp = ERR_PTR(error); 2411 } 2412 } 2413 if (!IS_ERR(filp)) { 2414 if (will_truncate) { 2415 error = handle_truncate(filp); 2416 if (error) { 2417 fput(filp); 2418 filp = ERR_PTR(error); 2419 } 2420 } 2421 } 2422 out: 2423 if (want_write) 2424 mnt_drop_write(nd->path.mnt); 2425 path_put(&nd->path); 2426 return filp; 2427 2428 exit_mutex_unlock: 2429 mutex_unlock(&dir->d_inode->i_mutex); 2430 exit_dput: 2431 path_put_conditional(path, nd); 2432 exit: 2433 filp = ERR_PTR(error); 2434 goto out; 2435 } 2436 2437 static struct file *path_openat(int dfd, const char *pathname, 2438 struct nameidata *nd, const struct open_flags *op, int flags) 2439 { 2440 struct file *base = NULL; 2441 struct file *filp; 2442 struct path path; 2443 int error; 2444 2445 filp = get_empty_filp(); 2446 if (!filp) 2447 return ERR_PTR(-ENFILE); 2448 2449 filp->f_flags = op->open_flag; 2450 nd->intent.open.file = filp; 2451 nd->intent.open.flags = open_to_namei_flags(op->open_flag); 2452 nd->intent.open.create_mode = op->mode; 2453 2454 error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base); 2455 if (unlikely(error)) 2456 goto out_filp; 2457 2458 current->total_link_count = 0; 2459 error = link_path_walk(pathname, nd); 2460 if (unlikely(error)) 2461 goto out_filp; 2462 2463 filp = do_last(nd, &path, op, pathname); 2464 while (unlikely(!filp)) { /* trailing symlink */ 2465 struct path link = path; 2466 void *cookie; 2467 if (!(nd->flags & LOOKUP_FOLLOW)) { 2468 path_put_conditional(&path, nd); 2469 path_put(&nd->path); 2470 filp = ERR_PTR(-ELOOP); 2471 break; 2472 } 2473 nd->flags |= LOOKUP_PARENT; 2474 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); 2475 error = 
follow_link(&link, nd, &cookie); 2476 if (unlikely(error)) 2477 filp = ERR_PTR(error); 2478 else 2479 filp = do_last(nd, &path, op, pathname); 2480 put_link(nd, &link, cookie); 2481 } 2482 out: 2483 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) 2484 path_put(&nd->root); 2485 if (base) 2486 fput(base); 2487 release_open_intent(nd); 2488 return filp; 2489 2490 out_filp: 2491 filp = ERR_PTR(error); 2492 goto out; 2493 } 2494 2495 struct file *do_filp_open(int dfd, const char *pathname, 2496 const struct open_flags *op, int flags) 2497 { 2498 struct nameidata nd; 2499 struct file *filp; 2500 2501 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU); 2502 if (unlikely(filp == ERR_PTR(-ECHILD))) 2503 filp = path_openat(dfd, pathname, &nd, op, flags); 2504 if (unlikely(filp == ERR_PTR(-ESTALE))) 2505 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL); 2506 return filp; 2507 } 2508 2509 struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, 2510 const char *name, const struct open_flags *op, int flags) 2511 { 2512 struct nameidata nd; 2513 struct file *file; 2514 2515 nd.root.mnt = mnt; 2516 nd.root.dentry = dentry; 2517 2518 flags |= LOOKUP_ROOT; 2519 2520 if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN) 2521 return ERR_PTR(-ELOOP); 2522 2523 file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU); 2524 if (unlikely(file == ERR_PTR(-ECHILD))) 2525 file = path_openat(-1, name, &nd, op, flags); 2526 if (unlikely(file == ERR_PTR(-ESTALE))) 2527 file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL); 2528 return file; 2529 } 2530 2531 struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) 2532 { 2533 struct dentry *dentry = ERR_PTR(-EEXIST); 2534 struct nameidata nd; 2535 int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); 2536 if (error) 2537 return ERR_PTR(error); 2538 2539 /* 2540 * Yucky last component or no last component at all? 2541 * (foo/., foo/.., /////) 2542 */ 2543 if (nd.last_type != LAST_NORM) 2544 goto out; 2545 nd.flags &= ~LOOKUP_PARENT; 2546 nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; 2547 nd.intent.open.flags = O_EXCL; 2548 2549 /* 2550 * Do the final lookup. 2551 */ 2552 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2553 dentry = lookup_hash(&nd); 2554 if (IS_ERR(dentry)) 2555 goto fail; 2556 2557 if (dentry->d_inode) 2558 goto eexist; 2559 /* 2560 * Special case - lookup gave negative, but... we had foo/bar/ 2561 * From the vfs_mknod() POV we just have a negative dentry - 2562 * all is fine. Let's be bastards - you had / on the end, you've 2563 * been asking for (non-existent) directory. -ENOENT for you. 
2564 */ 2565 if (unlikely(!is_dir && nd.last.name[nd.last.len])) { 2566 dput(dentry); 2567 dentry = ERR_PTR(-ENOENT); 2568 goto fail; 2569 } 2570 *path = nd.path; 2571 return dentry; 2572 eexist: 2573 dput(dentry); 2574 dentry = ERR_PTR(-EEXIST); 2575 fail: 2576 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2577 out: 2578 path_put(&nd.path); 2579 return dentry; 2580 } 2581 EXPORT_SYMBOL(kern_path_create); 2582 2583 struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) 2584 { 2585 char *tmp = getname(pathname); 2586 struct dentry *res; 2587 if (IS_ERR(tmp)) 2588 return ERR_CAST(tmp); 2589 res = kern_path_create(dfd, tmp, path, is_dir); 2590 putname(tmp); 2591 return res; 2592 } 2593 EXPORT_SYMBOL(user_path_create); 2594 2595 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 2596 { 2597 int error = may_create(dir, dentry); 2598 2599 if (error) 2600 return error; 2601 2602 if ((S_ISCHR(mode) || S_ISBLK(mode)) && 2603 !ns_capable(inode_userns(dir), CAP_MKNOD)) 2604 return -EPERM; 2605 2606 if (!dir->i_op->mknod) 2607 return -EPERM; 2608 2609 error = devcgroup_inode_mknod(mode, dev); 2610 if (error) 2611 return error; 2612 2613 error = security_inode_mknod(dir, dentry, mode, dev); 2614 if (error) 2615 return error; 2616 2617 error = dir->i_op->mknod(dir, dentry, mode, dev); 2618 if (!error) 2619 fsnotify_create(dir, dentry); 2620 return error; 2621 } 2622 2623 static int may_mknod(umode_t mode) 2624 { 2625 switch (mode & S_IFMT) { 2626 case S_IFREG: 2627 case S_IFCHR: 2628 case S_IFBLK: 2629 case S_IFIFO: 2630 case S_IFSOCK: 2631 case 0: /* zero mode translates to S_IFREG */ 2632 return 0; 2633 case S_IFDIR: 2634 return -EPERM; 2635 default: 2636 return -EINVAL; 2637 } 2638 } 2639 2640 SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, 2641 unsigned, dev) 2642 { 2643 struct dentry *dentry; 2644 struct path path; 2645 int error; 2646 2647 if (S_ISDIR(mode)) 2648 return -EPERM; 2649 2650 dentry = user_path_create(dfd, filename, &path, 0); 2651 if (IS_ERR(dentry)) 2652 return PTR_ERR(dentry); 2653 2654 if (!IS_POSIXACL(path.dentry->d_inode)) 2655 mode &= ~current_umask(); 2656 error = may_mknod(mode); 2657 if (error) 2658 goto out_dput; 2659 error = mnt_want_write(path.mnt); 2660 if (error) 2661 goto out_dput; 2662 error = security_path_mknod(&path, dentry, mode, dev); 2663 if (error) 2664 goto out_drop_write; 2665 switch (mode & S_IFMT) { 2666 case 0: case S_IFREG: 2667 error = vfs_create(path.dentry->d_inode,dentry,mode,NULL); 2668 break; 2669 case S_IFCHR: case S_IFBLK: 2670 error = vfs_mknod(path.dentry->d_inode,dentry,mode, 2671 new_decode_dev(dev)); 2672 break; 2673 case S_IFIFO: case S_IFSOCK: 2674 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); 2675 break; 2676 } 2677 out_drop_write: 2678 mnt_drop_write(path.mnt); 2679 out_dput: 2680 dput(dentry); 2681 mutex_unlock(&path.dentry->d_inode->i_mutex); 2682 path_put(&path); 2683 2684 return error; 2685 } 2686 2687 SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev) 2688 { 2689 return sys_mknodat(AT_FDCWD, filename, mode, dev); 2690 } 2691 2692 int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 2693 { 2694 int error = may_create(dir, dentry); 2695 unsigned max_links = dir->i_sb->s_max_links; 2696 2697 if (error) 2698 return error; 2699 2700 if (!dir->i_op->mkdir) 2701 return -EPERM; 2702 2703 mode &= (S_IRWXUGO|S_ISVTX); 2704 error = security_inode_mkdir(dir, dentry, mode); 2705 
if (error) 2706 return error; 2707 2708 if (max_links && dir->i_nlink >= max_links) 2709 return -EMLINK; 2710 2711 error = dir->i_op->mkdir(dir, dentry, mode); 2712 if (!error) 2713 fsnotify_mkdir(dir, dentry); 2714 return error; 2715 } 2716 2717 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) 2718 { 2719 struct dentry *dentry; 2720 struct path path; 2721 int error; 2722 2723 dentry = user_path_create(dfd, pathname, &path, 1); 2724 if (IS_ERR(dentry)) 2725 return PTR_ERR(dentry); 2726 2727 if (!IS_POSIXACL(path.dentry->d_inode)) 2728 mode &= ~current_umask(); 2729 error = mnt_want_write(path.mnt); 2730 if (error) 2731 goto out_dput; 2732 error = security_path_mkdir(&path, dentry, mode); 2733 if (error) 2734 goto out_drop_write; 2735 error = vfs_mkdir(path.dentry->d_inode, dentry, mode); 2736 out_drop_write: 2737 mnt_drop_write(path.mnt); 2738 out_dput: 2739 dput(dentry); 2740 mutex_unlock(&path.dentry->d_inode->i_mutex); 2741 path_put(&path); 2742 return error; 2743 } 2744 2745 SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode) 2746 { 2747 return sys_mkdirat(AT_FDCWD, pathname, mode); 2748 } 2749 2750 /* 2751 * The dentry_unhash() helper will try to drop the dentry early: we 2752 * should have a usage count of 2 if we're the only user of this 2753 * dentry, and if that is true (possibly after pruning the dcache), 2754 * then we drop the dentry now. 2755 * 2756 * A low-level filesystem can, if it chooses, legally 2757 * do a 2758 * 2759 * if (!d_unhashed(dentry)) 2760 * return -EBUSY; 2761 * 2762 * if it cannot handle the case of removing a directory 2763 * that is still in use by something else. 2764 */ 2765 void dentry_unhash(struct dentry *dentry) 2766 { 2767 shrink_dcache_parent(dentry); 2768 spin_lock(&dentry->d_lock); 2769 if (dentry->d_count == 1) 2770 __d_drop(dentry); 2771 spin_unlock(&dentry->d_lock); 2772 } 2773 2774 int vfs_rmdir(struct inode *dir, struct dentry *dentry) 2775 { 2776 int error = may_delete(dir, dentry, 1); 2777 2778 if (error) 2779 return error; 2780 2781 if (!dir->i_op->rmdir) 2782 return -EPERM; 2783 2784 dget(dentry); 2785 mutex_lock(&dentry->d_inode->i_mutex); 2786 2787 error = -EBUSY; 2788 if (d_mountpoint(dentry)) 2789 goto out; 2790 2791 error = security_inode_rmdir(dir, dentry); 2792 if (error) 2793 goto out; 2794 2795 shrink_dcache_parent(dentry); 2796 error = dir->i_op->rmdir(dir, dentry); 2797 if (error) 2798 goto out; 2799 2800 dentry->d_inode->i_flags |= S_DEAD; 2801 dont_mount(dentry); 2802 2803 out: 2804 mutex_unlock(&dentry->d_inode->i_mutex); 2805 dput(dentry); 2806 if (!error) 2807 d_delete(dentry); 2808 return error; 2809 } 2810 2811 static long do_rmdir(int dfd, const char __user *pathname) 2812 { 2813 int error = 0; 2814 char * name; 2815 struct dentry *dentry; 2816 struct nameidata nd; 2817 2818 error = user_path_parent(dfd, pathname, &nd, &name); 2819 if (error) 2820 return error; 2821 2822 switch(nd.last_type) { 2823 case LAST_DOTDOT: 2824 error = -ENOTEMPTY; 2825 goto exit1; 2826 case LAST_DOT: 2827 error = -EINVAL; 2828 goto exit1; 2829 case LAST_ROOT: 2830 error = -EBUSY; 2831 goto exit1; 2832 } 2833 2834 nd.flags &= ~LOOKUP_PARENT; 2835 2836 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2837 dentry = lookup_hash(&nd); 2838 error = PTR_ERR(dentry); 2839 if (IS_ERR(dentry)) 2840 goto exit2; 2841 if (!dentry->d_inode) { 2842 error = -ENOENT; 2843 goto exit3; 2844 } 2845 error = mnt_want_write(nd.path.mnt); 2846 if (error) 2847 goto exit3; 2848 error =
security_path_rmdir(&nd.path, dentry); 2849 if (error) 2850 goto exit4; 2851 error = vfs_rmdir(nd.path.dentry->d_inode, dentry); 2852 exit4: 2853 mnt_drop_write(nd.path.mnt); 2854 exit3: 2855 dput(dentry); 2856 exit2: 2857 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2858 exit1: 2859 path_put(&nd.path); 2860 putname(name); 2861 return error; 2862 } 2863 2864 SYSCALL_DEFINE1(rmdir, const char __user *, pathname) 2865 { 2866 return do_rmdir(AT_FDCWD, pathname); 2867 } 2868 2869 int vfs_unlink(struct inode *dir, struct dentry *dentry) 2870 { 2871 int error = may_delete(dir, dentry, 0); 2872 2873 if (error) 2874 return error; 2875 2876 if (!dir->i_op->unlink) 2877 return -EPERM; 2878 2879 mutex_lock(&dentry->d_inode->i_mutex); 2880 if (d_mountpoint(dentry)) 2881 error = -EBUSY; 2882 else { 2883 error = security_inode_unlink(dir, dentry); 2884 if (!error) { 2885 error = dir->i_op->unlink(dir, dentry); 2886 if (!error) 2887 dont_mount(dentry); 2888 } 2889 } 2890 mutex_unlock(&dentry->d_inode->i_mutex); 2891 2892 /* We don't d_delete() NFS sillyrenamed files--they still exist. */ 2893 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { 2894 fsnotify_link_count(dentry->d_inode); 2895 d_delete(dentry); 2896 } 2897 2898 return error; 2899 } 2900 2901 /* 2902 * Make sure that the actual truncation of the file will occur outside its 2903 * directory's i_mutex. Truncate can take a long time if there is a lot of 2904 * writeout happening, and we don't want to prevent access to the directory 2905 * while waiting on the I/O. 2906 */ 2907 static long do_unlinkat(int dfd, const char __user *pathname) 2908 { 2909 int error; 2910 char *name; 2911 struct dentry *dentry; 2912 struct nameidata nd; 2913 struct inode *inode = NULL; 2914 2915 error = user_path_parent(dfd, pathname, &nd, &name); 2916 if (error) 2917 return error; 2918 2919 error = -EISDIR; 2920 if (nd.last_type != LAST_NORM) 2921 goto exit1; 2922 2923 nd.flags &= ~LOOKUP_PARENT; 2924 2925 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2926 dentry = lookup_hash(&nd); 2927 error = PTR_ERR(dentry); 2928 if (!IS_ERR(dentry)) { 2929 /* Why not before? Because we want correct error value */ 2930 if (nd.last.name[nd.last.len]) 2931 goto slashes; 2932 inode = dentry->d_inode; 2933 if (!inode) 2934 goto slashes; 2935 ihold(inode); 2936 error = mnt_want_write(nd.path.mnt); 2937 if (error) 2938 goto exit2; 2939 error = security_path_unlink(&nd.path, dentry); 2940 if (error) 2941 goto exit3; 2942 error = vfs_unlink(nd.path.dentry->d_inode, dentry); 2943 exit3: 2944 mnt_drop_write(nd.path.mnt); 2945 exit2: 2946 dput(dentry); 2947 } 2948 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2949 if (inode) 2950 iput(inode); /* truncate the inode here */ 2951 exit1: 2952 path_put(&nd.path); 2953 putname(name); 2954 return error; 2955 2956 slashes: 2957 error = !dentry->d_inode ? -ENOENT : 2958 S_ISDIR(dentry->d_inode->i_mode) ? 
-EISDIR : -ENOTDIR; 2959 goto exit2; 2960 } 2961 2962 SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) 2963 { 2964 if ((flag & ~AT_REMOVEDIR) != 0) 2965 return -EINVAL; 2966 2967 if (flag & AT_REMOVEDIR) 2968 return do_rmdir(dfd, pathname); 2969 2970 return do_unlinkat(dfd, pathname); 2971 } 2972 2973 SYSCALL_DEFINE1(unlink, const char __user *, pathname) 2974 { 2975 return do_unlinkat(AT_FDCWD, pathname); 2976 } 2977 2978 int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) 2979 { 2980 int error = may_create(dir, dentry); 2981 2982 if (error) 2983 return error; 2984 2985 if (!dir->i_op->symlink) 2986 return -EPERM; 2987 2988 error = security_inode_symlink(dir, dentry, oldname); 2989 if (error) 2990 return error; 2991 2992 error = dir->i_op->symlink(dir, dentry, oldname); 2993 if (!error) 2994 fsnotify_create(dir, dentry); 2995 return error; 2996 } 2997 2998 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, 2999 int, newdfd, const char __user *, newname) 3000 { 3001 int error; 3002 char *from; 3003 struct dentry *dentry; 3004 struct path path; 3005 3006 from = getname(oldname); 3007 if (IS_ERR(from)) 3008 return PTR_ERR(from); 3009 3010 dentry = user_path_create(newdfd, newname, &path, 0); 3011 error = PTR_ERR(dentry); 3012 if (IS_ERR(dentry)) 3013 goto out_putname; 3014 3015 error = mnt_want_write(path.mnt); 3016 if (error) 3017 goto out_dput; 3018 error = security_path_symlink(&path, dentry, from); 3019 if (error) 3020 goto out_drop_write; 3021 error = vfs_symlink(path.dentry->d_inode, dentry, from); 3022 out_drop_write: 3023 mnt_drop_write(path.mnt); 3024 out_dput: 3025 dput(dentry); 3026 mutex_unlock(&path.dentry->d_inode->i_mutex); 3027 path_put(&path); 3028 out_putname: 3029 putname(from); 3030 return error; 3031 } 3032 3033 SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) 3034 { 3035 return sys_symlinkat(oldname, AT_FDCWD, newname); 3036 } 3037 3038 int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) 3039 { 3040 struct inode *inode = old_dentry->d_inode; 3041 unsigned max_links = dir->i_sb->s_max_links; 3042 int error; 3043 3044 if (!inode) 3045 return -ENOENT; 3046 3047 error = may_create(dir, new_dentry); 3048 if (error) 3049 return error; 3050 3051 if (dir->i_sb != inode->i_sb) 3052 return -EXDEV; 3053 3054 /* 3055 * A link to an append-only or immutable file cannot be created. 3056 */ 3057 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 3058 return -EPERM; 3059 if (!dir->i_op->link) 3060 return -EPERM; 3061 if (S_ISDIR(inode->i_mode)) 3062 return -EPERM; 3063 3064 error = security_inode_link(old_dentry, dir, new_dentry); 3065 if (error) 3066 return error; 3067 3068 mutex_lock(&inode->i_mutex); 3069 /* Make sure we don't allow creating hardlink to an unlinked file */ 3070 if (inode->i_nlink == 0) 3071 error = -ENOENT; 3072 else if (max_links && inode->i_nlink >= max_links) 3073 error = -EMLINK; 3074 else 3075 error = dir->i_op->link(old_dentry, dir, new_dentry); 3076 mutex_unlock(&inode->i_mutex); 3077 if (!error) 3078 fsnotify_link(dir, inode, new_dentry); 3079 return error; 3080 } 3081 3082 /* 3083 * Hardlinks are often used in delicate situations. We avoid 3084 * security-related surprises by not following symlinks on the 3085 * newname. --KAB 3086 * 3087 * We don't follow them on the oldname either to be compatible 3088 * with linux 2.0, and to avoid hard-linking to directories 3089 * and other special files. 
--ADM 3090 */ 3091 SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, 3092 int, newdfd, const char __user *, newname, int, flags) 3093 { 3094 struct dentry *new_dentry; 3095 struct path old_path, new_path; 3096 int how = 0; 3097 int error; 3098 3099 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) 3100 return -EINVAL; 3101 /* 3102 * To use null names we require CAP_DAC_READ_SEARCH. 3103 * This ensures that not everyone will be able to create 3104 * a hardlink using the passed file descriptor. 3105 */ 3106 if (flags & AT_EMPTY_PATH) { 3107 if (!capable(CAP_DAC_READ_SEARCH)) 3108 return -ENOENT; 3109 how = LOOKUP_EMPTY; 3110 } 3111 3112 if (flags & AT_SYMLINK_FOLLOW) 3113 how |= LOOKUP_FOLLOW; 3114 3115 error = user_path_at(olddfd, oldname, how, &old_path); 3116 if (error) 3117 return error; 3118 3119 new_dentry = user_path_create(newdfd, newname, &new_path, 0); 3120 error = PTR_ERR(new_dentry); 3121 if (IS_ERR(new_dentry)) 3122 goto out; 3123 3124 error = -EXDEV; 3125 if (old_path.mnt != new_path.mnt) 3126 goto out_dput; 3127 error = mnt_want_write(new_path.mnt); 3128 if (error) 3129 goto out_dput; 3130 error = security_path_link(old_path.dentry, &new_path, new_dentry); 3131 if (error) 3132 goto out_drop_write; 3133 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); 3134 out_drop_write: 3135 mnt_drop_write(new_path.mnt); 3136 out_dput: 3137 dput(new_dentry); 3138 mutex_unlock(&new_path.dentry->d_inode->i_mutex); 3139 path_put(&new_path); 3140 out: 3141 path_put(&old_path); 3142 3143 return error; 3144 } 3145 3146 SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) 3147 { 3148 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); 3149 } 3150 3151 /* 3152 * The worst of all namespace operations - renaming directory. "Perverted" 3153 * doesn't even start to describe it. Somebody in UCB had a heck of a trip... 3154 * Problems: 3155 * a) we can get into loop creation. Check is done in is_subdir(). 3156 * b) race potential - two innocent renames can create a loop together. 3157 * That's where 4.4 screws up. Current fix: serialization on 3158 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another 3159 * story. 3160 * c) we have to lock _three_ objects - parents and victim (if it exists). 3161 * And that - after we got ->i_mutex on parents (until then we don't know 3162 * whether the target exists). Solution: try to be smart with locking 3163 * order for inodes. We rely on the fact that tree topology may change 3164 * only under ->s_vfs_rename_mutex _and_ that parent of the object we 3165 * move will be locked. Thus we can rank directories by the tree 3166 * (ancestors first) and rank all non-directories after them. 3167 * That works since everybody except rename does "lock parent, lookup, 3168 * lock child" and rename is under ->s_vfs_rename_mutex. 3169 * HOWEVER, it relies on the assumption that any object with ->lookup() 3170 * has no more than 1 dentry. If "hybrid" objects ever appear, 3171 * we'd better make sure that there's no link(2) for them. 3172 * d) conversion from fhandle to dentry may come at the wrong moment - when 3173 * we are removing the target. Solution: we will have to grab ->i_mutex 3174 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on 3175 * ->i_mutex on parents, which works but leads to some truly excessive 3176 * locking].
3177 */ 3178 static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, 3179 struct inode *new_dir, struct dentry *new_dentry) 3180 { 3181 int error = 0; 3182 struct inode *target = new_dentry->d_inode; 3183 unsigned max_links = new_dir->i_sb->s_max_links; 3184 3185 /* 3186 * If we are going to change the parent - check write permissions, 3187 * we'll need to flip '..'. 3188 */ 3189 if (new_dir != old_dir) { 3190 error = inode_permission(old_dentry->d_inode, MAY_WRITE); 3191 if (error) 3192 return error; 3193 } 3194 3195 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); 3196 if (error) 3197 return error; 3198 3199 dget(new_dentry); 3200 if (target) 3201 mutex_lock(&target->i_mutex); 3202 3203 error = -EBUSY; 3204 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry)) 3205 goto out; 3206 3207 error = -EMLINK; 3208 if (max_links && !target && new_dir != old_dir && 3209 new_dir->i_nlink >= max_links) 3210 goto out; 3211 3212 if (target) 3213 shrink_dcache_parent(new_dentry); 3214 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 3215 if (error) 3216 goto out; 3217 3218 if (target) { 3219 target->i_flags |= S_DEAD; 3220 dont_mount(new_dentry); 3221 } 3222 out: 3223 if (target) 3224 mutex_unlock(&target->i_mutex); 3225 dput(new_dentry); 3226 if (!error) 3227 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3228 d_move(old_dentry,new_dentry); 3229 return error; 3230 } 3231 3232 static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, 3233 struct inode *new_dir, struct dentry *new_dentry) 3234 { 3235 struct inode *target = new_dentry->d_inode; 3236 int error; 3237 3238 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); 3239 if (error) 3240 return error; 3241 3242 dget(new_dentry); 3243 if (target) 3244 mutex_lock(&target->i_mutex); 3245 3246 error = -EBUSY; 3247 if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) 3248 goto out; 3249 3250 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 3251 if (error) 3252 goto out; 3253 3254 if (target) 3255 dont_mount(new_dentry); 3256 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3257 d_move(old_dentry, new_dentry); 3258 out: 3259 if (target) 3260 mutex_unlock(&target->i_mutex); 3261 dput(new_dentry); 3262 return error; 3263 } 3264 3265 int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, 3266 struct inode *new_dir, struct dentry *new_dentry) 3267 { 3268 int error; 3269 int is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 3270 const unsigned char *old_name; 3271 3272 if (old_dentry->d_inode == new_dentry->d_inode) 3273 return 0; 3274 3275 error = may_delete(old_dir, old_dentry, is_dir); 3276 if (error) 3277 return error; 3278 3279 if (!new_dentry->d_inode) 3280 error = may_create(new_dir, new_dentry); 3281 else 3282 error = may_delete(new_dir, new_dentry, is_dir); 3283 if (error) 3284 return error; 3285 3286 if (!old_dir->i_op->rename) 3287 return -EPERM; 3288 3289 old_name = fsnotify_oldname_init(old_dentry->d_name.name); 3290 3291 if (is_dir) 3292 error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry); 3293 else 3294 error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry); 3295 if (!error) 3296 fsnotify_move(old_dir, new_dir, old_name, is_dir, 3297 new_dentry->d_inode, old_dentry); 3298 fsnotify_oldname_free(old_name); 3299 3300 return error; 3301 } 3302 3303 SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, 3304 int, newdfd, const char __user *, 
newname) 3305 { 3306 struct dentry *old_dir, *new_dir; 3307 struct dentry *old_dentry, *new_dentry; 3308 struct dentry *trap; 3309 struct nameidata oldnd, newnd; 3310 char *from; 3311 char *to; 3312 int error; 3313 3314 error = user_path_parent(olddfd, oldname, &oldnd, &from); 3315 if (error) 3316 goto exit; 3317 3318 error = user_path_parent(newdfd, newname, &newnd, &to); 3319 if (error) 3320 goto exit1; 3321 3322 error = -EXDEV; 3323 if (oldnd.path.mnt != newnd.path.mnt) 3324 goto exit2; 3325 3326 old_dir = oldnd.path.dentry; 3327 error = -EBUSY; 3328 if (oldnd.last_type != LAST_NORM) 3329 goto exit2; 3330 3331 new_dir = newnd.path.dentry; 3332 if (newnd.last_type != LAST_NORM) 3333 goto exit2; 3334 3335 oldnd.flags &= ~LOOKUP_PARENT; 3336 newnd.flags &= ~LOOKUP_PARENT; 3337 newnd.flags |= LOOKUP_RENAME_TARGET; 3338 3339 trap = lock_rename(new_dir, old_dir); 3340 3341 old_dentry = lookup_hash(&oldnd); 3342 error = PTR_ERR(old_dentry); 3343 if (IS_ERR(old_dentry)) 3344 goto exit3; 3345 /* source must exist */ 3346 error = -ENOENT; 3347 if (!old_dentry->d_inode) 3348 goto exit4; 3349 /* unless the source is a directory trailing slashes give -ENOTDIR */ 3350 if (!S_ISDIR(old_dentry->d_inode->i_mode)) { 3351 error = -ENOTDIR; 3352 if (oldnd.last.name[oldnd.last.len]) 3353 goto exit4; 3354 if (newnd.last.name[newnd.last.len]) 3355 goto exit4; 3356 } 3357 /* source should not be ancestor of target */ 3358 error = -EINVAL; 3359 if (old_dentry == trap) 3360 goto exit4; 3361 new_dentry = lookup_hash(&newnd); 3362 error = PTR_ERR(new_dentry); 3363 if (IS_ERR(new_dentry)) 3364 goto exit4; 3365 /* target should not be an ancestor of source */ 3366 error = -ENOTEMPTY; 3367 if (new_dentry == trap) 3368 goto exit5; 3369 3370 error = mnt_want_write(oldnd.path.mnt); 3371 if (error) 3372 goto exit5; 3373 error = security_path_rename(&oldnd.path, old_dentry, 3374 &newnd.path, new_dentry); 3375 if (error) 3376 goto exit6; 3377 error = vfs_rename(old_dir->d_inode, old_dentry, 3378 new_dir->d_inode, new_dentry); 3379 exit6: 3380 mnt_drop_write(oldnd.path.mnt); 3381 exit5: 3382 dput(new_dentry); 3383 exit4: 3384 dput(old_dentry); 3385 exit3: 3386 unlock_rename(new_dir, old_dir); 3387 exit2: 3388 path_put(&newnd.path); 3389 putname(to); 3390 exit1: 3391 path_put(&oldnd.path); 3392 putname(from); 3393 exit: 3394 return error; 3395 } 3396 3397 SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) 3398 { 3399 return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname); 3400 } 3401 3402 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) 3403 { 3404 int len; 3405 3406 len = PTR_ERR(link); 3407 if (IS_ERR(link)) 3408 goto out; 3409 3410 len = strlen(link); 3411 if (len > (unsigned) buflen) 3412 len = buflen; 3413 if (copy_to_user(buffer, link, len)) 3414 len = -EFAULT; 3415 out: 3416 return len; 3417 } 3418 3419 /* 3420 * A helper for ->readlink(). This should be used *ONLY* for symlinks that 3421 * have ->follow_link() touching nd only in nd_set_link(). Using (or not 3422 * using) it for any given inode is up to filesystem. 
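 *
 * A hedged wiring sketch, not part of the original file, mirroring the
 * page_symlink_inode_operations table at the bottom of this file: a
 * filesystem that keeps the link body in the page cache can simply hook up
 * the generic helpers (the name foo_symlink_iops is purely illustrative):
 *
 *	static const struct inode_operations foo_symlink_iops = {
 *		.readlink	= generic_readlink,
 *		.follow_link	= page_follow_link_light,
 *		.put_link	= page_put_link,
 *	};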
3423 */ 3424 int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) 3425 { 3426 struct nameidata nd; 3427 void *cookie; 3428 int res; 3429 3430 nd.depth = 0; 3431 cookie = dentry->d_inode->i_op->follow_link(dentry, &nd); 3432 if (IS_ERR(cookie)) 3433 return PTR_ERR(cookie); 3434 3435 res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd)); 3436 if (dentry->d_inode->i_op->put_link) 3437 dentry->d_inode->i_op->put_link(dentry, &nd, cookie); 3438 return res; 3439 } 3440 3441 int vfs_follow_link(struct nameidata *nd, const char *link) 3442 { 3443 return __vfs_follow_link(nd, link); 3444 } 3445 3446 /* get the link contents into pagecache */ 3447 static char *page_getlink(struct dentry * dentry, struct page **ppage) 3448 { 3449 char *kaddr; 3450 struct page *page; 3451 struct address_space *mapping = dentry->d_inode->i_mapping; 3452 page = read_mapping_page(mapping, 0, NULL); 3453 if (IS_ERR(page)) 3454 return (char*)page; 3455 *ppage = page; 3456 kaddr = kmap(page); 3457 nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1); 3458 return kaddr; 3459 } 3460 3461 int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) 3462 { 3463 struct page *page = NULL; 3464 char *s = page_getlink(dentry, &page); 3465 int res = vfs_readlink(dentry,buffer,buflen,s); 3466 if (page) { 3467 kunmap(page); 3468 page_cache_release(page); 3469 } 3470 return res; 3471 } 3472 3473 void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd) 3474 { 3475 struct page *page = NULL; 3476 nd_set_link(nd, page_getlink(dentry, &page)); 3477 return page; 3478 } 3479 3480 void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 3481 { 3482 struct page *page = cookie; 3483 3484 if (page) { 3485 kunmap(page); 3486 page_cache_release(page); 3487 } 3488 } 3489 3490 /* 3491 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS 3492 */ 3493 int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) 3494 { 3495 struct address_space *mapping = inode->i_mapping; 3496 struct page *page; 3497 void *fsdata; 3498 int err; 3499 char *kaddr; 3500 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; 3501 if (nofs) 3502 flags |= AOP_FLAG_NOFS; 3503 3504 retry: 3505 err = pagecache_write_begin(NULL, mapping, 0, len-1, 3506 flags, &page, &fsdata); 3507 if (err) 3508 goto fail; 3509 3510 kaddr = kmap_atomic(page); 3511 memcpy(kaddr, symname, len-1); 3512 kunmap_atomic(kaddr); 3513 3514 err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, 3515 page, fsdata); 3516 if (err < 0) 3517 goto fail; 3518 if (err < len-1) 3519 goto retry; 3520 3521 mark_inode_dirty(inode); 3522 return 0; 3523 fail: 3524 return err; 3525 } 3526 3527 int page_symlink(struct inode *inode, const char *symname, int len) 3528 { 3529 return __page_symlink(inode, symname, len, 3530 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); 3531 } 3532 3533 const struct inode_operations page_symlink_inode_operations = { 3534 .readlink = generic_readlink, 3535 .follow_link = page_follow_link_light, 3536 .put_link = page_put_link, 3537 }; 3538 3539 EXPORT_SYMBOL(user_path_at); 3540 EXPORT_SYMBOL(follow_down_one); 3541 EXPORT_SYMBOL(follow_down); 3542 EXPORT_SYMBOL(follow_up); 3543 EXPORT_SYMBOL(get_write_access); /* binfmt_aout */ 3544 EXPORT_SYMBOL(getname); 3545 EXPORT_SYMBOL(lock_rename); 3546 EXPORT_SYMBOL(lookup_one_len); 3547 EXPORT_SYMBOL(page_follow_link_light); 3548 EXPORT_SYMBOL(page_put_link); 3549 EXPORT_SYMBOL(page_readlink); 3550 
EXPORT_SYMBOL(__page_symlink); 3551 EXPORT_SYMBOL(page_symlink); 3552 EXPORT_SYMBOL(page_symlink_inode_operations); 3553 EXPORT_SYMBOL(kern_path); 3554 EXPORT_SYMBOL(vfs_path_lookup); 3555 EXPORT_SYMBOL(inode_permission); 3556 EXPORT_SYMBOL(unlock_rename); 3557 EXPORT_SYMBOL(vfs_create); 3558 EXPORT_SYMBOL(vfs_follow_link); 3559 EXPORT_SYMBOL(vfs_link); 3560 EXPORT_SYMBOL(vfs_mkdir); 3561 EXPORT_SYMBOL(vfs_mknod); 3562 EXPORT_SYMBOL(generic_permission); 3563 EXPORT_SYMBOL(vfs_readlink); 3564 EXPORT_SYMBOL(vfs_rename); 3565 EXPORT_SYMBOL(vfs_rmdir); 3566 EXPORT_SYMBOL(vfs_symlink); 3567 EXPORT_SYMBOL(vfs_unlink); 3568 EXPORT_SYMBOL(dentry_unhash); 3569 EXPORT_SYMBOL(generic_readlink); 3570
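
/*
 * A hedged usage sketch, not part of the original file: an in-kernel caller
 * (typically a filesystem) looking up a single known component with the
 * exported lookup_one_len() helper above must already hold the parent's
 * i_mutex, as the WARN_ON_ONCE() in that helper checks. The component name
 * "config" and the surrounding variables are purely illustrative; a negative
 * result (child->d_inode == NULL) means the name does not exist yet:
 *
 *	struct dentry *child;
 *
 *	mutex_lock(&dir->d_inode->i_mutex);
 *	child = lookup_one_len("config", dir, strlen("config"));
 *	mutex_unlock(&dir->d_inode->i_mutex);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	if (!child->d_inode) {
 *		dput(child);
 *		return -ENOENT;
 *	}
 *	...
 *	dput(child);
 */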