/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix,
				u8 *keybuf)
{
	struct fscache_cookie *cookie;
	unsigned keylen, loop;

	pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
	pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	pr_err("%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	pr_err("%sparent=%p\n",
	       prefix, object->fscache.parent);

	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		if (keybuf && cookie->def)
			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
						      CACHEFILES_KEYBUF_SIZE);
		else
			keylen = 0;
	} else {
		pr_err("%scookie=NULL\n", prefix);
		keylen = 0;
	}
	spin_unlock(&object->fscache.lock);

	if (keylen) {
		pr_err("%skey=[%u] '", prefix, keylen);
		for (loop = 0; loop < keylen; loop++)
			pr_cont("%02x", keybuf[loop]);
		pr_cont("'\n");
	}
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	u8 *keybuf;

	keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
	if (object)
		__cachefiles_printk_object(object, "", keybuf);
	if (xobject)
		__cachefiles_printk_object(xobject, "x", keybuf);
	kfree(keybuf);
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%pd'", dentry);

	write_lock(&cache->active_lock);

	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	_leave(" [no owner]");
	return;

	/* found the object that owns the dentry */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       object->fscache.state->name,
	       dentry);

	if (fscache_object_is_live(&object->fscache)) {
		pr_err("\n");
		pr_err("Error: Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
		pr_err("Error: Object already preemptively buried\n");
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}
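
/*
 * Note on the active-object index: cache->active_nodes is an rbtree of the
 * objects currently backed by a dentry, keyed simply on the dentry pointer
 * value (see the pointer comparisons in the walk above and in
 * cachefiles_mark_object_active() below).  Pointer order is sufficient
 * because at most one active object may own any given backing dentry at a
 * time; an exact pointer match therefore identifies the owner, and a
 * collision on insertion means an old object for the same backing file has
 * not yet finished being torn down.
 */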

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		pr_err("Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	if (fscache_object_is_live(&xobject->fscache)) {
		pr_err("\n");
		pr_err("Error: Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			pr_err("\n");
			pr_err("Error: Overlong wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}
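
/*
 * Illustrative collision sequence for the wait path above (a summary, not a
 * new code path): an old object for the same key is still being torn down
 * when a fresh lookup reaches cachefiles_mark_object_active() and finds its
 * backing dentry already in the rbtree.  The new object then either queues
 * itself behind the old object's pending work item, or sleeps (for up to
 * 60 seconds) for the old object's CACHEFILES_OBJECT_ACTIVE bit to clear; on
 * requeue or timeout it drops its own ACTIVE bit and returns -ETIMEDOUT to
 * the caller.
 */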

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!S_ISDIR(rep->d_inode->i_mode)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			ret = vfs_unlink(dir->d_inode, rep, NULL);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep);
		}

		mutex_unlock(&dir->d_inode->i_mutex);

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	mutex_unlock(&dir->d_inode->i_mutex);

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	if (grave->d_inode) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		ret = vfs_rename(dir->d_inode, rep,
				 cache->graveyard->d_inode, grave, NULL, 0);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
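
/*
 * Worked example of the grave naming above (hypothetical values): with
 * get_seconds() returning 0x565a9f3c and gravecounter incrementing to 42,
 * sprintf("%08x%08x", ...) produces "565a9f3c0000002a", i.e. sixteen hex
 * digits plus the NUL, which is exactly what nbuffer[8 + 8 + 1] allows.
 * Entries that land in the graveyard directory are then left for userspace
 * (the cache manager daemon) to delete, as noted in the function header.
 */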

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(object->dentry->d_inode);
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		mutex_unlock(&dir->d_inode->i_mutex);
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			ret = cachefiles_bury_object(cache, dir,
						     object->dentry, false);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			mutex_unlock(&dir->d_inode->i_mutex);
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(parent->dentry->d_inode);

	if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
		// TODO: convert file to dir
		_leave("looking up in non-directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;
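
	/* For illustration, a cooked key such as (hypothetical contents)
	 *
	 *	"@6a\0Es0gQ\0\0"
	 *
	 * is consumed one NUL-terminated element per trip around the advance
	 * loop: "@6a" names an intermediate directory, "Es0gQ" names the
	 * terminal object, and the empty element (the double NUL) ends the
	 * walk by setting key to NULL above.
	 */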

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next))
		goto lookup_error;

	_debug("next -> %p %s", next, next->d_inode ? "positive" : "negative");

	if (!key)
		object->new = !next->d_inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (!next->d_inode)
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(dir->d_inode, next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode)) {
			pr_err("inode %lu is not a directory\n",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(dir->d_inode, next, S_IFREG, true);
			cachefiles_hist(cachefiles_create_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("create -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode) &&
			   !S_ISREG(next->d_inode->i_mode)
			   ) {
			pr_err("inode %lu is not a file or directory\n",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%pd'", next);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, dir, next, true);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	mutex_unlock(&dir->d_inode->i_mutex);
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		path.dentry = next;
		touch_atime(&path);
	}
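
	/* The ->bmap check below exists because CacheFiles later consults the
	 * backing inode's block map to work out whether a page of the backing
	 * file is backed by an allocated block or by a hole, i.e. whether data
	 * has actually been stored for it.  A backing filesystem whose
	 * address_space_operations lack bmap therefore can't back a data file,
	 * and the object is rejected with -EPERM. */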

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (S_ISREG(object->dentry->d_inode->i_mode)) {
			const struct address_space_operations *aops;

			ret = -EPERM;
			aops = object->dentry->d_inode->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", object->dentry->d_inode->i_ino);
	return 0;

create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
	write_unlock(&cache->active_lock);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	mutex_lock(&dir->d_inode->i_mutex);

	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, subdir->d_inode ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (!subdir->d_inode) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(dir->d_inode, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		ASSERT(subdir->d_inode);

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       subdir->d_inode,
		       subdir->d_inode->i_ino);
	}

	mutex_unlock(&dir->d_inode->i_mutex);

	/* we need to make sure the subdir is a directory */
	ASSERT(subdir->d_inode);

	if (!S_ISDIR(subdir->d_inode->i_mode)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	ret = -EPERM;
	if (!subdir->d_inode->i_op->setxattr ||
	    !subdir->d_inode->i_op->getxattr ||
	    !subdir->d_inode->i_op->lookup ||
	    !subdir->d_inode->i_op->mkdir ||
	    !subdir->d_inode->i_op->create ||
	    (!subdir->d_inode->i_op->rename &&
	     !subdir->d_inode->i_op->rename2) ||
	    !subdir->d_inode->i_op->rmdir ||
	    !subdir->d_inode->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", subdir->d_inode->i_ino);
	return subdir;

check_error:
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	mutex_unlock(&dir->d_inode->i_mutex);
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * find out if an object is in use or not
 * - if it finds the object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%pd/,%s",
	//       dir, filename);

	/* look up the victim */
	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, victim->d_inode ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (!victim->d_inode) {
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object */
	read_lock(&cache->active_lock);

	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	//_leave(" = %p", victim);
	return victim;

object_in_use:
	read_unlock(&cache->active_lock);
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	_debug("victim -> %p %s",
	       victim, victim->d_inode ? "positive" : "negative");

	/* okay... the victim is not being used so we can cull it
	 * - start by marking it as stale
	 */
	_debug("victim is cullable");

	ret = cachefiles_remove_object_xattr(cache, victim);
	if (ret < 0)
		goto error_unlock;

	/* actually remove the victim (drops the dir mutex) */
	_debug("bury");

	ret = cachefiles_bury_object(cache, dir, victim, false);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
error:
	dput(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return -ESTALE;
	}

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;

	//_enter(",%pd/,%s",
	//       dir, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	mutex_unlock(&dir->d_inode->i_mutex);
	dput(victim);
	//_leave(" = 0");
	return 0;
}