/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;

static void afs_manage_cell(struct work_struct *);

static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	return ret == 0 ? cell : ERR_PTR(ret);
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *vllist)
{
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}
	if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
		return ERR_PTR(-EINVAL);

	_enter("%*.*s,%s", namelen, namelen, name, vllist);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
		       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_addrs_lock);

	/* Fill in the VL server list if we were given a list of addresses to
	 * use.
	 */
	if (vllist) {
		struct afs_addr_list *alist;

		alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
					     VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(alist)) {
			ret = PTR_ERR(alist);
			goto parse_failed;
		}

		rcu_assign_pointer(cell->vl_addrs, alist);
		cell->dns_expiry = TIME64_MAX;
	}

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
	ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
	smp_rmb();

	switch (READ_ONCE(cell->state)) {
	case AFS_CELL_FAILED:
		ret = cell->error;
		goto error;
	default:
		_debug("weird %u %d", cell->state, cell->error);
		goto error;
	case AFS_CELL_ACTIVE:
		break;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = rcu_access_pointer(net->ws_cell);
	rcu_assign_pointer(net->ws_cell, new_root);
	write_sequnlock(&net->cells_lock);

	afs_put_cell(net, old_root);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static void afs_update_cell(struct afs_cell *cell)
{
	struct afs_addr_list *alist, *old;
	time64_t now, expiry;

	_enter("%s", cell->name);

	alist = afs_dns_query(cell, &expiry);
	if (IS_ERR(alist)) {
		switch (PTR_ERR(alist)) {
		case -ENODATA:
			/* The DNS said that the cell does not exist */
			set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
			clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = ktime_get_real_seconds() + 61;
			break;

		case -EAGAIN:
		case -ECONNREFUSED:
		default:
			set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = ktime_get_real_seconds() + 10;
			break;
		}

		cell->error = -EDESTADDRREQ;
	} else {
		clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
		clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);

		/* Exclusion on changing vl_addrs is achieved by a
		 * non-reentrant work item.
		 */
		old = rcu_dereference_protected(cell->vl_addrs, true);
		rcu_assign_pointer(cell->vl_addrs, alist);
		cell->dns_expiry = expiry;

		if (old)
			afs_put_addrlist(old);
	}

	if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);

	now = ktime_get_real_seconds();
	afs_set_cell_timer(cell->net, cell->dns_expiry - now);
	_leave("");
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

	_enter("%p{%s}", cell, cell->name);

	ASSERTCMP(atomic_read(&cell->usage), ==, 0);

	afs_put_addrlist(rcu_access_pointer(cell->vl_addrs));
	key_put(cell->anonymous_key);
	kfree(cell);

	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
	    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		cell->state = AFS_CELL_UNSET;
		goto again;

	case AFS_CELL_UNSET:
		cell->state = AFS_CELL_ACTIVATING;
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		cell->state = AFS_CELL_ACTIVE;
		smp_wmb();
		clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			time64_t now = ktime_get_real_seconds();
			if (cell->dns_expiry <= now && net->live)
				afs_update_cell(cell);
			goto done;
		}
		cell->state = AFS_CELL_DEACTIVATING;
		goto again;

	case AFS_CELL_DEACTIVATING:
		set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		cell->state = AFS_CELL_INACTIVE;
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	cell->state = AFS_CELL_FAILED;
	smp_wmb();
	if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	goto again;

reverse_deactivation:
	cell->state = AFS_CELL_ACTIVE;
	smp_wmb();
	clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
	wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			time64_t expire_at = cell->last_inactive;

			if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
			    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
				expire_at += afs_cell_gc_delay;
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (cell->dns_expiry <= now)
				sched_cell = true;
			else if (cell->dns_expiry <= next_manage)
				next_manage = cell->dns_expiry;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	write_seqlock(&net->cells_lock);
	ws = rcu_access_pointer(net->ws_cell);
	RCU_INIT_POINTER(net->ws_cell, NULL);
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}