// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

/* Delay (in seconds) before an unused, GC-able cell is actually reaped. */
static unsigned __read_mostly afs_cell_gc_delay = 10;
/* Clamp bounds (in seconds) applied to the TTL returned by the DNS. */
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
/* Monotonic ID source used to tag cells in tracepoints. */
static atomic_t cell_debug_id;

static void afs_queue_cell_manager(struct afs_net *);
static void afs_manage_cell_work(struct work_struct *);

/*
 * Drop one count from net->cells_outstanding and wake anyone waiting for it
 * to reach zero (see afs_cell_purge()).
 */
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 *
 * @delay is in seconds.  Each pending timer/work submission holds a count on
 * net->cells_outstanding; if timer_reduce() reports the timer was already
 * pending at an earlier/equal expiry, the extra count is dropped again.  If
 * the namespace is being torn down (!net->live), the manager is kicked
 * directly instead of arming a timer.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	} else {
		afs_queue_cell_manager(net);
	}
}

/*
 * Look up and get an activation reference on a cell record.  The caller must
 * hold net->cells_lock at least read-locked.
 *
 * A NULL @name means "the workstation cell" (net->ws_cell); otherwise the
 * cell is looked up case-insensitively in the net->cells rbtree.  Returns the
 * cell with its active count raised, or an ERR_PTR:
 *   -EINVAL         name given but zero-length
 *   -ENAMETOOLONG   name longer than AFS_MAXCELLNAME
 *   -EDESTADDRREQ   no workstation cell configured
 *   -ENOENT         named cell not in the tree
 */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	if (!name) {
		cell = rcu_dereference_protected(net->ws_cell,
						 lockdep_is_held(&net->cells_lock));
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		/* Compare the common prefix case-insensitively, then break
		 * ties by length so the ordering is total.
		 */
		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}

/*
 * Look up and get an activation reference on a cell record.
 */
struct afs_cell *afs_find_cell(struct afs_net *net,
			       const char *name, unsigned int namesz,
			       enum afs_cell_trace reason)
{
	struct afs_cell *cell;

	/* Locked wrapper around afs_find_cell_locked(); see that function
	 * for the name semantics and error returns.
	 */
	down_read(&net->cells_lock);
	cell = afs_find_cell_locked(net, name, namesz, reason);
	up_read(&net->cells_lock);
	return cell;
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 *
 * @addresses, if non-NULL, is a colon-separated textual list of VL server
 * addresses provided by configuration; otherwise an empty list is installed
 * and the DNS will be consulted later.  The returned cell has one ref and an
 * active count of 0; it is not yet in the cells tree.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* One extra byte in front of the name for a '.' prefix (presumably
	 * for forming the dotted dynroot name — TODO confirm against the
	 * dynroot code) plus one byte behind for the NUL terminator.
	 */
	cell->name = kmalloc(1 + namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->name[0] = '.';
	cell->name++;		/* cell->name now points past the '.'; free with name - 1 */
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	cell->net = net;
	refcount_set(&cell->ref, 1);
	atomic_set(&cell->active, 0);
	INIT_WORK(&cell->manager, afs_manage_cell_work);
	init_rwsem(&cell->vs_lock);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	seqlock_init(&cell->fs_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		/* Configured addresses never expire. */
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		/* Already expired, so a DNS lookup will be triggered. */
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
	atomic_inc(&net->cells_outstanding);
	cell->debug_id = atomic_inc_return(&cell_debug_id);
	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name - 1);	/* undo the '.'-prefix offset */
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.
 * Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		/* Fast path: the cell may already exist. */
		cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* Same comparison as afs_find_cell_locked(): prefix
		 * case-insensitively, then length.
		 */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	/* Active count of 2: presumably one pinning the cell whilst it's in
	 * the tree and one for this caller — the error path below drops one
	 * with afs_unuse_cell() (TODO confirm against afs_manage_cell(),
	 * which retires the cell by cmpxchg'ing active 1 -> 0).
	 */
	atomic_set(&cell->active, 2);
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	/* Hand the cell to the manager to be activated (and DNS-queried). */
	afs_queue_cell(cell, afs_cell_trace_get_queue_new);

wait_for_cell:
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
		       afs_cell_trace_wait);
	_debug("wait_for_cell");
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
		       }));

	/* Check the state obtained from the wait check.
	 */
	if (state == AFS_CELL_REMOVED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		/* Take an active ref on the pre-existing cell before
		 * dropping the lock so it can't go away under us.
		 */
		afs_use_cell(cursor, afs_cell_trace_use_lookup);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 *
 * @rootcell has the form "cellname[:vladdr[:vladdr]...]".  The named cell is
 * looked up (created if needed), pinned against garbage collection, and
 * installed as net->ws_cell, replacing and unpinning any previous root.
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* Reject empty names, leading/trailing dots, '/' and "..". */
	if (len == 0 || !rootcell[0] || rootcell[0] == '.' ||
	    rootcell[len - 1] == '.')
		return -EINVAL;
	if (memchr(rootcell, '/', len))
		return -EINVAL;
	cp = strstr(rootcell, "..");
	if (cp && cp < rootcell + len)
		return -EINVAL;

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin the root cell: take an extra active ref the first time the
	 * NO_GC flag is set on it.
	 */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	afs_see_cell(new_root, afs_cell_trace_see_ws);
	old_root = rcu_replace_pointer(net->ws_cell, new_root,
				       lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);

	afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 *
 * On DNS failure (other than ENOMEM) a stub "unavailable" list is created
 * carrying a status describing the failure.  The new list only replaces the
 * installed one if it has servers or the old one had none.  Always bumps
 * dns_lookup_count (release-ordered against dns_source/dns_status) and wakes
 * any waiters.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Build an empty list that records why the lookup failed. */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	/* Clamp the record expiry to [now + min_ttl, now + max_ttl]. */
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			/* Retry failed lookups again fairly soon. */
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Destroy a cell record
 *
 * RCU callback scheduled by afs_put_cell() once the refcount hits zero; the
 * ref and active counts must both be zero by now.
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	kfree(cell->name - 1);	/* name points past the '.' prefix byte */
	kfree(cell);

	/* Pairs with the inc in afs_alloc_cell(). */
	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	/* If the work was already queued, drop the count we just took. */
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r;

	__refcount_inc(&cell->ref, &r);
	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
	return cell;
}

/*
 * Drop a reference on a cell record.
 *
 * When the last ref goes, destruction is deferred to RCU via
 * afs_cell_destroy().  NULL is tolerated.
 */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		/* Snapshot the debug ID before the dec: the cell may be
		 * freed once the count reaches zero.
		 */
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			call_rcu(&cell->rcu, afs_cell_destroy);
		}
	}
}

/*
 * Note a cell becoming more active.
 */
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	WARN_ON(r == 0);
	a = atomic_inc_return(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
	return cell;
}

/*
 * Record a cell becoming less active.  When the active counter reaches 1, it
 * is scheduled for destruction, but may get reactivated.
 */
void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	/* Stamp the moment of inactivity; the GC uses last_inactive plus the
	 * delay below to decide when the cell can be reaped.  A cell with no
	 * VL servers recorded expires immediately.
	 */
	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	debug_id = cell->debug_id;
	r = refcount_read(&cell->ref);
	a = atomic_dec_return(&cell->active);
	trace_afs_cell(debug_id, r, a, reason);
	WARN_ON(a == 0);
	if (a == 1)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(net, expire_delay);
}

/*
 * Note that a cell has been seen.
 *
 * Trace-only: emits a tracepoint with the current ref/active counts and takes
 * no references.
 */
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	a = atomic_read(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
}

/*
 * Queue a cell for management, giving the workqueue a ref to hold.
 */
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	afs_get_cell(cell, reason);
	/* The work item owns the ref; drop it again if already queued. */
	if (!queue_work(afs_wq, &cell->manager))
		afs_put_cell(cell, afs_cell_trace_put_queue_fail);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 *
 * The key is named "afs@<cellname>" (lowercased); keyname is sized for the
 * "afs@" prefix, the maximum cell name and a NUL.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);	/* copies the NUL terminator too */

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 *
 * Ensures the anonymous key exists, creates the /proc entries and inserts the
 * cell into net->proc_cells in name order (under proc_cells_lock, published
 * with RCU so that readers can walk the list locklessly), then creates its
 * dynroot directory.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	/* Find the first entry that sorts after us. */
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	/* Manual RCU hlist insertion at position p. */
	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 *
 * Reverses afs_activate_cell(): removes the /proc entries, unlinks the cell
 * from net->proc_cells and removes its dynroot directory.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	if (!hlist_unhashed(&cell->proc_link))
		hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	int ret, active;

	_enter("%s", cell->name);

	/* State machine: each transition uses smp_store_release() on
	 * cell->state (paired with the smp_load_acquire() in
	 * afs_lookup_cell()'s wait) and wakes waiters, then loops to process
	 * the new state.
	 */
again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		down_write(&net->cells_lock);
		/* Only retire the cell if nothing re-activated it: the
		 * cmpxchg takes active from 1 (tree pin only) to 0.
		 */
		active = 1;
		if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
			rb_erase(&cell->net_node, &net->cells);
			trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
				       afs_cell_trace_unuse_delete);
			smp_store_release(&cell->state, AFS_CELL_REMOVED);
		}
		up_write(&net->cells_lock);
		if (cell->state == AFS_CELL_REMOVED) {
			wake_up_var(&cell->state);
			goto final_destruction;
		}
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		/* Reactivated whilst inactive: restart construction. */
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->active) > 1) {
			/* Still in use: just refresh the DNS if requested. */
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		/* Someone took a use whilst we were deactivating: back out. */
		if (atomic_read(&cell->active) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_REMOVED:
		goto done;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The root volume is pinning the cell */
	afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;
	afs_put_cell(cell, afs_cell_trace_put_destroy);
}

/*
 * Work item wrapper: run the cell state machine, then drop the ref given to
 * the workqueue by afs_queue_cell().
 */
static void afs_manage_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);

	afs_manage_cell(cell);
	afs_put_cell(cell, afs_cell_trace_put_queue_work);
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	down_read(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned active;
		bool sched_cell = false;

		active = atomic_read(&cell->active);
		trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
			       active, afs_cell_trace_manage);

		ASSERTCMP(active, >=, 1);

		if (purging) {
			/* On teardown, drop the pin taken for NO_GC cells
			 * (e.g. the root cell) so they can be reaped too.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
				active = atomic_dec_return(&cell->active);
				trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
					       active, afs_cell_trace_unuse_pin);
			}
		}

		if (active == 1) {
			/* Only the tree pin remains: candidate for GC once
			 * its post-use grace period has elapsed.
			 */
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
	}

	up_read(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	/* Consume the increment our queuer gave us. */
	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 *
 * Unsets the workstation cell, stops the management timer, kicks the manager
 * one final time so every cell gets reaped, then waits for all outstanding
 * cell work (cells_outstanding) to drain to zero.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	down_write(&net->cells_lock);
	ws = rcu_replace_pointer(net->ws_cell, NULL,
				 lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);
	afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);

	_debug("del timer");
	/* A pending timer holds a cells_outstanding count; release it. */
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}