// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

/* Delay (in seconds) before an unused, auto-added cell may be garbage
 * collected, and the clamp range applied to DNS-record TTLs.
 */
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;

static void afs_queue_cell_manager(struct afs_net *);
static void afs_manage_cell_work(struct work_struct *);

/*
 * Drop a count on net->cells_outstanding and wake anyone waiting for it to
 * reach zero (see the wait in afs_cell_purge()).
 */
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 *
 * A count on cells_outstanding is taken for the pending timer; if
 * timer_reduce() indicates the timer was already pending, the extra count is
 * dropped again.  If the namespace is no longer live, the manager is queued
 * directly instead so that teardown makes progress.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	} else {
		afs_queue_cell_manager(net);
	}
}

/*
 * Look up and get an activation reference on a cell record.  The caller must
 * hold net->cells_lock at least read-locked.
 */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	/* A NULL name means the default "workstation" cell, if one has been
	 * configured (see afs_cell_init()).
	 */
	if (!name) {
		cell = net->ws_cell;
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	/* Cells are kept in an rb-tree ordered by case-insensitive name
	 * comparison, with ties broken by name length.
	 */
	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}

/*
 * Look up and get an activation reference on a cell record.
 *
 * Takes net->cells_lock read-locked around afs_find_cell_locked().
 */
struct afs_cell *afs_find_cell(struct afs_net *net,
			       const char *name, unsigned int namesz,
			       enum afs_cell_trace reason)
{
	struct afs_cell *cell;

	down_read(&net->cells_lock);
	cell = afs_find_cell_locked(net, name, namesz, reason);
	up_read(&net->cells_lock);
	return cell;
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 *
 * Returns the new record (ref 1, active 0) or an ERR_PTR.  @addresses, if
 * given, is a colon-separated textual list of VL server addresses; otherwise
 * an empty list is installed and a DNS lookup is left to be done later.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* The name is stored lowercased with a '.' placed immediately before
	 * it; cell->name is then advanced past the dot, so the buffer must be
	 * freed with kfree(cell->name - 1).
	 */
	cell->name = kmalloc(1 + namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->name[0] = '.';
	cell->name++;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	cell->net = net;
	refcount_set(&cell->ref, 1);
	atomic_set(&cell->active, 0);
	INIT_WORK(&cell->manager, afs_manage_cell_work);
	init_rwsem(&cell->vs_lock);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	seqlock_init(&cell->fs_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		/* Explicitly-configured addresses never expire. */
		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		/* No addresses given: install an empty list that is
		 * immediately considered expired so a DNS lookup gets done.
		 */
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
	atomic_inc(&net->cells_outstanding);
	cell->debug_id = atomic_inc_return(&cell_debug_id);
	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name - 1);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that that actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* Same ordering as afs_find_cell_locked(): case-insensitive
		 * name, then length.
		 */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	/* Insert the candidate with an active count of 2: one for being in
	 * the tree and one for the caller's use.
	 */
	cell = candidate;
	candidate = NULL;
	atomic_set(&cell->active, 2);
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	afs_queue_cell(cell, afs_cell_trace_get_queue_new);

wait_for_cell:
	/* Wait for the manager to drive the cell to a stable state before
	 * handing it back.
	 */
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
		       afs_cell_trace_wait);
	_debug("wait_for_cell");
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
		       }));

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_REMOVED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, afs_cell_trace_use_lookup);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 *
 * The string is of the form "cellname[:colon-separated VL addresses]".  The
 * root cell is pinned against garbage collection (AFS_CELL_FL_NO_GC) and
 * installed as net->ws_cell.
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* Reject empty names, names beginning/ending with '.', names
	 * containing '/', and names containing "..".
	 */
	if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
		return -EINVAL;
	if (memchr(rootcell, '/', len))
		return -EINVAL;
	cp = strstr(rootcell, "..");
	if (cp && cp < rootcell + len)
		return -EINVAL;

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin against GC, taking an extra active count the first time only. */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	afs_see_cell(new_root, afs_cell_trace_see_ws);
	old_root = net->ws_cell;
	net->ws_cell = new_root;
	up_write(&net->cells_lock);

	afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 *
 * The new expiry time is clamped to [now + min_ttl, now + max_ttl]; lookup
 * failures are retried after 10s.  Waiters on dns_lookup_count are woken on
 * all exit paths.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Substitute an empty list carrying a failure status mapped
		 * from the error code.
		 */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			/* Retry a failed lookup fairly soon. */
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 *
	 * NOTE(review): if the new list is empty and the old one isn't, vllist
	 * is neither installed nor visibly released on this path — verify
	 * against afs_dns_query()'s ownership rules that this isn't a leak.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Destroy a cell record
 *
 * RCU callback scheduled by afs_put_cell() once the refcount hits zero; frees
 * everything the cell owns and drops the net's cells_outstanding count taken
 * in afs_alloc_cell().
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	/* The name buffer starts one byte before cell->name (the '.' prefix
	 * laid down by afs_alloc_cell()).
	 */
	kfree(cell->name - 1);
	kfree(cell);

	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 *
 * Takes a count on cells_outstanding for the work item, dropped again if the
 * work was already queued.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r;

	__refcount_inc(&cell->ref, &r);
	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
	return cell;
}

/*
 * Drop a reference on a cell record.
 *
 * When the last reference goes, the active count is expected to already be
 * zero and destruction is deferred to RCU via afs_cell_destroy().  NULL is
 * tolerated.
 */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			call_rcu(&cell->rcu, afs_cell_destroy);
		}
	}
}

/*
 * Note a cell becoming more active.
 */
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	WARN_ON(r == 0);
	a = atomic_inc_return(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
	return cell;
}

/*
 * Record a cell becoming less active.  When the active counter reaches 1, it
 * is scheduled for destruction, but may get reactivated.
 *
 * The expiry delay is afs_cell_gc_delay if the cell still has VL servers,
 * otherwise zero (eligible for immediate GC).  NULL is tolerated.
 */
void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	/* NOTE(review): vl_servers is read here without vl_servers_lock or
	 * rcu_dereference(); afs_manage_cells() takes the lock for the same
	 * check — confirm this unlocked read is intentional.
	 */
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	debug_id = cell->debug_id;
	r = refcount_read(&cell->ref);
	a = atomic_dec_return(&cell->active);
	trace_afs_cell(debug_id, r, a, reason);
	WARN_ON(a == 0);
	if (a == 1)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(net, expire_delay);
}

/*
 * Note that a cell has been seen.
 *
 * Emits a tracepoint only; does not change any counts.
 */
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	a = atomic_read(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
}

/*
 * Queue a cell for management, giving the workqueue a ref to hold.
 *
 * The ref is dropped immediately if the manager work item was already queued.
 */
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	afs_get_cell(cell, reason);
	if (!queue_work(afs_wq, &cell->manager))
		afs_put_cell(cell, afs_cell_trace_put_queue_fail);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user.  The key name is
	 * "afs@" followed by the lowercased cell name (cell->name fits, as it
	 * is at most AFS_MAXCELLNAME bytes plus the NUL).
	 */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 *
 * Ensures the anonymous key exists, creates the procfs entries and splices
 * the cell into net->proc_cells (kept sorted by name) and the dynamic root.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	/* Find the sorted insertion point in the proc_cells list. */
	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	/* Manual RCU hlist insertion.
	 * NOTE(review): the publish stores &proc_link.next rather than
	 * &proc_link — the addresses coincide as next is hlist_node's first
	 * member, but verify against standard hlist_add_*_rcu() usage.
	 */
	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 *
 * Undoes afs_activate_cell(): removes the procfs entries, unlinks from
 * net->proc_cells and removes the dynamic root directory.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	if (!hlist_unhashed(&cell->proc_link))
		hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	int ret, active;

	_enter("%s", cell->name);

	/* State machine driven by cell->state; each transition is published
	 * with smp_store_release() and waiters on &cell->state are woken (see
	 * the wait in afs_lookup_cell()).
	 */
again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		/* Try to retire the cell: if the active count can be moved
		 * 1 -> 0 under cells_lock, unlink it from the tree and mark
		 * it REMOVED; otherwise it got reactivated in the meantime.
		 */
		down_write(&net->cells_lock);
		active = 1;
		if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
			rb_erase(&cell->net_node, &net->cells);
			trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
				       afs_cell_trace_unuse_delete);
			smp_store_release(&cell->state, AFS_CELL_REMOVED);
		}
		up_write(&net->cells_lock);
		if (cell->state == AFS_CELL_REMOVED) {
			wake_up_var(&cell->state);
			goto final_destruction;
		}
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		/* Still in use: refresh the DNS records if requested, then
		 * stay ACTIVE.  Otherwise start winding the cell down.
		 */
		if (atomic_read(&cell->active) > 1) {
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		/* Reactivated whilst being torn down: back out. */
		if (atomic_read(&cell->active) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_REMOVED:
		goto done;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The root volume is pinning the cell */
	afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;
	afs_put_cell(cell, afs_cell_trace_put_destroy);
}

/*
 * Work item wrapper: run the manager state machine, then drop the ref that
 * afs_queue_cell() gave the workqueue.
 */
static void afs_manage_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);

	afs_manage_cell(cell);
	afs_put_cell(cell, afs_cell_trace_put_queue_work);
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	down_read(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned active;
		bool sched_cell = false;

		active = atomic_read(&cell->active);
		trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
			       active, afs_cell_trace_manage);

		ASSERTCMP(active, >=, 1);

		if (purging) {
			/* On namespace teardown, drop the no-GC pin taken in
			 * afs_cell_init() so the cell can be reclaimed.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
				active = atomic_dec_return(&cell->active);
				trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
					       active, afs_cell_trace_unuse_pin);
			}
		}

		if (active == 1) {
			/* Only the tree holds the cell active; decide whether
			 * its grace period has expired.
			 */
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
	}

	up_read(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 *
 * Drops the workstation cell, cancels the management timer (reclaiming its
 * count on cells_outstanding), kicks the manager to tear down the remaining
 * cells and waits until cells_outstanding drains to zero.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	down_write(&net->cells_lock);
	ws = net->ws_cell;
	net->ws_cell = NULL;
	up_write(&net->cells_lock);
	afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}