xref: /linux/fs/afs/cell.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

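/* Tunables, all in seconds: the delay before an unused cell record may be
 * garbage collected, and the lower/upper bounds imposed on the expiry time of
 * a cell's DNS-sourced VL server list.
 */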
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);

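/* Drop one count from net->cells_outstanding and, when it reaches zero, wake
 * anyone waiting for all cell management work to drain (see afs_cell_purge()).
 */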
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

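		/* The first pass through here is lockless; if it has to be
		 * retried, read_seqbegin_or_lock() takes the lock exclusively
		 * for the second pass.
		 */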
		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				ret = 0;
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	if (ret != 0 && cell)
		afs_put_cell(net, cell);

	return ret == 0 ? cell : ERR_PTR(ret);
}

/*
 * Set up a cell record and fill in its name and VL server address list.  The
 * anonymous key is allocated later, when the cell is activated.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

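	/* One reference for the caller and one for the cells tree that the
	 * record is normally about to be inserted into.
	 */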
	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	seqlock_init(&cell->fs_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
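/* A minimal usage sketch (illustrative only; the cell name used here is made
 * up).  The caller owns a reference that it must later drop with
 * afs_put_cell():
 *
 *	cell = afs_lookup_cell(net, "example.org", 11, NULL, false);
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	...
 *	afs_put_cell(net, cell);
 */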
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
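	/* The cell's manager work item drives it through its state machine;
	 * wait here until it reaches AFS_CELL_ACTIVE or AFS_CELL_FAILED.
	 */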
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
		       }));

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_FAILED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

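	/* The root cell string is of the form "<cellname>[:<vlserver-addrs>]",
	 * for example "example.org:192.0.2.1:192.0.2.2" (the name and
	 * addresses here are purely illustrative).
	 */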
	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = rcu_access_pointer(net->ws_cell);
	rcu_assign_pointer(net->ws_cell, new_root);
	write_sequnlock(&net->cells_lock);

	afs_put_cell(net, old_root);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Keep the lookup error in ret for classification below. */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

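	/* Bump the lookup counter and wake anyone waiting for this lookup to
	 * complete.  The store-release pairs with an acquire load of
	 * dns_lookup_count so that the updated source/status are seen by
	 * anyone who observes the new count.
	 */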
out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

	_enter("%p{%s}", cell, cell->name);

	ASSERTCMP(atomic_read(&cell->usage), ==, 0);

	afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
	afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
	afs_put_cell(cell->net, cell->alias_of);
	key_put(cell->anonymous_key);
	kfree(cell);

	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

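	/* If this drops the count to 1 (the tree's reference), set the cell
	 * timer so that the manager runs (after the GC delay if the cell has
	 * VL servers) and can consider discarding the record.
	 */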
	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

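	/* Add the cell to the list used by /proc and the dynamic root,
	 * keeping the list sorted by cell name.
	 */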
	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
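/*
 * The manager walks the cell through a simple state machine:
 *
 *	UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE
 *
 * ACTIVATING may instead fall into FAILED, and DEACTIVATING may revert to
 * ACTIVE if the cell picked up new users.  A cell that reaches INACTIVE or
 * FAILED with only the tree's reference left is erased from the cells tree
 * and destroyed; otherwise an INACTIVE cell is reset to UNSET and goes round
 * again.
 */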
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

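		/* A usage count of 1 means only the cells tree holds a
		 * reference, so the cell is a GC candidate once it has been
		 * unused for the GC delay (or immediately if it has no VL
		 * servers or we're purging).
		 */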
		if (usage == 1) {
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	write_seqlock(&net->cells_lock);
	ws = rcu_access_pointer(net->ws_cell);
	RCU_INIT_POINTER(net->ws_cell, NULL);
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

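	/* Wait for every cell record to be destroyed and for all outstanding
	 * cell management work and timers to complete.
	 */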
	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}