xref: /linux/fs/afs/server.c (revision fe8ecccc10b3adc071de05ca7af728ca1a4ac9aa)
1 /* AFS server record management
2  *
3  * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include "afs_fs.h"
15 #include "internal.h"
16 
17 static unsigned afs_server_gc_delay = 10;	/* Server record timeout in seconds */
18 static unsigned afs_server_update_delay = 30;	/* Time till VLDB recheck in secs */
19 
/*
 * Note one more outstanding piece of server management work against the
 * namespace.  Paired with afs_dec_servers_outstanding().
 */
static void afs_inc_servers_outstanding(struct afs_net *net)
{
	atomic_inc(&net->servers_outstanding);
}
24 
/*
 * Drop one count of outstanding server management work and, if the count
 * hits zero, wake anyone waiting on it (see afs_purge_servers()).
 */
static void afs_dec_servers_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->servers_outstanding))
		wake_up_var(&net->servers_outstanding);
}
30 
31 /*
32  * Find a server by one of its addresses.
33  */
34 struct afs_server *afs_find_server(struct afs_net *net,
35 				   const struct sockaddr_rxrpc *srx)
36 {
37 	const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
38 	const struct afs_addr_list *alist;
39 	struct afs_server *server = NULL;
40 	unsigned int i;
41 	bool ipv6 = true;
42 	int seq = 0, diff;
43 
44 	if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 ||
45 	    srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 ||
46 	    srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff))
47 		ipv6 = false;
48 
49 	rcu_read_lock();
50 
51 	do {
52 		if (server)
53 			afs_put_server(net, server);
54 		server = NULL;
55 		read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
56 
57 		if (ipv6) {
58 			hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
59 				alist = rcu_dereference(server->addresses);
60 				for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
61 					b = &alist->addrs[i].transport.sin6;
62 					diff = ((u16 __force)a->sin6_port -
63 						(u16 __force)b->sin6_port);
64 					if (diff == 0)
65 						diff = memcmp(&a->sin6_addr,
66 							      &b->sin6_addr,
67 							      sizeof(struct in6_addr));
68 					if (diff == 0)
69 						goto found;
70 				}
71 			}
72 		} else {
73 			hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
74 				alist = rcu_dereference(server->addresses);
75 				for (i = 0; i < alist->nr_ipv4; i++) {
76 					b = &alist->addrs[i].transport.sin6;
77 					diff = ((u16 __force)a->sin6_port -
78 						(u16 __force)b->sin6_port);
79 					if (diff == 0)
80 						diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
81 							(u32 __force)b->sin6_addr.s6_addr32[3]);
82 					if (diff == 0)
83 						goto found;
84 				}
85 			}
86 		}
87 
88 		server = NULL;
89 	found:
90 		if (server && !atomic_inc_not_zero(&server->usage))
91 			server = NULL;
92 
93 	} while (need_seqretry(&net->fs_addr_lock, seq));
94 
95 	done_seqretry(&net->fs_addr_lock, seq);
96 
97 	rcu_read_unlock();
98 	return server;
99 }
100 
101 /*
102  * Look up a server by its UUID
103  */
104 struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
105 {
106 	struct afs_server *server = NULL;
107 	struct rb_node *p;
108 	int diff, seq = 0;
109 
110 	_enter("%pU", uuid);
111 
112 	do {
113 		/* Unfortunately, rbtree walking doesn't give reliable results
114 		 * under just the RCU read lock, so we have to check for
115 		 * changes.
116 		 */
117 		if (server)
118 			afs_put_server(net, server);
119 		server = NULL;
120 
121 		read_seqbegin_or_lock(&net->fs_lock, &seq);
122 
123 		p = net->fs_servers.rb_node;
124 		while (p) {
125 			server = rb_entry(p, struct afs_server, uuid_rb);
126 
127 			diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
128 			if (diff < 0) {
129 				p = p->rb_left;
130 			} else if (diff > 0) {
131 				p = p->rb_right;
132 			} else {
133 				afs_get_server(server);
134 				break;
135 			}
136 
137 			server = NULL;
138 		}
139 	} while (need_seqretry(&net->fs_lock, seq));
140 
141 	done_seqretry(&net->fs_lock, seq);
142 
143 	_leave(" = %p", server);
144 	return server;
145 }
146 
147 /*
148  * Install a server record in the namespace tree
149  */
150 static struct afs_server *afs_install_server(struct afs_net *net,
151 					     struct afs_server *candidate)
152 {
153 	const struct afs_addr_list *alist;
154 	struct afs_server *server;
155 	struct rb_node **pp, *p;
156 	int ret = -EEXIST, diff;
157 
158 	_enter("%p", candidate);
159 
160 	write_seqlock(&net->fs_lock);
161 
162 	/* Firstly install the server in the UUID lookup tree */
163 	pp = &net->fs_servers.rb_node;
164 	p = NULL;
165 	while (*pp) {
166 		p = *pp;
167 		_debug("- consider %p", p);
168 		server = rb_entry(p, struct afs_server, uuid_rb);
169 		diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
170 		if (diff < 0)
171 			pp = &(*pp)->rb_left;
172 		else if (diff > 0)
173 			pp = &(*pp)->rb_right;
174 		else
175 			goto exists;
176 	}
177 
178 	server = candidate;
179 	rb_link_node(&server->uuid_rb, p, pp);
180 	rb_insert_color(&server->uuid_rb, &net->fs_servers);
181 	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
182 
183 	write_seqlock(&net->fs_addr_lock);
184 	alist = rcu_dereference_protected(server->addresses,
185 					  lockdep_is_held(&net->fs_addr_lock.lock));
186 
187 	/* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
188 	 * it in the IPv4 and/or IPv6 reverse-map lists.
189 	 *
190 	 * TODO: For speed we want to use something other than a flat list
191 	 * here; even sorting the list in terms of lowest address would help a
192 	 * bit, but anything we might want to do gets messy and memory
193 	 * intensive.
194 	 */
195 	if (alist->nr_ipv4 > 0)
196 		hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
197 	if (alist->nr_addrs > alist->nr_ipv4)
198 		hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);
199 
200 	write_sequnlock(&net->fs_addr_lock);
201 	ret = 0;
202 	goto out;
203 
204 exists:
205 	afs_get_server(server);
206 out:
207 	write_sequnlock(&net->fs_lock);
208 	return server;
209 }
210 
211 /*
212  * allocate a new server record
213  */
214 static struct afs_server *afs_alloc_server(struct afs_net *net,
215 					   const uuid_t *uuid,
216 					   struct afs_addr_list *alist)
217 {
218 	struct afs_server *server;
219 
220 	_enter("");
221 
222 	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
223 	if (!server)
224 		goto enomem;
225 
226 	atomic_set(&server->usage, 1);
227 	RCU_INIT_POINTER(server->addresses, alist);
228 	server->addr_version = alist->version;
229 	server->uuid = *uuid;
230 	server->flags = (1UL << AFS_SERVER_FL_NEW);
231 	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
232 	rwlock_init(&server->fs_lock);
233 	INIT_HLIST_HEAD(&server->cb_volumes);
234 	rwlock_init(&server->cb_break_lock);
235 
236 	afs_inc_servers_outstanding(net);
237 	_leave(" = %p", server);
238 	return server;
239 
240 enomem:
241 	_leave(" = NULL [nomem]");
242 	return NULL;
243 }
244 
/*
 * Look up an address record for a server by asking the cell's VL servers.
 * Returns a fresh address list on success or an ERR_PTR on failure.
 */
static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
						 struct key *key, const uuid_t *uuid)
{
	struct afs_addr_cursor ac;
	struct afs_addr_list *addrs;
	int ret;

	ret = afs_set_vl_cursor(&ac, cell);
	if (ret < 0)
		return ERR_PTR(ret);

	while (afs_iterate_addresses(&ac)) {
		/* YFS-capable VL servers get the extended endpoint query. */
		if (test_bit(ac.index, &ac.alist->yfs))
			addrs = afs_yfsvl_get_endpoints(cell->net, &ac, key, uuid);
		else
			addrs = afs_vl_get_addrs_u(cell->net, &ac, key, uuid);

		if (ac.error == 0) {
			afs_end_cursor(&ac);
			return addrs;
		}
		if (ac.error == -ECONNABORTED) {
			ac.error = afs_abort_to_error(ac.abort_code);
			break;
		}
		if (ac.error == -ENOMEM || ac.error == -ENONET)
			break;
		if (ac.error != -ENETUNREACH &&
		    ac.error != -EHOSTUNREACH &&
		    ac.error != -ECONNREFUSED) {
			ac.error = -EIO;
			break;
		}
		/* Unreachable or refused: move on to the next VL address. */
	}

	return ERR_PTR(afs_end_cursor(&ac));
}
287 
288 /*
289  * Get or create a fileserver record.
290  */
291 struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
292 				     const uuid_t *uuid)
293 {
294 	struct afs_addr_list *alist;
295 	struct afs_server *server, *candidate;
296 
297 	_enter("%p,%pU", cell->net, uuid);
298 
299 	server = afs_find_server_by_uuid(cell->net, uuid);
300 	if (server)
301 		return server;
302 
303 	alist = afs_vl_lookup_addrs(cell, key, uuid);
304 	if (IS_ERR(alist))
305 		return ERR_CAST(alist);
306 
307 	candidate = afs_alloc_server(cell->net, uuid, alist);
308 	if (!candidate) {
309 		afs_put_addrlist(alist);
310 		return ERR_PTR(-ENOMEM);
311 	}
312 
313 	server = afs_install_server(cell->net, candidate);
314 	if (server != candidate) {
315 		afs_put_addrlist(alist);
316 		kfree(candidate);
317 	}
318 
319 	_leave(" = %p{%d}", server, atomic_read(&server->usage));
320 	return server;
321 }
322 
323 /*
324  * Set the server timer to fire after a given delay, assuming it's not already
325  * set for an earlier time.
326  */
327 static void afs_set_server_timer(struct afs_net *net, time64_t delay)
328 {
329 	if (net->live) {
330 		afs_inc_servers_outstanding(net);
331 		if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
332 			afs_dec_servers_outstanding(net);
333 	}
334 }
335 
/*
 * Server management timer.  We have an increment on fs_outstanding that we
 * need to pass along to the work item.
 */
void afs_servers_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_timer);

	_enter("");
	/* If the manager was already queued it carries its own count, so drop
	 * the one the timer was holding.
	 */
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}
348 
349 /*
350  * Release a reference on a server record.
351  */
352 void afs_put_server(struct afs_net *net, struct afs_server *server)
353 {
354 	unsigned int usage;
355 
356 	if (!server)
357 		return;
358 
359 	server->put_time = ktime_get_real_seconds();
360 
361 	usage = atomic_dec_return(&server->usage);
362 
363 	_enter("{%u}", usage);
364 
365 	if (likely(usage > 0))
366 		return;
367 
368 	afs_set_server_timer(net, afs_server_gc_delay);
369 }
370 
/*
 * RCU callback: finally free a server record once all lockless walkers are
 * guaranteed to be done with it, dropping its hold on the address list too.
 */
static void afs_server_rcu(struct rcu_head *rcu)
{
	struct afs_server *server = container_of(rcu, struct afs_server, rcu);

	afs_put_addrlist(rcu_access_pointer(server->addresses));
	kfree(server);
}
378 
/*
 * destroy a dead server
 *
 * If the server may still hold callback promises for us, issue a giving-up
 * call first, then defer the actual freeing to RCU and drop the outstanding
 * count taken in afs_alloc_server().
 */
static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
{
	struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
	struct afs_addr_cursor ac = {
		.alist	= alist,
		.start	= alist->index,
		/* NOTE(review): .index starts at 0 while .addr points at slot
		 * alist->index - confirm this mismatch is intended.
		 */
		.index	= 0,
		.addr	= &alist->addrs[alist->index],
		.error	= 0,
	};
	_enter("%p", server);

	if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
		afs_fs_give_up_all_callbacks(net, server, &ac, NULL);

	call_rcu(&server->rcu, afs_server_rcu);
	afs_dec_servers_outstanding(net);
}
400 
/*
 * Garbage collect any expired servers.
 *
 * Takes a singly-linked list threaded through ->gc_next and destroys each
 * record whose usage count is still exactly 1 (the tree's own count).
 */
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
	struct afs_server *server;
	bool deleted;
	int usage;

	while ((server = gc_list)) {
		gc_list = server->gc_next;

		write_seqlock(&net->fs_lock);
		usage = 1;
		/* Only delete if ours would be the last ref; the cmpxchg
		 * fails if someone regained a use of the record since it was
		 * put on the GC list.
		 */
		deleted = atomic_try_cmpxchg(&server->usage, &usage, 0);
		if (deleted) {
			rb_erase(&server->uuid_rb, &net->fs_servers);
			hlist_del_rcu(&server->proc_link);
		}
		write_sequnlock(&net->fs_lock);

		if (deleted) {
			/* Unhook from the address reverse-maps under their
			 * own seqlock before destruction.
			 */
			write_seqlock(&net->fs_addr_lock);
			if (!hlist_unhashed(&server->addr4_link))
				hlist_del_rcu(&server->addr4_link);
			if (!hlist_unhashed(&server->addr6_link))
				hlist_del_rcu(&server->addr6_link);
			write_sequnlock(&net->fs_addr_lock);
			afs_destroy_server(net, server);
		}
	}
}
433 
/*
 * Manage the records of servers known to be within a network namespace.  This
 * includes garbage collecting unused servers.
 *
 * Note also that we were given an increment on net->servers_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_servers(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, fs_manager);
	struct afs_server *gc_list = NULL;
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the server list looking for servers that have expired from
	 * lack of use.
	 */
	read_seqlock_excl(&net->fs_lock);

	for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
		struct afs_server *server =
			rb_entry(cursor, struct afs_server, uuid_rb);
		int usage = atomic_read(&server->usage);

		_debug("manage %pU %u", &server->uuid, usage);

		ASSERTCMP(usage, >=, 1);
		ASSERTIFCMP(purging, usage, ==, 1);

		if (usage == 1) {
			/* Only the tree holds a ref.  Expiry is put_time plus
			 * a grace period, except for records marked broken
			 * (VL_FAIL/NOT_FOUND), which may expire immediately.
			 */
			time64_t expire_at = server->put_time;

			if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
			    !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
				expire_at += afs_server_gc_delay;
			if (purging || expire_at <= now) {
				server->gc_next = gc_list;
				gc_list = server;
			} else if (expire_at < next_manage) {
				/* Track the earliest future expiry to drive
				 * the next management pass.
				 */
				next_manage = expire_at;
			}
		}
	}

	read_sequnlock_excl(&net->fs_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * servers_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->fs_manager))
				afs_inc_servers_outstanding(net);
		} else {
			afs_set_server_timer(net, next_manage - now);
		}
	}

	afs_gc_servers(net, gc_list);

	afs_dec_servers_outstanding(net);
	_leave(" [%d]", atomic_read(&net->servers_outstanding));
}
503 
/*
 * Queue the server manager work item, passing it a count on
 * servers_outstanding; drop the count again if it was already queued.
 */
static void afs_queue_server_manager(struct afs_net *net)
{
	afs_inc_servers_outstanding(net);
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}
510 
/*
 * Purge list of servers.
 */
void afs_purge_servers(struct afs_net *net)
{
	_enter("");

	/* A pending timer holds a count on servers_outstanding; if we managed
	 * to cancel it, drop that count ourselves.
	 */
	if (del_timer_sync(&net->fs_timer))
		atomic_dec(&net->servers_outstanding);

	/* Run the manager one last time to sweep everything up.
	 * NOTE(review): this presumes net->live is already false here so the
	 * manager treats every record as purgeable - confirm against callers.
	 */
	afs_queue_server_manager(net);

	_debug("wait");
	wait_var_event(&net->servers_outstanding,
		       !atomic_read(&net->servers_outstanding));
	_leave("");
}
528 
529 /*
530  * Probe a fileserver to find its capabilities.
531  *
532  * TODO: Try service upgrade.
533  */
534 static bool afs_do_probe_fileserver(struct afs_fs_cursor *fc)
535 {
536 	_enter("");
537 
538 	fc->ac.addr = NULL;
539 	fc->ac.start = READ_ONCE(fc->ac.alist->index);
540 	fc->ac.index = fc->ac.start;
541 	fc->ac.error = 0;
542 	fc->ac.begun = false;
543 
544 	while (afs_iterate_addresses(&fc->ac)) {
545 		afs_fs_get_capabilities(afs_v2net(fc->vnode), fc->cbi->server,
546 					&fc->ac, fc->key);
547 		switch (fc->ac.error) {
548 		case 0:
549 			afs_end_cursor(&fc->ac);
550 			set_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags);
551 			return true;
552 		case -ECONNABORTED:
553 			fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
554 			goto error;
555 		case -ENOMEM:
556 		case -ENONET:
557 			goto error;
558 		case -ENETUNREACH:
559 		case -EHOSTUNREACH:
560 		case -ECONNREFUSED:
561 		case -ETIMEDOUT:
562 		case -ETIME:
563 			break;
564 		default:
565 			fc->ac.error = -EIO;
566 			goto error;
567 		}
568 	}
569 
570 error:
571 	afs_end_cursor(&fc->ac);
572 	return false;
573 }
574 
575 /*
576  * If we haven't already, try probing the fileserver to get its capabilities.
577  * We try not to instigate parallel probes, but it's possible that the parallel
578  * probes will fail due to authentication failure when ours would succeed.
579  *
580  * TODO: Try sending an anonymous probe if an authenticated probe fails.
581  */
582 bool afs_probe_fileserver(struct afs_fs_cursor *fc)
583 {
584 	bool success;
585 	int ret, retries = 0;
586 
587 	_enter("");
588 
589 retry:
590 	if (test_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags)) {
591 		_leave(" = t");
592 		return true;
593 	}
594 
595 	if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags)) {
596 		success = afs_do_probe_fileserver(fc);
597 		clear_bit_unlock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags);
598 		wake_up_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING);
599 		_leave(" = t");
600 		return success;
601 	}
602 
603 	_debug("wait");
604 	ret = wait_on_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING,
605 			  TASK_INTERRUPTIBLE);
606 	if (ret == -ERESTARTSYS) {
607 		fc->ac.error = ret;
608 		_leave(" = f [%d]", ret);
609 		return false;
610 	}
611 
612 	retries++;
613 	if (retries == 4) {
614 		fc->ac.error = -ESTALE;
615 		_leave(" = f [stale]");
616 		return false;
617 	}
618 	_debug("retry");
619 	goto retry;
620 }
621 
/*
 * Get an update for a server's address list.
 *
 * Fetches a fresh list from the VL servers; if the version changed, the new
 * list is published under fs_lock and the old one discarded, otherwise the
 * redundant fetch is discarded.  The next-update time is pushed out either
 * way.  On failure the error lands in fc->ac.error and false is returned.
 */
static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
{
	struct afs_addr_list *alist, *discard;

	_enter("");

	alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
				    &server->uuid);
	if (IS_ERR(alist)) {
		fc->ac.error = PTR_ERR(alist);
		_leave(" = f [%d]", fc->ac.error);
		return false;
	}

	/* Default to discarding the fetched list; swap that for the old list
	 * if the version number moved on.
	 */
	discard = alist;
	if (server->addr_version != alist->version) {
		write_lock(&server->fs_lock);
		discard = rcu_dereference_protected(server->addresses,
						    lockdep_is_held(&server->fs_lock));
		rcu_assign_pointer(server->addresses, alist);
		server->addr_version = alist->version;
		write_unlock(&server->fs_lock);
	}

	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	afs_put_addrlist(discard);
	_leave(" = t");
	return true;
}
654 
655 /*
656  * See if a server's address list needs updating.
657  */
658 bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
659 {
660 	time64_t now = ktime_get_real_seconds();
661 	long diff;
662 	bool success;
663 	int ret, retries = 0;
664 
665 	_enter("");
666 
667 	ASSERT(server);
668 
669 retry:
670 	diff = READ_ONCE(server->update_at) - now;
671 	if (diff > 0) {
672 		_leave(" = t [not now %ld]", diff);
673 		return true;
674 	}
675 
676 	if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
677 		success = afs_update_server_record(fc, server);
678 		clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
679 		wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
680 		_leave(" = %d", success);
681 		return success;
682 	}
683 
684 	ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
685 			  TASK_INTERRUPTIBLE);
686 	if (ret == -ERESTARTSYS) {
687 		fc->ac.error = ret;
688 		_leave(" = f [intr]");
689 		return false;
690 	}
691 
692 	retries++;
693 	if (retries == 4) {
694 		_leave(" = f [stale]");
695 		ret = -ESTALE;
696 		return false;
697 	}
698 	goto retry;
699 }
700