/* AFS server record management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
#include "protocol_yfs.h"

static unsigned afs_server_gc_delay = 10;	/* Server record timeout in seconds */
static unsigned afs_server_update_delay = 30;	/* Time till VLDB recheck in secs */

/*
 * Take a count on the number of outstanding server records/events in this
 * network namespace.  afs_purge_servers() waits for this count to reach zero,
 * so every increment must eventually be paired with a call to
 * afs_dec_servers_outstanding().
 */
static void afs_inc_servers_outstanding(struct afs_net *net)
{
	atomic_inc(&net->servers_outstanding);
}

/*
 * Drop a count on the number of outstanding server records/events and, if it
 * reaches zero, wake up anyone waiting in afs_purge_servers().
 */
static void afs_dec_servers_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->servers_outstanding))
		wake_up_var(&net->servers_outstanding);
}

/*
 * Find a server by one of its addresses.
34 */ 35 struct afs_server *afs_find_server(struct afs_net *net, 36 const struct sockaddr_rxrpc *srx) 37 { 38 const struct sockaddr_in6 *a = &srx->transport.sin6, *b; 39 const struct afs_addr_list *alist; 40 struct afs_server *server = NULL; 41 unsigned int i; 42 bool ipv6 = true; 43 int seq = 0, diff; 44 45 if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 || 46 srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 || 47 srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff)) 48 ipv6 = false; 49 50 rcu_read_lock(); 51 52 do { 53 if (server) 54 afs_put_server(net, server); 55 server = NULL; 56 read_seqbegin_or_lock(&net->fs_addr_lock, &seq); 57 58 if (ipv6) { 59 hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) { 60 alist = rcu_dereference(server->addresses); 61 for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { 62 b = &alist->addrs[i].transport.sin6; 63 diff = ((u16 __force)a->sin6_port - 64 (u16 __force)b->sin6_port); 65 if (diff == 0) 66 diff = memcmp(&a->sin6_addr, 67 &b->sin6_addr, 68 sizeof(struct in6_addr)); 69 if (diff == 0) 70 goto found; 71 } 72 } 73 } else { 74 hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) { 75 alist = rcu_dereference(server->addresses); 76 for (i = 0; i < alist->nr_ipv4; i++) { 77 b = &alist->addrs[i].transport.sin6; 78 diff = ((u16 __force)a->sin6_port - 79 (u16 __force)b->sin6_port); 80 if (diff == 0) 81 diff = ((u32 __force)a->sin6_addr.s6_addr32[3] - 82 (u32 __force)b->sin6_addr.s6_addr32[3]); 83 if (diff == 0) 84 goto found; 85 } 86 } 87 } 88 89 server = NULL; 90 found: 91 if (server && !atomic_inc_not_zero(&server->usage)) 92 server = NULL; 93 94 } while (need_seqretry(&net->fs_addr_lock, seq)); 95 96 done_seqretry(&net->fs_addr_lock, seq); 97 98 rcu_read_unlock(); 99 return server; 100 } 101 102 /* 103 * Look up a server by its UUID 104 */ 105 struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid) 106 { 107 struct afs_server *server = NULL; 108 struct rb_node *p; 
	int diff, seq = 0;

	_enter("%pU", uuid);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (server)
			afs_put_server(net, server);
		server = NULL;

		read_seqbegin_or_lock(&net->fs_lock, &seq);

		/* Binary-search the UUID-keyed tree of fileserver records. */
		p = net->fs_servers.rb_node;
		while (p) {
			server = rb_entry(p, struct afs_server, uuid_rb);

			diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				/* Take a ref for the caller before leaving the
				 * tree walk.
				 */
				afs_get_server(server);
				break;
			}

			server = NULL;
		}
	} while (need_seqretry(&net->fs_lock, seq));

	done_seqretry(&net->fs_lock, seq);

	_leave(" = %p", server);
	return server;
}

/*
 * Install a server record in the namespace tree
 */
static struct afs_server *afs_install_server(struct afs_net *net,
					     struct afs_server *candidate)
{
	const struct afs_addr_list *alist;
	struct afs_server *server;
	struct rb_node **pp, *p;
	int ret = -EEXIST, diff;

	_enter("%p", candidate);

	write_seqlock(&net->fs_lock);

	/* Firstly install the server in the UUID lookup tree */
	pp = &net->fs_servers.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		_debug("- consider %p", p);
		server = rb_entry(p, struct afs_server, uuid_rb);
		diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto exists;	/* Someone beat us to it: return theirs. */
	}

	server = candidate;
	rb_link_node(&server->uuid_rb, p, pp);
	rb_insert_color(&server->uuid_rb, &net->fs_servers);
	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);

	write_seqlock(&net->fs_addr_lock);
	alist = rcu_dereference_protected(server->addresses,
					  lockdep_is_held(&net->fs_addr_lock.lock));

	/* Secondly, if the server
 has any IPv4 and/or IPv6 addresses, install
	 * it in the IPv4 and/or IPv6 reverse-map lists.
	 *
	 * TODO: For speed we want to use something other than a flat list
	 * here; even sorting the list in terms of lowest address would help a
	 * bit, but anything we might want to do gets messy and memory
	 * intensive.
	 */
	if (alist->nr_ipv4 > 0)
		hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
	if (alist->nr_addrs > alist->nr_ipv4)
		hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);

	write_sequnlock(&net->fs_addr_lock);
	ret = 0;

exists:
	/* Ref for the caller, whether we installed the candidate or found a
	 * preexisting record.
	 */
	afs_get_server(server);
	write_sequnlock(&net->fs_lock);
	return server;
}

/*
 * Allocate a new server record with the given UUID and address list.  The
 * record starts with one reference (for the caller) and counts towards
 * net->servers_outstanding.  Returns NULL on allocation failure.
 */
static struct afs_server *afs_alloc_server(struct afs_net *net,
					   const uuid_t *uuid,
					   struct afs_addr_list *alist)
{
	struct afs_server *server;

	_enter("");

	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
	if (!server)
		goto enomem;

	atomic_set(&server->usage, 1);
	/* The server record takes over the caller's ref on alist. */
	RCU_INIT_POINTER(server->addresses, alist);
	server->addr_version = alist->version;
	server->uuid = *uuid;
	server->flags = (1UL << AFS_SERVER_FL_NEW);
	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	rwlock_init(&server->fs_lock);
	INIT_HLIST_HEAD(&server->cb_volumes);
	rwlock_init(&server->cb_break_lock);
	init_waitqueue_head(&server->probe_wq);
	spin_lock_init(&server->probe_lock);

	afs_inc_servers_outstanding(net);
	_leave(" = %p", server);
	return server;

enomem:
	_leave(" = NULL [nomem]");
	return NULL;
}

/*
 * Look up an address record for a server by querying the cell's VL servers.
 * Returns an address list on success or an ERR_PTR.
 */
static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
						 struct key *key, const uuid_t *uuid)
{
	struct afs_vl_cursor vc;
	struct afs_addr_list *alist = NULL;
	int ret;

	ret = -ERESTARTSYS;
	if
 (afs_begin_vlserver_operation(&vc, cell, key)) {
		while (afs_select_vlserver(&vc)) {
			/* YFS-capable VL servers have a richer endpoint query. */
			if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
				alist = afs_yfsvl_get_endpoints(&vc, uuid);
			else
				alist = afs_vl_get_addrs_u(&vc, uuid);
		}

		ret = afs_end_vlserver_operation(&vc);
	}

	return ret < 0 ? ERR_PTR(ret) : alist;
}

/*
 * Get or create a fileserver record.
 */
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
				     const uuid_t *uuid)
{
	struct afs_addr_list *alist;
	struct afs_server *server, *candidate;

	_enter("%p,%pU", cell->net, uuid);

	/* Fast path: the record may already exist. */
	server = afs_find_server_by_uuid(cell->net, uuid);
	if (server)
		return server;

	alist = afs_vl_lookup_addrs(cell, key, uuid);
	if (IS_ERR(alist))
		return ERR_CAST(alist);

	candidate = afs_alloc_server(cell->net, uuid, alist);
	if (!candidate) {
		afs_put_addrlist(alist);
		return ERR_PTR(-ENOMEM);
	}

	server = afs_install_server(cell->net, candidate);
	if (server != candidate) {
		/* We lost the install race; the candidate was never published,
		 * so plain kfree() (no RCU grace period) is sufficient.
		 */
		afs_put_addrlist(alist);
		kfree(candidate);
	}

	_leave(" = %p{%d}", server, atomic_read(&server->usage));
	return server;
}

/*
 * Set the server timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_server_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		afs_inc_servers_outstanding(net);
		/* timer_reduce() returns true if the timer was already
		 * pending, in which case the pending timer already holds an
		 * outstanding count and we must drop the one we just took.
		 */
		if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
			afs_dec_servers_outstanding(net);
	}
}

/*
 * Server management timer. We have an increment on fs_outstanding that we
 * need to pass along to the work item.
 */
void afs_servers_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_timer);

	_enter("");
	/* Hand our outstanding count to the work item; if it was already
	 * queued, it holds its own count and we drop ours.
	 */
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Release a reference on a server record.  Dropping the last ref does not
 * free the record; it just arms the GC timer so that afs_manage_servers()
 * can expire it after afs_server_gc_delay.
 */
void afs_put_server(struct afs_net *net, struct afs_server *server)
{
	unsigned int usage;

	if (!server)
		return;

	/* Record when the record became (potentially) unused; the GC uses
	 * this as the base for the expiry time.
	 */
	server->put_time = ktime_get_real_seconds();

	usage = atomic_dec_return(&server->usage);

	_enter("{%u}", usage);

	if (likely(usage > 0))
		return;

	afs_set_server_timer(net, afs_server_gc_delay);
}

/* RCU callback: free a server record once all RCU readers are done with it. */
static void afs_server_rcu(struct rcu_head *rcu)
{
	struct afs_server *server = container_of(rcu, struct afs_server, rcu);

	afs_put_addrlist(rcu_access_pointer(server->addresses));
	kfree(server);
}

/*
 * destroy a dead server
 */
static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
{
	struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
	struct afs_addr_cursor ac = {
		.alist = alist,
		.index = alist->preferred,
		.error = 0,
	};
	_enter("%p", server);

	if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
		afs_fs_give_up_all_callbacks(net, server, &ac, NULL);

	/* Wait for any in-flight probes to finish before freeing. */
	wait_var_event(&server->probe_outstanding,
		       atomic_read(&server->probe_outstanding) == 0);

	call_rcu(&server->rcu, afs_server_rcu);
	afs_dec_servers_outstanding(net);
}

/*
 * Garbage collect any expired servers.
 */
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
	struct afs_server *server;
	bool deleted;
	int usage;

	/* gc_list is a singly-linked list threaded through server->gc_next. */
	while ((server = gc_list)) {
		gc_list = server->gc_next;

		write_seqlock(&net->fs_lock);
		/* Only delete if the usage count is still exactly 1 (i.e. the
		 * record wasn't resurrected since it was put on the GC list);
		 * the cmpxchg to 0 makes the decision atomic.
		 */
		usage = 1;
		deleted = atomic_try_cmpxchg(&server->usage, &usage, 0);
		if (deleted) {
			rb_erase(&server->uuid_rb, &net->fs_servers);
			hlist_del_rcu(&server->proc_link);
		}
		write_sequnlock(&net->fs_lock);

		if (deleted) {
			write_seqlock(&net->fs_addr_lock);
			/* The record may be on neither, one, or both of the
			 * address reverse-map lists.
			 */
			if (!hlist_unhashed(&server->addr4_link))
				hlist_del_rcu(&server->addr4_link);
			if (!hlist_unhashed(&server->addr6_link))
				hlist_del_rcu(&server->addr6_link);
			write_sequnlock(&net->fs_addr_lock);
			afs_destroy_server(net, server);
		}
	}
}

/*
 * Manage the records of servers known to be within a network namespace.  This
 * includes garbage collecting unused servers.
 *
 * Note also that we were given an increment on net->servers_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_servers(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, fs_manager);
	struct afs_server *gc_list = NULL;
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the server list looking for servers that have expired from
	 * lack of use.
	 */
	read_seqlock_excl(&net->fs_lock);

	for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
		struct afs_server *server =
			rb_entry(cursor, struct afs_server, uuid_rb);
		int usage = atomic_read(&server->usage);

		_debug("manage %pU %u", &server->uuid, usage);

		ASSERTCMP(usage, >=, 1);
		ASSERTIFCMP(purging, usage, ==, 1);

		/* usage == 1 means only the tree itself holds a ref, so the
		 * record is a GC candidate.
		 */
		if (usage == 1) {
			time64_t expire_at = server->put_time;

			/* Records whose VL lookups failed get no grace period. */
			if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
			    !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
				expire_at += afs_server_gc_delay;
			if (purging || expire_at <= now) {
				server->gc_next = gc_list;
				gc_list = server;
			} else if (expire_at < next_manage) {
				next_manage = expire_at;
			}
		}
	}

	read_sequnlock_excl(&net->fs_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * servers_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->fs_manager))
				afs_inc_servers_outstanding(net);
		} else {
			afs_set_server_timer(net, next_manage - now);
		}
	}

	afs_gc_servers(net, gc_list);

	afs_dec_servers_outstanding(net);
	_leave(" [%d]", atomic_read(&net->servers_outstanding));
}

/*
 * Queue the server manager work item, passing it an outstanding count; drop
 * the count if the work item was already queued.
 */
static void afs_queue_server_manager(struct afs_net *net)
{
	afs_inc_servers_outstanding(net);
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Purge list of servers.
 */
void afs_purge_servers(struct afs_net *net)
{
	_enter("");

	/* If the timer was pending, it held an outstanding count; reclaim it. */
	if (del_timer_sync(&net->fs_timer))
		atomic_dec(&net->servers_outstanding);

	afs_queue_server_manager(net);

	/* Wait for the manager to GC everything (net->live is false by now,
	 * so every record gets purged) and the count to drain to zero.
	 */
	_debug("wait");
	wait_var_event(&net->servers_outstanding,
		       !atomic_read(&net->servers_outstanding));
	_leave("");
}

/*
 * Get an update for a server's address list from the VL servers, replacing
 * the installed list only if the version has changed.  Returns true on
 * success; on failure, the error is recorded in fc->ac.error.
 */
static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
{
	struct afs_addr_list *alist, *discard;

	_enter("");

	alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
				    &server->uuid);
	if (IS_ERR(alist)) {
		fc->ac.error = PTR_ERR(alist);
		_leave(" = f [%d]", fc->ac.error);
		return false;
	}

	/* Either the new list is discarded (version unchanged) or it replaces
	 * the old one and the old one is discarded.
	 */
	discard = alist;
	if (server->addr_version != alist->version) {
		write_lock(&server->fs_lock);
		discard = rcu_dereference_protected(server->addresses,
						    lockdep_is_held(&server->fs_lock));
		rcu_assign_pointer(server->addresses, alist);
		server->addr_version = alist->version;
		write_unlock(&server->fs_lock);
	}

	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	afs_put_addrlist(discard);
	_leave(" = t");
	return true;
}

/*
 * See if a server's address list needs updating.
548 */ 549 bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server) 550 { 551 time64_t now = ktime_get_real_seconds(); 552 long diff; 553 bool success; 554 int ret, retries = 0; 555 556 _enter(""); 557 558 ASSERT(server); 559 560 retry: 561 diff = READ_ONCE(server->update_at) - now; 562 if (diff > 0) { 563 _leave(" = t [not now %ld]", diff); 564 return true; 565 } 566 567 if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) { 568 success = afs_update_server_record(fc, server); 569 clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags); 570 wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING); 571 _leave(" = %d", success); 572 return success; 573 } 574 575 ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING, 576 TASK_INTERRUPTIBLE); 577 if (ret == -ERESTARTSYS) { 578 fc->ac.error = ret; 579 _leave(" = f [intr]"); 580 return false; 581 } 582 583 retries++; 584 if (retries == 4) { 585 _leave(" = f [stale]"); 586 ret = -ESTALE; 587 return false; 588 } 589 goto retry; 590 } 591