/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>


#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)

static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);


static void			nlm_gc_hosts(void);
static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
					const char *, int, int);
static struct nsm_handle *	nsm_find(const struct sockaddr_in *sin,
					 const char *hostname,
					 int hostname_len);

/*
 * Common host lookup routine for server & client
 */
static struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
		int proto, int version,
		const char *hostname,
		int hostname_len)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;
	struct nsm_handle *nsm = NULL;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server? "server" : "client",
			hostname_len,
			hostname? hostname : "<none>");


	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 * (address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	chain = &nlm_hosts[hash];
	hlist_for_each_entry(host, pos, chain, h_hash) {
		if (!nlm_cmp_addr(&host->h_addr, sin))
			continue;

		/* See if we have an NSM handle for this client */
		if (!nsm)
			nsm = host->h_nsmhandle;

		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		/* Move to head of hash chain. */
		hlist_del(&host->h_hash);
		hlist_add_head(&host->h_hash, chain);

		nlm_get_host(host);
		goto out;
	}
	if (nsm)
		atomic_inc(&nsm->sm_count);

	host = NULL;

	/* Sadly, the host isn't in our hash table yet. See if
	 * we have an NSM handle for it. If not, create one.
	 */
	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
		goto out;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		nsm_release(nsm);
		goto out;
	}
	host->h_name	   = nsm->sm_name;
	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
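	/* The port in the incoming address is the peer's source port, not
	 * the port its lockd listens on, so it is cleared here;
	 * nlm_bind_host() relies on RPC_CLNT_CREATE_AUTOBIND to query the
	 * peer's portmapper for the real NLM port instead. */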
	host->h_version	   = version;
	host->h_proto	   = proto;
	host->h_rpcclnt	   = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires	   = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_nsmhandle  = nsm;
	host->h_server	   = server;
	hlist_add_head(&host->h_hash, chain);
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}

/*
 * Destroy a host
 */
static void
nlm_destroy_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	BUG_ON(!list_empty(&host->h_lockowners));
	BUG_ON(atomic_read(&host->h_count));

	/*
	 * Release NSM handle and unmonitor host.
	 */
	nsm_unmonitor(host);

	if ((clnt = host->h_rpcclnt) != NULL) {
		if (atomic_read(&clnt->cl_users)) {
			printk(KERN_WARNING
				"lockd: active RPC handle\n");
			clnt->cl_dead = 1;
		} else {
			rpc_destroy_client(host->h_rpcclnt);
		}
	}
	kfree(host);
}

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
			const char *hostname, int hostname_len)
{
	return nlm_lookup_host(0, sin, proto, version,
			       hostname, hostname_len);
}
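/*
 * Note the naming: nlmclnt_lookup_host() runs on the NFS client and
 * caches a handle for the peer acting as lock *server* (hence the
 * server = 0 argument, which records our own role), while
 * nlmsvc_lookup_host() below runs in the server and caches a handle
 * for the remote *client*. The role is part of the match key in
 * nlm_lookup_host(), so a single peer may own two nlm_host entries.
 */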
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
			const char *hostname, int hostname_len)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers,
			       hostname, hostname_len);
}

/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		unsigned long increment = nlmsvc_timeout * HZ;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}
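/*
 * Note that nlm_release_host() does not free the host when the count
 * reaches zero; the entry stays in the hash table and is reclaimed
 * later by nlm_gc_hosts() once h_expires has passed as well. Dropping
 * the last reference merely makes the host eligible for collection.
 */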
/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin,
				const char *hostname, int hostname_len,
				u32 new_state)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nsm_handle *nsm;
	struct nlm_host	*host;

	dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
			hostname, NIPQUAD(sin->sin_addr));

	/* Find the NSM handle for this peer */
	if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
		return;

	/* When reclaiming locks on this peer, make sure that
	 * we set up a new notification */
	nsm->sm_monitored = 0;

	/* Mark all hosts tied to this NSM state as having rebooted.
	 * We run the loop repeatedly, because we drop the host table
	 * lock for this.
	 * To avoid processing a host several times, we match the nsmstate.
	 */
again:	mutex_lock(&nlm_host_mutex);
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			if (host->h_nsmhandle == nsm
			 && host->h_nsmstate != new_state) {
				host->h_nsmstate = new_state;
				host->h_state++;

				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);

				if (host->h_server) {
					/* We're server for this guy, just ditch
					 * all the locks he held. */
					nlmsvc_free_host_resources(host);
				} else {
					/* He's the server, initiate lock recovery. */
					nlmclnt_recovery(host);
				}

				nlm_release_host(host);
				goto again;
			}
		}
	}

	mutex_unlock(&nlm_host_mutex);
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
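/*
 * The collector below works in two phases. Mark: clear every host's
 * h_inuse flag, then let nlmsvc_mark_resources() set it again for each
 * host that still owns locks, blocks or shares. Sweep: destroy only
 * those hosts that are unreferenced (h_count == 0), unmarked, and past
 * their h_expires timestamp.
 */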
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			hlist_del_init(&host->h_hash);

			nlm_destroy_host(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}


/*
 * Manage NSM handles
 */
static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex);

static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
		const char *hostname, int hostname_len,
		int create)
{
	struct nsm_handle *nsm = NULL;
	struct list_head *pos;

	if (!sin)
		return NULL;

	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				hostname_len, hostname);
		}
		return NULL;
	}

	mutex_lock(&nsm_mutex);
	list_for_each(pos, &nsm_handles) {
		nsm = list_entry(pos, struct nsm_handle, sm_link);

		if (hostname && nsm_use_hostnames) {
			if (strlen(nsm->sm_name) != hostname_len
			 || memcmp(nsm->sm_name, hostname, hostname_len))
				continue;
		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
			continue;
		atomic_inc(&nsm->sm_count);
		goto out;
	}

	if (!create) {
		nsm = NULL;
		goto out;
	}

	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
	if (nsm != NULL) {
		nsm->sm_addr = *sin;
		nsm->sm_name = (char *) (nsm + 1);
		memcpy(nsm->sm_name, hostname, hostname_len);
		nsm->sm_name[hostname_len] = '\0';
		atomic_set(&nsm->sm_count, 1);

		list_add(&nsm->sm_link, &nsm_handles);
	}

out:
	mutex_unlock(&nsm_mutex);
	return nsm;
}

static struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
{
	return __nsm_find(sin, hostname, hostname_len, 1);
}

/*
 * Release an NSM handle
 */
void
nsm_release(struct nsm_handle *nsm)
{
	if (!nsm)
		return;
	if (atomic_dec_and_test(&nsm->sm_count)) {
		mutex_lock(&nsm_mutex);
		if (atomic_read(&nsm->sm_count) == 0) {
			list_del(&nsm->sm_link);
			kfree(nsm);
		}
		mutex_unlock(&nsm_mutex);
	}
}
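/*
 * The second check of sm_count in nsm_release() above is not redundant:
 * between atomic_dec_and_test() dropping the count to zero and this
 * path acquiring nsm_mutex, a concurrent __nsm_find() (which runs with
 * the mutex held) may have taken a new reference to the handle, in
 * which case it must not be unlinked and freed here.
 */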