// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
#include <linux/mutex.h>

#include <linux/sunrpc/svc_xprt.h>

#include <net/ipv6.h>

#include "lockd.h"
#include "netns.h"

#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
/* Must stay a power of two: nlm_hash_address() masks with NRHASH - 1 */
#define NLM_HOST_NRHASH		32
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		(300 * HZ)
#define NLM_HOST_COLLECT	(120 * HZ)

/* Separate caches for server-side and client-side peer hosts */
static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];

#define for_each_host(host, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry((host), (chain), h_hash)

#define for_each_host_safe(host, next, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry_safe((host), (next), \
						(chain), h_hash)

/* Total host count across all namespaces; protected by nlm_host_mutex */
static unsigned long		nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(struct net *net);

/* Bundle of lookup parameters shared by the client and server lookups */
struct nlm_lookup_host_info {
	const int		server;		/* search for server|client */
	const struct sockaddr	*sap;		/* address to search for */
	const size_t		salen;		/* its length */
	const unsigned short	protocol;	/* transport to search for*/
	const u32		version;	/* NLM version to search for */
	const char		*hostname;	/* remote's hostname */
	const size_t		hostname_len;	/* its length */
	const int		noresvport;	/* use non-priv port */
	struct net		*net;		/* network namespace to bind */
	const struct cred	*cred;
};

/*
 * Hash function must work well on big- and little-endian platforms
 */
static unsigned int __nlm_hash32(const __be32 n)
{
	unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16);
	return hash ^ (hash >> 8);
}

static unsigned int __nlm_hash_addr4(const struct sockaddr *sap)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	return __nlm_hash32(sin->sin_addr.s_addr);
}

static unsigned int __nlm_hash_addr6(const struct sockaddr *sap)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr addr = sin6->sin6_addr;
	return __nlm_hash32(addr.s6_addr32[0]) ^
	       __nlm_hash32(addr.s6_addr32[1]) ^
	       __nlm_hash32(addr.s6_addr32[2]) ^
	       __nlm_hash32(addr.s6_addr32[3]);
}

/* Map a peer address to a hash chain index; unknown families hash to 0 */
static unsigned int nlm_hash_address(const struct sockaddr *sap)
{
	unsigned int hash;

	switch (sap->sa_family) {
	case AF_INET:
		hash = __nlm_hash_addr4(sap);
		break;
	case AF_INET6:
		hash = __nlm_hash_addr6(sap);
		break;
	default:
		hash = 0;
	}
	return hash & (NLM_HOST_NRHASH - 1);
}

/*
 * Allocate and initialize an nlm_host. Common to both client and server.
 */
static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
				       struct nsm_handle *nsm)
{
	struct nlm_host *host = NULL;
	unsigned long now = jiffies;

	/* Share the caller's NSM handle if one was supplied; otherwise
	 * look up (or create) a handle for this peer address. Either
	 * way we hold an nsm reference on success. */
	if (nsm != NULL)
		refcount_inc(&nsm->sm_count);
	else {
		nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
				     ni->hostname, ni->hostname_len);
		if (unlikely(nsm == NULL)) {
			dprintk("lockd: %s failed; no nsm handle\n",
				__func__);
			goto out;
		}
	}

	host = kmalloc_obj(*host);
	if (unlikely(host == NULL)) {
		dprintk("lockd: %s failed; no memory\n", __func__);
		/* drop the NSM reference taken above */
		nsm_release(nsm);
		goto out;
	}

	memcpy(nlm_addr(host), ni->sap, ni->salen);
	host->h_addrlen = ni->salen;
	/* port 0: the real port is discovered when the RPC client binds */
	rpc_set_port(nlm_addr(host), 0);
	host->h_srcaddrlen = 0;

	host->h_rpcclnt = NULL;
	host->h_name = nsm->sm_name;
	host->h_version = ni->version;
	host->h_proto = ni->protocol;
	host->h_reclaiming = 0;
	host->h_server = ni->server;
	host->h_noresvport = ni->noresvport;
	host->h_inuse = 0;
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state = 0;
	host->h_nsmstate = 0;
	host->h_pidcount = 0;
	/* caller gets the initial reference */
	refcount_set(&host->h_count, 1);
	mutex_init(&host->h_mutex);
	host->h_nextrebind = now + NLM_HOST_REBIND;
	host->h_expires = now + NLM_HOST_EXPIRE;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);
	/* the nsm reference is now owned by the host */
	host->h_nsmhandle = nsm;
	host->h_addrbuf = nsm->sm_addrbuf;
	host->net = ni->net;
	host->h_cred = get_cred(ni->cred);
	strscpy(host->nodename, utsname()->nodename, sizeof(host->nodename));

out:
	return host;
}

/*
 * Destroy an nlm_host and free associated resources
 *
 * Caller must hold nlm_host_mutex.
176 */ 177 static void nlm_destroy_host_locked(struct nlm_host *host) 178 { 179 struct rpc_clnt *clnt; 180 struct lockd_net *ln = net_generic(host->net, lockd_net_id); 181 182 dprintk("lockd: destroy host %s\n", host->h_name); 183 184 hlist_del_init(&host->h_hash); 185 186 nsm_unmonitor(host); 187 nsm_release(host->h_nsmhandle); 188 189 clnt = host->h_rpcclnt; 190 if (clnt != NULL) 191 rpc_shutdown_client(clnt); 192 put_cred(host->h_cred); 193 kfree(host); 194 195 ln->nrhosts--; 196 nrhosts--; 197 } 198 199 /** 200 * nlmclnt_lookup_host - Find an NLM host handle matching a remote server 201 * @sap: network address of server 202 * @salen: length of server address 203 * @protocol: transport protocol to use 204 * @version: NLM protocol version 205 * @hostname: '\0'-terminated hostname of server 206 * @noresvport: 1 if non-privileged port should be used 207 * @net: pointer to net namespace 208 * @cred: pointer to cred 209 * 210 * Returns an nlm_host structure that matches the passed-in 211 * [server address, transport protocol, NLM version, server hostname]. 212 * If one doesn't already exist in the host cache, a new handle is 213 * created and returned. 214 */ 215 struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, 216 const size_t salen, 217 const unsigned short protocol, 218 const u32 version, 219 const char *hostname, 220 int noresvport, 221 struct net *net, 222 const struct cred *cred) 223 { 224 struct nlm_lookup_host_info ni = { 225 .server = 0, 226 .sap = sap, 227 .salen = salen, 228 .protocol = protocol, 229 .version = version, 230 .hostname = hostname, 231 .hostname_len = strlen(hostname), 232 .noresvport = noresvport, 233 .net = net, 234 .cred = cred, 235 }; 236 struct hlist_head *chain; 237 struct nlm_host *host; 238 struct nsm_handle *nsm = NULL; 239 struct lockd_net *ln = net_generic(net, lockd_net_id); 240 241 dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, 242 (hostname ? 
hostname : "<none>"), version, 243 (protocol == IPPROTO_UDP ? "udp" : "tcp")); 244 245 mutex_lock(&nlm_host_mutex); 246 247 chain = &nlm_client_hosts[nlm_hash_address(sap)]; 248 hlist_for_each_entry(host, chain, h_hash) { 249 if (host->net != net) 250 continue; 251 if (!rpc_cmp_addr(nlm_addr(host), sap)) 252 continue; 253 254 /* Same address. Share an NSM handle if we already have one */ 255 if (nsm == NULL) 256 nsm = host->h_nsmhandle; 257 258 if (host->h_proto != protocol) 259 continue; 260 if (host->h_version != version) 261 continue; 262 263 nlm_get_host(host); 264 dprintk("lockd: %s found host %s (%s)\n", __func__, 265 host->h_name, host->h_addrbuf); 266 goto out; 267 } 268 269 host = nlm_alloc_host(&ni, nsm); 270 if (unlikely(host == NULL)) 271 goto out; 272 273 hlist_add_head(&host->h_hash, chain); 274 ln->nrhosts++; 275 nrhosts++; 276 277 dprintk("lockd: %s created host %s (%s)\n", __func__, 278 host->h_name, host->h_addrbuf); 279 280 out: 281 mutex_unlock(&nlm_host_mutex); 282 return host; 283 } 284 285 /** 286 * nlmclnt_release_host - release client nlm_host 287 * @host: nlm_host to release 288 * 289 */ 290 void nlmclnt_release_host(struct nlm_host *host) 291 { 292 if (host == NULL) 293 return; 294 295 dprintk("lockd: release client host %s\n", host->h_name); 296 297 WARN_ON_ONCE(host->h_server); 298 299 if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) { 300 WARN_ON_ONCE(!list_empty(&host->h_lockowners)); 301 WARN_ON_ONCE(!list_empty(&host->h_granted)); 302 WARN_ON_ONCE(!list_empty(&host->h_reclaim)); 303 304 nlm_destroy_host_locked(host); 305 mutex_unlock(&nlm_host_mutex); 306 } 307 } 308 309 /* Callback for rpc_cancel_tasks() - matches all tasks for cancellation */ 310 static bool nlmclnt_match_all(const struct rpc_task *task, const void *data) 311 { 312 return true; 313 } 314 315 /** 316 * nlmclnt_shutdown_rpc_clnt - safely shut down NLM client RPC operations 317 * @host: nlm_host to shut down 318 * 319 * Cancels outstanding RPC tasks 
 * and marks the client as shut down.
 * Synchronizes with nlmclnt_release_host() via nlm_host_mutex to prevent
 * races between shutdown and host destruction. Safe to call if h_rpcclnt
 * is NULL or already shut down.
 */
void nlmclnt_shutdown_rpc_clnt(struct nlm_host *host)
{
	struct rpc_clnt *clnt;

	mutex_lock(&nlm_host_mutex);
	clnt = host->h_rpcclnt;
	if (clnt) {
		clnt->cl_shutdown = 1;
		/* fail every queued/running task with -EIO */
		rpc_cancel_tasks(clnt, -EIO, nlmclnt_match_all, NULL);
	}
	mutex_unlock(&nlm_host_mutex);
}
EXPORT_SYMBOL_GPL(nlmclnt_shutdown_rpc_clnt);

/**
 * nlmsvc_lookup_host - Find an NLM host handle matching a remote client
 * @rqstp: incoming NLM request
 * @hostname: name of client host
 * @hostname_len: length of client hostname
 *
 * Returns an nlm_host structure that matches the [client address,
 * transport protocol, NLM version, client hostname] of the passed-in
 * NLM request. If one doesn't already exist in the host cache, a
 * new handle is created and returned.
 *
 * Before possibly creating a new nlm_host, construct a sockaddr
 * for a specific source address in case the local system has
 * multiple network addresses. The family of the address in
 * rq_daddr is guaranteed to be the same as the family of the
 * address in rq_addr, so it's safe to use the same family for
 * the source address.
355 */ 356 struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, 357 const char *hostname, 358 const size_t hostname_len) 359 { 360 struct hlist_head *chain; 361 struct nlm_host *host = NULL; 362 struct nsm_handle *nsm = NULL; 363 struct sockaddr *src_sap = svc_daddr(rqstp); 364 size_t src_len = rqstp->rq_daddrlen; 365 struct net *net = SVC_NET(rqstp); 366 struct nlm_lookup_host_info ni = { 367 .server = 1, 368 .sap = svc_addr(rqstp), 369 .salen = rqstp->rq_addrlen, 370 .protocol = rqstp->rq_prot, 371 .version = rqstp->rq_vers, 372 .hostname = hostname, 373 .hostname_len = hostname_len, 374 .net = net, 375 }; 376 struct lockd_net *ln = net_generic(net, lockd_net_id); 377 378 dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__, 379 (int)hostname_len, hostname, rqstp->rq_vers, 380 (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp")); 381 382 mutex_lock(&nlm_host_mutex); 383 384 if (time_after_eq(jiffies, ln->next_gc)) 385 nlm_gc_hosts(net); 386 387 chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; 388 hlist_for_each_entry(host, chain, h_hash) { 389 if (host->net != net) 390 continue; 391 if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) 392 continue; 393 394 /* Same address. Share an NSM handle if we already have one */ 395 if (nsm == NULL) 396 nsm = host->h_nsmhandle; 397 398 if (host->h_proto != ni.protocol) 399 continue; 400 if (host->h_version != ni.version) 401 continue; 402 if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap)) 403 continue; 404 405 /* Move to head of hash chain. 
*/ 406 hlist_del(&host->h_hash); 407 hlist_add_head(&host->h_hash, chain); 408 409 nlm_get_host(host); 410 dprintk("lockd: %s found host %s (%s)\n", 411 __func__, host->h_name, host->h_addrbuf); 412 goto out; 413 } 414 415 host = nlm_alloc_host(&ni, nsm); 416 if (unlikely(host == NULL)) 417 goto out; 418 419 memcpy(nlm_srcaddr(host), src_sap, src_len); 420 host->h_srcaddrlen = src_len; 421 hlist_add_head(&host->h_hash, chain); 422 ln->nrhosts++; 423 nrhosts++; 424 425 refcount_inc(&host->h_count); 426 427 dprintk("lockd: %s created host %s (%s)\n", 428 __func__, host->h_name, host->h_addrbuf); 429 430 out: 431 mutex_unlock(&nlm_host_mutex); 432 return host; 433 } 434 435 /** 436 * nlmsvc_release_host - release server nlm_host 437 * @host: nlm_host to release 438 * 439 * Host is destroyed later in nlm_gc_host(). 440 */ 441 void nlmsvc_release_host(struct nlm_host *host) 442 { 443 if (host == NULL) 444 return; 445 446 dprintk("lockd: release server host %s\n", host->h_name); 447 448 WARN_ON_ONCE(!host->h_server); 449 refcount_dec(&host->h_count); 450 } 451 452 /* 453 * Create the NLM RPC client for an NLM peer 454 */ 455 struct rpc_clnt * 456 nlm_bind_host(struct nlm_host *host) 457 { 458 struct rpc_clnt *clnt; 459 460 dprintk("lockd: nlm_bind_host %s (%s)\n", 461 host->h_name, host->h_addrbuf); 462 463 /* Lock host handle */ 464 mutex_lock(&host->h_mutex); 465 466 /* If we've already created an RPC client, check whether 467 * RPC rebind is required 468 */ 469 if ((clnt = host->h_rpcclnt) != NULL) { 470 nlm_rebind_host(host); 471 } else { 472 unsigned long increment = nlm_timeout * HZ; 473 struct rpc_timeout timeparms = { 474 .to_initval = increment, 475 .to_increment = increment, 476 .to_maxval = increment * 6UL, 477 .to_retries = 5U, 478 }; 479 struct rpc_create_args args = { 480 .net = host->net, 481 .protocol = host->h_proto, 482 .address = nlm_addr(host), 483 .addrsize = host->h_addrlen, 484 .timeout = &timeparms, 485 .servername = host->h_name, 486 .program = 
&nlm_program, 487 .version = host->h_version, 488 .authflavor = RPC_AUTH_UNIX, 489 .flags = (RPC_CLNT_CREATE_NOPING | 490 RPC_CLNT_CREATE_AUTOBIND | 491 RPC_CLNT_CREATE_REUSEPORT), 492 .cred = host->h_cred, 493 }; 494 495 /* 496 * lockd retries server side blocks automatically so we want 497 * those to be soft RPC calls. Client side calls need to be 498 * hard RPC tasks. 499 */ 500 if (!host->h_server) 501 args.flags |= RPC_CLNT_CREATE_HARDRTRY; 502 if (host->h_noresvport) 503 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; 504 if (host->h_srcaddrlen) 505 args.saddress = nlm_srcaddr(host); 506 507 clnt = rpc_create(&args); 508 if (!IS_ERR(clnt)) 509 host->h_rpcclnt = clnt; 510 else { 511 printk("lockd: couldn't create RPC handle for %s\n", host->h_name); 512 clnt = NULL; 513 } 514 } 515 516 mutex_unlock(&host->h_mutex); 517 return clnt; 518 } 519 520 /** 521 * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port 522 * @host: NLM host handle for peer 523 * 524 * This is not needed when using a connection-oriented protocol, such as TCP. 525 * The existing autobind mechanism is sufficient to force a rebind when 526 * required, e.g. on connection state transitions. 
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	/* Only UDP transports need an explicit rebind here */
	if (host->h_proto != IPPROTO_UDP)
		return;

	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count and refresh its expiry time.
 * NULL is tolerated and returned unchanged.
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		refcount_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Find one host in @cache tied to @nsm whose recorded NSM state differs
 * from info->state, update its state, and return it with a reference
 * held. Returns NULL once every matching host has been brought up to
 * date, which lets callers loop without revisiting the same host.
 */
static struct nlm_host *next_host_state(struct hlist_head *cache,
					struct nsm_handle *nsm,
					const struct nlm_reboot *info)
{
	struct nlm_host *host;
	struct hlist_head *chain;

	mutex_lock(&nlm_host_mutex);
	for_each_host(host, chain, cache) {
		if (host->h_nsmhandle == nsm
		    && host->h_nsmstate != info->state) {
			host->h_nsmstate = info->state;
			host->h_state++;

			nlm_get_host(host);
			mutex_unlock(&nlm_host_mutex);
			return host;
		}
	}

	mutex_unlock(&nlm_host_mutex);
	return NULL;
}

/**
 * nlm_host_rebooted - Release all resources held by rebooted host
 * @net: network namespace
 * @info: pointer to decoded results of NLM_SM_NOTIFY call
 *
 * We were notified that the specified host has rebooted. Release
 * all resources held by that peer.
 */
void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
{
	struct nsm_handle *nsm;
	struct nlm_host	*host;

	nsm = nsm_reboot_lookup(net, info);
	if (unlikely(nsm == NULL))
		return;

	/* Mark all hosts tied to this NSM state as having rebooted.
	 * We run the loop repeatedly, because we drop the host table
	 * lock for this.
	 * To avoid processing a host several times, we match the nsmstate.
	 */
	while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
		nlmsvc_free_host_resources(host);
		nlmsvc_release_host(host);
	}
	while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
		nlmclnt_recovery(host);
		nlmclnt_release_host(host);
	}

	nsm_release(nsm);
}

/*
 * Warn about any server hosts still present at shutdown time.
 * With a NULL @net, reports the global count; otherwise only the
 * given namespace's.
 */
static void nlm_complain_hosts(struct net *net)
{
	struct hlist_head *chain;
	struct nlm_host	*host;

	if (net) {
		struct lockd_net *ln = net_generic(net, lockd_net_id);

		if (ln->nrhosts == 0)
			return;
		pr_warn("lockd: couldn't shutdown host module for net %x!\n",
			net->ns.inum);
		dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
			net->ns.inum);
	} else {
		if (nrhosts == 0)
			return;
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %lu hosts left:\n", nrhosts);
	}

	for_each_host(host, chain, nlm_server_hosts) {
		if (net && host->net != net)
			continue;
		dprintk(" %s (cnt %d use %d exp %ld net %x)\n",
			host->h_name, refcount_read(&host->h_count),
			host->h_inuse, host->h_expires, host->net->ns.inum);
	}
}

/*
 * Tear down all server hosts for @net (or every namespace when @net
 * is NULL): expire them, shut down their RPC clients, free their lock
 * resources, then garbage-collect.
 */
void
nlm_shutdown_hosts_net(struct net *net)
{
	struct hlist_head *chain;
	struct nlm_host	*host;

	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts in net %x...\n",
		net ? net->ns.inum : 0);
	for_each_host(host, chain, nlm_server_hosts) {
		if (net && host->net != net)
			continue;
		host->h_expires = jiffies - 1;
		if (host->h_rpcclnt) {
			rpc_shutdown_client(host->h_rpcclnt);
			host->h_rpcclnt = NULL;
		}
		nlmsvc_free_host_resources(host);
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts(net);
	nlm_complain_hosts(net);
	mutex_unlock(&nlm_host_mutex);
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	dprintk("lockd: shutting down host module\n");
	nlm_shutdown_hosts_net(NULL);
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_mutex (both call sites in this file do).
 */
static void
nlm_gc_hosts(struct net *net)
{
	struct hlist_head *chain;
	struct hlist_node *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection for net %x\n",
		net ? net->ns.inum : 0);
	/* Mark phase: clear the in-use flag on every candidate host */
	for_each_host(host, chain, nlm_server_hosts) {
		if (net && host->net != net)
			continue;
		host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources(net);

	/* Sweep phase: destroy hosts that are unused, expired, and whose
	 * refcount can be dropped from exactly 1 (the table's reference) */
	for_each_host_safe(host, next, chain, nlm_server_hosts) {
		if (net && host->net != net)
			continue;
		if (host->h_inuse || time_before(jiffies, host->h_expires)) {
			dprintk("nlm_gc_hosts skipping %s "
				"(cnt %d use %d exp %ld net %x)\n",
				host->h_name, refcount_read(&host->h_count),
				host->h_inuse, host->h_expires,
				host->net->ns.inum);
			continue;
		}
		if (refcount_dec_if_one(&host->h_count))
			nlm_destroy_host_locked(host);
	}

	if (net) {
		struct lockd_net *ln = net_generic(net, lockd_net_id);

		ln->next_gc = jiffies + NLM_HOST_COLLECT;
	}
}