/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>


#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)
#define NLM_HOST_ADDR(sv)	(&(sv)->s_nlmclnt->cl_xprt->addr)

static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);


static void			nlm_gc_hosts(void);

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	return nlm_lookup_host(0, sin, proto, version);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}

/*
 * Common host lookup routine for server & client
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
					int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			mutex_unlock(&nlm_host_mutex);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
		goto nohost;
	memset(host, 0, sizeof(*host));

	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_server     = server;
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
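
/*
 * Note on the lookup above (illustrative example, not from the original
 * source): NLM_ADDRHASH() keys on the low bits of the host-order IPv4
 * address, e.g. 192.168.1.10 hashes as
 * ntohl(addr) & (NLM_HOST_NRHASH - 1) = 0xc0a8010a & 31 = 10, so that
 * peer lands in bucket 10.  A cache hit that is not already first in its
 * chain is moved to the head of the chain, so frequently used hosts are
 * found quickly on subsequent lookups.
 */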

struct nlm_host *
nlm_find_client(void)
{
	/* Find an nlm_host for a client for which h_killed == 0,
	 * and return it.
	 */
	int hash;
	mutex_lock(&nlm_host_mutex);
	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
		struct nlm_host *host, **hp;
		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
			if (host->h_server &&
			    host->h_killed == 0) {
				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);
				return host;
			}
		}
	}
	mutex_unlock(&nlm_host_mutex);
	return NULL;
}


/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
		xprt->resvport = 1;	/* NLM requires a reserved port */

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_new_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		if (IS_ERR(clnt))
			goto forgetit;
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		clnt->cl_softrtry = 1;	/* All queries are soft */

		host->h_rpcclnt = clnt;
	}

	mutex_unlock(&host->h_mutex);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	mutex_unlock(&host->h_mutex);
	return NULL;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}
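
/*
 * Illustrative sketch (not compiled): how a client-side caller is
 * expected to combine the helpers above.  The error values and the
 * surrounding logic are assumptions for illustration only; the real
 * callers live in the lockd client and server code, not in this file.
 *
 *	struct nlm_host *host;
 *	struct rpc_clnt *clnt;
 *
 *	host = nlmclnt_lookup_host(sin, IPPROTO_UDP, 4);
 *	if (host == NULL)
 *		return -ENOLCK;
 *	clnt = nlm_bind_host(host);
 *	if (clnt == NULL) {
 *		nlm_release_host(host);
 *		return -ENOLCK;
 *	}
 *	... issue the NLM request through clnt ...
 *	nlm_release_host(host);
 */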

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shut down host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
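
/*
 * Timing sketch for the GC above (illustrative, assuming HZ = 1000):
 * with at most NLM_HOST_MAX (64) hosts cached, a host last referenced at
 * jiffies J has h_expires = J + 120 * HZ = J + 120000.  The sweep frees
 * it only once its h_count has dropped to zero, nlmsvc_mark_resources()
 * did not set h_inuse, and jiffies has passed h_expires; the next pass is
 * then scheduled NLM_HOST_COLLECT jiffies later (60 * HZ in this case).
 * Creating a 65th host sets next_gc = 0, so the next nlm_lookup_host()
 * call triggers an immediate collection.
 */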