/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/door.h>
#include <sys/sdt.h>
#include <sys/thread.h>

#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/export.h>
#include <nfs/nfs_clnt.h>
#include <nfs/auth.h>

#define	EQADDR(a1, a2)  \
	(bcmp((char *)(a1)->buf, (char *)(a2)->buf, (a1)->len) == 0 && \
	(a1)->len == (a2)->len)
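
/*
 * EQADDR compares both the contents and the lengths of two netbufs.
 * Note that, because && evaluates left to right, the bcmp() over
 * a1->len bytes runs before the length check.
 */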

static struct knetconfig auth_knconf;
static servinfo_t svp;
static clinfo_t ci;

static struct kmem_cache *exi_cache_handle;
static void exi_cache_reclaim(void *);
static void exi_cache_trim(struct exportinfo *exi);

extern pri_t minclsyspri;

int nfsauth_cache_hit;
int nfsauth_cache_miss;
int nfsauth_cache_refresh;
int nfsauth_cache_reclaim;
/*
 * The lifetime of an auth cache entry:
 * ------------------------------------
 *
 * An auth cache entry is created with both the auth_time
 * and auth_freshness times set to the current time.
 *
 * Upon every client access which results in a hit, the
 * auth_time will be updated.
 *
 * If a client access determines that the auth_freshness
 * indicates that the entry is STALE, then it will be
 * refreshed. Note that this will explicitly reset
 * auth_time.
 *
 * When the REFRESH successfully occurs, then the
 * auth_freshness is updated.
 *
 * There are two ways for an entry to leave the cache:
 *
 * 1) Purged by an action on the export (removed or changed)
 * 2) Memory backpressure from the kernel (check against NFSAUTH_CACHE_TRIM)
 *
 * For 2) we check the timeout value against auth_time.
 */

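/*
 * An illustrative sketch of the state transitions implemented below
 * (auth_state values as used in this file):
 *
 *   FRESH --(older than NFSAUTH_CACHE_REFRESH on a hit)--> STALE
 *   STALE --(picked up by nfsauth_refresh_thread())--> REFRESHING
 *   REFRESHING --(mountd upcall completed)--> FRESH
 *   STALE/REFRESHING --(exi_cache_trim() under backpressure)--> INVALID
 *   FRESH --(exi_cache_trim() under backpressure)--> freed
 *   INVALID --(harvested from refreshq_dead_entries)--> freed
 */
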
/*
 * Number of seconds before we mark an auth cache entry for refresh.
 */
#define	NFSAUTH_CACHE_REFRESH 600

/*
 * Number of idle seconds before we yield to backpressure
 * and trim a cache entry.
 */
#define	NFSAUTH_CACHE_TRIM 3600

/*
 * While we could encapsulate the exi_list inside the
 * exi structure, we can't do that for the auth_list.
 * So, to keep things looking clean, we keep them both
 * in these external lists.
 */
typedef struct refreshq_exi_node {
	struct exportinfo	*ren_exi;
	list_t			ren_authlist;
	list_node_t		ren_node;
} refreshq_exi_node_t;

typedef struct refreshq_auth_node {
	struct auth_cache	*ran_auth;
	list_node_t		ran_node;
} refreshq_auth_node_t;
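
/*
 * An illustrative sketch of the queue layout (one refreshq_exi_node_t
 * per export on the queue, each carrying the auth entries waiting to
 * be refreshed):
 *
 *   refreshq_queue --> ren(exi A) --> ren(exi B) --> ...
 *                       |              |
 *                       ran --> ran    ran --> ...
 */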

/*
 * Used to manipulate things on the refreshq_queue.
 * Note that the refresh thread will effectively
 * pop a node off of the queue, at which point it
 * will no longer need to hold the mutex.
 */
static kmutex_t refreshq_lock;
static list_t refreshq_queue;
static kcondvar_t refreshq_cv;

/*
 * A list_t would be overkill. These are auth_cache
 * entries which are no longer linked to an exi.
 * It should be the case that all of their states
 * are NFS_AUTH_INVALID.
 *
 * I.e., an entry can only be put on this list if
 * its state indicated that it had been placed on
 * the refreshq_queue.
 *
 * Note that while there is no longer a link either
 * from the exi to these entries or from these
 * entries back to the exi, the exi cannot go away
 * until these entries are harvested.
 */
static struct auth_cache	*refreshq_dead_entries;

/*
 * If there is ever a problem with loading the
 * module, then nfsauth_fini() needs to be called
 * to remove state. In that event, since the
 * refreshq thread has already been started,
 * nfsauth_fini() and the thread need to work
 * together to get rid of the state.
 */
typedef enum nfsauth_refreshq_thread_state {
	REFRESHQ_THREAD_RUNNING,
	REFRESHQ_THREAD_FINI_REQ,
	REFRESHQ_THREAD_HALTED
} nfsauth_refreshq_thread_state_t;

nfsauth_refreshq_thread_state_t
refreshq_thread_state = REFRESHQ_THREAD_HALTED;

static void nfsauth_free_node(struct auth_cache *);
static void nfsauth_remove_dead_entry(struct auth_cache *);
static void nfsauth_refresh_thread(void);

/*
 * mountd is a server-side only daemon. This will need to be
 * revisited if the NFS server is ever made zones-aware.
 */
kmutex_t	mountd_lock;
door_handle_t   mountd_dh;

void
mountd_args(uint_t did)
{
	mutex_enter(&mountd_lock);
	if (mountd_dh)
		door_ki_rele(mountd_dh);
	mountd_dh = door_ki_lookup(did);
	mutex_exit(&mountd_lock);
}

void
nfsauth_init(void)
{
	/*
	 * mountd can be restarted by smf(5). We need to make sure
	 * the updated door handle will safely make it to mountd_dh.
	 */
	mutex_init(&mountd_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&refreshq_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&refreshq_queue, sizeof (refreshq_exi_node_t),
	    offsetof(refreshq_exi_node_t, ren_node));
	refreshq_dead_entries = NULL;

	cv_init(&refreshq_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Allocate nfsauth cache handle
	 */
	exi_cache_handle = kmem_cache_create("exi_cache_handle",
	    sizeof (struct auth_cache), 0, NULL, NULL,
	    exi_cache_reclaim, NULL, NULL, 0);

	refreshq_thread_state = REFRESHQ_THREAD_RUNNING;
	(void) zthread_create(NULL, 0, nfsauth_refresh_thread,
	    NULL, 0, minclsyspri);
}

/*
 * Finalization routine for nfsauth. It is important to call this routine
 * before destroying the exported_lock.
 */
void
nfsauth_fini(void)
{
	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;
	struct auth_cache	*p;
	struct auth_cache	*auth_next;

	/*
	 * Prevent the refreshq_thread from getting new
	 * work.
	 */
	mutex_enter(&refreshq_lock);
	if (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
		refreshq_thread_state = REFRESHQ_THREAD_FINI_REQ;
		cv_broadcast(&refreshq_cv);

		/*
		 * Also, wait for nfsauth_refresh_thread() to exit.
		 */
		while (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
			cv_wait(&refreshq_cv, &refreshq_lock);
		}
	}

	/*
	 * Walk the exi_list and, in turn, walk the
	 * auth_lists.
	 */
	while ((ren = list_remove_head(&refreshq_queue))) {
		while ((ran = list_remove_head(&ren->ren_authlist))) {
			kmem_free(ran, sizeof (refreshq_auth_node_t));
		}

		list_destroy(&ren->ren_authlist);
		exi_rele(ren->ren_exi);
		kmem_free(ren, sizeof (refreshq_exi_node_t));
	}

	/*
	 * Okay, now that the lists are deleted, we
	 * need to see if there are any dead entries
	 * to harvest.
	 */
	for (p = refreshq_dead_entries; p != NULL; p = auth_next) {
		auth_next = p->auth_next;
		nfsauth_free_node(p);
	}

	mutex_exit(&refreshq_lock);

	list_destroy(&refreshq_queue);

	cv_destroy(&refreshq_cv);
	mutex_destroy(&refreshq_lock);

	mutex_destroy(&mountd_lock);

	/*
	 * Deallocate nfsauth cache handle
	 */
	kmem_cache_destroy(exi_cache_handle);
}

/*
 * Convert the address in a netbuf to
 * a hash index for the auth_cache table.
 */
static int
hash(struct netbuf *a)
{
	int i, h = 0;

	for (i = 0; i < a->len; i++)
		h ^= a->buf[i];

	return (h & (AUTH_TABLESIZE - 1));
}
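
/*
 * An illustrative example: hashing an AF_INET address of 192.168.1.10
 * (considering only the four address octets, with the port already
 * masked off) XORs 0xc0 ^ 0xa8 ^ 0x01 ^ 0x0a = 0x63, which is then
 * masked down to the table size. The mask above assumes that
 * AUTH_TABLESIZE is a power of two.
 */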

/*
 * Mask out the components of an
 * address that do not identify
 * a host. For socket addresses the
 * masking gets rid of the port number.
 */
static void
addrmask(struct netbuf *addr, struct netbuf *mask)
{
	int i;

	for (i = 0; i < addr->len; i++)
		addr->buf[i] &= mask->buf[i];
}
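
/*
 * For example, with a transport-supplied AF_INET mask whose address
 * bytes are 0xff and whose port bytes are 0x00, the bitwise AND above
 * preserves the host address and clears the port, so requests from
 * different ports on the same host compare (and hash) equal.
 */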

/*
 * nfsauth4_access is used for NFS V4 auth checking. Besides doing
 * the common nfsauth_access(), it will check if the client can
 * have limited access to this vnode even if the security flavor
 * used does not meet the policy.
 */
int
nfsauth4_access(struct exportinfo *exi, vnode_t *vp, struct svc_req *req)
{
	int access;

	access = nfsauth_access(exi, req);

	/*
	 * There are cases in which the server needs to allow the client
	 * to have a limited view.
	 *
	 * e.g.
	 * /export is shared as "sec=sys,rw=dfs-test-4,sec=krb5,rw"
	 * /export/home is shared as "sec=sys,rw"
	 *
	 * When the client mounts /export with sec=sys, the client
	 * would get a limited view with RO access on /export to see
	 * "home" only, because the client is allowed to access
	 * /export/home with auth_sys.
	 */
	if (access & NFSAUTH_DENIED || access & NFSAUTH_WRONGSEC) {
		/*
		 * Allow ro permission with LIMITED view if there is a
		 * sub-dir exported under vp.
		 */
		if (has_visible(exi, vp))
			return (NFSAUTH_LIMITED);
	}

	return (access);
}

static void
sys_log(const char *msg)
{
	static time_t	tstamp = 0;
	time_t		now;

	/*
	 * msg is shown (at most) once per minute
	 */
	now = gethrestime_sec();
	if ((tstamp + 60) < now) {
		tstamp = now;
		cmn_err(CE_WARN, msg);
	}
}

/*
 * Call up to mountd to get the access information into the kernel.
 */
static bool_t
nfsauth_retrieve(struct exportinfo *exi, char *req_netid, int flavor,
    struct netbuf *addr, int *access)
{
	varg_t			  varg = {0};
	nfsauth_res_t		  res = {0};
	XDR			  xdrs_a;
	XDR			  xdrs_r;
	size_t			  absz;
	caddr_t			  abuf;
	size_t			  rbsz = (size_t)(BYTES_PER_XDR_UNIT * 2);
	char			  result[BYTES_PER_XDR_UNIT * 2] = {0};
	caddr_t			  rbuf = (caddr_t)&result;
	int			  last = 0;
	door_arg_t		  da;
	door_info_t		  di;
	door_handle_t		  dh;
	uint_t			  ntries = 0;

	/*
	 * No entry in the cache for this client/flavor,
	 * so we need to call the nfsauth service in the
	 * mount daemon.
	 */
retry:
	mutex_enter(&mountd_lock);
	dh = mountd_dh;
	if (dh)
		door_ki_hold(dh);
	mutex_exit(&mountd_lock);

	if (dh == NULL) {
		/*
		 * The rendezvous point has not been established yet!
		 * This could mean that either mountd(1m) has not yet
		 * been started or that _this_ routine nuked the door
		 * handle after receiving an EINTR for a REVOKED door.
		 *
		 * Returning NFSAUTH_DROP will cause the NFS client
		 * to retransmit the request, so let's try to be more
		 * resilient and retry up to NFSAUTH_DR_TRYCNT times
		 * before we bail.
		 */
		if (++ntries % NFSAUTH_DR_TRYCNT) {
			delay(hz);
			goto retry;
		}

		sys_log("nfsauth: mountd has not established door");
		*access = NFSAUTH_DROP;
		return (FALSE);
	}

	ntries = 0;
	varg.vers = V_PROTO;
	varg.arg_u.arg.cmd = NFSAUTH_ACCESS;
	varg.arg_u.arg.areq.req_client.n_len = addr->len;
	varg.arg_u.arg.areq.req_client.n_bytes = addr->buf;
	varg.arg_u.arg.areq.req_netid = req_netid;
	varg.arg_u.arg.areq.req_path = exi->exi_export.ex_path;
	varg.arg_u.arg.areq.req_flavor = flavor;

	/*
	 * Set up the XDR stream for encoding the arguments. Notice that
	 * in addition to the args having variable fields (req_netid and
	 * req_path), the argument data structure is itself versioned,
	 * so we need to make sure we can size the arguments buffer
	 * appropriately to encode all the args. If we can't get sizing
	 * info _or_ properly encode the arguments, there's really no
	 * point in continuing, so we fail the request.
	 */
	DTRACE_PROBE1(nfsserv__func__nfsauth__varg, varg_t *, &varg);
	if ((absz = xdr_sizeof(xdr_varg, (void *)&varg)) == 0) {
		door_ki_rele(dh);
		*access = NFSAUTH_DENIED;
		return (FALSE);
	}

	abuf = (caddr_t)kmem_alloc(absz, KM_SLEEP);
	xdrmem_create(&xdrs_a, abuf, absz, XDR_ENCODE);
	if (!xdr_varg(&xdrs_a, &varg)) {
		door_ki_rele(dh);
		goto fail;
	}
	XDR_DESTROY(&xdrs_a);

	/*
	 * The result (nfsauth_res_t) is always two ints, so we don't
	 * have to dynamically size (or allocate) the results buffer.
	 * Now that we've got what we need, we prep the door arguments
	 * and place the call.
	 */
	da.data_ptr = (char *)abuf;
	da.data_size = absz;
	da.desc_ptr = NULL;
	da.desc_num = 0;
	da.rbuf = (char *)rbuf;
	da.rsize = rbsz;

	switch (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0)) {
		case 0:				/* Success */
			if (da.data_ptr != da.rbuf && da.data_size == 0) {
				/*
				 * The door_return that contained the data
				 * failed! We're here because of the 2nd
				 * door_return (w/o data) such that we can
				 * get control of the thread (and exit
				 * gracefully).
				 */
				DTRACE_PROBE1(nfsserv__func__nfsauth__door__nil,
				    door_arg_t *, &da);
				door_ki_rele(dh);
				goto fail;

			} else if (rbuf != da.rbuf) {
				/*
				 * The only time this should be true
				 * is if userland wanted to hand us
				 * a bigger response than what we
				 * expect; that should not happen
				 * (nfsauth_res_t is only two ints),
				 * but we check nevertheless.
				 */
				rbuf = da.rbuf;
				rbsz = da.rsize;

			} else if (rbsz > da.data_size) {
				/*
				 * We were expecting two ints; but if
				 * userland fails in encoding the XDR
				 * stream, we detect that here, since
				 * mountd forces down only one byte in
				 * such a scenario.
				 */
				door_ki_rele(dh);
				goto fail;
			}
			door_ki_rele(dh);
			break;

		case EAGAIN:
			/*
			 * Server out of resources; back off for a bit
			 */
			door_ki_rele(dh);
			kmem_free(abuf, absz);
			delay(hz);
			goto retry;
			/* NOTREACHED */

		case EINTR:
			if (!door_ki_info(dh, &di)) {
				if (di.di_attributes & DOOR_REVOKED) {
					/*
					 * The server barfed and revoked
					 * the (existing) door on us; we
					 * want to wait to give smf(5) a
					 * chance to restart mountd(1m)
					 * and establish a new door handle.
					 */
					mutex_enter(&mountd_lock);
					if (dh == mountd_dh)
						mountd_dh = NULL;
					mutex_exit(&mountd_lock);
					door_ki_rele(dh);
					kmem_free(abuf, absz);
					delay(hz);
					goto retry;
				}
				/*
				 * If the door was _not_ revoked on us,
				 * then more than likely we took an INTR,
				 * so we need to fail the operation.
				 */
				door_ki_rele(dh);
				goto fail;
			}
			/*
			 * The only failure that can occur from getting
			 * the door info is EINVAL, so we let the code
			 * below handle it.
			 */
			/* FALLTHROUGH */

		case EBADF:
		case EINVAL:
		default:
			/*
			 * If we have a stale door handle, give smf a last
			 * chance to start it by sleeping for a little bit.
			 * If we're still hosed, we'll fail the call.
			 *
			 * Since we're going to reacquire the door handle
			 * upon the retry, we opt to sleep for a bit and
			 * _not_ to clear mountd_dh. If mountd restarted
			 * and was able to set mountd_dh, we should see
			 * the new instance; if not, we won't get caught
			 * up in the retry/DELAY loop.
			 */
			door_ki_rele(dh);
			if (!last) {
				delay(hz);
				last++;
				goto retry;
			}
			sys_log("nfsauth: stale mountd door handle");
			goto fail;
	}

	/*
	 * No door errors encountered; set up the XDR stream for decoding
	 * the results. If we fail to decode the results, we've got no
	 * other recourse than to fail the request.
	 */
	xdrmem_create(&xdrs_r, rbuf, rbsz, XDR_DECODE);
	if (!xdr_nfsauth_res(&xdrs_r, &res))
		goto fail;
	XDR_DESTROY(&xdrs_r);

	DTRACE_PROBE1(nfsserv__func__nfsauth__results, nfsauth_res_t *, &res);
	switch (res.stat) {
		case NFSAUTH_DR_OKAY:
			*access = res.ares.auth_perm;
			kmem_free(abuf, absz);
			break;

		case NFSAUTH_DR_EFAIL:
		case NFSAUTH_DR_DECERR:
		case NFSAUTH_DR_BADCMD:
		default:
fail:
			*access = NFSAUTH_DENIED;
			kmem_free(abuf, absz);
			return (FALSE);
			/* NOTREACHED */
	}

	return (TRUE);
}

static void
nfsauth_refresh_thread(void)
{
	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;

	struct exportinfo	*exi;
	struct auth_cache	*p;

	int			access;
	bool_t			retrieval;

	callb_cpr_t		cprinfo;

	CALLB_CPR_INIT(&cprinfo, &refreshq_lock, callb_generic_cpr,
	    "nfsauth_refresh");
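
	/*
	 * The CPR callback registered above lets checkpoint/resume
	 * suspend this kernel thread safely; the cv_wait below is
	 * bracketed by CALLB_CPR_SAFE_BEGIN/END to mark it as a safe
	 * suspension point.
	 */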

	for (;;) {
		mutex_enter(&refreshq_lock);
		if (refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
			/* Keep the hold on the lock! */
			break;
		}

		ren = list_remove_head(&refreshq_queue);
		if (ren == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&refreshq_cv, &refreshq_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &refreshq_lock);
			mutex_exit(&refreshq_lock);
			continue;
		}
		mutex_exit(&refreshq_lock);

		exi = ren->ren_exi;
		ASSERT(exi != NULL);
		rw_enter(&exi->exi_cache_lock, RW_READER);

		while ((ran = list_remove_head(&ren->ren_authlist))) {
			/*
			 * We are shutting down. No need to refresh
			 * entries which are about to be nuked.
			 *
			 * So just throw them away until we are done
			 * with this exi node...
			 */
			if (refreshq_thread_state !=
			    REFRESHQ_THREAD_RUNNING) {
				kmem_free(ran, sizeof (refreshq_auth_node_t));
				continue;
			}

			p = ran->ran_auth;
			ASSERT(p != NULL);

			mutex_enter(&p->auth_lock);

			/*
			 * Make sure the state is valid now that
			 * we have the lock. Note that once we
			 * change the state to NFS_AUTH_REFRESHING,
			 * no other thread will be able to work on
			 * this entry.
			 */
			if (p->auth_state != NFS_AUTH_STALE) {
				/*
				 * Once it goes INVALID, it cannot
				 * change state.
				 */
				if (p->auth_state == NFS_AUTH_INVALID) {
					mutex_exit(&p->auth_lock);
					nfsauth_remove_dead_entry(p);
				} else
					mutex_exit(&p->auth_lock);

				kmem_free(ran, sizeof (refreshq_auth_node_t));
				continue;
			}

			p->auth_state = NFS_AUTH_REFRESHING;
			mutex_exit(&p->auth_lock);

			DTRACE_PROBE2(nfsauth__debug__cache__refresh,
			    struct exportinfo *, exi,
			    struct auth_cache *, p);

			/*
			 * The first caching of the access rights
			 * is done with the netid pulled out of the
			 * request from the client. All subsequent
			 * users of the cache may or may not have
			 * the same netid. It doesn't matter. So
			 * when we refresh, we simply use the netid
			 * of the request which triggered the
			 * refresh attempt.
			 */
			ASSERT(p->auth_netid != NULL);

			retrieval = nfsauth_retrieve(exi, p->auth_netid,
			    p->auth_flavor, &p->auth_addr, &access);

			/*
			 * auth_netid is set in only one other place,
			 * and there the state has to be NFS_AUTH_FRESH,
			 * so it is safe to free it here.
			 */
			kmem_free(p->auth_netid, strlen(p->auth_netid) + 1);
			p->auth_netid = NULL;

			/*
			 * If we got an error, do not reset the
			 * freshness time. This will cause the next
			 * access check for the client to reschedule
			 * this node.
			 */
			if (retrieval == FALSE) {
				mutex_enter(&p->auth_lock);
				if (p->auth_state == NFS_AUTH_INVALID) {
					mutex_exit(&p->auth_lock);
					nfsauth_remove_dead_entry(p);
				} else {
					p->auth_state = NFS_AUTH_FRESH;
					mutex_exit(&p->auth_lock);
				}

				kmem_free(ran, sizeof (refreshq_auth_node_t));
				continue;
			}

			mutex_enter(&p->auth_lock);
			if (p->auth_state == NFS_AUTH_INVALID) {
				mutex_exit(&p->auth_lock);
				nfsauth_remove_dead_entry(p);
			} else {
				p->auth_access = access;
				p->auth_freshness = gethrestime_sec();
				p->auth_state = NFS_AUTH_FRESH;
				mutex_exit(&p->auth_lock);
			}

			kmem_free(ran, sizeof (refreshq_auth_node_t));
		}

		rw_exit(&exi->exi_cache_lock);

		list_destroy(&ren->ren_authlist);
		exi_rele(ren->ren_exi);
		kmem_free(ren, sizeof (refreshq_exi_node_t));
	}

	refreshq_thread_state = REFRESHQ_THREAD_HALTED;
	cv_broadcast(&refreshq_cv);
	CALLB_CPR_EXIT(&cprinfo);
	zthread_exit();
}

/*
 * Get the access information from the cache, or call up to mountd
 * to get and then cache the access information in the kernel.
 */
int
nfsauth_cache_get(struct exportinfo *exi, struct svc_req *req, int flavor)
{
	struct netbuf		*taddrmask;
	struct netbuf		addr;
	struct netbuf		*claddr;
	struct auth_cache	**head;
	struct auth_cache	*p;
	int			access;
	time_t			refresh;

	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;

	/*
	 * Now check whether this client already
	 * has an entry for this flavor in the cache
	 * for this export.
	 * Get the caller's address, mask off the
	 * parts of the address that do not identify
	 * the host (port number, etc), and then hash
	 * it to find the chain of cache entries.
	 */

	claddr = svc_getrpccaller(req->rq_xprt);
	addr = *claddr;
	addr.buf = kmem_alloc(addr.len, KM_SLEEP);
	bcopy(claddr->buf, addr.buf, claddr->len);
	SVC_GETADDRMASK(req->rq_xprt, SVC_TATTR_ADDRMASK, (void **)&taddrmask);
	ASSERT(taddrmask != NULL);
	if (taddrmask)
		addrmask(&addr, taddrmask);

	rw_enter(&exi->exi_cache_lock, RW_READER);
	head = &exi->exi_cache[hash(&addr)];
	for (p = *head; p; p = p->auth_next) {
		if (EQADDR(&addr, &p->auth_addr) && flavor == p->auth_flavor)
			break;
	}

	if (p != NULL) {
		nfsauth_cache_hit++;

		refresh = gethrestime_sec() - p->auth_freshness;
		DTRACE_PROBE2(nfsauth__debug__cache__hit,
		    int, nfsauth_cache_hit,
		    time_t, refresh);

		mutex_enter(&p->auth_lock);
		if ((refresh > NFSAUTH_CACHE_REFRESH) &&
		    p->auth_state == NFS_AUTH_FRESH) {
			p->auth_state = NFS_AUTH_STALE;
			mutex_exit(&p->auth_lock);

			ASSERT(p->auth_netid == NULL);
			p->auth_netid =
			    strdup(svc_getnetid(req->rq_xprt));

			nfsauth_cache_refresh++;

			DTRACE_PROBE3(nfsauth__debug__cache__stale,
			    struct exportinfo *, exi,
			    struct auth_cache *, p,
			    int, nfsauth_cache_refresh);

			ran = kmem_alloc(sizeof (refreshq_auth_node_t),
			    KM_SLEEP);
			ran->ran_auth = p;

			mutex_enter(&refreshq_lock);
			/*
			 * We should not add a work queue
			 * item if the thread is not
			 * accepting them.
			 */
			if (refreshq_thread_state == REFRESHQ_THREAD_RUNNING) {
				/*
				 * Is there an existing exi_list?
				 */
				for (ren = list_head(&refreshq_queue);
				    ren != NULL;
				    ren = list_next(&refreshq_queue, ren)) {
					if (ren->ren_exi == exi) {
						list_insert_tail(
						    &ren->ren_authlist, ran);
						break;
					}
				}

				if (ren == NULL) {
					ren = kmem_alloc(
					    sizeof (refreshq_exi_node_t),
					    KM_SLEEP);

					exi_hold(exi);
					ren->ren_exi = exi;

					list_create(&ren->ren_authlist,
					    sizeof (refreshq_auth_node_t),
					    offsetof(refreshq_auth_node_t,
					    ran_node));

					list_insert_tail(&ren->ren_authlist,
					    ran);
					list_insert_tail(&refreshq_queue, ren);
				}

				cv_broadcast(&refreshq_cv);
			} else {
				kmem_free(ran, sizeof (refreshq_auth_node_t));
			}

			mutex_exit(&refreshq_lock);
		} else {
			mutex_exit(&p->auth_lock);
		}

		access = p->auth_access;
		p->auth_time = gethrestime_sec();

		rw_exit(&exi->exi_cache_lock);
		kmem_free(addr.buf, addr.len);

		return (access);
	}

	rw_exit(&exi->exi_cache_lock);

	nfsauth_cache_miss++;

	if (!nfsauth_retrieve(exi, svc_getnetid(req->rq_xprt), flavor,
	    &addr, &access)) {
		kmem_free(addr.buf, addr.len);
		return (access);
	}

	/*
	 * Now cache the result on the cache chain
	 * for this export (if there's enough memory)
	 */
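	/*
	 * KM_NOSLEEP below is deliberate: if the allocation fails we
	 * simply return the answer uncached rather than block the NFS
	 * request path waiting for memory.
	 */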
	p = kmem_cache_alloc(exi_cache_handle, KM_NOSLEEP);
	if (p != NULL) {
		p->auth_addr = addr;
		p->auth_flavor = flavor;
		p->auth_access = access;
		p->auth_time = p->auth_freshness = gethrestime_sec();
		p->auth_state = NFS_AUTH_FRESH;
		p->auth_netid = NULL;
		mutex_init(&p->auth_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&exi->exi_cache_lock, RW_WRITER);
		p->auth_next = *head;
		*head = p;
		rw_exit(&exi->exi_cache_lock);
	} else {
		kmem_free(addr.buf, addr.len);
	}

	return (access);
}

/*
 * Check whether the requesting client has access to the filesystem
 * with a given nfs flavor number that was explicitly shared.
 */
int
nfsauth4_secinfo_access(struct exportinfo *exi, struct svc_req *req,
			int flavor, int perm)
{
	int access;

	if (! (perm & M_4SEC_EXPORTED)) {
		return (NFSAUTH_DENIED);
	}

	/*
	 * Optimize if there are no lists
	 */
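	/*
	 * For example (illustrative): a share exported only as
	 * "sec=sys,rw" carries roughly (M_RW | M_4SEC_EXPORTED) in
	 * perm; with no M_ROOT or M_NONE lists present, stripping
	 * M_4SEC_EXPORTED leaves M_RW and we can answer NFSAUTH_RW
	 * without an upcall to mountd.
	 */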
	if ((perm & (M_ROOT|M_NONE)) == 0) {
		perm &= ~M_4SEC_EXPORTED;
		if (perm == M_RO)
			return (NFSAUTH_RO);
		if (perm == M_RW)
			return (NFSAUTH_RW);
	}

	access = nfsauth_cache_get(exi, req, flavor);

	return (access);
}

int
nfsauth_access(struct exportinfo *exi, struct svc_req *req)
{
	int access, mapaccess;
	struct secinfo *sp;
	int i, flavor, perm;
	int authnone_entry = -1;

	/*
	 * Get the nfs flavor number from xprt.
	 */
	flavor = (int)(uintptr_t)req->rq_xprt->xp_cookie;

	/*
	 * First check the access restrictions on the filesystem.  If
	 * there are no lists associated with this flavor then there's no
	 * need to make an expensive call to the nfsauth service or to
	 * cache anything.
	 */

	sp = exi->exi_export.ex_secinfo;
	for (i = 0; i < exi->exi_export.ex_seccnt; i++) {
		if (flavor != sp[i].s_secinfo.sc_nfsnum) {
			if (sp[i].s_secinfo.sc_nfsnum == AUTH_NONE)
				authnone_entry = i;
			continue;
		}
		break;
	}

	mapaccess = 0;

	if (i >= exi->exi_export.ex_seccnt) {
		/*
		 * Flavor not found, but use AUTH_NONE if it exists
		 */
		if (authnone_entry == -1)
			return (NFSAUTH_DENIED);
		flavor = AUTH_NONE;
		mapaccess = NFSAUTH_MAPNONE;
		i = authnone_entry;
	}

999 
1000 	/*
1001 	 * If the flavor is in the ex_secinfo list, but not an explicitly
1002 	 * shared flavor by the user, it is a result of the nfsv4 server
1003 	 * namespace setup. We will grant an RO permission similar for
1004 	 * a pseudo node except that this node is a shared one.
1005 	 *
1006 	 * e.g. flavor in (flavor) indicates that it is not explictly
1007 	 *	shared by the user:
1008 	 *
1009 	 *		/	(sys, krb5)
1010 	 *		|
1011 	 *		export  #share -o sec=sys (krb5)
1012 	 *		|
1013 	 *		secure  #share -o sec=krb5
1014 	 *
1015 	 *	In this case, when a krb5 request coming in to access
1016 	 *	/export, RO permission is granted.
1017 	 */
1018 	if (!(sp[i].s_flags & M_4SEC_EXPORTED))
1019 		return (mapaccess | NFSAUTH_RO);
1020 
	/*
	 * Optimize if there are no lists
	 */
	perm = sp[i].s_flags;
	if ((perm & (M_ROOT|M_NONE)) == 0) {
		perm &= ~M_4SEC_EXPORTED;
		if (perm == M_RO)
			return (mapaccess | NFSAUTH_RO);
		if (perm == M_RW)
			return (mapaccess | NFSAUTH_RW);
	}

	access = nfsauth_cache_get(exi, req, flavor);

	/*
	 * The client's security flavor doesn't match the "ro" or
	 * "rw" lists. Try again using AUTH_NONE if present.
	 */
	if ((access & NFSAUTH_WRONGSEC) && (flavor != AUTH_NONE)) {
		/*
		 * Have we already encountered AUTH_NONE?
		 */
		if (authnone_entry != -1) {
			mapaccess = NFSAUTH_MAPNONE;
			access = nfsauth_cache_get(exi, req, AUTH_NONE);
		} else {
			/*
			 * Check for AUTH_NONE presence.
			 */
			for (; i < exi->exi_export.ex_seccnt; i++) {
				if (sp[i].s_secinfo.sc_nfsnum == AUTH_NONE) {
					mapaccess = NFSAUTH_MAPNONE;
					access = nfsauth_cache_get(exi, req,
					    AUTH_NONE);
					break;
				}
			}
		}
	}

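	/*
	 * Normalize a denied answer to exactly NFSAUTH_DENIED,
	 * dropping any other status bits, before folding in
	 * mapaccess.
	 */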
	if (access & NFSAUTH_DENIED)
		access = NFSAUTH_DENIED;

	return (access | mapaccess);
}

static void
nfsauth_free_node(struct auth_cache *p)
{
	if (p->auth_netid != NULL)
		kmem_free(p->auth_netid, strlen(p->auth_netid) + 1);
	kmem_free(p->auth_addr.buf, p->auth_addr.len);
	mutex_destroy(&p->auth_lock);
	kmem_cache_free(exi_cache_handle, (void *)p);
}

/*
 * Remove the dead entry from the refreshq_dead_entries
 * list.
 */
static void
nfsauth_remove_dead_entry(struct auth_cache *dead)
{
	struct auth_cache	*p;
	struct auth_cache	*prev;
	struct auth_cache	*next;

	mutex_enter(&refreshq_lock);
	prev = NULL;
	for (p = refreshq_dead_entries; p != NULL; p = next) {
		next = p->auth_next;

		if (p == dead) {
			if (prev == NULL)
				refreshq_dead_entries = next;
			else
				prev->auth_next = next;

			nfsauth_free_node(dead);
			break;
		}

		prev = p;
	}
	mutex_exit(&refreshq_lock);
}

/*
 * Free the nfsauth cache for a given export
 */
void
nfsauth_cache_free(struct exportinfo *exi)
{
	int i;
	struct auth_cache *p, *next;

	for (i = 0; i < AUTH_TABLESIZE; i++) {
		for (p = exi->exi_cache[i]; p; p = next) {
			next = p->auth_next;

			/*
			 * The only way we got here
			 * was with an exi_rele, which
			 * means that no auth cache entry
			 * is being refreshed.
			 */
			nfsauth_free_node(p);
		}
	}
}

/*
 * Called by the kernel memory allocator when
 * memory is low. Free unused cache entries.
 * If that's not enough, the VM system will
 * call again for some more.
 */
/*ARGSUSED*/
void
exi_cache_reclaim(void *cdrarg)
{
	int i;
	struct exportinfo *exi;

	rw_enter(&exported_lock, RW_READER);

	for (i = 0; i < EXPTABLESIZE; i++) {
		for (exi = exptable[i]; exi; exi = exi->fid_hash.next) {
			exi_cache_trim(exi);
		}
	}
	nfsauth_cache_reclaim++;

	rw_exit(&exported_lock);
}

void
exi_cache_trim(struct exportinfo *exi)
{
	struct auth_cache *p;
	struct auth_cache *prev, *next;
	int i;
	time_t stale_time;

	stale_time = gethrestime_sec() - NFSAUTH_CACHE_TRIM;

	rw_enter(&exi->exi_cache_lock, RW_WRITER);

	for (i = 0; i < AUTH_TABLESIZE; i++) {

		/*
		 * Free entries that have not been
		 * used for NFSAUTH_CACHE_TRIM seconds.
		 */
		prev = NULL;
		for (p = exi->exi_cache[i]; p; p = next) {
			next = p->auth_next;
			if (p->auth_time > stale_time) {
				prev = p;
				continue;
			}

			mutex_enter(&p->auth_lock);
			DTRACE_PROBE1(nfsauth__debug__trim__state,
			    auth_state_t, p->auth_state);

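			/*
			 * A non-FRESH entry may still be referenced by
			 * the refresh thread (it could be STALE on the
			 * refreshq or REFRESHING in mid-upcall), so we
			 * cannot free it here; mark it INVALID and park
			 * it on refreshq_dead_entries to be harvested
			 * later.
			 */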
			if (p->auth_state != NFS_AUTH_FRESH) {
				p->auth_state = NFS_AUTH_INVALID;
				mutex_exit(&p->auth_lock);

				mutex_enter(&refreshq_lock);
				p->auth_next = refreshq_dead_entries;
				refreshq_dead_entries = p;
				mutex_exit(&refreshq_lock);
			} else {
				mutex_exit(&p->auth_lock);
				nfsauth_free_node(p);
			}

			if (prev == NULL)
				exi->exi_cache[i] = next;
			else
				prev->auth_next = next;
		}
	}

	rw_exit(&exi->exi_cache_lock);
}
1209