/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/door.h>
#include <sys/sdt.h>
#include <sys/thread.h>

#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/export.h>
#include <nfs/nfs_clnt.h>
#include <nfs/auth.h>

#define	EQADDR(a1, a2)  \
	(bcmp((char *)(a1)->buf, (char *)(a2)->buf, (a1)->len) == 0 && \
	(a1)->len == (a2)->len)

static struct knetconfig auth_knconf;
static servinfo_t svp;
static clinfo_t ci;

static struct kmem_cache *exi_cache_handle;
static void exi_cache_reclaim(void *);
static void exi_cache_trim(struct exportinfo *exi);

extern pri_t minclsyspri;

int nfsauth_cache_hit;
int nfsauth_cache_miss;
int nfsauth_cache_refresh;
int nfsauth_cache_reclaim;

/*
 * The lifetime of an auth cache entry:
 * ------------------------------------
 *
 * An auth cache entry is created with both the auth_time
 * and auth_freshness times set to the current time.
 *
 * Upon every client access which results in a hit, the
 * auth_time will be updated.
 *
 * If a client access determines that the auth_freshness
 * indicates that the entry is STALE, then it will be
 * refreshed. Note that this will explicitly reset
 * auth_time.
 *
 * When the REFRESH successfully occurs, then the
 * auth_freshness is updated.
 *
 * There are two ways for an entry to leave the cache:
 *
 * 1) Purged by an action on the export (removed or changed)
 * 2) Memory backpressure from the kernel (check against NFSAUTH_CACHE_TRIM)
 *
 * For 2) we check the timeout value against auth_time.
 */

/*
 * Number of seconds until we mark an auth cache entry for refresh.
 */
#define	NFSAUTH_CACHE_REFRESH 600

/*
 * Number of idle seconds until we yield to backpressure
 * to trim a cache entry.
 */
#define	NFSAUTH_CACHE_TRIM 3600
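
/*
 * To make the interplay of the two timestamps concrete, here is an
 * illustrative timeline (the absolute times are hypothetical; the
 * constants are the defaults above):
 *
 *	t = 0		entry created; auth_time = auth_freshness = 0
 *	t = 300		cache hit; auth_time = 300
 *	t = 650		cache hit; 650 - auth_freshness exceeds
 *			NFSAUTH_CACHE_REFRESH, so the entry is marked
 *			STALE and queued for refresh; auth_time = 650
 *	t = 700		refresh completes; auth_freshness = 700
 *	t = 4400	no hits since t = 650; under memory pressure,
 *			now - auth_time exceeds NFSAUTH_CACHE_TRIM, so
 *			exi_cache_trim() reaps the entry
 */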

/*
 * While we could encapsulate the exi_list inside the
 * exi structure, we can't do that for the auth_list.
 * So, to keep things looking clean, we keep them both
 * in these external lists.
 */
typedef struct refreshq_exi_node {
	struct exportinfo	*ren_exi;
	list_t			ren_authlist;
	list_node_t		ren_node;
} refreshq_exi_node_t;

typedef struct refreshq_auth_node {
	struct auth_cache	*ran_auth;
	list_node_t		ran_node;
} refreshq_auth_node_t;

/*
 * Used to manipulate things on the refreshq_queue.
 * Note that the refresh thread will effectively
 * pop a node off of the queue, at which point it
 * will no longer need to hold the mutex.
 */
static kmutex_t refreshq_lock;
static list_t refreshq_queue;
static kcondvar_t refreshq_cv;

/*
 * A list_t would be overkill. These are auth_cache
 * entries which are no longer linked to an exi.
 * It should be the case that all of their states
 * are NFS_AUTH_INVALID.
 *
 * I.e., the only way to be put on this list is
 * if their state indicated that they had been placed
 * on the refreshq_queue.
 *
 * Note that while there is no link from the exi or
 * back to the exi, the exi can not go away until
 * these entries are harvested.
 */
static struct auth_cache *refreshq_dead_entries;

/*
 * If there is ever a problem with loading the
 * module, then nfsauth_fini() needs to be called
 * to remove state. In that event, since the
 * refreshq thread has been started, it and
 * nfsauth_fini() need to work together to get
 * rid of state.
 */
typedef enum nfsauth_refreshq_thread_state {
	REFRESHQ_THREAD_RUNNING,
	REFRESHQ_THREAD_FINI_REQ,
	REFRESHQ_THREAD_HALTED
} nfsauth_refreshq_thread_state_t;

nfsauth_refreshq_thread_state_t
refreshq_thread_state = REFRESHQ_THREAD_HALTED;

static void nfsauth_free_node(struct auth_cache *);
static void nfsauth_remove_dead_entry(struct auth_cache *);
static void nfsauth_refresh_thread(void);

/*
 * mountd is a server-side only daemon. This will need to be
 * revisited if the NFS server is ever made zones-aware.
 */
kmutex_t	mountd_lock;
door_handle_t	mountd_dh;

void
mountd_args(uint_t did)
{
	mutex_enter(&mountd_lock);
	if (mountd_dh)
		door_ki_rele(mountd_dh);
	mountd_dh = door_ki_lookup(did);
	mutex_exit(&mountd_lock);
}

void
nfsauth_init(void)
{
	/*
	 * mountd can be restarted by smf(5). We need to make sure
	 * the updated door handle will safely make it to mountd_dh.
	 */
	mutex_init(&mountd_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&refreshq_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&refreshq_queue, sizeof (refreshq_exi_node_t),
	    offsetof(refreshq_exi_node_t, ren_node));
	refreshq_dead_entries = NULL;

	cv_init(&refreshq_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Allocate nfsauth cache handle
	 */
	exi_cache_handle = kmem_cache_create("exi_cache_handle",
	    sizeof (struct auth_cache), 0, NULL, NULL,
	    exi_cache_reclaim, NULL, NULL, 0);

	refreshq_thread_state = REFRESHQ_THREAD_RUNNING;
	(void) zthread_create(NULL, 0, nfsauth_refresh_thread,
	    NULL, 0, minclsyspri);
}
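
/*
 * A minimal sketch of the expected call order (the caller shown here
 * is hypothetical; the real hooks live in the NFS server setup and
 * teardown paths):
 *
 *	nfsauth_init();		set up locks, refresh queue, kmem cache,
 *				and start the refresh thread
 *	mountd_args(did);	mountd(1m) registers its door with us
 *	...			service requests
 *	nfsauth_fini();		must run before exported_lock is destroyed
 */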

/*
 * Finalization routine for nfsauth. It is important to call this routine
 * before destroying the exported_lock.
 */
void
nfsauth_fini(void)
{
	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;
	struct auth_cache	*p;
	struct auth_cache	*auth_next;

	/*
	 * Prevent the refreshq_thread from getting new
	 * work.
	 */
	mutex_enter(&refreshq_lock);
	if (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
		refreshq_thread_state = REFRESHQ_THREAD_FINI_REQ;
		cv_broadcast(&refreshq_cv);

		/*
		 * Also, wait for nfsauth_refresh_thread() to exit.
		 */
		while (refreshq_thread_state != REFRESHQ_THREAD_HALTED) {
			cv_wait(&refreshq_cv, &refreshq_lock);
		}
	}

	/*
	 * Walk the exi_list and, in turn, walk the
	 * auth_lists.
	 */
	while ((ren = list_remove_head(&refreshq_queue))) {
		while ((ran = list_remove_head(&ren->ren_authlist))) {
			kmem_free(ran, sizeof (refreshq_auth_node_t));
		}

		list_destroy(&ren->ren_authlist);
		exi_rele(ren->ren_exi);
		kmem_free(ren, sizeof (refreshq_exi_node_t));
	}

	/*
	 * Okay, now that the lists are deleted, we
	 * need to see if there are any dead entries
	 * to harvest.
	 */
	for (p = refreshq_dead_entries; p != NULL; p = auth_next) {
		auth_next = p->auth_next;
		nfsauth_free_node(p);
	}

	mutex_exit(&refreshq_lock);

	list_destroy(&refreshq_queue);

	cv_destroy(&refreshq_cv);
	mutex_destroy(&refreshq_lock);

	mutex_destroy(&mountd_lock);

	/*
	 * Deallocate nfsauth cache handle
	 */
	kmem_cache_destroy(exi_cache_handle);
}

/*
 * Convert the address in a netbuf to
 * a hash index for the auth_cache table.
 */
static int
hash(struct netbuf *a)
{
	int i, h = 0;

	for (i = 0; i < a->len; i++)
		h ^= a->buf[i];

	return (h & (AUTH_TABLESIZE - 1));
}

/*
 * Mask out the components of an
 * address that do not identify
 * a host. For socket addresses the
 * masking gets rid of the port number.
 */
static void
addrmask(struct netbuf *addr, struct netbuf *mask)
{
	int i;

	for (i = 0; i < addr->len; i++)
		addr->buf[i] &= mask->buf[i];
}
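
/*
 * For illustration (the addresses are hypothetical): on an AF_INET
 * transport, the mask supplied by SVC_GETADDRMASK() zeroes the port
 * bytes of the sockaddr, so requests from 203.0.113.5:1023 and
 * 203.0.113.5:2049 mask to identical netbufs, hash() XORs them to the
 * same bucket, and EQADDR() matches them to a single cache entry.
 */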

/*
 * nfsauth4_access is used for NFS V4 auth checking. Besides doing
 * the common nfsauth_access(), it will check if the client can
 * have limited access to this vnode even if the security flavor
 * used does not meet the policy.
 */
int
nfsauth4_access(struct exportinfo *exi, vnode_t *vp, struct svc_req *req)
{
	int access;

	access = nfsauth_access(exi, req);

	/*
	 * There are cases that the server needs to allow the client
	 * to have a limited view.
	 *
	 * e.g.
	 * /export is shared as "sec=sys,rw=dfs-test-4,sec=krb5,rw"
	 * /export/home is shared as "sec=sys,rw"
	 *
	 * When the client mounts /export with sec=sys, the client
	 * would get a limited view with RO access on /export to see
	 * "home" only because the client is allowed to access
	 * /export/home with auth_sys.
	 */
	if (access & NFSAUTH_DENIED || access & NFSAUTH_WRONGSEC) {
		/*
		 * Allow ro permission with LIMITED view if there is a
		 * sub-dir exported under vp.
		 */
		if (has_visible(exi, vp))
			return (NFSAUTH_LIMITED);
	}

	return (access);
}

static void
sys_log(const char *msg)
{
	static time_t	tstamp = 0;
	time_t		now;

	/*
	 * msg is shown (at most) once per minute
	 */
	now = gethrestime_sec();
	if ((tstamp + 60) < now) {
		tstamp = now;
		cmn_err(CE_WARN, msg);
	}
}

/*
 * Callup to the mountd to get access information in the kernel.
 */
static bool_t
nfsauth_retrieve(struct exportinfo *exi, char *req_netid, int flavor,
    struct netbuf *addr, int *access)
{
	varg_t			varg = {0};
	nfsauth_res_t		res = {0};
	XDR			xdrs_a;
	XDR			xdrs_r;
	size_t			absz;
	caddr_t			abuf;
	size_t			rbsz = (size_t)(BYTES_PER_XDR_UNIT * 2);
	char			result[BYTES_PER_XDR_UNIT * 2] = {0};
	caddr_t			rbuf = (caddr_t)&result;
	int			last = 0;
	door_arg_t		da;
	door_info_t		di;
	door_handle_t		dh;
	uint_t			ntries = 0;

	/*
	 * No entry in the cache for this client/flavor
	 * so we need to call the nfsauth service in the
	 * mount daemon.
	 */
retry:
	mutex_enter(&mountd_lock);
	dh = mountd_dh;
	if (dh)
		door_ki_hold(dh);
	mutex_exit(&mountd_lock);

	if (dh == NULL) {
		/*
		 * The rendezvous point has not been established yet!
		 * This could mean that either mountd(1m) has not yet
		 * been started or that _this_ routine nuked the door
		 * handle after receiving an EINTR for a REVOKED door.
		 *
		 * Returning NFSAUTH_DROP will cause the NFS client
		 * to retransmit the request, so let's try to be more
		 * resilient and attempt for ntries before we bail.
		 */
		if (++ntries % NFSAUTH_DR_TRYCNT) {
			delay(hz);
			goto retry;
		}

		sys_log("nfsauth: mountd has not established door");
		*access = NFSAUTH_DROP;
		return (FALSE);
	}

	ntries = 0;
	varg.vers = V_PROTO;
	varg.arg_u.arg.cmd = NFSAUTH_ACCESS;
	varg.arg_u.arg.areq.req_client.n_len = addr->len;
	varg.arg_u.arg.areq.req_client.n_bytes = addr->buf;
	varg.arg_u.arg.areq.req_netid = req_netid;
	varg.arg_u.arg.areq.req_path = exi->exi_export.ex_path;
	varg.arg_u.arg.areq.req_flavor = flavor;

	/*
	 * Setup the XDR stream for encoding the arguments. Notice that
	 * in addition to the args having variable fields (req_netid and
	 * req_path), the argument data structure is itself versioned,
	 * so we need to make sure we can size the arguments buffer
	 * appropriately to encode all the args. If we can't get sizing
	 * info _or_ properly encode the arguments, there's really no
	 * point in continuing, so we fail the request.
	 */
	DTRACE_PROBE1(nfsserv__func__nfsauth__varg, varg_t *, &varg);
	if ((absz = xdr_sizeof(xdr_varg, (void *)&varg)) == 0) {
		door_ki_rele(dh);
		*access = NFSAUTH_DENIED;
		return (FALSE);
	}

	abuf = (caddr_t)kmem_alloc(absz, KM_SLEEP);
	xdrmem_create(&xdrs_a, abuf, absz, XDR_ENCODE);
	if (!xdr_varg(&xdrs_a, &varg)) {
		door_ki_rele(dh);
		goto fail;
	}
	XDR_DESTROY(&xdrs_a);
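
	/*
	 * The two-pass pattern above is worth calling out: xdr_sizeof()
	 * walks varg once to compute the encoded size, and xdrmem_create()
	 * plus xdr_varg() then encode into a buffer of exactly that size.
	 * Any disagreement between the two passes would overrun abuf,
	 * which is why both a zero size and a failed encode abort the
	 * request.
	 */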

	/*
	 * The result (nfsauth_res_t) is always two int's, so we don't
	 * have to dynamically size (or allocate) the results buffer.
	 * Now that we've got what we need, we prep the door arguments
	 * and place the call.
	 */
	da.data_ptr = (char *)abuf;
	da.data_size = absz;
	da.desc_ptr = NULL;
	da.desc_num = 0;
	da.rbuf = (char *)rbuf;
	da.rsize = rbsz;

	switch (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0)) {
	case 0:				/* Success */
		if (da.data_ptr != da.rbuf && da.data_size == 0) {
			/*
			 * The door_return that contained the data
			 * failed! We're here because of the 2nd
			 * door_return (w/o data) such that we can
			 * get control of the thread (and exit
			 * gracefully).
			 */
			DTRACE_PROBE1(nfsserv__func__nfsauth__door__nil,
			    door_arg_t *, &da);
			door_ki_rele(dh);
			goto fail;

		} else if (rbuf != da.rbuf) {
			/*
			 * The only time this should be true
			 * is if userland wanted to hand us
			 * a bigger response than what we
			 * expect; that should not happen
			 * (nfsauth_res_t is only 2 int's),
			 * but we check nevertheless.
			 */
			rbuf = da.rbuf;
			rbsz = da.rsize;

		} else if (rbsz > da.data_size) {
			/*
			 * We were expecting two int's; but if
			 * userland fails in encoding the XDR
			 * stream, we detect that here, since
			 * the mountd forces down only one byte
			 * in such a scenario.
			 */
			door_ki_rele(dh);
			goto fail;
		}
		door_ki_rele(dh);
		break;

	case EAGAIN:
		/*
		 * Server out of resources; back off for a bit
		 */
		door_ki_rele(dh);
		kmem_free(abuf, absz);
		delay(hz);
		goto retry;
		/* NOTREACHED */

	case EINTR:
		if (!door_ki_info(dh, &di)) {
			if (di.di_attributes & DOOR_REVOKED) {
				/*
				 * The server barfed and revoked
				 * the (existing) door on us; we
				 * want to wait to give smf(5) a
				 * chance to restart mountd(1m)
				 * and establish a new door handle.
				 */
				mutex_enter(&mountd_lock);
				if (dh == mountd_dh)
					mountd_dh = NULL;
				mutex_exit(&mountd_lock);
				door_ki_rele(dh);
				kmem_free(abuf, absz);
				delay(hz);
				goto retry;
			}
			/*
			 * If the door was _not_ revoked on us,
			 * then more than likely we took an INTR,
			 * so we need to fail the operation.
			 */
			door_ki_rele(dh);
			goto fail;
		}
		/*
		 * The only failure that can occur from getting
		 * the door info is EINVAL, so we let the code
		 * below handle it.
		 */
		/* FALLTHROUGH */

	case EBADF:
	case EINVAL:
	default:
		/*
		 * If we have a stale door handle, give smf a last
		 * chance to start it by sleeping for a little bit.
		 * If we're still hosed, we'll fail the call.
		 *
		 * Since we're going to reacquire the door handle
		 * upon the retry, we opt to sleep for a bit and
		 * _not_ to clear mountd_dh. If mountd restarted
		 * and was able to set mountd_dh, we should see
		 * the new instance; if not, we won't get caught
		 * up in the retry/DELAY loop.
		 */
		door_ki_rele(dh);
		if (!last) {
			delay(hz);
			last++;
			goto retry;
		}
		sys_log("nfsauth: stale mountd door handle");
		goto fail;
	}
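
	/*
	 * To recap the dispositions above: success falls through to the
	 * decode below; EAGAIN backs off one clock tick and retries;
	 * EINTR on a REVOKED door drops the cached handle and retries
	 * (waiting for smf(5) to restart mountd); a plain EINTR fails
	 * the call; anything else gets one delayed retry and then fails.
	 */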

	/*
	 * No door errors encountered; set up the XDR stream for decoding
	 * the results. If we fail to decode the results, we've got no
	 * other recourse than to fail the request.
	 */
	xdrmem_create(&xdrs_r, rbuf, rbsz, XDR_DECODE);
	if (!xdr_nfsauth_res(&xdrs_r, &res))
		goto fail;
	XDR_DESTROY(&xdrs_r);

	DTRACE_PROBE1(nfsserv__func__nfsauth__results, nfsauth_res_t *, &res);
	switch (res.stat) {
	case NFSAUTH_DR_OKAY:
		*access = res.ares.auth_perm;
		kmem_free(abuf, absz);
		break;

	case NFSAUTH_DR_EFAIL:
	case NFSAUTH_DR_DECERR:
	case NFSAUTH_DR_BADCMD:
	default:
fail:
		*access = NFSAUTH_DENIED;
		kmem_free(abuf, absz);
		return (FALSE);
		/* NOTREACHED */
	}

	return (TRUE);
}
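
/*
 * For reference, a sketch of the auth_cache state machine that the
 * refresh thread below participates in (states and transitions are
 * taken from the code in this file):
 *
 *	NFS_AUTH_FRESH      --(hit older than NFSAUTH_CACHE_REFRESH)-->
 *	NFS_AUTH_STALE      --(picked up by nfsauth_refresh_thread)-->
 *	NFS_AUTH_REFRESHING --(mountd answered, or error)-->
 *	NFS_AUTH_FRESH
 *
 * exi_cache_trim() moves any idle non-FRESH entry to NFS_AUTH_INVALID
 * (and onto refreshq_dead_entries); INVALID is terminal, and such
 * entries are freed by nfsauth_remove_dead_entry().
 */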
static void
nfsauth_refresh_thread(void)
{
	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;

	struct exportinfo	*exi;

	int			access;
	bool_t			retrieval;

	callb_cpr_t		cprinfo;

	CALLB_CPR_INIT(&cprinfo, &refreshq_lock, callb_generic_cpr,
	    "nfsauth_refresh");

	for (;;) {
		mutex_enter(&refreshq_lock);
		if (refreshq_thread_state != REFRESHQ_THREAD_RUNNING) {
			/* Keep the hold on the lock! */
			break;
		}

		ren = list_remove_head(&refreshq_queue);
		if (ren == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&refreshq_cv, &refreshq_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &refreshq_lock);
			mutex_exit(&refreshq_lock);
			continue;
		}
		mutex_exit(&refreshq_lock);

		exi = ren->ren_exi;
		ASSERT(exi != NULL);

		/*
		 * Since the ren was removed from the refreshq_queue above,
		 * this is the only thread aware of this ren's existence, so
		 * we have exclusive ownership of it and do not need to
		 * protect it with any lock.
		 */
		while ((ran = list_remove_head(&ren->ren_authlist))) {

			struct auth_cache *p = ran->ran_auth;

			ASSERT(p != NULL);
			kmem_free(ran, sizeof (refreshq_auth_node_t));

			/*
			 * We are shutting down. No need to refresh
			 * entries which are about to be nuked.
			 *
			 * So just throw them away until we are done
			 * with this exi node...
			 */
			if (refreshq_thread_state != REFRESHQ_THREAD_RUNNING)
				continue;

			mutex_enter(&p->auth_lock);

			/*
			 * Make sure the state is valid now that
			 * we have the lock. Note that once we
			 * change the state to NFS_AUTH_REFRESHING,
			 * no other thread will be able to work on
			 * this entry.
			 */
			if (p->auth_state != NFS_AUTH_STALE) {
				/*
				 * Once it goes INVALID, it can not
				 * change state.
				 */
				if (p->auth_state == NFS_AUTH_INVALID) {
					mutex_exit(&p->auth_lock);
					nfsauth_remove_dead_entry(p);
				} else
					mutex_exit(&p->auth_lock);

				continue;
			}

			p->auth_state = NFS_AUTH_REFRESHING;
			mutex_exit(&p->auth_lock);

			DTRACE_PROBE2(nfsauth__debug__cache__refresh,
			    struct exportinfo *, exi,
			    struct auth_cache *, p);

			/*
			 * The first caching of the access rights
			 * is done with the netid pulled out of the
			 * request from the client. All subsequent
			 * users of the cache may or may not have
			 * the same netid. It doesn't matter. So
			 * when we refresh, we simply use the netid
			 * of the request which triggered the
			 * refresh attempt.
			 */
			ASSERT(p->auth_netid != NULL);

			retrieval = nfsauth_retrieve(exi, p->auth_netid,
			    p->auth_flavor, &p->auth_addr, &access);

			/*
			 * This can only be set in one other place
			 * and the state has to be NFS_AUTH_FRESH.
			 */
			kmem_free(p->auth_netid, strlen(p->auth_netid) + 1);
			p->auth_netid = NULL;

			mutex_enter(&p->auth_lock);
			if (p->auth_state == NFS_AUTH_INVALID) {
				mutex_exit(&p->auth_lock);
				nfsauth_remove_dead_entry(p);
			} else {
				/*
				 * If we got an error, do not reset the
				 * time. This will cause the next access
				 * check for the client to reschedule this
				 * node.
				 */
				if (retrieval == TRUE) {
					p->auth_access = access;
					p->auth_freshness = gethrestime_sec();
				}
				p->auth_state = NFS_AUTH_FRESH;
				mutex_exit(&p->auth_lock);
			}
		}

		list_destroy(&ren->ren_authlist);
		exi_rele(ren->ren_exi);
		kmem_free(ren, sizeof (refreshq_exi_node_t));
	}

	refreshq_thread_state = REFRESHQ_THREAD_HALTED;
	cv_broadcast(&refreshq_cv);
	CALLB_CPR_EXIT(&cprinfo);
	zthread_exit();
}
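
/*
 * nfsauth_cache_get() below is the producer for the queue drained
 * above: a hit on an entry older than NFSAUTH_CACHE_REFRESH marks it
 * STALE, enqueues it (grouped per export), and still returns the
 * previously cached access, so cache hits never block on the mountd
 * upcall.
 */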

/*
 * Get the access information from the cache or callup to the mountd
 * to get and cache the access information in the kernel.
 */
int
nfsauth_cache_get(struct exportinfo *exi, struct svc_req *req, int flavor)
{
	struct netbuf		*taddrmask;
	struct netbuf		addr;
	struct netbuf		*claddr;
	struct auth_cache	**head;
	struct auth_cache	*p;
	int			access;
	time_t			refresh;

	refreshq_exi_node_t	*ren;
	refreshq_auth_node_t	*ran;

	/*
	 * Now check whether this client already
	 * has an entry for this flavor in the cache
	 * for this export.
	 * Get the caller's address, mask off the
	 * parts of the address that do not identify
	 * the host (port number, etc), and then hash
	 * it to find the chain of cache entries.
	 */

	claddr = svc_getrpccaller(req->rq_xprt);
	addr = *claddr;
	addr.buf = kmem_alloc(addr.len, KM_SLEEP);
	bcopy(claddr->buf, addr.buf, claddr->len);
	SVC_GETADDRMASK(req->rq_xprt, SVC_TATTR_ADDRMASK, (void **)&taddrmask);
	ASSERT(taddrmask != NULL);
	if (taddrmask)
		addrmask(&addr, taddrmask);

	rw_enter(&exi->exi_cache_lock, RW_READER);
	head = &exi->exi_cache[hash(&addr)];
	for (p = *head; p; p = p->auth_next) {
		if (EQADDR(&addr, &p->auth_addr) && flavor == p->auth_flavor)
			break;
	}

	if (p != NULL) {
		nfsauth_cache_hit++;

		refresh = gethrestime_sec() - p->auth_freshness;
		DTRACE_PROBE2(nfsauth__debug__cache__hit,
		    int, nfsauth_cache_hit,
		    time_t, refresh);

		mutex_enter(&p->auth_lock);
		if ((refresh > NFSAUTH_CACHE_REFRESH) &&
		    p->auth_state == NFS_AUTH_FRESH) {
			p->auth_state = NFS_AUTH_STALE;
			mutex_exit(&p->auth_lock);

			ASSERT(p->auth_netid == NULL);
			p->auth_netid =
			    strdup(svc_getnetid(req->rq_xprt));

			nfsauth_cache_refresh++;

			DTRACE_PROBE3(nfsauth__debug__cache__stale,
			    struct exportinfo *, exi,
			    struct auth_cache *, p,
			    int, nfsauth_cache_refresh);

			ran = kmem_alloc(sizeof (refreshq_auth_node_t),
			    KM_SLEEP);
			ran->ran_auth = p;

			mutex_enter(&refreshq_lock);
			/*
			 * We should not add a work queue
			 * item if the thread is not
			 * accepting them.
			 */
			if (refreshq_thread_state == REFRESHQ_THREAD_RUNNING) {
				/*
				 * Is there an existing exi_list?
				 */
				for (ren = list_head(&refreshq_queue);
				    ren != NULL;
				    ren = list_next(&refreshq_queue, ren)) {
					if (ren->ren_exi == exi) {
						list_insert_tail(
						    &ren->ren_authlist, ran);
						break;
					}
				}

				if (ren == NULL) {
					ren = kmem_alloc(
					    sizeof (refreshq_exi_node_t),
					    KM_SLEEP);

					exi_hold(exi);
					ren->ren_exi = exi;

					list_create(&ren->ren_authlist,
					    sizeof (refreshq_auth_node_t),
					    offsetof(refreshq_auth_node_t,
					    ran_node));

					list_insert_tail(&ren->ren_authlist,
					    ran);
					list_insert_tail(&refreshq_queue, ren);
				}

				cv_broadcast(&refreshq_cv);
			} else {
				kmem_free(ran, sizeof (refreshq_auth_node_t));
			}

			mutex_exit(&refreshq_lock);
		} else {
			mutex_exit(&p->auth_lock);
		}

		access = p->auth_access;
		p->auth_time = gethrestime_sec();

		rw_exit(&exi->exi_cache_lock);
		kmem_free(addr.buf, addr.len);

		return (access);
	}

	rw_exit(&exi->exi_cache_lock);

	nfsauth_cache_miss++;

	if (!nfsauth_retrieve(exi, svc_getnetid(req->rq_xprt), flavor,
	    &addr, &access)) {
		kmem_free(addr.buf, addr.len);
		return (access);
	}

	/*
	 * Now cache the result on the cache chain
	 * for this export (if there's enough memory)
	 */
	p = kmem_cache_alloc(exi_cache_handle, KM_NOSLEEP);
	if (p != NULL) {
		p->auth_addr = addr;
		p->auth_flavor = flavor;
		p->auth_access = access;
		p->auth_time = p->auth_freshness = gethrestime_sec();
		p->auth_state = NFS_AUTH_FRESH;
		p->auth_netid = NULL;
		mutex_init(&p->auth_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&exi->exi_cache_lock, RW_WRITER);
		p->auth_next = *head;
		*head = p;
		rw_exit(&exi->exi_cache_lock);
	} else {
		kmem_free(addr.buf, addr.len);
	}

	return (access);
}

/*
 * Check if the requesting client has access to the filesystem with
 * a given nfs flavor number which is an explicitly shared flavor.
 */
int
nfsauth4_secinfo_access(struct exportinfo *exi, struct svc_req *req,
    int flavor, int perm)
{
	int access;

	if (! (perm & M_4SEC_EXPORTED)) {
		return (NFSAUTH_DENIED);
	}

	/*
	 * Optimize if there are no lists
	 */
	if ((perm & (M_ROOT|M_NONE)) == 0) {
		perm &= ~M_4SEC_EXPORTED;
		if (perm == M_RO)
			return (NFSAUTH_RO);
		if (perm == M_RW)
			return (NFSAUTH_RW);
	}

	access = nfsauth_cache_get(exi, req, flavor);

	return (access);
}
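
/*
 * An illustrative fast-path case (the share options are hypothetical):
 * a filesystem shared as "sec=sys,ro", with no ro=, rw=, or root=
 * client lists, carries s_flags of (M_4SEC_EXPORTED | M_RO), so the
 * checks above answer NFSAUTH_RO immediately, with no mountd upcall
 * and no cache traffic.
 */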

int
nfsauth_access(struct exportinfo *exi, struct svc_req *req)
{
	int access, mapaccess;
	struct secinfo *sp;
	int i, flavor, perm;
	int authnone_entry = -1;

	/*
	 * Get the nfs flavor number from xprt.
	 */
	flavor = (int)(uintptr_t)req->rq_xprt->xp_cookie;

	/*
	 * First check the access restrictions on the filesystem. If
	 * there are no lists associated with this flavor then there's no
	 * need to make an expensive call to the nfsauth service or to
	 * cache anything.
	 */

	sp = exi->exi_export.ex_secinfo;
	for (i = 0; i < exi->exi_export.ex_seccnt; i++) {
		if (flavor != sp[i].s_secinfo.sc_nfsnum) {
			if (sp[i].s_secinfo.sc_nfsnum == AUTH_NONE)
				authnone_entry = i;
			continue;
		}
		break;
	}

	mapaccess = 0;

	if (i >= exi->exi_export.ex_seccnt) {
		/*
		 * Flavor not found, but use AUTH_NONE if it exists
		 */
		if (authnone_entry == -1)
			return (NFSAUTH_DENIED);
		flavor = AUTH_NONE;
		mapaccess = NFSAUTH_MAPNONE;
		i = authnone_entry;
	}

	/*
	 * If the flavor is in the ex_secinfo list, but not an explicitly
	 * shared flavor by the user, it is a result of the nfsv4 server
	 * namespace setup. We will grant RO permission, similar to a
	 * pseudo node, except that this node is a shared one.
	 *
	 * e.g. flavor in (flavor) indicates that it is not explicitly
	 * shared by the user:
	 *
	 *		/	(sys, krb5)
	 *		|
	 *		export	#share -o sec=sys (krb5)
	 *		|
	 *		secure	#share -o sec=krb5
	 *
	 * In this case, when a krb5 request comes in to access
	 * /export, RO permission is granted.
	 */
	if (!(sp[i].s_flags & M_4SEC_EXPORTED))
		return (mapaccess | NFSAUTH_RO);

	/*
	 * Optimize if there are no lists
	 */
	perm = sp[i].s_flags;
	if ((perm & (M_ROOT|M_NONE)) == 0) {
		perm &= ~M_4SEC_EXPORTED;
		if (perm == M_RO)
			return (mapaccess | NFSAUTH_RO);
		if (perm == M_RW)
			return (mapaccess | NFSAUTH_RW);
	}

	access = nfsauth_cache_get(exi, req, flavor);

	/*
	 * Client's security flavor doesn't match with "ro" or
	 * "rw" list. Try again using AUTH_NONE if present.
	 */
	if ((access & NFSAUTH_WRONGSEC) && (flavor != AUTH_NONE)) {
		/*
		 * Have we already encountered AUTH_NONE ?
		 */
		if (authnone_entry != -1) {
			mapaccess = NFSAUTH_MAPNONE;
			access = nfsauth_cache_get(exi, req, AUTH_NONE);
		} else {
			/*
			 * Check for AUTH_NONE presence.
			 */
			for (; i < exi->exi_export.ex_seccnt; i++) {
				if (sp[i].s_secinfo.sc_nfsnum == AUTH_NONE) {
					mapaccess = NFSAUTH_MAPNONE;
					access = nfsauth_cache_get(exi, req,
					    AUTH_NONE);
					break;
				}
			}
		}
	}

	if (access & NFSAUTH_DENIED)
		access = NFSAUTH_DENIED;

	return (access | mapaccess);
}

static void
nfsauth_free_node(struct auth_cache *p)
{
	if (p->auth_netid != NULL)
		kmem_free(p->auth_netid, strlen(p->auth_netid) + 1);
	kmem_free(p->auth_addr.buf, p->auth_addr.len);
	mutex_destroy(&p->auth_lock);
	kmem_cache_free(exi_cache_handle, (void *)p);
}
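
/*
 * A dead entry is chained onto refreshq_dead_entries through its
 * auth_next pointer; once an entry has gone NFS_AUTH_INVALID it has
 * already been unlinked from its exi_cache hash chain (see
 * exi_cache_trim() below), so auth_next is free to be reused for
 * this purpose.
 */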

/*
 * Remove the dead entry from the refreshq_dead_entries
 * list.
 */
static void
nfsauth_remove_dead_entry(struct auth_cache *dead)
{
	struct auth_cache	*p;
	struct auth_cache	*prev;
	struct auth_cache	*next;

	mutex_enter(&refreshq_lock);
	prev = NULL;
	for (p = refreshq_dead_entries; p != NULL; p = next) {
		next = p->auth_next;

		if (p == dead) {
			if (prev == NULL)
				refreshq_dead_entries = next;
			else
				prev->auth_next = next;

			nfsauth_free_node(dead);
			break;
		}

		prev = p;
	}
	mutex_exit(&refreshq_lock);
}

/*
 * Free the nfsauth cache for a given export
 */
void
nfsauth_cache_free(struct exportinfo *exi)
{
	int i;
	struct auth_cache *p, *next;

	for (i = 0; i < AUTH_TABLESIZE; i++) {
		for (p = exi->exi_cache[i]; p; p = next) {
			next = p->auth_next;

			/*
			 * The only way we got here
			 * was with an exi_rele, which
			 * means that no auth cache entry
			 * is being refreshed.
			 */
			nfsauth_free_node(p);
		}
	}
}

/*
 * Called by the kernel memory allocator when
 * memory is low. Free unused cache entries.
 * If that's not enough, the VM system will
 * call again for some more.
 */
/*ARGSUSED*/
void
exi_cache_reclaim(void *cdrarg)
{
	int i;
	struct exportinfo *exi;

	rw_enter(&exported_lock, RW_READER);

	for (i = 0; i < EXPTABLESIZE; i++) {
		for (exi = exptable[i]; exi; exi = exi->fid_hash.next) {
			exi_cache_trim(exi);
		}
	}
	nfsauth_cache_reclaim++;

	rw_exit(&exported_lock);
}

void
exi_cache_trim(struct exportinfo *exi)
{
	struct auth_cache *p;
	struct auth_cache *prev, *next;
	int i;
	time_t stale_time;

	stale_time = gethrestime_sec() - NFSAUTH_CACHE_TRIM;

	rw_enter(&exi->exi_cache_lock, RW_WRITER);

	for (i = 0; i < AUTH_TABLESIZE; i++) {

		/*
		 * Free entries that have not been
		 * used for NFSAUTH_CACHE_TRIM seconds.
		 */
		prev = NULL;
		for (p = exi->exi_cache[i]; p; p = next) {
			next = p->auth_next;
			if (p->auth_time > stale_time) {
				prev = p;
				continue;
			}

			mutex_enter(&p->auth_lock);
			DTRACE_PROBE1(nfsauth__debug__trim__state,
			    auth_state_t, p->auth_state);

			if (p->auth_state != NFS_AUTH_FRESH) {
				p->auth_state = NFS_AUTH_INVALID;
				mutex_exit(&p->auth_lock);

				mutex_enter(&refreshq_lock);
				p->auth_next = refreshq_dead_entries;
				refreshq_dead_entries = p;
				mutex_exit(&refreshq_lock);
			} else {
				mutex_exit(&p->auth_lock);
				nfsauth_free_node(p);
			}

			if (prev == NULL)
				exi->exi_cache[i] = next;
			else
				prev->auth_next = next;
		}
	}

	rw_exit(&exi->exi_cache_lock);
}