/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/systm.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <nfs/nfs4_kprot.h>
#include <nfs/nfs4.h>
#include <nfs/lm.h>
#include <sys/cmn_err.h>
#include <sys/disp.h>

#include <sys/pathname.h>

#include <sys/strsubr.h>
#include <sys/ddi.h>

#include <sys/vnode.h>
#include <sys/sdt.h>
#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>

#define	MAX_READ_DELEGATIONS 5

krwlock_t rfs4_deleg_policy_lock;
srv_deleg_policy_t rfs4_deleg_policy = SRV_NEVER_DELEGATE;
static int rfs4_deleg_wlp = 5;
kmutex_t rfs4_deleg_lock;
static int rfs4_deleg_disabled;

#ifdef DEBUG

static int rfs4_test_cbgetattr_fail = 0;
int rfs4_cb_null;
int rfs4_cb_debug;
int rfs4_deleg_debug;

#endif

static void rfs4_recall_file(rfs4_file_t *,
    void (*recall)(rfs4_deleg_state_t *, bool_t),
    bool_t, rfs4_client_t *);
static void rfs4_revoke_deleg(rfs4_deleg_state_t *);
static void rfs4_revoke_file(rfs4_file_t *);
static void rfs4_cb_chflush(rfs4_cbinfo_t *);
static CLIENT *rfs4_cb_getch(rfs4_cbinfo_t *);
static void rfs4_cb_freech(rfs4_cbinfo_t *, CLIENT *, bool_t);
static rfs4_deleg_state_t *rfs4_deleg_state(rfs4_state_t *,
    open_delegation_type4, int *);

/*
 * Convert a universal address to a transport specific
 * address using inet_pton.
 */
static int
uaddr2sockaddr(int af, char *ua, void *ap, in_port_t *pp)
{
	int dots = 0, i, j, len, k;
	unsigned char c;
	in_port_t port = 0;

	len = strlen(ua);

	for (i = len - 1; i >= 0; i--) {

		if (ua[i] == '.')
			dots++;

		if (dots == 2) {

			ua[i] = '\0';
			/*
			 * We use k to remember where to stick '.' back,
			 * since ua was kmem_alloc'ed from the pool with
			 * len + 1 bytes.
			 */
			k = i;
			if (inet_pton(af, ua, ap) == 1) {

				c = 0;

				for (j = i + 1; j < len; j++) {
					if (ua[j] == '.') {
						port = c << 8;
						c = 0;
					} else if (ua[j] >= '0' &&
					    ua[j] <= '9') {
						c *= 10;
						c += ua[j] - '0';
					} else {
						ua[k] = '.';
						return (EINVAL);
					}
				}
				port += c;


				/* reset to network order */
				if (af == AF_INET) {
					*(uint32_t *)ap =
					    htonl(*(uint32_t *)ap);
					*pp = htons(port);
				} else {
					int ix;
					uint16_t *sap;

					for (sap = ap, ix = 0; ix <
					    sizeof (struct in6_addr) /
					    sizeof (uint16_t); ix++)
						sap[ix] = htons(sap[ix]);

					*pp = htons(port);
				}

				ua[k] = '.';
				return (0);
			} else {
				ua[k] = '.';
				return (EINVAL);
			}
		}
	}

	return (EINVAL);
}
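
/*
 * Illustrative example (not part of the original source): a universal
 * address appends the port to the address as two extra dot-separated
 * decimal components, port = (hi << 8) | lo.  Parsing the IPv4 uaddr
 * "127.0.0.1.8.1" with the routine above would look roughly like:
 *
 *	struct in_addr ia;
 *	in_port_t port;
 *	char ua[] = "127.0.0.1.8.1";
 *
 *	if (uaddr2sockaddr(AF_INET, ua, &ia, &port) == 0) {
 *		// ia holds 127.0.0.1; port holds 8 * 256 + 1 == 2049,
 *		// stored via htons()
 *	}
 */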

/*
 * Update the delegation policy with the
 * value of "new_policy"
 */
void
rfs4_set_deleg_policy(srv_deleg_policy_t new_policy)
{
	rw_enter(&rfs4_deleg_policy_lock, RW_WRITER);
	rfs4_deleg_policy = new_policy;
	rw_exit(&rfs4_deleg_policy_lock);
}

void
rfs4_hold_deleg_policy(void)
{
	rw_enter(&rfs4_deleg_policy_lock, RW_READER);
}

void
rfs4_rele_deleg_policy(void)
{
	rw_exit(&rfs4_deleg_policy_lock);
}
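
/*
 * Readers of rfs4_deleg_policy are expected to bracket their access
 * with the hold/rele pair above.  A minimal sketch of the intended
 * usage (illustrative, not part of the original source;
 * rfs4_check_delegated() below follows this pattern):
 *
 *	rfs4_hold_deleg_policy();
 *	if (rfs4_deleg_policy != SRV_NEVER_DELEGATE) {
 *		// ... work that depends on delegation being enabled ...
 *	}
 *	rfs4_rele_deleg_policy();
 */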

/*
 * This free function is to be used when the client struct is being
 * released and nothing at all is needed of the callback info any
 * longer.
 */
void
rfs4_cbinfo_free(rfs4_cbinfo_t *cbp)
{
	char *addr = cbp->cb_callback.cb_location.r_addr;
	char *netid = cbp->cb_callback.cb_location.r_netid;

	/* Free old address if any */

	if (addr)
		kmem_free(addr, strlen(addr) + 1);
	if (netid)
		kmem_free(netid, strlen(netid) + 1);

	addr = cbp->cb_newer.cb_callback.cb_location.r_addr;
	netid = cbp->cb_newer.cb_callback.cb_location.r_netid;

	if (addr)
		kmem_free(addr, strlen(addr) + 1);
	if (netid)
		kmem_free(netid, strlen(netid) + 1);

	if (cbp->cb_chc_free) {
		rfs4_cb_chflush(cbp);
	}
}

/*
 * The server uses this to check the callback path supplied by the
 * client.  The callback connection is marked "in progress" while this
 * work is going on and then eventually marked either OK or FAILED.
 * This work can be done as part of a separate thread and at the end
 * of this the thread will exit or it may be done such that the caller
 * will continue with other work.
 */
static void
rfs4_do_cb_null(rfs4_client_t *cp)
{
	struct timeval tv;
	CLIENT *ch;
	rfs4_cbstate_t newstate;
	rfs4_cbinfo_t *cbp = &cp->cbinfo;

	mutex_enter(cbp->cb_lock);
	/* If another thread is doing CB_NULL RPC then return */
	if (cbp->cb_nullcaller == TRUE) {
		mutex_exit(cbp->cb_lock);
		rfs4_client_rele(cp);
		return;
	}

	/* Mark the cbinfo as having a thread in the NULL callback */
	cbp->cb_nullcaller = TRUE;

	/*
	 * Are there other threads still using the cbinfo client
	 * handles?  If so, this thread must wait before going and
	 * mucking around with the callback information.
	 */
	while (cbp->cb_refcnt != 0)
		cv_wait(cbp->cb_cv_nullcaller, cbp->cb_lock);

	/*
	 * This thread itself may find that new callback info has
	 * arrived and is set up to handle this case and redrive the
	 * call to the client's callback server.
	 */
retry:
	if (cbp->cb_newer.cb_new == TRUE &&
	    cbp->cb_newer.cb_confirmed == TRUE) {
		char *addr = cbp->cb_callback.cb_location.r_addr;
		char *netid = cbp->cb_callback.cb_location.r_netid;

		/*
		 * Free the old stuff if it exists; may be the first
		 * time through this path
		 */
		if (addr)
			kmem_free(addr, strlen(addr) + 1);
		if (netid)
			kmem_free(netid, strlen(netid) + 1);

		/* Move over the addr/netid */
		cbp->cb_callback.cb_location.r_addr =
		    cbp->cb_newer.cb_callback.cb_location.r_addr;
		cbp->cb_newer.cb_callback.cb_location.r_addr = NULL;
		cbp->cb_callback.cb_location.r_netid =
		    cbp->cb_newer.cb_callback.cb_location.r_netid;
		cbp->cb_newer.cb_callback.cb_location.r_netid = NULL;

		/* Get the program number */
		cbp->cb_callback.cb_program =
		    cbp->cb_newer.cb_callback.cb_program;
		cbp->cb_newer.cb_callback.cb_program = 0;

		/* Don't forget the protocol's "cb_ident" field */
		cbp->cb_ident = cbp->cb_newer.cb_ident;
		cbp->cb_newer.cb_ident = 0;

		/* no longer new */
		cbp->cb_newer.cb_new = FALSE;
		cbp->cb_newer.cb_confirmed = FALSE;

		/* get rid of the old client handles that may exist */
		rfs4_cb_chflush(cbp);

		cbp->cb_state = CB_NONE;
		cbp->cb_timefailed = 0; /* reset the clock */
		cbp->cb_notified_of_cb_path_down = TRUE;
	}

	if (cbp->cb_state != CB_NONE) {
		cv_broadcast(cbp->cb_cv);	/* let the others know */
		cbp->cb_nullcaller = FALSE;
		mutex_exit(cbp->cb_lock);
		rfs4_client_rele(cp);
		return;
	}

	/* mark rfs4_client_t as CALLBACK NULL in progress */
	cbp->cb_state = CB_INPROG;
	mutex_exit(cbp->cb_lock);

	/* get/generate a client handle */
	if ((ch = rfs4_cb_getch(cbp)) == NULL) {
		mutex_enter(cbp->cb_lock);
		cbp->cb_state = CB_BAD;
		cbp->cb_timefailed = gethrestime_sec(); /* observability */
		goto retry;
	}


	tv.tv_sec = 30;
	tv.tv_usec = 0;
	if (clnt_call(ch, CB_NULL, xdr_void, NULL, xdr_void, NULL, tv) != 0) {
		newstate = CB_BAD;
	} else {
		newstate = CB_OK;
#ifdef DEBUG
		rfs4_cb_null++;
#endif
	}

	/* Check to see if the client has specified new callback info */
	mutex_enter(cbp->cb_lock);
	rfs4_cb_freech(cbp, ch, TRUE);
	if (cbp->cb_newer.cb_new == TRUE &&
	    cbp->cb_newer.cb_confirmed == TRUE) {
		goto retry;	/* give the CB_NULL another chance */
	}

	cbp->cb_state = newstate;
	if (cbp->cb_state == CB_BAD)
		cbp->cb_timefailed = gethrestime_sec(); /* observability */

	cv_broadcast(cbp->cb_cv);	/* start up the other threads */
	cbp->cb_nullcaller = FALSE;
	mutex_exit(cbp->cb_lock);

	rfs4_client_rele(cp);
}
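
/*
 * Summary of the callback-path states driven by rfs4_do_cb_null()
 * (informational comment, not part of the original source):
 *
 *	CB_NONE   - no CB_NULL has been attempted for the current info
 *	CB_INPROG - this thread is performing the CB_NULL RPC
 *	CB_OK     - the CB_NULL succeeded; callback path is usable
 *	CB_BAD    - handle creation or the CB_NULL RPC failed
 *
 * If confirmed new callback info arrives while the probe is running,
 * the function loops back to "retry" and probes the new path instead.
 */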

/*
 * Given a client struct, inspect the callback info to see if the
 * callback path is up and available.  If it is being initialized,
 * then wait for the CB_NULL RPC call to occur.
 */
static rfs4_cbinfo_t *
rfs4_cbinfo_hold(rfs4_client_t *cp)
{
	rfs4_cbinfo_t *cbp = &cp->cbinfo;

retry:
	mutex_enter(cbp->cb_lock);

	if (cbp->cb_newer.cb_new == TRUE && cbp->cb_nullcaller == FALSE) {
		/*
		 * Looks like a new callback path may be available and
		 * no one has set it up.
		 */
		mutex_exit(cbp->cb_lock);
		rfs4_dbe_hold(cp->dbe);
		rfs4_do_cb_null(cp); /* caller will release client hold */
		goto retry;
	}

	/* Is there a thread working on doing the CB_NULL RPC? */
	if (cbp->cb_nullcaller == TRUE)
		cv_wait(cbp->cb_cv, cbp->cb_lock);	/* if so, wait on it */

	/* If the callback path is not okay (up and running), just quit */
	if (cbp->cb_state != CB_OK) {
		mutex_exit(cbp->cb_lock);
		return (NULL);
	}

	/* Let someone know we are using the current callback info */
	cbp->cb_refcnt++;
	mutex_exit(cbp->cb_lock);
	return (cbp);
}

/*
 * The caller is done with the callback info.  It may be that the
 * caller's RPC failed and the NFSv4 client has actually provided new
 * callback information.  If so, let the caller know so they can take
 * advantage of this and maybe retry the RPC that originally failed.
 */
static int
rfs4_cbinfo_rele(rfs4_cbinfo_t *cbp, rfs4_cbstate_t newstate)
{
	int cb_new = FALSE;

	mutex_enter(cbp->cb_lock);

	/* The caller gets a chance to mark the callback info as bad */
	if (newstate != CB_NOCHANGE)
		cbp->cb_state = newstate;
	if (newstate == CB_FAILED) {
		cbp->cb_timefailed = gethrestime_sec(); /* observability */
		cbp->cb_notified_of_cb_path_down = FALSE;
	}

	cbp->cb_refcnt--;	/* no longer using the information */

	/*
	 * A thread may be waiting on this one to finish and if so,
	 * let it know that it is okay to do the CB_NULL to the
	 * client's callback server.
	 */
	if (cbp->cb_refcnt == 0 && cbp->cb_nullcaller)
		cv_broadcast(cbp->cb_cv_nullcaller);

	/*
	 * If this is the last thread to use the callback info and
	 * there is new callback information to try and no thread is
	 * there ready to do the CB_NULL, then return true to the
	 * caller so they can do the CB_NULL.
	 */
	if (cbp->cb_refcnt == 0 &&
	    cbp->cb_nullcaller == FALSE &&
	    cbp->cb_newer.cb_new == TRUE &&
	    cbp->cb_newer.cb_confirmed == TRUE)
		cb_new = TRUE;

	mutex_exit(cbp->cb_lock);

	return (cb_new);
}
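
/*
 * Typical pairing of the two routines above (illustrative sketch, not
 * part of the original source; "ok" stands for the caller's RPC
 * outcome).  rfs4_do_callback() below uses this pattern via a goto:
 *
 *	rfs4_cbinfo_t *cbp;
 *	bool_t ok;
 *
 *	do {
 *		if ((cbp = rfs4_cbinfo_hold(cp)) == NULL)
 *			break;		// no usable callback path
 *		ok = ...issue the callback RPC...;
 *	} while (rfs4_cbinfo_rele(cbp,
 *	    ok ? CB_NOCHANGE : CB_FAILED) == TRUE);	// retry on new info
 */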

/*
 * Given the information in the callback info struct, create a client
 * handle that can be used by the server for its callback path.
 */
static CLIENT *
rfs4_cbch_init(rfs4_cbinfo_t *cbp)
{
	struct knetconfig knc;
	vnode_t *vp;
	struct sockaddr_in addr4;
	struct sockaddr_in6 addr6;
	void *addr, *taddr;
	in_port_t *pp;
	int af;
	char *devnam;
	struct netbuf nb;
	int size;
	CLIENT *ch = NULL;
	int useresvport = 0;

	mutex_enter(cbp->cb_lock);

	if (cbp->cb_callback.cb_location.r_netid == NULL ||
	    cbp->cb_callback.cb_location.r_addr == NULL) {
		goto cb_init_out;
	}

	if (strcmp(cbp->cb_callback.cb_location.r_netid, "tcp") == 0) {
		knc.knc_semantics = NC_TPI_COTS;
		knc.knc_protofmly = "inet";
		knc.knc_proto = "tcp";
		devnam = "/dev/tcp";
		af = AF_INET;
	} else if (strcmp(cbp->cb_callback.cb_location.r_netid, "udp")
	    == 0) {
		knc.knc_semantics = NC_TPI_CLTS;
		knc.knc_protofmly = "inet";
		knc.knc_proto = "udp";
		devnam = "/dev/udp";
		af = AF_INET;
	} else if (strcmp(cbp->cb_callback.cb_location.r_netid, "tcp6")
	    == 0) {
		knc.knc_semantics = NC_TPI_COTS;
		knc.knc_protofmly = "inet6";
		knc.knc_proto = "tcp";
		devnam = "/dev/tcp6";
		af = AF_INET6;
	} else if (strcmp(cbp->cb_callback.cb_location.r_netid, "udp6")
	    == 0) {
		knc.knc_semantics = NC_TPI_CLTS;
		knc.knc_protofmly = "inet6";
		knc.knc_proto = "udp";
		devnam = "/dev/udp6";
		af = AF_INET6;
	} else {
		goto cb_init_out;
	}

	if (lookupname(devnam, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp) != 0) {

		goto cb_init_out;
	}

	if (vp->v_type != VCHR) {
		VN_RELE(vp);
		goto cb_init_out;
	}

	knc.knc_rdev = vp->v_rdev;

	VN_RELE(vp);

	if (af == AF_INET) {
		size = sizeof (addr4);
		bzero(&addr4, size);
		addr4.sin_family = (sa_family_t)af;
		addr = &addr4.sin_addr;
		pp = &addr4.sin_port;
		taddr = &addr4;
	} else /* AF_INET6 */ {
		size = sizeof (addr6);
		bzero(&addr6, size);
		addr6.sin6_family = (sa_family_t)af;
		addr = &addr6.sin6_addr;
		pp = &addr6.sin6_port;
		taddr = &addr6;
	}

	if (uaddr2sockaddr(af,
	    cbp->cb_callback.cb_location.r_addr, addr, pp)) {

		goto cb_init_out;
	}


	nb.maxlen = nb.len = size;
	nb.buf = (char *)taddr;

	if (clnt_tli_kcreate(&knc, &nb, cbp->cb_callback.cb_program,
	    NFS_CB, 0, 0, curthread->t_cred, &ch)) {

		ch = NULL;
	}

	/* turn off reserved port usage (skip if handle creation failed) */
	if (ch != NULL)
		(void) CLNT_CONTROL(ch, CLSET_BINDRESVPORT,
		    (char *)&useresvport);

cb_init_out:
	mutex_exit(cbp->cb_lock);
	return (ch);
}

/*
 * Iterate over the client handle cache and
 * destroy it.
 */
static void
rfs4_cb_chflush(rfs4_cbinfo_t *cbp)
{
	CLIENT *ch;

	while (cbp->cb_chc_free) {
		cbp->cb_chc_free--;
		ch = cbp->cb_chc[cbp->cb_chc_free];
		cbp->cb_chc[cbp->cb_chc_free] = NULL;
		if (ch) {
			if (ch->cl_auth)
				auth_destroy(ch->cl_auth);
			clnt_destroy(ch);
		}
	}
}

/*
 * Return a client handle, either from the small
 * rfs4_client_t cache or one that we just created.
 */
static CLIENT *
rfs4_cb_getch(rfs4_cbinfo_t *cbp)
{
	CLIENT *cbch = NULL;
	uint32_t zilch = 0;

	mutex_enter(cbp->cb_lock);

	if (cbp->cb_chc_free) {
		cbp->cb_chc_free--;
		cbch = cbp->cb_chc[cbp->cb_chc_free];
		mutex_exit(cbp->cb_lock);
		(void) CLNT_CONTROL(cbch, CLSET_XID, (char *)&zilch);
		return (cbch);
	}

	mutex_exit(cbp->cb_lock);

	/* none free so make it now */
	cbch = rfs4_cbch_init(cbp);

	return (cbch);
}

/*
 * Return the client handle to the small cache or
 * destroy it.
 */
static void
rfs4_cb_freech(rfs4_cbinfo_t *cbp, CLIENT *ch, bool_t lockheld)
{
	if (lockheld == FALSE)
		mutex_enter(cbp->cb_lock);

	if (cbp->cb_chc_free < RFS4_CBCH_MAX) {
		cbp->cb_chc[cbp->cb_chc_free++] = ch;
		if (lockheld == FALSE)
			mutex_exit(cbp->cb_lock);
		return;
	}
	if (lockheld == FALSE)
		mutex_exit(cbp->cb_lock);

	/*
	 * The cache is full of free entries; destroy this
	 * client handle and throw it away.
	 */
	if (ch->cl_auth)
		auth_destroy(ch->cl_auth);
	clnt_destroy(ch);
}
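
/*
 * Illustrative use of the client handle cache (not part of the
 * original source).  Callers are expected to pair the get/free
 * operations around each callback RPC, as rfs4_do_callback() does:
 *
 *	CLIENT *ch;
 *
 *	if ((ch = rfs4_cb_getch(cbp)) != NULL) {
 *		// ... clnt_call(ch, ...) ...
 *		rfs4_cb_freech(cbp, ch, FALSE);	// return to cache
 *	}
 */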

/*
 * With the supplied callback information - initialize the client
 * callback data.  If there is a callback in progress, save the
 * callback info so that a thread can pick it up in the future.
 */
void
rfs4_client_setcb(rfs4_client_t *cp, cb_client4 *cb, uint32_t cb_ident)
{
	char *addr = NULL;
	char *netid = NULL;
	rfs4_cbinfo_t *cbp = &cp->cbinfo;
	size_t len;

	/* Set the call back for the client */
	if (cb->cb_location.r_addr && cb->cb_location.r_addr[0] != '\0' &&
	    cb->cb_location.r_netid && cb->cb_location.r_netid[0] != '\0') {
		len = strlen(cb->cb_location.r_addr) + 1;
		addr = kmem_alloc(len, KM_SLEEP);
		bcopy(cb->cb_location.r_addr, addr, len);
		len = strlen(cb->cb_location.r_netid) + 1;
		netid = kmem_alloc(len, KM_SLEEP);
		bcopy(cb->cb_location.r_netid, netid, len);
	}
	/* ready to save the new information but first free old, if exists */
	mutex_enter(cbp->cb_lock);

	cbp->cb_newer.cb_callback.cb_program = cb->cb_program;

	if (cbp->cb_newer.cb_callback.cb_location.r_addr != NULL)
		kmem_free(cbp->cb_newer.cb_callback.cb_location.r_addr,
		    strlen(cbp->cb_newer.cb_callback.cb_location.r_addr) + 1);
	cbp->cb_newer.cb_callback.cb_location.r_addr = addr;

	if (cbp->cb_newer.cb_callback.cb_location.r_netid != NULL)
		kmem_free(cbp->cb_newer.cb_callback.cb_location.r_netid,
		    strlen(cbp->cb_newer.cb_callback.cb_location.r_netid) + 1);
	cbp->cb_newer.cb_callback.cb_location.r_netid = netid;

	cbp->cb_newer.cb_ident = cb_ident;

	if (addr && *addr && netid && *netid) {
		cbp->cb_newer.cb_new = TRUE;
		cbp->cb_newer.cb_confirmed = FALSE;
	} else {
		cbp->cb_newer.cb_new = FALSE;
		cbp->cb_newer.cb_confirmed = FALSE;
	}

	mutex_exit(cbp->cb_lock);
}

/*
 * The server uses this when processing SETCLIENTID_CONFIRM.  Callback
 * information may have been provided on SETCLIENTID and this call
 * marks that information as confirmed and then starts a thread to
 * test the callback path.
 */
void
rfs4_deleg_cb_check(rfs4_client_t *cp)
{
	if (cp->cbinfo.cb_newer.cb_new == FALSE)
		return;

	cp->cbinfo.cb_newer.cb_confirmed = TRUE;

	rfs4_dbe_hold(cp->dbe);	/* hold the client struct for thread */

	(void) thread_create(NULL, 0, rfs4_do_cb_null, cp, 0, &p0, TS_RUN,
	    minclsyspri);
}
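
/*
 * Informational note (not part of the original source): the two
 * routines above implement the SETCLIENTID/SETCLIENTID_CONFIRM
 * handshake for callback information.  rfs4_client_setcb() stages the
 * new address under cb_newer with cb_confirmed == FALSE; the later
 * SETCLIENTID_CONFIRM calls rfs4_deleg_cb_check(), which marks the
 * staged info confirmed and spawns rfs4_do_cb_null() to probe the
 * path before it is trusted for delegation callbacks.
 */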

static void
rfs4args_cb_recall_free(nfs_cb_argop4 *argop)
{
	CB_RECALL4args *rec_argp;

	rec_argp = &argop->nfs_cb_argop4_u.opcbrecall;
	if (rec_argp->fh.nfs_fh4_val)
		kmem_free(rec_argp->fh.nfs_fh4_val, rec_argp->fh.nfs_fh4_len);
}

/* ARGSUSED */
static void
rfs4args_cb_getattr_free(nfs_cb_argop4 *argop)
{
	CB_GETATTR4args *argp;

	argp = &argop->nfs_cb_argop4_u.opcbgetattr;
	if (argp->fh.nfs_fh4_val)
		kmem_free(argp->fh.nfs_fh4_val, argp->fh.nfs_fh4_len);
}

static void
rfs4freeargres(CB_COMPOUND4args *args, CB_COMPOUND4res *resp)
{
	int i, arglen;
	nfs_cb_argop4 *argop;

	/*
	 * First free any special args alloc'd for specific ops.
	 */
	arglen = args->array_len;
	argop = args->array;
	for (i = 0; i < arglen; i++, argop++) {

		switch (argop->argop) {
		case OP_CB_RECALL:
			rfs4args_cb_recall_free(argop);
			break;

		case OP_CB_GETATTR:
			rfs4args_cb_getattr_free(argop);
			break;

		default:
			return;
		}
	}

	if (args->tag.utf8string_len > 0)
		UTF8STRING_FREE(args->tag)

	kmem_free(args->array, arglen * sizeof (nfs_cb_argop4));
	if (resp)
		(void) xdr_free(xdr_CB_COMPOUND4res, (caddr_t)resp);
}

/*
 * General callback routine for the server to the client.
 */
static enum clnt_stat
rfs4_do_callback(rfs4_client_t *cp, CB_COMPOUND4args *args,
    CB_COMPOUND4res *res, struct timeval timeout)
{
	rfs4_cbinfo_t *cbp;
	CLIENT *ch;
	/* start with this in case cb_getch() fails */
	enum clnt_stat stat = RPC_FAILED;

	res->tag.utf8string_val = NULL;
	res->array = NULL;

retry:
	cbp = rfs4_cbinfo_hold(cp);
	if (cbp == NULL)
		return (stat);

	/* get a client handle */
	if ((ch = rfs4_cb_getch(cbp)) != NULL) {
		/*
		 * reset the cb_ident since it may have changed in
		 * rfs4_cbinfo_hold()
		 */
		args->callback_ident = cbp->cb_ident;

		stat = clnt_call(ch, CB_COMPOUND, xdr_CB_COMPOUND4args_srv,
		    (caddr_t)args, xdr_CB_COMPOUND4res,
		    (caddr_t)res, timeout);

		/* free client handle */
		rfs4_cb_freech(cbp, ch, FALSE);
	}

	/*
	 * If the rele says that there may be new callback info then
	 * retry this sequence and it may succeed as a result of the
	 * new callback path
	 */
	if (rfs4_cbinfo_rele(cbp,
	    (stat == RPC_SUCCESS ? CB_NOCHANGE : CB_FAILED)) == TRUE)
		goto retry;

	return (stat);
}

/*
 * Used by the NFSv4 server to get attributes for a file while
 * handling the case where a file has been write delegated.  For the
 * time being, VOP_GETATTR() is called and CB_GETATTR processing is
 * not undertaken.  This call site is maintained in case the server is
 * updated in the future to handle write delegation space guarantees.
 */
nfsstat4
rfs4_vop_getattr(vnode_t *vp, vattr_t *vap, int flag, cred_t *cr)
{
	uint_t mask;
	int error;

	mask = vap->va_mask;
	error = VOP_GETATTR(vp, vap, flag, cr);
	/*
	 * Some file systems clobber va_mask.  It is probably wrong of
	 * them to do so, nonetheless we practice defensive coding.
	 * See bug id 4276830.
	 */
	vap->va_mask = mask;
	return (puterrno4(error));
}

/*
 * This is used everywhere in the v2/v3 server to allow the
 * integration of all NFS versions and the support of delegation.  For
 * now, just call the VOP_GETATTR().  If the NFSv4 server is enhanced
 * in the future to provide space guarantees for write delegations
 * then this call site should be expanded to interact with the client.
 */
int
rfs4_delegated_getattr(vnode_t *vp, vattr_t *vap, int flag, cred_t *cr)
{
	return (VOP_GETATTR(vp, vap, flag, cr));
}

/*
 * Place the actual cb_recall otw call to client.
 */
static void
rfs4_do_cb_recall(rfs4_deleg_state_t *dsp, bool_t trunc)
{
	CB_COMPOUND4args cb4_args;
	CB_COMPOUND4res cb4_res;
	CB_RECALL4args *rec_argp;
	nfs_cb_argop4 *argop;
	int numops;
	int argoplist_size;
	struct timeval timeout;
	nfs_fh4 *fhp;
	enum clnt_stat call_stat;

	/*
	 * set up the compound args
	 */
	numops = 1;	/* CB_RECALL only */

	argoplist_size = numops * sizeof (nfs_cb_argop4);
	argop = kmem_zalloc(argoplist_size, KM_SLEEP);
	argop->argop = OP_CB_RECALL;
	rec_argp = &argop->nfs_cb_argop4_u.opcbrecall;

	(void) str_to_utf8("cb_recall", &cb4_args.tag);
	cb4_args.minorversion = CB4_MINORVERSION;
	/* cb4_args.callback_ident is set in rfs4_do_callback() */
	cb4_args.array_len = numops;
	cb4_args.array = argop;

	/*
	 * fill in the args struct
	 */
	bcopy(&dsp->delegid.stateid, &rec_argp->stateid, sizeof (stateid4));
	rec_argp->truncate = trunc;

	fhp = &dsp->finfo->filehandle;
	rec_argp->fh.nfs_fh4_val = kmem_alloc(sizeof (char) *
	    fhp->nfs_fh4_len, KM_SLEEP);
	nfs_fh4_copy(fhp, &rec_argp->fh);

	/* Keep track of when we did this for observability */
	dsp->time_recalled = gethrestime_sec();

	/*
	 * Set up the timeout for the callback and make the actual call.
	 * Timeout will be 80% of the lease period for this server.
	 */
	timeout.tv_sec = (rfs4_lease_time * 80) / 100;
	timeout.tv_usec = 0;

	call_stat = rfs4_do_callback(dsp->client, &cb4_args, &cb4_res, timeout);

	if (call_stat != RPC_SUCCESS || cb4_res.status != NFS4_OK) {
		rfs4_revoke_deleg(dsp);
	}

	rfs4freeargres(&cb4_args, &cb4_res);
}

struct recall_arg {
	rfs4_deleg_state_t *dsp;
	void (*recall)(rfs4_deleg_state_t *, bool_t trunc);
	bool_t trunc;
};

static void
do_recall(struct recall_arg *arg)
{
	rfs4_deleg_state_t *dsp = arg->dsp;
	rfs4_file_t *fp = dsp->finfo;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "nfsv4Recall");

	/*
	 * It is possible that before this thread starts
	 * the client has sent us a return_delegation, and
	 * if that is the case we do not need to send the
	 * recall callback.
	 */
	if (dsp->dtype != OPEN_DELEGATE_NONE) {
		DTRACE_PROBE3(nfss__i__recall,
		    struct recall_arg *, arg,
		    struct rfs4_deleg_state_t *, dsp,
		    struct rfs4_file_t *, fp);

		if (arg->recall)
			(void) (*arg->recall)(dsp, arg->trunc);
	}

	mutex_enter(fp->dinfo->recall_lock);
	/*
	 * Recall count may go negative if the parent thread that is
	 * creating the individual callback threads does not modify
	 * the recall_count field before the callback thread actually
	 * gets a response from the CB_RECALL
	 */
	fp->dinfo->recall_count--;
	if (fp->dinfo->recall_count == 0)
		cv_signal(fp->dinfo->recall_cv);
	mutex_exit(fp->dinfo->recall_lock);

	mutex_enter(&cpr_lock);
	CALLB_CPR_EXIT(&cpr_info);
	mutex_destroy(&cpr_lock);

	rfs4_deleg_state_rele(dsp); /* release the hold for this thread */

	kmem_free(arg, sizeof (struct recall_arg));
}

struct master_recall_args {
	rfs4_file_t *fp;
	void (*recall)(rfs4_deleg_state_t *, bool_t);
	bool_t trunc;
};

static void
do_recall_file(struct master_recall_args *map)
{
	rfs4_file_t *fp = map->fp;
	rfs4_deleg_state_t *dsp;
	struct recall_arg *arg;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;
	int32_t recall_count;

	rfs4_dbe_lock(fp->dbe);

	/* Recall already in progress? */
	mutex_enter(fp->dinfo->recall_lock);
	if (fp->dinfo->recall_count != 0) {
		mutex_exit(fp->dinfo->recall_lock);
		rfs4_dbe_rele_nolock(fp->dbe);
		rfs4_dbe_unlock(fp->dbe);
		kmem_free(map, sizeof (struct master_recall_args));
		return;
	}

	mutex_exit(fp->dinfo->recall_lock);

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "v4RecallFile");

	recall_count = 0;
	for (dsp = fp->delegationlist.next->dsp; dsp != NULL;
	    dsp = dsp->delegationlist.next->dsp) {

		rfs4_dbe_lock(dsp->dbe);
		/*
		 * if this delegation state
		 * is being reaped, skip it
		 */
		if (rfs4_dbe_is_invalid(dsp->dbe)) {
			rfs4_dbe_unlock(dsp->dbe);
			continue;
		}

		/* hold for receiving thread */
		rfs4_dbe_hold(dsp->dbe);
		rfs4_dbe_unlock(dsp->dbe);

		arg = kmem_alloc(sizeof (struct recall_arg), KM_SLEEP);
		arg->recall = map->recall;
		arg->trunc = map->trunc;
		arg->dsp = dsp;

		recall_count++;

		(void) thread_create(NULL, 0, do_recall, arg, 0, &p0, TS_RUN,
		    minclsyspri);
	}

	rfs4_dbe_unlock(fp->dbe);

	mutex_enter(fp->dinfo->recall_lock);
	/*
	 * Recall count may go negative if the parent thread that is
	 * creating the individual callback threads does not modify
	 * the recall_count field before the callback thread actually
	 * gets a response from the CB_RECALL
	 */
	fp->dinfo->recall_count += recall_count;
	while (fp->dinfo->recall_count)
		cv_wait(fp->dinfo->recall_cv, fp->dinfo->recall_lock);

	mutex_exit(fp->dinfo->recall_lock);

	DTRACE_PROBE1(nfss__i__recall_done, rfs4_file_t *, fp);
	rfs4_file_rele(fp);
	kmem_free(map, sizeof (struct master_recall_args));
	mutex_enter(&cpr_lock);
	CALLB_CPR_EXIT(&cpr_info);
	mutex_destroy(&cpr_lock);
}

static void
rfs4_recall_file(rfs4_file_t *fp,
    void (*recall)(rfs4_deleg_state_t *, bool_t trunc),
    bool_t trunc, rfs4_client_t *cp)
{
	struct master_recall_args *args;

	rfs4_dbe_lock(fp->dbe);
	if (fp->dinfo->dtype == OPEN_DELEGATE_NONE) {
		rfs4_dbe_unlock(fp->dbe);
		return;
	}
	rfs4_dbe_hold(fp->dbe);	/* hold for new thread */

	/*
	 * Mark the time we started the recall processing.
	 * If it has been previously recalled, do not reset the
	 * timer since this is used for the revocation decision.
	 */
	if (fp->dinfo->time_recalled == 0)
		fp->dinfo->time_recalled = gethrestime_sec();
	fp->dinfo->ever_recalled = TRUE; /* used for policy decision */
	/* Client causing recall not always available */
	if (cp)
		fp->dinfo->conflicted_client = cp->clientid;

	rfs4_dbe_unlock(fp->dbe);

	args = kmem_alloc(sizeof (struct master_recall_args), KM_SLEEP);
	args->fp = fp;
	args->recall = recall;
	args->trunc = trunc;

	(void) thread_create(NULL, 0, do_recall_file, args, 0, &p0, TS_RUN,
	    minclsyspri);
}

void
rfs4_recall_deleg(rfs4_file_t *fp, bool_t trunc, rfs4_client_t *cp)
{
	time_t elapsed1, elapsed2;

	if (fp->dinfo->time_recalled != 0) {
		elapsed1 = gethrestime_sec() - fp->dinfo->time_recalled;
		elapsed2 = gethrestime_sec() - fp->dinfo->time_lastwrite;
		/* First check to see if a revocation should occur */
		if (elapsed1 > rfs4_lease_time &&
		    elapsed2 > rfs4_lease_time) {
			rfs4_revoke_file(fp);
			return;
		}
		/*
		 * Next check to see if a recall should be done again
		 * so quickly.
		 */
		if (elapsed1 <= ((rfs4_lease_time * 20) / 100))
			return;
	}
	rfs4_recall_file(fp, rfs4_do_cb_recall, trunc, cp);
}
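
/*
 * Worked example of the timing policy above (illustrative, not part
 * of the original source; assumes rfs4_lease_time == 90 seconds):
 *
 *	- a repeat recall attempt within 18s (20% of the lease) of the
 *	  first recall is suppressed;
 *	- if more than 90s have elapsed both since the recall started
 *	  and since the client last wrote the file, the delegation is
 *	  revoked via rfs4_revoke_file().
 */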

/*
 * rfs4_check_recall is called from rfs4_do_open to determine if the
 * current open conflicts with the delegation.
 * Return true if a recall is needed, otherwise false.
 * Assumes entry locks for sp and sp->finfo are held.
 */
bool_t
rfs4_check_recall(rfs4_state_t *sp, uint32_t access)
{
	open_delegation_type4 dtype = sp->finfo->dinfo->dtype;

	switch (dtype) {
	case OPEN_DELEGATE_NONE:
		/* Not currently delegated so there is nothing to do */
		return (FALSE);
	case OPEN_DELEGATE_READ:
		/*
		 * If the access is only asking for READ then there is
		 * no conflict and nothing to do.  If it is asking
		 * for write, then there will be conflict and the read
		 * delegation should be recalled.
		 */
		if (access == OPEN4_SHARE_ACCESS_READ)
			return (FALSE);
		else
			return (TRUE);
	case OPEN_DELEGATE_WRITE:
		/* Check to see if this client has the delegation */
		return (rfs4_is_deleg(sp));
	}

	return (FALSE);
}

/*
 * Return the "best" allowable delegation available given the current
 * delegation type and the desired access and deny modes on the file.
 * At the point that this routine is called we know that the access and
 * deny modes are consistent with the file modes.
 */
static open_delegation_type4
rfs4_check_delegation(rfs4_state_t *sp, rfs4_file_t *fp)
{
	open_delegation_type4 dtype = fp->dinfo->dtype;
	uint32_t access = sp->share_access;
	uint32_t deny = sp->share_deny;
	int readcnt = 0;
	int writecnt = 0;

	switch (dtype) {
	case OPEN_DELEGATE_NONE:
		/*
		 * Determine if more than just this OPEN have the file
		 * open and if so, no delegation may be provided to
		 * the client.
		 */
		if (access & OPEN4_SHARE_ACCESS_WRITE)
			writecnt++;
		if (access & OPEN4_SHARE_ACCESS_READ)
			readcnt++;

		if (fp->access_read > readcnt || fp->access_write > writecnt)
			return (OPEN_DELEGATE_NONE);

		/*
		 * If the client is going to write, or if the client
		 * has exclusive access, return a write delegation.
		 */
		if ((access & OPEN4_SHARE_ACCESS_WRITE) ||
		    (deny & (OPEN4_SHARE_DENY_READ | OPEN4_SHARE_DENY_WRITE)))
			return (OPEN_DELEGATE_WRITE);
		/*
		 * If we don't want to write or we haven't denied read
		 * access to others, return a read delegation.
		 */
		if ((access & ~OPEN4_SHARE_ACCESS_WRITE) ||
		    (deny & ~OPEN4_SHARE_DENY_READ))
			return (OPEN_DELEGATE_READ);

		/* Shouldn't get here */
		return (OPEN_DELEGATE_NONE);

	case OPEN_DELEGATE_READ:
		/*
		 * If the file is delegated for read but we want to
		 * write or deny others the ability to read, then we
		 * can't delegate the file.  We shouldn't get here
		 * since the delegation should have been recalled
		 * already.
		 */
		if ((access & OPEN4_SHARE_ACCESS_WRITE) ||
		    (deny & OPEN4_SHARE_DENY_READ))
			return (OPEN_DELEGATE_NONE);
		return (OPEN_DELEGATE_READ);

	case OPEN_DELEGATE_WRITE:
		return (OPEN_DELEGATE_WRITE);
	}

	/* Shouldn't get here */
	return (OPEN_DELEGATE_NONE);
}
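
/*
 * Rough decision summary for rfs4_check_delegation() (informational,
 * not part of the original source):
 *
 *	current dtype	request				result
 *	-------------	-------				------
 *	NONE		other opens on the file		NONE
 *	NONE		write access, or deny rd/wr	WRITE
 *	NONE		otherwise (e.g. read access)	READ
 *	READ		write access or deny read	NONE
 *	READ		otherwise			READ
 *	WRITE		any				WRITE
 */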

/*
 * Given the desired delegation type and the "history" of the file
 * determine the actual delegation type to return.
 */
static open_delegation_type4
rfs4_delegation_policy(open_delegation_type4 dtype,
    rfs4_dinfo_t *dinfo, clientid4 cid)
{
	time_t elapsed;

	if (rfs4_deleg_policy != SRV_NORMAL_DELEGATE)
		return (OPEN_DELEGATE_NONE);

	/*
	 * Has this file/delegation ever been recalled?  If not then
	 * no further checks for a delegation race need to be done.
	 * However if a recall has occurred, then check to see if a
	 * client has caused its own delegation recall to occur.  If
	 * not, then has a delegation for this file been returned
	 * recently?  If so, then do not assign a new delegation to
	 * avoid a "delegation race" between the original client and
	 * the new/conflicting client.
	 */
	if (dinfo->ever_recalled == TRUE) {
		if (dinfo->conflicted_client != cid) {
			elapsed = gethrestime_sec() - dinfo->time_returned;
			if (elapsed < rfs4_lease_time)
				return (OPEN_DELEGATE_NONE);
		}
	}

	/* Limit the number of read grants */
	if (dtype == OPEN_DELEGATE_READ &&
	    dinfo->rdgrants > MAX_READ_DELEGATIONS)
		return (OPEN_DELEGATE_NONE);

	/*
	 * Should consider limiting total number of read/write
	 * delegations the server will permit.
	 */

	return (dtype);
}

/*
 * Try to grant a delegation for an open given the state.  The routine
 * returns the delegation type granted.  This could be OPEN_DELEGATE_NONE.
 *
 * The state and associated file entry must be locked.
 */
rfs4_deleg_state_t *
rfs4_grant_delegation(delegreq_t dreq, rfs4_state_t *sp, int *recall)
{
	rfs4_file_t *fp = sp->finfo;
	open_delegation_type4 dtype;
	int no_delegation;

	ASSERT(rfs4_dbe_islocked(sp->dbe));
	ASSERT(rfs4_dbe_islocked(fp->dbe));

	/* Is the server even providing delegations? */
	if (rfs4_deleg_policy == SRV_NEVER_DELEGATE || dreq == DELEG_NONE)
		return (NULL);

	/* Check to see if delegations have been temporarily disabled */
	mutex_enter(&rfs4_deleg_lock);
	no_delegation = rfs4_deleg_disabled;
	mutex_exit(&rfs4_deleg_lock);

	if (no_delegation)
		return (NULL);

	/* Don't grant a delegation if a deletion is impending. */
	if (fp->dinfo->hold_grant > 0) {
		return (NULL);
	}

	/*
	 * Don't grant a delegation if there are any lock manager
	 * (NFSv2/v3) locks for the file.  This is a bit of a hack (e.g.,
	 * if there are only read locks we should be able to grant a
	 * read-only delegation), but it's good enough for now.
	 *
	 * MT safety: the lock manager checks for conflicting delegations
	 * before processing a lock request.  That check will block until
	 * we are done here.  So if the lock manager acquires a lock after
	 * we decide to grant the delegation, the delegation will get
	 * immediately recalled (if there's a conflict), so we're safe.
	 */
	if (lm_vp_active(fp->vp)) {
		return (NULL);
	}

	/*
	 * Based on the type of delegation request passed in, take the
	 * appropriate action (DELEG_NONE is handled above)
	 */
	switch (dreq) {

	case DELEG_READ:
	case DELEG_WRITE:
		/*
		 * The server "must" grant the delegation in this case.
		 * Client is using open previous
		 */
		dtype = (open_delegation_type4)dreq;
		*recall = 1;
		break;
	case DELEG_ANY:
		/*
		 * If a valid callback path does not exist, no delegation may
		 * be granted.
		 */
		if (sp->owner->client->cbinfo.cb_state != CB_OK)
			return (NULL);

		/*
		 * If the original operation which caused time_rm_delayed
		 * to be set hasn't been retried and completed for one
		 * full lease period, clear it and allow delegations to
		 * get granted again.
		 */
		if (fp->dinfo->time_rm_delayed > 0 &&
		    gethrestime_sec() >
		    fp->dinfo->time_rm_delayed + rfs4_lease_time)
			fp->dinfo->time_rm_delayed = 0;

		/*
		 * If we are waiting for a delegation to be returned then
		 * don't delegate this file.  We do this for correctness as
		 * well as if the file is being recalled we would likely
		 * recall this file again.
		 */

		if (fp->dinfo->time_recalled != 0 ||
		    fp->dinfo->time_rm_delayed != 0)
			return (NULL);

		/* Get the "best" delegation candidate */
		dtype = rfs4_check_delegation(sp, fp);

		if (dtype == OPEN_DELEGATE_NONE)
			return (NULL);

		/*
		 * Based on policy and the history of the file get the
		 * actual delegation.
		 */
		dtype = rfs4_delegation_policy(dtype, fp->dinfo,
		    sp->owner->client->clientid);

		if (dtype == OPEN_DELEGATE_NONE)
			return (NULL);
		break;
	default:
		return (NULL);
	}

	/* set the delegation for the state */
	return (rfs4_deleg_state(sp, dtype, recall));
}

void
rfs4_set_deleg_response(rfs4_deleg_state_t *dsp, open_delegation4 *dp,
    nfsace4 *ace, int recall)
{
	open_write_delegation4 *wp;
	open_read_delegation4 *rp;
	nfs_space_limit4 *spl;
	nfsace4 nace;

	/*
	 * We need to allocate a new copy of the who string.
	 * This string will be freed by the rfs4_op_open dis_resfree
	 * routine.  We need to do this allocation since replays will
	 * be allocated and rfs4_compound can't tell the difference
	 * between a replay and an initial open.  N.B. if an ace is
	 * passed in, it is the caller's responsibility to free it.
	 */

	if (ace == NULL) {
		/*
		 * Default is to deny all access, the client will have
		 * to contact the server.  XXX Do we want to actually
		 * set a deny for every one, or do we simply want to
		 * construct an entity that will match no one?
		 */
		nace.type = ACE4_ACCESS_DENIED_ACE_TYPE;
		nace.flag = 0;
		nace.access_mask = ACE4_VALID_MASK_BITS;
		(void) str_to_utf8(ACE4_WHO_EVERYONE, &nace.who);
	} else {
		nace.type = ace->type;
		nace.flag = ace->flag;
		nace.access_mask = ace->access_mask;
		(void) utf8_copy(&ace->who, &nace.who);
	}

	dp->delegation_type = dsp->dtype;

	switch (dsp->dtype) {
	case OPEN_DELEGATE_NONE:
		break;
	case OPEN_DELEGATE_READ:
		rp = &dp->open_delegation4_u.read;
		rp->stateid = dsp->delegid.stateid;
		rp->recall = (bool_t)recall;
		rp->permissions = nace;
		break;
	case OPEN_DELEGATE_WRITE:
		wp = &dp->open_delegation4_u.write;
		wp->stateid = dsp->delegid.stateid;
		wp->recall = (bool_t)recall;
		spl = &wp->space_limit;
		spl->limitby = NFS_LIMIT_SIZE;
		spl->nfs_space_limit4_u.filesize = 0;
		wp->permissions = nace;
		break;
	}
}

/*
 * Check if the file is delegated via the provided file struct.
 * Return TRUE if it is delegated.  This is intended for use by
 * the v4 server.  The v2/v3 server code should use rfs4_check_delegated().
 *
 * Note that if the file is found to have a delegation, it is
 * recalled, unless the clientid of the caller matches the clientid of the
 * delegation.  If the caller has specified do_delay, there is a slight
 * delay inserted in the hopes that the delegation will be returned
 * quickly.
 */
bool_t
rfs4_check_delegated_byfp(int mode, rfs4_file_t *fp,
    bool_t trunc, bool_t do_delay, bool_t is_rm, clientid4 *cp)
{
	rfs4_deleg_state_t *dsp;

	/* Is delegation enabled? */
	if (rfs4_deleg_policy == SRV_NEVER_DELEGATE)
		return (FALSE);

	/* do we have a delegation on this file? */
	rfs4_dbe_lock(fp->dbe);
	if (fp->dinfo->dtype == OPEN_DELEGATE_NONE) {
		if (is_rm)
			fp->dinfo->hold_grant++;
		rfs4_dbe_unlock(fp->dbe);
		return (FALSE);
	}
	/*
	 * do we have a write delegation on this file or are we
	 * requesting write access to a file with any type of existing
	 * delegation?
	 */
	if (mode == FWRITE || fp->dinfo->dtype == OPEN_DELEGATE_WRITE) {
		if (cp != NULL) {
			dsp = fp->delegationlist.next->dsp;
			if (dsp == NULL) {
				rfs4_dbe_unlock(fp->dbe);
				return (FALSE);
			}
			/*
			 * Does the requestor already own the delegation?
			 */
			if (dsp->client->clientid == *(cp)) {
				rfs4_dbe_unlock(fp->dbe);
				return (FALSE);
			}
		}

		rfs4_dbe_unlock(fp->dbe);
		rfs4_recall_deleg(fp, trunc, NULL);

		if (!do_delay) {
			rfs4_dbe_lock(fp->dbe);
			fp->dinfo->time_rm_delayed = gethrestime_sec();
			rfs4_dbe_unlock(fp->dbe);
			return (TRUE);
		}

		delay(NFS4_DELEGATION_CONFLICT_DELAY);

		rfs4_dbe_lock(fp->dbe);
		if (fp->dinfo->dtype != OPEN_DELEGATE_NONE) {
			fp->dinfo->time_rm_delayed = gethrestime_sec();
			rfs4_dbe_unlock(fp->dbe);
			return (TRUE);
		}
	}
	if (is_rm)
		fp->dinfo->hold_grant++;
	rfs4_dbe_unlock(fp->dbe);
	return (FALSE);
}

/*
 * Check if the file is delegated in the case of a v2 or v3 access.
 * Return TRUE if it is delegated which in turn means that v2 should
 * drop the request and in the case of v3 JUKEBOX should be returned.
 */
bool_t
rfs4_check_delegated(int mode, vnode_t *vp, bool_t trunc)
{
	rfs4_file_t *fp;
	bool_t create = FALSE;
	bool_t rc = FALSE;

	rfs4_hold_deleg_policy();

	/* Is delegation enabled? */
	if (rfs4_deleg_policy != SRV_NEVER_DELEGATE) {
		fp = rfs4_findfile(vp, NULL, &create);
		if (fp != NULL) {
			if (rfs4_check_delegated_byfp(mode, fp, trunc,
			    TRUE, FALSE, NULL)) {
				rc = TRUE;
			}
			rfs4_file_rele(fp);
		}
	}
	rfs4_rele_deleg_policy();
	return (rc);
}

/*
 * Release a hold on the hold_grant counter which
 * prevents delegation from being granted while a remove
 * or a rename is in progress.
 */
void
rfs4_clear_dont_grant(rfs4_file_t *fp)
{
	if (rfs4_deleg_policy == SRV_NEVER_DELEGATE)
		return;
	rfs4_dbe_lock(fp->dbe);
	ASSERT(fp->dinfo->hold_grant > 0);
	fp->dinfo->hold_grant--;
	fp->dinfo->time_rm_delayed = 0;
	rfs4_dbe_unlock(fp->dbe);
}
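
/*
 * Sketch of the remove/rename pattern the routines above support
 * (illustrative, not part of the original source):
 *
 *	if (rfs4_check_delegated_byfp(FWRITE, fp, trunc,
 *	    TRUE, TRUE, NULL)) {
 *		// delegation outstanding: delay or JUKEBOX the request
 *	} else {
 *		// hold_grant was bumped; no new delegations are
 *		// granted while the remove/rename proceeds
 *		// ... perform the operation ...
 *		rfs4_clear_dont_grant(fp);	// drop the hold
 *	}
 */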

/*
 * State support for delegation.
 * Set the state delegation type for this state.
 * This routine is called from open via rfs4_grant_delegation and the entry
 * locks on sp and sp->finfo are assumed.
 */
static rfs4_deleg_state_t *
rfs4_deleg_state(rfs4_state_t *sp, open_delegation_type4 dtype, int *recall)
{
	rfs4_file_t *fp = sp->finfo;
	bool_t create = TRUE;
	rfs4_deleg_state_t *dsp;
	vnode_t *vp;
	int open_prev = *recall;

	ASSERT(rfs4_dbe_islocked(sp->dbe));
	ASSERT(rfs4_dbe_islocked(fp->dbe));

	/* Shouldn't happen */
	if (fp->dinfo->recall_count != 0 ||
	    (fp->dinfo->dtype == OPEN_DELEGATE_READ &&
	    dtype != OPEN_DELEGATE_READ)) {
		return (NULL);
	}

	/* Unlock to avoid deadlock */
	rfs4_dbe_unlock(fp->dbe);
	rfs4_dbe_unlock(sp->dbe);

	dsp = rfs4_finddeleg(sp, &create);

	rfs4_dbe_lock(sp->dbe);
	rfs4_dbe_lock(fp->dbe);

	if (dsp == NULL)
		return (NULL);

	/*
	 * It is possible that since we dropped the lock
	 * in order to call finddeleg, the rfs4_file_t
	 * was marked such that we should not grant a
	 * delegation, if so bail out.
	 */
	if (fp->dinfo->hold_grant > 0) {
		rfs4_deleg_state_rele(dsp);
		return (NULL);
	}

	if (create == FALSE) {
		if (sp->owner->client == dsp->client &&
		    dsp->dtype == dtype) {
			return (dsp);
		} else {
			rfs4_deleg_state_rele(dsp);
			return (NULL);
		}
	}

	/*
	 * Check that this file has not been delegated to another
	 * client
	 */
	if (fp->dinfo->recall_count != 0 ||
	    fp->dinfo->dtype == OPEN_DELEGATE_WRITE ||
	    (fp->dinfo->dtype == OPEN_DELEGATE_READ &&
	    dtype != OPEN_DELEGATE_READ)) {
		rfs4_deleg_state_rele(dsp);
		return (NULL);
	}

	vp = fp->vp;
	/* vnevent_support returns 0 if file system supports vnevents */
	if (vnevent_support(vp)) {
		rfs4_deleg_state_rele(dsp);
		return (NULL);
	}

	*recall = 0;
	if (dtype == OPEN_DELEGATE_READ) {
		if (vn_is_opened(vp, V_WRITE) || vn_is_mapped(vp, V_WRITE)) {
			if (open_prev) {
				*recall = 1;
			} else {
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
		(void) fem_install(vp, deleg_rdops, (void *)fp, OPUNIQ,
		    rfs4_mon_hold, rfs4_mon_rele);
		if (vn_is_opened(vp, V_WRITE) || vn_is_mapped(vp, V_WRITE)) {
			if (open_prev) {
				*recall = 1;
			} else {
				(void) fem_uninstall(vp, deleg_rdops,
				    (void *)fp);
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
	} else { /* WRITE */
		if (vn_is_opened(vp, V_RDORWR) || vn_is_mapped(vp, V_RDORWR)) {
			if (open_prev) {
				*recall = 1;
			} else {
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
		(void) fem_install(vp, deleg_wrops, (void *)fp, OPUNIQ,
		    rfs4_mon_hold, rfs4_mon_rele);
		if (vn_is_opened(vp, V_RDORWR) || vn_is_mapped(vp, V_RDORWR)) {
			if (open_prev) {
				*recall = 1;
			} else {
				(void) fem_uninstall(vp, deleg_wrops,
				    (void *)fp);
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
	}
	/* Place on delegation list for file */
	insque(&dsp->delegationlist, fp->delegationlist.prev);

	dsp->dtype = fp->dinfo->dtype = dtype;

	/* Update delegation stats for this file */
	fp->dinfo->time_lastgrant = gethrestime_sec();

	/* reset since this is a new delegation */
	fp->dinfo->conflicted_client = 0;
	fp->dinfo->ever_recalled = FALSE;

	if (dtype == OPEN_DELEGATE_READ)
		fp->dinfo->rdgrants++;
	else
		fp->dinfo->wrgrants++;

	return (dsp);
}
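
/*
 * Note on the install/re-check sequence above (informational, not
 * part of the original source): the open/mmap test appears to be
 * deliberately repeated after fem_install().  A conflicting open can
 * race in between the first check and the monitor installation; once
 * the monitor is in place any new conflict will trigger a recall, so
 * a second check after installation closes the window.
 */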

/*
 * State routine for the server when a delegation is returned.
 */
void
rfs4_return_deleg(rfs4_deleg_state_t *dsp, bool_t revoked)
{
	rfs4_file_t *fp = dsp->finfo;
	open_delegation_type4 dtypewas;

	rfs4_dbe_lock(fp->dbe);
	/* Remove state from recall list */

	remque(&dsp->delegationlist);
	dsp->delegationlist.next = dsp->delegationlist.prev =
	    &dsp->delegationlist;

	if (&fp->delegationlist == fp->delegationlist.next) {
		dtypewas = fp->dinfo->dtype;
		fp->dinfo->dtype = OPEN_DELEGATE_NONE;
		rfs4_dbe_cv_broadcast(fp->dbe);

		/* if file system was unshared, the vp will be NULL */
		if (fp->vp != NULL) {
			if (dtypewas == OPEN_DELEGATE_READ)
				(void) fem_uninstall(fp->vp, deleg_rdops,
				    (void *)fp);
			else
				(void) fem_uninstall(fp->vp, deleg_wrops,
				    (void *)fp);
		}
	}

	switch (dsp->dtype) {
	case OPEN_DELEGATE_READ:
		fp->dinfo->rdgrants--;
		break;
	case OPEN_DELEGATE_WRITE:
		fp->dinfo->wrgrants--;
		break;
	default:
		break;
	}

	/* used in the policy decision */
	fp->dinfo->time_returned = gethrestime_sec();

	/*
	 * reset the time_recalled field so future delegations are not
	 * accidentally revoked
	 */
	if ((fp->dinfo->rdgrants + fp->dinfo->wrgrants) == 0)
		fp->dinfo->time_recalled = 0;

	rfs4_dbe_unlock(fp->dbe);

	rfs4_dbe_lock(dsp->dbe);

	dsp->dtype = OPEN_DELEGATE_NONE;

	if (revoked == TRUE)
		dsp->time_revoked = gethrestime_sec();

	rfs4_dbe_invalidate(dsp->dbe);

	rfs4_dbe_unlock(dsp->dbe);

	if (revoked == TRUE) {
		rfs4_dbe_lock(dsp->client->dbe);
		dsp->client->deleg_revoked++;	/* observability */
		rfs4_dbe_unlock(dsp->client->dbe);
	}
}

static void
rfs4_revoke_deleg(rfs4_deleg_state_t *dsp)
{
	rfs4_return_deleg(dsp, TRUE);
}

static void
rfs4_revoke_file(rfs4_file_t *fp)
{
	rfs4_deleg_state_t *dsp;

	/*
	 * The lock for rfs4_file_t must be held when traversing the
	 * delegation list but that lock needs to be released to call
	 * rfs4_revoke_deleg().
	 * This for loop is set up to check the list for being empty
	 * and to lock the rfs4_file_t struct on init and end.
	 */
	for (rfs4_dbe_lock(fp->dbe);
	    &fp->delegationlist != fp->delegationlist.next;
	    rfs4_dbe_lock(fp->dbe)) {

		dsp = fp->delegationlist.next->dsp;
		rfs4_dbe_hold(dsp->dbe);
		rfs4_dbe_unlock(fp->dbe);
		rfs4_revoke_deleg(dsp);
		rfs4_deleg_state_rele(dsp);
	}
	rfs4_dbe_unlock(fp->dbe);
}

/*
 * A delegation is assumed to be present on the file associated with
 * "state".  Check to see if the delegation is associated with the
 * same client as referenced by "state".  If it is not, TRUE is
 * returned.  If the delegation DOES match the client (or no
 * delegation is present), return FALSE.
 * Assume the state entry and file entry are locked.
 */
bool_t
rfs4_is_deleg(rfs4_state_t *state)
{
	rfs4_deleg_state_t *dsp;
	rfs4_file_t *fp = state->finfo;
	rfs4_client_t *cp = state->owner->client;

	ASSERT(rfs4_dbe_islocked(fp->dbe));
	for (dsp = fp->delegationlist.next->dsp; dsp != NULL;
	    dsp = dsp->delegationlist.next->dsp) {
		if (cp != dsp->client) {
			return (TRUE);
		}
	}
	return (FALSE);
}

void
rfs4_disable_delegation(void)
{
	mutex_enter(&rfs4_deleg_lock);
	rfs4_deleg_disabled++;
	mutex_exit(&rfs4_deleg_lock);
}

void
rfs4_enable_delegation(void)
{
	mutex_enter(&rfs4_deleg_lock);
	ASSERT(rfs4_deleg_disabled > 0);
	rfs4_deleg_disabled--;
	mutex_exit(&rfs4_deleg_lock);
}

void
rfs4_mon_hold(void *arg)
{
	rfs4_file_t *fp = arg;

	rfs4_dbe_hold(fp->dbe);
}

void
rfs4_mon_rele(void *arg)
{
	rfs4_file_t *fp = arg;

	rfs4_dbe_rele_nolock(fp->dbe);
}