/*-
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners:
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
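/*
 * Illustrative sketch (an assumption drawn from the calls further below,
 * not a separate API): both OpenOwner and LockOwner names are filled in
 * from the Posix pid by nfscl_filllockowner(), e.g.
 *
 *	u_int8_t own[NFSV4CL_LOCKNAMELEN];
 *
 *	nfscl_filllockowner(p->td_proc, own, F_POSIX);
 *
 * so any given owner name is only ever used by one process, which is
 * what makes the serialization argument above hold.
 */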
#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstats newnfsstats;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
#endif	/* !APPLEKEXT */

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(struct nfsclclient *, uint8_t *, int);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
static int nfscl_moveopen(vnode_t, struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t, struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t, u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t, u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
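/*
 * Note (an assumption based on the table layout above): nfscl_cberrmap[]
 * is indexed by the NFSv4 callback operation number, so the entries for
 * CB_GETATTR and CB_RECALL point at the lists of errors that may validly
 * be replied for those operations, with 0 terminating each list.
 */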
/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
APPLESTATIC int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
		MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	if (ret != 0) {
		FREE((caddr_t)nowp, M_NFSCLOWNER);
		if (nop != NULL)
			FREE((caddr_t)nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	nfscl_filllockowner(p->td_proc, own, F_POSIX);
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    newonep);

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 */
	if (lockit)
		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		FREE((caddr_t)nowp, M_NFSCLOWNER);
	if (nop != NULL)
		FREE((caddr_t)nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}
	return (0);
}
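/*
 * Usage sketch for nfscl_open() (illustrative; names other than those in
 * the signature above are hypothetical):
 *
 *	struct nfsclowner *owp;
 *	struct nfsclopen *op;
 *	int newone, ret, error;
 *
 *	error = nfscl_open(vp, nfhp, fhlen, NFSV4OPEN_ACCESSREAD, 1, cred,
 *	    p, &owp, &op, &newone, &ret, 1);
 *	if (error == 0 && ret == NFSCLOPEN_DOOPEN)
 *		... an Open RPC against the server is still needed ...
 *
 * Since lockit was non-zero, the open owner comes back exclusively
 * locked and is released via nfscl_ownerrelease() when the caller is
 * done with it.
 */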
/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			newnfsstats.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			newnfsstats.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				newnfsstats.cllocalopens++;
			} else {
				newnfsstats.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}

/*
 * Called to find/add a delegation to a client.
 */
APPLESTATIC int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		FREE((caddr_t)dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		newnfsstats.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * Delegation already exists, what do we do if a new one??
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			FREE((caddr_t)dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}
/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
APPLESTATIC int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error, done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
		    mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			stateidp->seqid =
			    lp->nfsl_stateid.seqid;
			stateidp->other[0] =
			    lp->nfsl_stateid.other[0];
			stateidp->other[1] =
			    lp->nfsl_stateid.other[1];
			stateidp->other[2] =
			    lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		done = 0;
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (!done && owp != NULL) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
				    (mode & op->nfso_mode) == mode) {
					done = 1;
					break;
				}
			}
			if (!done)
				owp = LIST_NEXT(owp, nfsow_list);
		}
		if (!done) {
			NFSUNLOCKCLSTATE();
			return (ENOENT);
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}
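/*
 * Caller-side sketch for nfscl_getstateid() (illustrative): when the
 * stateid comes from a delegation, *lckpp is set to the delegation's
 * rwlock, whose use count was bumped above so the delegation cannot be
 * returned while the I/O is in progress:
 *
 *	nfsv4stateid_t stateid;
 *	void *lckp = NULL;
 *
 *	error = nfscl_getstateid(vp, nfhp, fhlen, mode, 0, cred, p,
 *	    &stateid, &lckp);
 *	... issue the I/O RPC using stateid ...
 *	if (lckp != NULL)
 *		... drop the use count on the rwlock (the helper for
 *		    this lives outside this excerpt) ...
 */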
/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
    struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfscllockowner *lp;
	int keep_looping;

	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = 1;
	/* Search the client list */
	owp = LIST_FIRST(ohp);
	while (owp != NULL && keep_looping != 0) {
		/* and look for the correct open */
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL && keep_looping != 0) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode) {
				if (lpp != NULL) {
					/* Now look for a matching lockowner. */
					LIST_FOREACH(lp, &op->nfso_lock,
					    nfsl_list) {
						if (!NFSBCMP(lp->nfsl_owner,
						    lockown,
						    NFSV4CL_LOCKNAMELEN)) {
							*lpp = lp;
							rop = op;
							keep_looping = 0;
							break;
						}
					}
				}
				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
				    openown, NFSV4CL_LOCKNAMELEN)) {
					rop = op;
					if (lpp == NULL)
						keep_looping = 0;
				}
				if (rop2 == NULL)
					rop2 = op;
			}
			op = LIST_NEXT(op, nfso_list);
		}
		owp = LIST_NEXT(owp, nfsow_list);
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
APPLESTATIC void
nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
    __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (!unlocked)
		nfscl_lockunlock(&owp->nfsow_rwlock);
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
APPLESTATIC void
nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}
/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * The start_renewthread argument tells nfscl_getcl() to start a renew
 * thread if this creates a new clp.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
APPLESTATIC int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    int start_renewthread, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		MALLOC(newclp, struct nfsclclient *,
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		if (start_renewthread != 0)
			nfscl_start_renewthread(clp);
	} else {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0)
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (!igotlock)
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0 && (mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for MNTK_UNMOUNTF and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		     error == NFSERR_BADSESSION ||
		     error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}
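/*
 * Usage sketch (illustrative): a successful nfscl_getcl() returns *clpp
 * with a reference held, so an external caller pairs it with
 * nfscl_clientrelease() (defined below):
 *
 *	struct nfsclclient *clp;
 *
 *	error = nfscl_getcl(mp, cred, p, 1, &clp);
 *	if (error == 0) {
 *		... use clp ...
 *		nfscl_clientrelease(clp);
 *	}
 */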
/*
 * Get a reference to a clientid and return it, if valid.
 */
APPLESTATIC struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
APPLESTATIC void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}
/*
 * Called when wanting to lock a byte region.
 */
APPLESTATIC int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nlp, struct nfscllockowner *,
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	MALLOC(otherlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	MALLOC(nlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	}
	if (error) {
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
		FREE((caddr_t)otherlop, M_NFSCLLOCK);
		FREE((caddr_t)nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		     (type == F_WRLCK &&
		      (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(&clp->nfsc_owner,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
		FREE((caddr_t)otherlop, M_NFSCLLOCK);
		FREE((caddr_t)nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			newnfsstats.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			newnfsstats.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serial modifications on the lock owner for multiple threads
		 * for the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		FREE((caddr_t)nlop, M_NFSCLLOCK);
	if (otherlop)
		FREE((caddr_t)otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
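/*
 * Outcome sketch for nfscl_getbytelock() (illustrative): if *donelocallyp
 * is set, the range was merged into the local lock list held under a
 * delegation and no Lock RPC is required; otherwise *lpp returns the
 * lock owner with its rwlock held exclusively, and the caller performs
 * the Lock RPC and then releases it, e.g. via nfscl_lockrelease().
 */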
/*
 * Called to unlock a byte range, for LockU.
 */
APPLESTATIC int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			FREE((caddr_t)nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		MALLOC(other_lop, struct nfscllock *,
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					     NFSV4CL_LOCKNAMELEN)) {
						fnd = 1;
						break;
					}
				}
				if (fnd)
					break;
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serial modifications on the lock owner for multiple
		 * threads for the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		FREE((caddr_t)nlop, M_NFSCLLOCK);
	if (other_lop)
		FREE((caddr_t)other_lop, M_NFSCLLOCK);
	return (0);
}
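/*
 * Caller pattern sketch (illustrative): nfscl_relbytelock() is invoked
 * with an incrementing callcnt until it no longer returns a lock owner
 * in *lpp; each owner returned is marked busy via nfsl_inprog so later
 * calls skip it, and nfscl_releasealllocks() (below) clears the marks
 * once the LockU RPC(s) are done.
 */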
/*
 * Release all lockowners marked in progress for this process and file.
 */
APPLESTATIC void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(&lp->nfsl_rwlock);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}
/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
APPLESTATIC int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (lp != NULL) {
					LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
						if (lop->nfslo_first >= end)
							break;
						if (lop->nfslo_end <= off)
							continue;
						if (lop->nfslo_type == F_WRLCK) {
							nfscl_clrelease(clp);
							NFSUNLOCKCLSTATE();
							return (1);
						}
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}
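/*
 * Worked example for the range computation above (illustrative): with
 * l_whence == SEEK_END, l_start == -10 and l_len == 5 on a 100 byte
 * file, off = 100 + (-10) = 90 and end = 95, so bytes [90, 95) are
 * checked; l_len == 0 extends end to NFS64BITSSET (i.e. through EOF).
 */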
/*
 * Release a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Free up an open structure and any associated byte range lock structures.
 */
APPLESTATIC void
nfscl_freeopen(struct nfsclopen *op, int local)
{

	LIST_REMOVE(op, nfso_list);
	nfscl_freealllocks(&op->nfso_lock, local);
	FREE((caddr_t)op, M_NFSCLOPEN);
	if (local)
		newnfsstats.cllocalopens--;
	else
		newnfsstats.clopens--;
}

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
{
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
			panic("nfscllckw");
		nfscl_freelockowner(lp, local);
	}
}

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {
			mustdelete = 1;
			break;
		}
	}

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		dp = NULL;
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
		if (error) {
			mustdelete = 1;
			if (dp != NULL) {
				FREE((caddr_t)dp, M_NFSCLDELEG);
				dp = NULL;
			}
		}
		if (dp != NULL)
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);
	}

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
	if (mustdelete)
		nfscl_freeopen(op, 0);
	return (mustdelete);
}

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{

	LIST_REMOVE(owp, nfsow_list);
	FREE((caddr_t)owp, M_NFSCLOWNER);
	if (local)
		newnfsstats.cllocalopenowners--;
	else
		newnfsstats.clopenowners--;
}

/*
 * Free up a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
	struct nfscllock *lop, *nlop;

	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	}
	FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
	if (local)
		newnfsstats.cllocallockowners--;
	else
		newnfsstats.cllockowners--;
}

/*
 * Free up a byte range lock structure.
 */
APPLESTATIC void
nfscl_freelock(struct nfscllock *lop, int local)
{

	LIST_REMOVE(lop, nfslo_list);
	FREE((caddr_t)lop, M_NFSCLLOCK);
	if (local)
		newnfsstats.cllocallocks--;
	else
		newnfsstats.cllocks--;
}

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1);
		}
		nfscl_freeopenowner(owp, 1);
	}
	nfscl_freealllocks(&dp->nfsdl_lock, 1);
}

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
{

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	FREE((caddr_t)dp, M_NFSCLDELEG);
	newnfsstats.cldelegates--;
	nfscl_delegcnt--;
}
/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		owp = LIST_FIRST(&dp->nfsdl_owner);
		while (owp != NULL) {
			nowp = LIST_NEXT(owp, nfsow_list);
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfsclexp");
				LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
					if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (towp != NULL) {
					/* Merge opens in */
					LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
						if (top->nfso_fhlen == op->nfso_fhlen &&
						    !NFSBCMP(top->nfso_fh, op->nfso_fh,
						    op->nfso_fhlen)) {
							top->nfso_mode |= op->nfso_mode;
							top->nfso_opencnt += op->nfso_opencnt;
							break;
						}
					}
					if (top == NULL) {
						/* Just add the open to the owner list */
						LIST_REMOVE(op, nfso_list);
						op->nfso_own = towp;
						LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
						newnfsstats.cllocalopens--;
						newnfsstats.clopens++;
					}
				} else {
					/* Just add the openowner to the client list */
					LIST_REMOVE(owp, nfsow_list);
					owp->nfsow_clp = clp;
					LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
					newnfsstats.cllocalopenowners--;
					newnfsstats.clopenowners++;
					newnfsstats.cllocalopens--;
					newnfsstats.clopens++;
				}
			}
			owp = nowp;
		}
		if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
			printed = 1;
			printf("nfsv4 expired locks lost\n");
		}
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp);
		dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
		panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}
/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
		}
		owp = nowp;
	}
}

/*
 * Find open/lock owners for processes that have exited.
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;

	NFSPROCLISTLOCK();
	NFSLOCKCLSTATE();
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
			}
		}
		if (nfscl_procdoesntexist(owp->nfsow_owner))
			nfscl_cleanup_common(clp, owp->nfsow_owner);
	}
	NFSUNLOCKCLSTATE();
	NFSPROCLISTUNLOCK();
}

/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
static void
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
{
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;
	int fnd_it;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)
		return;

	fnd_it = 0;
	mylfhp = NULL;
	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
		    lfhp->nfslfh_len))
			mylfhp = lfhp;
		LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
			if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
			    NFSV4CL_LOCKNAMELEN))
				fnd_it = 1;
	}
	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
		return;

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		    M_NOWAIT);
		if (mylfhp == NULL)
			return;
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	}
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
}

static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
APPLESTATIC void
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct ucred *cred;
	int igotlock;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * http://docs.FreeBSD.org/cgi/
	 *     mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
	NFSLOCKCLSTATE();
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		fake_global++;
		NFSUNLOCKCLSTATE();
		NFSLOCKCLSTATE();
	}

	clp = nmp->nm_clp;
	if (clp != NULL) {
		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
			panic("nfscl umount");

		/*
		 * First, handshake with the nfscl renew thread, to terminate
		 * it.
		 */
		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
			    "nfsclumnt", hz);

		/*
		 * Now, get the exclusive lock on the client state, so
		 * that no uses of the state are still in progress.
		 */
		do {
			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} while (!igotlock);
		NFSUNLOCKCLSTATE();

		/*
		 * Free up all the state. It will expire on the server, but
		 * maybe we should do a SetClientId/SetClientIdConfirm so
		 * the server throws it away?
		 */
		LIST_REMOVE(clp, nfsc_list);
		nfscl_delegreturnall(clp, p);
		cred = newnfs_getcred();
		if (NFSHASNFSV4N(nmp)) {
			(void)nfsrpc_destroysession(nmp, clp, cred, p);
			(void)nfsrpc_destroyclient(nmp, clp, cred, p);
		} else
			(void)nfsrpc_setclient(nmp, clp, 0, cred, p);
		nfscl_cleanclient(clp);
		nmp->nm_clp = NULL;
		NFSFREECRED(cred);
		free(clp, M_NFSCLCLIENT);
	} else
		NFSUNLOCKCLSTATE();
}
1875 */ 1876 static void 1877 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p) 1878 { 1879 struct nfsclowner *owp, *nowp; 1880 struct nfsclopen *op, *nop; 1881 struct nfscllockowner *lp, *nlp; 1882 struct nfscllock *lop, *nlop; 1883 struct nfscldeleg *dp, *ndp, *tdp; 1884 struct nfsmount *nmp; 1885 struct ucred *tcred; 1886 struct nfsclopenhead extra_open; 1887 struct nfscldeleghead extra_deleg; 1888 struct nfsreq *rep; 1889 u_int64_t len; 1890 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode; 1891 int i, igotlock = 0, error, trycnt, firstlock, s; 1892 struct nfscllayout *lyp, *nlyp; 1893 1894 /* 1895 * First, lock the client structure, so everyone else will 1896 * block when trying to use state. 1897 */ 1898 NFSLOCKCLSTATE(); 1899 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 1900 do { 1901 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 1902 NFSCLSTATEMUTEXPTR, NULL); 1903 } while (!igotlock); 1904 NFSUNLOCKCLSTATE(); 1905 1906 nmp = clp->nfsc_nmp; 1907 if (nmp == NULL) 1908 panic("nfscl recover"); 1909 1910 /* 1911 * For now, just get rid of all layouts. There may be a need 1912 * to do LayoutCommit Ops with reclaim == true later. 1913 */ 1914 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) 1915 nfscl_freelayout(lyp); 1916 TAILQ_INIT(&clp->nfsc_layout); 1917 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++) 1918 LIST_INIT(&clp->nfsc_layouthash[i]); 1919 1920 trycnt = 5; 1921 do { 1922 error = nfsrpc_setclient(nmp, clp, 1, cred, p); 1923 } while ((error == NFSERR_STALECLIENTID || 1924 error == NFSERR_BADSESSION || 1925 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 1926 if (error) { 1927 nfscl_cleanclient(clp); 1928 NFSLOCKCLSTATE(); 1929 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | 1930 NFSCLFLAGS_RECOVER | NFSCLFLAGS_RECVRINPROG); 1931 wakeup(&clp->nfsc_flags); 1932 nfsv4_unlock(&clp->nfsc_lock, 0); 1933 NFSUNLOCKCLSTATE(); 1934 return; 1935 } 1936 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 1937 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 1938 1939 /* 1940 * Mark requests already queued on the server, so that they don't 1941 * initiate another recovery cycle. Any requests already in the 1942 * queue that handle state information will have the old stale 1943 * clientid/stateid and will get a NFSERR_STALESTATEID, 1944 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server. 1945 * This will be translated to NFSERR_STALEDONTRECOVER when 1946 * R_DONTRECOVER is set. 1947 */ 1948 s = splsoftclock(); 1949 NFSLOCKREQ(); 1950 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) { 1951 if (rep->r_nmp == nmp) 1952 rep->r_flags |= R_DONTRECOVER; 1953 } 1954 NFSUNLOCKREQ(); 1955 splx(s); 1956 1957 /* 1958 * Now, mark all delegations "need reclaim". 1959 */ 1960 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) 1961 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM; 1962 1963 TAILQ_INIT(&extra_deleg); 1964 LIST_INIT(&extra_open); 1965 /* 1966 * Now traverse the state lists, doing Open and Lock Reclaims. 
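 *
 * The nesting below mirrors the state hierarchy:
 *	openowner -> open -> lockowner -> byte range lock
 * Each Open is redone with reclaim == 1, reclaiming a matching
 * delegation at the same time when one is flagged NFSCLDL_NEEDRECLAIM;
 * on success its byte range locks are reclaimed as well, on failure
 * the state is freed.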
1967 */ 1968 tcred = newnfs_getcred(); 1969 owp = LIST_FIRST(&clp->nfsc_owner); 1970 while (owp != NULL) { 1971 nowp = LIST_NEXT(owp, nfsow_list); 1972 owp->nfsow_seqid = 0; 1973 op = LIST_FIRST(&owp->nfsow_open); 1974 while (op != NULL) { 1975 nop = LIST_NEXT(op, nfso_list); 1976 if (error != NFSERR_NOGRACE) { 1977 /* Search for a delegation to reclaim with the open */ 1978 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 1979 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) 1980 continue; 1981 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { 1982 mode = NFSV4OPEN_ACCESSWRITE; 1983 delegtype = NFSV4OPEN_DELEGATEWRITE; 1984 } else { 1985 mode = NFSV4OPEN_ACCESSREAD; 1986 delegtype = NFSV4OPEN_DELEGATEREAD; 1987 } 1988 if ((op->nfso_mode & mode) == mode && 1989 op->nfso_fhlen == dp->nfsdl_fhlen && 1990 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen)) 1991 break; 1992 } 1993 ndp = dp; 1994 if (dp == NULL) 1995 delegtype = NFSV4OPEN_DELEGATENONE; 1996 newnfs_copycred(&op->nfso_cred, tcred); 1997 error = nfscl_tryopen(nmp, NULL, op->nfso_fh, 1998 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen, 1999 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype, 2000 tcred, p); 2001 if (!error) { 2002 /* Handle any replied delegation */ 2003 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE) 2004 || NFSMNT_RDONLY(nmp->nm_mountp))) { 2005 if ((ndp->nfsdl_flags & NFSCLDL_WRITE)) 2006 mode = NFSV4OPEN_ACCESSWRITE; 2007 else 2008 mode = NFSV4OPEN_ACCESSREAD; 2009 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2010 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) 2011 continue; 2012 if ((op->nfso_mode & mode) == mode && 2013 op->nfso_fhlen == dp->nfsdl_fhlen && 2014 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, 2015 op->nfso_fhlen)) { 2016 dp->nfsdl_stateid = ndp->nfsdl_stateid; 2017 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit; 2018 dp->nfsdl_ace = ndp->nfsdl_ace; 2019 dp->nfsdl_change = ndp->nfsdl_change; 2020 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; 2021 if ((ndp->nfsdl_flags & NFSCLDL_RECALL)) 2022 dp->nfsdl_flags |= NFSCLDL_RECALL; 2023 FREE((caddr_t)ndp, M_NFSCLDELEG); 2024 ndp = NULL; 2025 break; 2026 } 2027 } 2028 } 2029 if (ndp != NULL) 2030 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list); 2031 2032 /* and reclaim all byte range locks */ 2033 lp = LIST_FIRST(&op->nfso_lock); 2034 while (lp != NULL) { 2035 nlp = LIST_NEXT(lp, nfsl_list); 2036 lp->nfsl_seqid = 0; 2037 firstlock = 1; 2038 lop = LIST_FIRST(&lp->nfsl_lock); 2039 while (lop != NULL) { 2040 nlop = LIST_NEXT(lop, nfslo_list); 2041 if (lop->nfslo_end == NFS64BITSSET) 2042 len = NFS64BITSSET; 2043 else 2044 len = lop->nfslo_end - lop->nfslo_first; 2045 if (error != NFSERR_NOGRACE) 2046 error = nfscl_trylock(nmp, NULL, 2047 op->nfso_fh, op->nfso_fhlen, lp, 2048 firstlock, 1, lop->nfslo_first, len, 2049 lop->nfslo_type, tcred, p); 2050 if (error != 0) 2051 nfscl_freelock(lop, 0); 2052 else 2053 firstlock = 0; 2054 lop = nlop; 2055 } 2056 /* If no locks, but a lockowner, just delete it. */ 2057 if (LIST_EMPTY(&lp->nfsl_lock)) 2058 nfscl_freelockowner(lp, 0); 2059 lp = nlp; 2060 } 2061 } else { 2062 nfscl_freeopen(op, 0); 2063 } 2064 } 2065 op = nop; 2066 } 2067 owp = nowp; 2068 } 2069 2070 /* 2071 * Now, try and get any delegations not yet reclaimed by cobbling 2072 * to-gether an appropriate open. 
2073 */ 2074 nowp = NULL; 2075 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2076 while (dp != NULL) { 2077 ndp = TAILQ_NEXT(dp, nfsdl_list); 2078 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) { 2079 if (nowp == NULL) { 2080 MALLOC(nowp, struct nfsclowner *, 2081 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK); 2082 /* 2083 * Name must be as long an largest possible 2084 * NFSV4CL_LOCKNAMELEN. 12 for now. 2085 */ 2086 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner, 2087 NFSV4CL_LOCKNAMELEN); 2088 LIST_INIT(&nowp->nfsow_open); 2089 nowp->nfsow_clp = clp; 2090 nowp->nfsow_seqid = 0; 2091 nowp->nfsow_defunct = 0; 2092 nfscl_lockinit(&nowp->nfsow_rwlock); 2093 } 2094 nop = NULL; 2095 if (error != NFSERR_NOGRACE) { 2096 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) + 2097 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK); 2098 nop->nfso_own = nowp; 2099 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { 2100 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE; 2101 delegtype = NFSV4OPEN_DELEGATEWRITE; 2102 } else { 2103 nop->nfso_mode = NFSV4OPEN_ACCESSREAD; 2104 delegtype = NFSV4OPEN_DELEGATEREAD; 2105 } 2106 nop->nfso_opencnt = 0; 2107 nop->nfso_posixlock = 1; 2108 nop->nfso_fhlen = dp->nfsdl_fhlen; 2109 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen); 2110 LIST_INIT(&nop->nfso_lock); 2111 nop->nfso_stateid.seqid = 0; 2112 nop->nfso_stateid.other[0] = 0; 2113 nop->nfso_stateid.other[1] = 0; 2114 nop->nfso_stateid.other[2] = 0; 2115 newnfs_copycred(&dp->nfsdl_cred, tcred); 2116 newnfs_copyincred(tcred, &nop->nfso_cred); 2117 tdp = NULL; 2118 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh, 2119 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen, 2120 nop->nfso_mode, nop, NULL, 0, &tdp, 1, 2121 delegtype, tcred, p); 2122 if (tdp != NULL) { 2123 if ((tdp->nfsdl_flags & NFSCLDL_WRITE)) 2124 mode = NFSV4OPEN_ACCESSWRITE; 2125 else 2126 mode = NFSV4OPEN_ACCESSREAD; 2127 if ((nop->nfso_mode & mode) == mode && 2128 nop->nfso_fhlen == tdp->nfsdl_fhlen && 2129 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh, 2130 nop->nfso_fhlen)) { 2131 dp->nfsdl_stateid = tdp->nfsdl_stateid; 2132 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit; 2133 dp->nfsdl_ace = tdp->nfsdl_ace; 2134 dp->nfsdl_change = tdp->nfsdl_change; 2135 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; 2136 if ((tdp->nfsdl_flags & NFSCLDL_RECALL)) 2137 dp->nfsdl_flags |= NFSCLDL_RECALL; 2138 FREE((caddr_t)tdp, M_NFSCLDELEG); 2139 } else { 2140 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list); 2141 } 2142 } 2143 } 2144 if (error) { 2145 if (nop != NULL) 2146 FREE((caddr_t)nop, M_NFSCLOPEN); 2147 /* 2148 * Couldn't reclaim it, so throw the state 2149 * away. Ouch!! 2150 */ 2151 nfscl_cleandeleg(dp); 2152 nfscl_freedeleg(&clp->nfsc_deleg, dp); 2153 } else { 2154 LIST_INSERT_HEAD(&extra_open, nop, nfso_list); 2155 } 2156 } 2157 dp = ndp; 2158 } 2159 2160 /* 2161 * Now, get rid of extra Opens and Delegations. 
2162 */ 2163 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { 2164 do { 2165 newnfs_copycred(&op->nfso_cred, tcred); 2166 error = nfscl_tryclose(op, tcred, nmp, p); 2167 if (error == NFSERR_GRACE) 2168 (void) nfs_catnap(PZERO, error, "nfsexcls"); 2169 } while (error == NFSERR_GRACE); 2170 LIST_REMOVE(op, nfso_list); 2171 FREE((caddr_t)op, M_NFSCLOPEN); 2172 } 2173 if (nowp != NULL) 2174 FREE((caddr_t)nowp, M_NFSCLOWNER); 2175 2176 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { 2177 do { 2178 newnfs_copycred(&dp->nfsdl_cred, tcred); 2179 error = nfscl_trydelegreturn(dp, tcred, nmp, p); 2180 if (error == NFSERR_GRACE) 2181 (void) nfs_catnap(PZERO, error, "nfsexdlg"); 2182 } while (error == NFSERR_GRACE); 2183 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); 2184 FREE((caddr_t)dp, M_NFSCLDELEG); 2185 } 2186 2187 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */ 2188 if (NFSHASNFSV4N(nmp)) 2189 (void)nfsrpc_reclaimcomplete(nmp, cred, p); 2190 2191 NFSLOCKCLSTATE(); 2192 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; 2193 wakeup(&clp->nfsc_flags); 2194 nfsv4_unlock(&clp->nfsc_lock, 0); 2195 NFSUNLOCKCLSTATE(); 2196 NFSFREECRED(tcred); 2197 } 2198 2199 /* 2200 * This function is called when a server replies with NFSERR_EXPIRED. 2201 * It deletes all state for the client and does a fresh SetClientId/confirm. 2202 * XXX Someday it should post a signal to the process(es) that hold the 2203 * state, so they know that lock state has been lost. 2204 */ 2205 APPLESTATIC int 2206 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) 2207 { 2208 struct nfsmount *nmp; 2209 struct ucred *cred; 2210 int igotlock = 0, error, trycnt; 2211 2212 /* 2213 * If the clientid has gone away or a new SetClientid has already 2214 * been done, just return ok. 2215 */ 2216 if (clp == NULL || clidrev != clp->nfsc_clientidrev) 2217 return (0); 2218 2219 /* 2220 * First, lock the client structure, so everyone else will 2221 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so 2222 * that only one thread does the work. 2223 */ 2224 NFSLOCKCLSTATE(); 2225 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; 2226 do { 2227 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2228 NFSCLSTATEMUTEXPTR, NULL); 2229 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); 2230 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { 2231 if (igotlock) 2232 nfsv4_unlock(&clp->nfsc_lock, 0); 2233 NFSUNLOCKCLSTATE(); 2234 return (0); 2235 } 2236 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2237 NFSUNLOCKCLSTATE(); 2238 2239 nmp = clp->nfsc_nmp; 2240 if (nmp == NULL) 2241 panic("nfscl expired"); 2242 cred = newnfs_getcred(); 2243 trycnt = 5; 2244 do { 2245 error = nfsrpc_setclient(nmp, clp, 0, cred, p); 2246 } while ((error == NFSERR_STALECLIENTID || 2247 error == NFSERR_BADSESSION || 2248 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2249 if (error) { 2250 /* 2251 * Clear out any state. 2252 */ 2253 nfscl_cleanclient(clp); 2254 NFSLOCKCLSTATE(); 2255 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | 2256 NFSCLFLAGS_RECOVER); 2257 } else { 2258 /* 2259 * Expire the state for the client. 
2260 */ 2261 nfscl_expireclient(clp, nmp, cred, p); 2262 NFSLOCKCLSTATE(); 2263 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2264 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2265 } 2266 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); 2267 wakeup(&clp->nfsc_flags); 2268 nfsv4_unlock(&clp->nfsc_lock, 0); 2269 NFSUNLOCKCLSTATE(); 2270 NFSFREECRED(cred); 2271 return (error); 2272 } 2273 2274 /* 2275 * This function inserts a lock in the list after insert_lop. 2276 */ 2277 static void 2278 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, 2279 struct nfscllock *insert_lop, int local) 2280 { 2281 2282 if ((struct nfscllockowner *)insert_lop == lp) 2283 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); 2284 else 2285 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); 2286 if (local) 2287 newnfsstats.cllocallocks++; 2288 else 2289 newnfsstats.cllocks++; 2290 } 2291 2292 /* 2293 * This function updates the locking for a lock owner and given file. It 2294 * maintains a list of lock ranges ordered on increasing file offset that 2295 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 2296 * It always adds new_lop to the list and sometimes uses the one pointed 2297 * at by other_lopp. 2298 * Returns 1 if the locks were modified, 0 otherwise. 2299 */ 2300 static int 2301 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, 2302 struct nfscllock **other_lopp, int local) 2303 { 2304 struct nfscllock *new_lop = *new_lopp; 2305 struct nfscllock *lop, *tlop, *ilop; 2306 struct nfscllock *other_lop; 2307 int unlock = 0, modified = 0; 2308 u_int64_t tmp; 2309 2310 /* 2311 * Work down the list until the lock is merged. 2312 */ 2313 if (new_lop->nfslo_type == F_UNLCK) 2314 unlock = 1; 2315 ilop = (struct nfscllock *)lp; 2316 lop = LIST_FIRST(&lp->nfsl_lock); 2317 while (lop != NULL) { 2318 /* 2319 * Only check locks for this file that aren't before the start of 2320 * new lock's range. 2321 */ 2322 if (lop->nfslo_end >= new_lop->nfslo_first) { 2323 if (new_lop->nfslo_end < lop->nfslo_first) { 2324 /* 2325 * If the new lock ends before the start of the 2326 * current lock's range, no merge, just insert 2327 * the new lock. 2328 */ 2329 break; 2330 } 2331 if (new_lop->nfslo_type == lop->nfslo_type || 2332 (new_lop->nfslo_first <= lop->nfslo_first && 2333 new_lop->nfslo_end >= lop->nfslo_end)) { 2334 /* 2335 * This lock can be absorbed by the new lock/unlock. 2336 * This happens when it covers the entire range 2337 * of the old lock or is contiguous 2338 * with the old lock and is of the same type or an 2339 * unlock. 2340 */ 2341 if (new_lop->nfslo_type != lop->nfslo_type || 2342 new_lop->nfslo_first != lop->nfslo_first || 2343 new_lop->nfslo_end != lop->nfslo_end) 2344 modified = 1; 2345 if (lop->nfslo_first < new_lop->nfslo_first) 2346 new_lop->nfslo_first = lop->nfslo_first; 2347 if (lop->nfslo_end > new_lop->nfslo_end) 2348 new_lop->nfslo_end = lop->nfslo_end; 2349 tlop = lop; 2350 lop = LIST_NEXT(lop, nfslo_list); 2351 nfscl_freelock(tlop, local); 2352 continue; 2353 } 2354 2355 /* 2356 * All these cases are for contiguous locks that are not the 2357 * same type, so they can't be merged. 2358 */ 2359 if (new_lop->nfslo_first <= lop->nfslo_first) { 2360 /* 2361 * This case is where the new lock overlaps with the 2362 * first part of the old lock. Move the start of the 2363 * old lock to just past the end of the new lock. The 2364 * new lock will be inserted in front of the old, since 2365 * ilop hasn't been updated. 
(We are done now.) 2366 */ 2367 if (lop->nfslo_first != new_lop->nfslo_end) { 2368 lop->nfslo_first = new_lop->nfslo_end; 2369 modified = 1; 2370 } 2371 break; 2372 } 2373 if (new_lop->nfslo_end >= lop->nfslo_end) { 2374 /* 2375 * This case is where the new lock overlaps with the 2376 * end of the old lock's range. Move the old lock's 2377 * end to just before the new lock's first and insert 2378 * the new lock after the old lock. 2379 * Might not be done yet, since the new lock could 2380 * overlap further locks with higher ranges. 2381 */ 2382 if (lop->nfslo_end != new_lop->nfslo_first) { 2383 lop->nfslo_end = new_lop->nfslo_first; 2384 modified = 1; 2385 } 2386 ilop = lop; 2387 lop = LIST_NEXT(lop, nfslo_list); 2388 continue; 2389 } 2390 /* 2391 * The final case is where the new lock's range is in the 2392 * middle of the current lock's and splits the current lock 2393 * up. Use *other_lopp to handle the second part of the 2394 * split old lock range. (We are done now.) 2395 * For unlock, we use new_lop as other_lop and tmp, since 2396 * other_lop and new_lop are the same for this case. 2397 * We noted the unlock case above, so we don't need 2398 * new_lop->nfslo_type any longer. 2399 */ 2400 tmp = new_lop->nfslo_first; 2401 if (unlock) { 2402 other_lop = new_lop; 2403 *new_lopp = NULL; 2404 } else { 2405 other_lop = *other_lopp; 2406 *other_lopp = NULL; 2407 } 2408 other_lop->nfslo_first = new_lop->nfslo_end; 2409 other_lop->nfslo_end = lop->nfslo_end; 2410 other_lop->nfslo_type = lop->nfslo_type; 2411 lop->nfslo_end = tmp; 2412 nfscl_insertlock(lp, other_lop, lop, local); 2413 ilop = lop; 2414 modified = 1; 2415 break; 2416 } 2417 ilop = lop; 2418 lop = LIST_NEXT(lop, nfslo_list); 2419 if (lop == NULL) 2420 break; 2421 } 2422 2423 /* 2424 * Insert the new lock in the list at the appropriate place. 2425 */ 2426 if (!unlock) { 2427 nfscl_insertlock(lp, new_lop, ilop, local); 2428 *new_lopp = NULL; 2429 modified = 1; 2430 } 2431 return (modified); 2432 } 2433 2434 /* 2435 * This function must be run as a kernel thread. 2436 * It does Renew Ops and recovery, when required. 2437 */ 2438 APPLESTATIC void 2439 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) 2440 { 2441 struct nfsclowner *owp, *nowp; 2442 struct nfsclopen *op; 2443 struct nfscllockowner *lp, *nlp; 2444 struct nfscldeleghead dh; 2445 struct nfscldeleg *dp, *ndp; 2446 struct ucred *cred; 2447 u_int32_t clidrev; 2448 int error, cbpathdown, islept, igotlock, ret, clearok; 2449 uint32_t recover_done_time = 0; 2450 struct timespec mytime; 2451 static time_t prevsec = 0; 2452 struct nfscllockownerfh *lfhp, *nlfhp; 2453 struct nfscllockownerfhhead lfh; 2454 struct nfscllayout *lyp, *nlyp; 2455 struct nfscldevinfo *dip, *ndip; 2456 struct nfscllayouthead rlh; 2457 struct nfsclrecalllayout *recallp; 2458 struct nfsclds *dsp; 2459 2460 cred = newnfs_getcred(); 2461 NFSLOCKCLSTATE(); 2462 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; 2463 NFSUNLOCKCLSTATE(); 2464 for(;;) { 2465 newnfs_setroot(cred); 2466 cbpathdown = 0; 2467 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { 2468 /* 2469 * Only allow one recover within 1/2 of the lease 2470 * duration (nfsc_renew). 
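 *
 * For example, with a 60 second lease nfsc_renew would be 30, so once
 * a recovery completes, NFSCLFLAGS_RECOVER requests posted during the
 * next 30 seconds are simply cleared instead of starting another
 * recovery cycle.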
2471 */ 2472 if (recover_done_time < NFSD_MONOSEC) { 2473 recover_done_time = NFSD_MONOSEC + 2474 clp->nfsc_renew; 2475 nfscl_recover(clp, cred, p); 2476 } else { 2477 NFSLOCKCLSTATE(); 2478 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2479 NFSUNLOCKCLSTATE(); 2480 } 2481 } 2482 if (clp->nfsc_expire <= NFSD_MONOSEC && 2483 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { 2484 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; 2485 clidrev = clp->nfsc_clientidrev; 2486 error = nfsrpc_renew(clp, 2487 TAILQ_FIRST(&clp->nfsc_nmp->nm_sess), cred, p); 2488 if (error == NFSERR_CBPATHDOWN) 2489 cbpathdown = 1; 2490 else if (error == NFSERR_STALECLIENTID || 2491 error == NFSERR_BADSESSION) { 2492 NFSLOCKCLSTATE(); 2493 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2494 NFSUNLOCKCLSTATE(); 2495 } else if (error == NFSERR_EXPIRED) 2496 (void) nfscl_hasexpired(clp, clidrev, p); 2497 } 2498 2499 /* Do renews for any DS sessions. */ 2500 checkdsrenew: 2501 NFSLOCKMNT(clp->nfsc_nmp); 2502 /* Skip first entry, since the MDS is handled above. */ 2503 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess); 2504 if (dsp != NULL) 2505 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2506 while (dsp != NULL) { 2507 if (dsp->nfsclds_expire <= NFSD_MONOSEC) { 2508 dsp->nfsclds_expire = NFSD_MONOSEC + 2509 clp->nfsc_renew; 2510 NFSUNLOCKMNT(clp->nfsc_nmp); 2511 (void)nfsrpc_renew(clp, dsp, cred, p); 2512 goto checkdsrenew; 2513 } 2514 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2515 } 2516 NFSUNLOCKMNT(clp->nfsc_nmp); 2517 2518 TAILQ_INIT(&dh); 2519 NFSLOCKCLSTATE(); 2520 if (cbpathdown) 2521 /* It's a Total Recall! */ 2522 nfscl_totalrecall(clp); 2523 2524 /* 2525 * Now, handle defunct owners. 2526 */ 2527 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 2528 if (LIST_EMPTY(&owp->nfsow_open)) { 2529 if (owp->nfsow_defunct != 0) 2530 nfscl_freeopenowner(owp, 0); 2531 } 2532 } 2533 2534 /* 2535 * Do the recall on any delegations. To avoid trouble, always 2536 * come back up here after having slept. 2537 */ 2538 igotlock = 0; 2539 tryagain: 2540 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2541 while (dp != NULL) { 2542 ndp = TAILQ_NEXT(dp, nfsdl_list); 2543 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { 2544 /* 2545 * Wait for outstanding I/O ops to be done. 2546 */ 2547 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 2548 if (igotlock) { 2549 nfsv4_unlock(&clp->nfsc_lock, 0); 2550 igotlock = 0; 2551 } 2552 dp->nfsdl_rwlock.nfslock_lock |= 2553 NFSV4LOCK_WANTED; 2554 (void) nfsmsleep(&dp->nfsdl_rwlock, 2555 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", 2556 NULL); 2557 goto tryagain; 2558 } 2559 while (!igotlock) { 2560 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 2561 &islept, NFSCLSTATEMUTEXPTR, NULL); 2562 if (islept) 2563 goto tryagain; 2564 } 2565 NFSUNLOCKCLSTATE(); 2566 newnfs_copycred(&dp->nfsdl_cred, cred); 2567 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, 2568 NULL, cred, p, 1); 2569 if (!ret) { 2570 nfscl_cleandeleg(dp); 2571 TAILQ_REMOVE(&clp->nfsc_deleg, dp, 2572 nfsdl_list); 2573 LIST_REMOVE(dp, nfsdl_hash); 2574 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2575 nfscl_delegcnt--; 2576 newnfsstats.cldelegates--; 2577 } 2578 NFSLOCKCLSTATE(); 2579 } 2580 dp = ndp; 2581 } 2582 2583 /* 2584 * Clear out old delegations, if we are above the high water 2585 * mark. Only clear out ones with no state related to them. 2586 * The tailq list is in LRU order. 
2587 */ 2588 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); 2589 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { 2590 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); 2591 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && 2592 dp->nfsdl_rwlock.nfslock_lock == 0 && 2593 dp->nfsdl_timestamp < NFSD_MONOSEC && 2594 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | 2595 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { 2596 clearok = 1; 2597 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2598 op = LIST_FIRST(&owp->nfsow_open); 2599 if (op != NULL) { 2600 clearok = 0; 2601 break; 2602 } 2603 } 2604 if (clearok) { 2605 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 2606 if (!LIST_EMPTY(&lp->nfsl_lock)) { 2607 clearok = 0; 2608 break; 2609 } 2610 } 2611 } 2612 if (clearok) { 2613 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); 2614 LIST_REMOVE(dp, nfsdl_hash); 2615 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2616 nfscl_delegcnt--; 2617 newnfsstats.cldelegates--; 2618 } 2619 } 2620 dp = ndp; 2621 } 2622 if (igotlock) 2623 nfsv4_unlock(&clp->nfsc_lock, 0); 2624 2625 /* 2626 * Do the recall on any layouts. To avoid trouble, always 2627 * come back up here after having slept. 2628 */ 2629 TAILQ_INIT(&rlh); 2630 tryagain2: 2631 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) { 2632 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) { 2633 /* 2634 * Wait for outstanding I/O ops to be done. 2635 */ 2636 if (lyp->nfsly_lock.nfslock_usecnt > 0 || 2637 (lyp->nfsly_lock.nfslock_lock & 2638 NFSV4LOCK_LOCK) != 0) { 2639 lyp->nfsly_lock.nfslock_lock |= 2640 NFSV4LOCK_WANTED; 2641 (void)nfsmsleep(&lyp->nfsly_lock, 2642 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp", 2643 NULL); 2644 goto tryagain2; 2645 } 2646 /* Move the layout to the recall list. */ 2647 TAILQ_REMOVE(&clp->nfsc_layout, lyp, 2648 nfsly_list); 2649 LIST_REMOVE(lyp, nfsly_hash); 2650 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list); 2651 2652 /* Handle any layout commits. */ 2653 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) && 2654 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 2655 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 2656 NFSUNLOCKCLSTATE(); 2657 NFSCL_DEBUG(3, "do layoutcommit\n"); 2658 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, 2659 cred, p); 2660 NFSLOCKCLSTATE(); 2661 goto tryagain2; 2662 } 2663 } 2664 } 2665 2666 /* Now, look for stale layouts. */ 2667 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead); 2668 while (lyp != NULL) { 2669 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list); 2670 if (lyp->nfsly_timestamp < NFSD_MONOSEC && 2671 (lyp->nfsly_flags & NFSLY_RECALL) == 0 && 2672 lyp->nfsly_lock.nfslock_usecnt == 0 && 2673 lyp->nfsly_lock.nfslock_lock == 0) { 2674 NFSCL_DEBUG(4, "ret stale lay=%d\n", 2675 nfscl_layoutcnt); 2676 recallp = malloc(sizeof(*recallp), 2677 M_NFSLAYRECALL, M_NOWAIT); 2678 if (recallp == NULL) 2679 break; 2680 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, 2681 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX, 2682 lyp->nfsly_stateid.seqid, recallp); 2683 } 2684 lyp = nlyp; 2685 } 2686 2687 /* 2688 * Free up any unreferenced device info structures. 2689 */ 2690 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) { 2691 if (dip->nfsdi_layoutrefs == 0 && 2692 dip->nfsdi_refcnt == 0) { 2693 NFSCL_DEBUG(4, "freeing devinfo\n"); 2694 LIST_REMOVE(dip, nfsdi_list); 2695 nfscl_freedevinfo(dip); 2696 } 2697 } 2698 NFSUNLOCKCLSTATE(); 2699 2700 /* Do layout return(s), as required. 
*/ 2701 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) { 2702 TAILQ_REMOVE(&rlh, lyp, nfsly_list); 2703 NFSCL_DEBUG(4, "ret layout\n"); 2704 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p); 2705 nfscl_freelayout(lyp); 2706 } 2707 2708 /* 2709 * Delegreturn any delegations cleaned out or recalled. 2710 */ 2711 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { 2712 newnfs_copycred(&dp->nfsdl_cred, cred); 2713 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 2714 TAILQ_REMOVE(&dh, dp, nfsdl_list); 2715 FREE((caddr_t)dp, M_NFSCLDELEG); 2716 } 2717 2718 SLIST_INIT(&lfh); 2719 /* 2720 * Call nfscl_cleanupkext() once per second to check for 2721 * open/lock owners where the process has exited. 2722 */ 2723 NFSGETNANOTIME(&mytime); 2724 if (prevsec != mytime.tv_sec) { 2725 prevsec = mytime.tv_sec; 2726 nfscl_cleanupkext(clp, &lfh); 2727 } 2728 2729 /* 2730 * Do a ReleaseLockOwner for all lock owners where the 2731 * associated process no longer exists, as found by 2732 * nfscl_cleanupkext(). 2733 */ 2734 newnfs_setroot(cred); 2735 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) { 2736 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list, 2737 nlp) { 2738 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp, 2739 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred, 2740 p); 2741 nfscl_freelockowner(lp, 0); 2742 } 2743 free(lfhp, M_TEMP); 2744 } 2745 SLIST_INIT(&lfh); 2746 2747 NFSLOCKCLSTATE(); 2748 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) 2749 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", 2750 hz); 2751 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { 2752 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; 2753 NFSUNLOCKCLSTATE(); 2754 NFSFREECRED(cred); 2755 wakeup((caddr_t)clp); 2756 return; 2757 } 2758 NFSUNLOCKCLSTATE(); 2759 } 2760 } 2761 2762 /* 2763 * Initiate state recovery. Called when NFSERR_STALECLIENTID, 2764 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received. 2765 */ 2766 APPLESTATIC void 2767 nfscl_initiate_recovery(struct nfsclclient *clp) 2768 { 2769 2770 if (clp == NULL) 2771 return; 2772 NFSLOCKCLSTATE(); 2773 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2774 NFSUNLOCKCLSTATE(); 2775 wakeup((caddr_t)clp); 2776 } 2777 2778 /* 2779 * Dump out the state stuff for debugging. 
2780 */ 2781 APPLESTATIC void 2782 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, 2783 int lockowner, int locks) 2784 { 2785 struct nfsclclient *clp; 2786 struct nfsclowner *owp; 2787 struct nfsclopen *op; 2788 struct nfscllockowner *lp; 2789 struct nfscllock *lop; 2790 struct nfscldeleg *dp; 2791 2792 clp = nmp->nm_clp; 2793 if (clp == NULL) { 2794 printf("nfscl dumpstate NULL clp\n"); 2795 return; 2796 } 2797 NFSLOCKCLSTATE(); 2798 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2799 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2800 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2801 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2802 owp->nfsow_owner[0], owp->nfsow_owner[1], 2803 owp->nfsow_owner[2], owp->nfsow_owner[3], 2804 owp->nfsow_seqid); 2805 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2806 if (opens) 2807 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2808 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2809 op->nfso_stateid.other[2], op->nfso_opencnt, 2810 op->nfso_fh[12]); 2811 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2812 if (lockowner) 2813 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2814 lp->nfsl_owner[0], lp->nfsl_owner[1], 2815 lp->nfsl_owner[2], lp->nfsl_owner[3], 2816 lp->nfsl_seqid, 2817 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2818 lp->nfsl_stateid.other[2]); 2819 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2820 if (locks) 2821 #ifdef __FreeBSD__ 2822 printf("lck typ=%d fst=%ju end=%ju\n", 2823 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2824 (intmax_t)lop->nfslo_end); 2825 #else 2826 printf("lck typ=%d fst=%qd end=%qd\n", 2827 lop->nfslo_type, lop->nfslo_first, 2828 lop->nfslo_end); 2829 #endif 2830 } 2831 } 2832 } 2833 } 2834 } 2835 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2836 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2837 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2838 owp->nfsow_owner[0], owp->nfsow_owner[1], 2839 owp->nfsow_owner[2], owp->nfsow_owner[3], 2840 owp->nfsow_seqid); 2841 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2842 if (opens) 2843 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2844 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2845 op->nfso_stateid.other[2], op->nfso_opencnt, 2846 op->nfso_fh[12]); 2847 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2848 if (lockowner) 2849 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2850 lp->nfsl_owner[0], lp->nfsl_owner[1], 2851 lp->nfsl_owner[2], lp->nfsl_owner[3], 2852 lp->nfsl_seqid, 2853 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2854 lp->nfsl_stateid.other[2]); 2855 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2856 if (locks) 2857 #ifdef __FreeBSD__ 2858 printf("lck typ=%d fst=%ju end=%ju\n", 2859 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2860 (intmax_t)lop->nfslo_end); 2861 #else 2862 printf("lck typ=%d fst=%qd end=%qd\n", 2863 lop->nfslo_type, lop->nfslo_first, 2864 lop->nfslo_end); 2865 #endif 2866 } 2867 } 2868 } 2869 } 2870 NFSUNLOCKCLSTATE(); 2871 } 2872 2873 /* 2874 * Check for duplicate open owners and opens. 2875 * (Only used as a diagnostic aid.) 
2876 */ 2877 APPLESTATIC void 2878 nfscl_dupopen(vnode_t vp, int dupopens) 2879 { 2880 struct nfsclclient *clp; 2881 struct nfsclowner *owp, *owp2; 2882 struct nfsclopen *op, *op2; 2883 struct nfsfh *nfhp; 2884 2885 clp = VFSTONFS(vnode_mount(vp))->nm_clp; 2886 if (clp == NULL) { 2887 printf("nfscl dupopen NULL clp\n"); 2888 return; 2889 } 2890 nfhp = VTONFS(vp)->n_fhp; 2891 NFSLOCKCLSTATE(); 2892 2893 /* 2894 * First, search for duplicate owners. 2895 * These should never happen! 2896 */ 2897 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2898 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2899 if (owp != owp2 && 2900 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, 2901 NFSV4CL_LOCKNAMELEN)) { 2902 NFSUNLOCKCLSTATE(); 2903 printf("DUP OWNER\n"); 2904 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); 2905 return; 2906 } 2907 } 2908 } 2909 2910 /* 2911 * Now, search for duplicate stateids. 2912 * These shouldn't happen, either. 2913 */ 2914 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2915 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 2916 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2917 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2918 if (op != op2 && 2919 (op->nfso_stateid.other[0] != 0 || 2920 op->nfso_stateid.other[1] != 0 || 2921 op->nfso_stateid.other[2] != 0) && 2922 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && 2923 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && 2924 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { 2925 NFSUNLOCKCLSTATE(); 2926 printf("DUP STATEID\n"); 2927 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 2928 0); 2929 return; 2930 } 2931 } 2932 } 2933 } 2934 } 2935 2936 /* 2937 * Now search for duplicate opens. 2938 * Duplicate opens for the same owner 2939 * should never occur. Other duplicates are 2940 * possible and are checked for if "dupopens" 2941 * is true. 2942 */ 2943 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2944 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 2945 if (nfhp->nfh_len == op2->nfso_fhlen && 2946 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { 2947 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2948 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2949 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && 2950 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && 2951 (!NFSBCMP(op->nfso_own->nfsow_owner, 2952 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || 2953 dupopens)) { 2954 if (!NFSBCMP(op->nfso_own->nfsow_owner, 2955 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 2956 NFSUNLOCKCLSTATE(); 2957 printf("BADDUP OPEN\n"); 2958 } else { 2959 NFSUNLOCKCLSTATE(); 2960 printf("DUP OPEN\n"); 2961 } 2962 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 2963 0, 0); 2964 return; 2965 } 2966 } 2967 } 2968 } 2969 } 2970 } 2971 NFSUNLOCKCLSTATE(); 2972 } 2973 2974 /* 2975 * During close, find an open that needs to be dereferenced and 2976 * dereference it. If there are no more opens for this file, 2977 * log a message to that effect. 2978 * Opens aren't actually Close'd until VOP_INACTIVE() is performed 2979 * on the file's vnode. 2980 * This is the safe way, since it is difficult to identify 2981 * which open the close is for and I/O can be performed after the 2982 * close(2) system call when a file is mmap'd. 2983 * If it returns 0 for success, there will be a referenced 2984 * clp returned via clpp. 
2985 */ 2986 APPLESTATIC int 2987 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) 2988 { 2989 struct nfsclclient *clp; 2990 struct nfsclowner *owp; 2991 struct nfsclopen *op; 2992 struct nfscldeleg *dp; 2993 struct nfsfh *nfhp; 2994 int error, notdecr; 2995 2996 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); 2997 if (error) 2998 return (error); 2999 *clpp = clp; 3000 3001 nfhp = VTONFS(vp)->n_fhp; 3002 notdecr = 1; 3003 NFSLOCKCLSTATE(); 3004 /* 3005 * First, look for one under a delegation that was locally issued 3006 * and just decrement the opencnt for it. Since all my Opens against 3007 * the server are DENY_NONE, I don't see a problem with hanging 3008 * onto them. (It is much easier to use one of the extant Opens 3009 * that I already have on the server when a Delegation is recalled 3010 * than to do fresh Opens.) Someday, I might need to rethink this, but. 3011 */ 3012 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3013 if (dp != NULL) { 3014 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 3015 op = LIST_FIRST(&owp->nfsow_open); 3016 if (op != NULL) { 3017 /* 3018 * Since a delegation is for a file, there 3019 * should never be more than one open for 3020 * each openowner. 3021 */ 3022 if (LIST_NEXT(op, nfso_list) != NULL) 3023 panic("nfscdeleg opens"); 3024 if (notdecr && op->nfso_opencnt > 0) { 3025 notdecr = 0; 3026 op->nfso_opencnt--; 3027 break; 3028 } 3029 } 3030 } 3031 } 3032 3033 /* Now process the opens against the server. */ 3034 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3035 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3036 if (op->nfso_fhlen == nfhp->nfh_len && 3037 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3038 nfhp->nfh_len)) { 3039 /* Found an open, decrement cnt if possible */ 3040 if (notdecr && op->nfso_opencnt > 0) { 3041 notdecr = 0; 3042 op->nfso_opencnt--; 3043 } 3044 /* 3045 * There are more opens, so just return. 3046 */ 3047 if (op->nfso_opencnt > 0) { 3048 NFSUNLOCKCLSTATE(); 3049 return (0); 3050 } 3051 } 3052 } 3053 } 3054 NFSUNLOCKCLSTATE(); 3055 if (notdecr) 3056 printf("nfscl: never fnd open\n"); 3057 return (0); 3058 } 3059 3060 APPLESTATIC int 3061 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) 3062 { 3063 struct nfsclclient *clp; 3064 struct nfsclowner *owp, *nowp; 3065 struct nfsclopen *op; 3066 struct nfscldeleg *dp; 3067 struct nfsfh *nfhp; 3068 int error; 3069 3070 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); 3071 if (error) 3072 return (error); 3073 *clpp = clp; 3074 3075 nfhp = VTONFS(vp)->n_fhp; 3076 NFSLOCKCLSTATE(); 3077 /* 3078 * First get rid of the local Open structures, which should be no 3079 * longer in use. 3080 */ 3081 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3082 if (dp != NULL) { 3083 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 3084 op = LIST_FIRST(&owp->nfsow_open); 3085 if (op != NULL) { 3086 KASSERT((op->nfso_opencnt == 0), 3087 ("nfscl: bad open cnt on deleg")); 3088 nfscl_freeopen(op, 1); 3089 } 3090 nfscl_freeopenowner(owp, 1); 3091 } 3092 } 3093 3094 /* Return any layouts marked return on close. */ 3095 nfscl_retoncloselayout(clp, nfhp->nfh_fh, nfhp->nfh_len); 3096 3097 /* Now process the opens against the server. */ 3098 lookformore: 3099 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3100 op = LIST_FIRST(&owp->nfsow_open); 3101 while (op != NULL) { 3102 if (op->nfso_fhlen == nfhp->nfh_len && 3103 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3104 nfhp->nfh_len)) { 3105 /* Found an open, close it. 
*/ 3106 KASSERT((op->nfso_opencnt == 0), 3107 ("nfscl: bad open cnt on server")); 3108 NFSUNLOCKCLSTATE(); 3109 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op, 3110 p); 3111 NFSLOCKCLSTATE(); 3112 goto lookformore; 3113 } 3114 op = LIST_NEXT(op, nfso_list); 3115 } 3116 } 3117 NFSUNLOCKCLSTATE(); 3118 return (0); 3119 } 3120 3121 /* 3122 * Return all delegations on this client. 3123 * (Must be called with client sleep lock.) 3124 */ 3125 static void 3126 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) 3127 { 3128 struct nfscldeleg *dp, *ndp; 3129 struct ucred *cred; 3130 3131 cred = newnfs_getcred(); 3132 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { 3133 nfscl_cleandeleg(dp); 3134 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3135 nfscl_freedeleg(&clp->nfsc_deleg, dp); 3136 } 3137 NFSFREECRED(cred); 3138 } 3139 3140 /* 3141 * Do a callback RPC. 3142 */ 3143 APPLESTATIC void 3144 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) 3145 { 3146 int clist, gotseq_ok, i, j, k, op, rcalls; 3147 u_int32_t *tl; 3148 struct nfsclclient *clp; 3149 struct nfscldeleg *dp = NULL; 3150 int numops, taglen = -1, error = 0, trunc; 3151 u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident; 3152 u_char tag[NFSV4_SMALLSTR + 1], *tagstr; 3153 vnode_t vp = NULL; 3154 struct nfsnode *np; 3155 struct vattr va; 3156 struct nfsfh *nfhp; 3157 mount_t mp; 3158 nfsattrbit_t attrbits, rattrbits; 3159 nfsv4stateid_t stateid; 3160 uint32_t seqid, slotid = 0, highslot, cachethis; 3161 uint8_t sessionid[NFSX_V4SESSIONID]; 3162 struct mbuf *rep; 3163 struct nfscllayout *lyp; 3164 uint64_t filesid[2], len, off; 3165 int changed, gotone, laytype, recalltype; 3166 uint32_t iomode; 3167 struct nfsclrecalllayout *recallp = NULL; 3168 3169 gotseq_ok = 0; 3170 nfsrvd_rephead(nd); 3171 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3172 taglen = fxdr_unsigned(int, *tl); 3173 if (taglen < 0) { 3174 error = EBADRPC; 3175 goto nfsmout; 3176 } 3177 if (taglen <= NFSV4_SMALLSTR) 3178 tagstr = tag; 3179 else 3180 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); 3181 error = nfsrv_mtostr(nd, tagstr, taglen); 3182 if (error) { 3183 if (taglen > NFSV4_SMALLSTR) 3184 free(tagstr, M_TEMP); 3185 taglen = -1; 3186 goto nfsmout; 3187 } 3188 (void) nfsm_strtom(nd, tag, taglen); 3189 if (taglen > NFSV4_SMALLSTR) { 3190 free(tagstr, M_TEMP); 3191 } 3192 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); 3193 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3194 minorvers = fxdr_unsigned(u_int32_t, *tl++); 3195 if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION) 3196 nd->nd_repstat = NFSERR_MINORVERMISMATCH; 3197 cbident = fxdr_unsigned(u_int32_t, *tl++); 3198 if (nd->nd_repstat) 3199 numops = 0; 3200 else 3201 numops = fxdr_unsigned(int, *tl); 3202 /* 3203 * Loop around doing the sub ops. 
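 *
 * Each pass dissects one operation and appends its status to the
 * reply. The ops handled are CB_GETATTR and CB_RECALL for both minor
 * versions, plus CB_LAYOUTRECALL and CB_SEQUENCE for NFSv4.1. For
 * NFSv4.1 the first op of the compound must be CB_SEQUENCE (otherwise
 * NFSERR_OPNOTINSESS is replied) and CB_SEQUENCE may only appear
 * first (otherwise NFSERR_SEQUENCEPOS).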
3204 */ 3205 for (i = 0; i < numops; i++) { 3206 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3207 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); 3208 *repp++ = *tl; 3209 op = fxdr_unsigned(int, *tl); 3210 if (op < NFSV4OP_CBGETATTR || 3211 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) || 3212 (op > NFSV4OP_CBNOTIFYDEVID && 3213 minorvers == NFSV41_MINORVERSION)) { 3214 nd->nd_repstat = NFSERR_OPILLEGAL; 3215 *repp = nfscl_errmap(nd); 3216 retops++; 3217 break; 3218 } 3219 nd->nd_procnum = op; 3220 if (op < NFSV4OP_CBNOPS) 3221 newnfsstats.cbrpccnt[nd->nd_procnum]++; 3222 switch (op) { 3223 case NFSV4OP_CBGETATTR: 3224 NFSCL_DEBUG(4, "cbgetattr\n"); 3225 mp = NULL; 3226 vp = NULL; 3227 error = nfsm_getfh(nd, &nfhp); 3228 if (!error) 3229 error = nfsrv_getattrbits(nd, &attrbits, 3230 NULL, NULL); 3231 if (error == 0 && i == 0 && 3232 minorvers != NFSV4_MINORVERSION) 3233 error = NFSERR_OPNOTINSESS; 3234 if (!error) { 3235 mp = nfscl_getmnt(minorvers, sessionid, cbident, 3236 &clp); 3237 if (mp == NULL) 3238 error = NFSERR_SERVERFAULT; 3239 } 3240 if (!error) { 3241 error = nfscl_ngetreopen(mp, nfhp->nfh_fh, 3242 nfhp->nfh_len, p, &np); 3243 if (!error) 3244 vp = NFSTOV(np); 3245 } 3246 if (!error) { 3247 NFSZERO_ATTRBIT(&rattrbits); 3248 NFSLOCKCLSTATE(); 3249 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3250 nfhp->nfh_len); 3251 if (dp != NULL) { 3252 if (NFSISSET_ATTRBIT(&attrbits, 3253 NFSATTRBIT_SIZE)) { 3254 if (vp != NULL) 3255 va.va_size = np->n_size; 3256 else 3257 va.va_size = 3258 dp->nfsdl_size; 3259 NFSSETBIT_ATTRBIT(&rattrbits, 3260 NFSATTRBIT_SIZE); 3261 } 3262 if (NFSISSET_ATTRBIT(&attrbits, 3263 NFSATTRBIT_CHANGE)) { 3264 va.va_filerev = 3265 dp->nfsdl_change; 3266 if (vp == NULL || 3267 (np->n_flag & NDELEGMOD)) 3268 va.va_filerev++; 3269 NFSSETBIT_ATTRBIT(&rattrbits, 3270 NFSATTRBIT_CHANGE); 3271 } 3272 } else 3273 error = NFSERR_SERVERFAULT; 3274 NFSUNLOCKCLSTATE(); 3275 } 3276 if (vp != NULL) 3277 vrele(vp); 3278 if (mp != NULL) 3279 vfs_unbusy(mp); 3280 if (nfhp != NULL) 3281 FREE((caddr_t)nfhp, M_NFSFH); 3282 if (!error) 3283 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, 3284 NULL, 0, &rattrbits, NULL, NULL, 0, 0, 0, 0, 3285 (uint64_t)0); 3286 break; 3287 case NFSV4OP_CBRECALL: 3288 NFSCL_DEBUG(4, "cbrecall\n"); 3289 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3290 NFSX_UNSIGNED); 3291 stateid.seqid = *tl++; 3292 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, 3293 NFSX_STATEIDOTHER); 3294 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 3295 trunc = fxdr_unsigned(int, *tl); 3296 error = nfsm_getfh(nd, &nfhp); 3297 if (error == 0 && i == 0 && 3298 minorvers != NFSV4_MINORVERSION) 3299 error = NFSERR_OPNOTINSESS; 3300 if (!error) { 3301 NFSLOCKCLSTATE(); 3302 if (minorvers == NFSV4_MINORVERSION) 3303 clp = nfscl_getclnt(cbident); 3304 else 3305 clp = nfscl_getclntsess(sessionid); 3306 if (clp != NULL) { 3307 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3308 nfhp->nfh_len); 3309 if (dp != NULL && (dp->nfsdl_flags & 3310 NFSCLDL_DELEGRET) == 0) { 3311 dp->nfsdl_flags |= 3312 NFSCLDL_RECALL; 3313 wakeup((caddr_t)clp); 3314 } 3315 } else { 3316 error = NFSERR_SERVERFAULT; 3317 } 3318 NFSUNLOCKCLSTATE(); 3319 } 3320 if (nfhp != NULL) 3321 FREE((caddr_t)nfhp, M_NFSFH); 3322 break; 3323 case NFSV4OP_CBLAYOUTRECALL: 3324 NFSCL_DEBUG(4, "cblayrec\n"); 3325 nfhp = NULL; 3326 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED); 3327 laytype = fxdr_unsigned(int, *tl++); 3328 iomode = fxdr_unsigned(uint32_t, *tl++); 3329 if (newnfs_true == *tl++) 3330 changed = 1; 3331 else 3332 
changed = 0; 3333 recalltype = fxdr_unsigned(int, *tl); 3334 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, 3335 M_WAITOK); 3336 if (laytype != NFSLAYOUT_NFSV4_1_FILES) 3337 error = NFSERR_NOMATCHLAYOUT; 3338 else if (recalltype == NFSLAYOUTRETURN_FILE) { 3339 error = nfsm_getfh(nd, &nfhp); 3340 NFSCL_DEBUG(4, "retfile getfh=%d\n", error); 3341 if (error != 0) 3342 goto nfsmout; 3343 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER + 3344 NFSX_STATEID); 3345 off = fxdr_hyper(tl); tl += 2; 3346 len = fxdr_hyper(tl); tl += 2; 3347 stateid.seqid = fxdr_unsigned(uint32_t, *tl++); 3348 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER); 3349 if (minorvers == NFSV4_MINORVERSION) 3350 error = NFSERR_NOTSUPP; 3351 else if (i == 0) 3352 error = NFSERR_OPNOTINSESS; 3353 if (error == 0) { 3354 NFSLOCKCLSTATE(); 3355 clp = nfscl_getclntsess(sessionid); 3356 NFSCL_DEBUG(4, "cbly clp=%p\n", clp); 3357 if (clp != NULL) { 3358 lyp = nfscl_findlayout(clp, 3359 nfhp->nfh_fh, 3360 nfhp->nfh_len); 3361 NFSCL_DEBUG(4, "cblyp=%p\n", 3362 lyp); 3363 if (lyp != NULL && 3364 (lyp->nfsly_flags & 3365 NFSLY_FILES) != 0 && 3366 !NFSBCMP(stateid.other, 3367 lyp->nfsly_stateid.other, 3368 NFSX_STATEIDOTHER)) { 3369 error = 3370 nfscl_layoutrecall( 3371 recalltype, 3372 lyp, iomode, off, 3373 len, stateid.seqid, 3374 recallp); 3375 recallp = NULL; 3376 wakeup(clp); 3377 NFSCL_DEBUG(4, 3378 "aft layrcal=%d\n", 3379 error); 3380 } else 3381 error = 3382 NFSERR_NOMATCHLAYOUT; 3383 } else 3384 error = NFSERR_NOMATCHLAYOUT; 3385 NFSUNLOCKCLSTATE(); 3386 } 3387 free(nfhp, M_NFSFH); 3388 } else if (recalltype == NFSLAYOUTRETURN_FSID) { 3389 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER); 3390 filesid[0] = fxdr_hyper(tl); tl += 2; 3391 filesid[1] = fxdr_hyper(tl); tl += 2; 3392 gotone = 0; 3393 NFSLOCKCLSTATE(); 3394 clp = nfscl_getclntsess(sessionid); 3395 if (clp != NULL) { 3396 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3397 nfsly_list) { 3398 if (lyp->nfsly_filesid[0] == 3399 filesid[0] && 3400 lyp->nfsly_filesid[1] == 3401 filesid[1]) { 3402 error = 3403 nfscl_layoutrecall( 3404 recalltype, 3405 lyp, iomode, 0, 3406 UINT64_MAX, 3407 lyp->nfsly_stateid.seqid, 3408 recallp); 3409 recallp = NULL; 3410 gotone = 1; 3411 } 3412 } 3413 if (gotone != 0) 3414 wakeup(clp); 3415 else 3416 error = NFSERR_NOMATCHLAYOUT; 3417 } else 3418 error = NFSERR_NOMATCHLAYOUT; 3419 NFSUNLOCKCLSTATE(); 3420 } else if (recalltype == NFSLAYOUTRETURN_ALL) { 3421 gotone = 0; 3422 NFSLOCKCLSTATE(); 3423 clp = nfscl_getclntsess(sessionid); 3424 if (clp != NULL) { 3425 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3426 nfsly_list) { 3427 error = nfscl_layoutrecall( 3428 recalltype, lyp, iomode, 0, 3429 UINT64_MAX, 3430 lyp->nfsly_stateid.seqid, 3431 recallp); 3432 recallp = NULL; 3433 gotone = 1; 3434 } 3435 if (gotone != 0) 3436 wakeup(clp); 3437 else 3438 error = NFSERR_NOMATCHLAYOUT; 3439 } else 3440 error = NFSERR_NOMATCHLAYOUT; 3441 NFSUNLOCKCLSTATE(); 3442 } else 3443 error = NFSERR_NOMATCHLAYOUT; 3444 if (recallp != NULL) { 3445 free(recallp, M_NFSLAYRECALL); 3446 recallp = NULL; 3447 } 3448 break; 3449 case NFSV4OP_CBSEQUENCE: 3450 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3451 5 * NFSX_UNSIGNED); 3452 bcopy(tl, sessionid, NFSX_V4SESSIONID); 3453 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3454 seqid = fxdr_unsigned(uint32_t, *tl++); 3455 slotid = fxdr_unsigned(uint32_t, *tl++); 3456 highslot = fxdr_unsigned(uint32_t, *tl++); 3457 cachethis = *tl++; 3458 /* Throw away the referring call stuff. 
*/ 3459 clist = fxdr_unsigned(int, *tl); 3460 for (j = 0; j < clist; j++) { 3461 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3462 NFSX_UNSIGNED); 3463 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3464 rcalls = fxdr_unsigned(int, *tl); 3465 for (k = 0; k < rcalls; k++) { 3466 NFSM_DISSECT(tl, uint32_t *, 3467 2 * NFSX_UNSIGNED); 3468 } 3469 } 3470 NFSLOCKCLSTATE(); 3471 if (i == 0) { 3472 clp = nfscl_getclntsess(sessionid); 3473 if (clp == NULL) 3474 error = NFSERR_SERVERFAULT; 3475 } else 3476 error = NFSERR_SEQUENCEPOS; 3477 if (error == 0) 3478 error = nfsv4_seqsession(seqid, slotid, 3479 highslot, 3480 NFSMNT_MDSSESSION(clp->nfsc_nmp)-> 3481 nfsess_cbslots, &rep, 3482 NFSMNT_MDSSESSION(clp->nfsc_nmp)-> 3483 nfsess_backslots); 3484 NFSUNLOCKCLSTATE(); 3485 if (error == 0) { 3486 gotseq_ok = 1; 3487 if (rep != NULL) { 3488 NFSCL_DEBUG(4, "Got cbretry\n"); 3489 m_freem(nd->nd_mreq); 3490 nd->nd_mreq = rep; 3491 rep = NULL; 3492 goto out; 3493 } 3494 NFSM_BUILD(tl, uint32_t *, 3495 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED); 3496 bcopy(sessionid, tl, NFSX_V4SESSIONID); 3497 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3498 *tl++ = txdr_unsigned(seqid); 3499 *tl++ = txdr_unsigned(slotid); 3500 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1); 3501 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1); 3502 } 3503 break; 3504 default: 3505 if (i == 0 && minorvers == NFSV41_MINORVERSION) 3506 error = NFSERR_OPNOTINSESS; 3507 else { 3508 NFSCL_DEBUG(1, "unsupp callback %d\n", op); 3509 error = NFSERR_NOTSUPP; 3510 } 3511 break; 3512 }; 3513 if (error) { 3514 if (error == EBADRPC || error == NFSERR_BADXDR) { 3515 nd->nd_repstat = NFSERR_BADXDR; 3516 } else { 3517 nd->nd_repstat = error; 3518 } 3519 error = 0; 3520 } 3521 retops++; 3522 if (nd->nd_repstat) { 3523 *repp = nfscl_errmap(nd); 3524 break; 3525 } else 3526 *repp = 0; /* NFS4_OK */ 3527 } 3528 nfsmout: 3529 if (recallp != NULL) 3530 free(recallp, M_NFSLAYRECALL); 3531 if (error) { 3532 if (error == EBADRPC || error == NFSERR_BADXDR) 3533 nd->nd_repstat = NFSERR_BADXDR; 3534 else 3535 printf("nfsv4 comperr1=%d\n", error); 3536 } 3537 if (taglen == -1) { 3538 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 3539 *tl++ = 0; 3540 *tl = 0; 3541 } else { 3542 *retopsp = txdr_unsigned(retops); 3543 } 3544 *nd->nd_errp = nfscl_errmap(nd); 3545 out: 3546 if (gotseq_ok != 0) { 3547 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK); 3548 NFSLOCKCLSTATE(); 3549 clp = nfscl_getclntsess(sessionid); 3550 if (clp != NULL) { 3551 nfsv4_seqsess_cacherep(slotid, 3552 NFSMNT_MDSSESSION(clp->nfsc_nmp)->nfsess_cbslots, 3553 rep); 3554 NFSUNLOCKCLSTATE(); 3555 } else { 3556 NFSUNLOCKCLSTATE(); 3557 m_freem(rep); 3558 } 3559 } 3560 } 3561 3562 /* 3563 * Generate the next cbident value. Basically just increment a static value 3564 * and then check that it isn't already in the list, if it has wrapped around. 3565 */ 3566 static u_int32_t 3567 nfscl_nextcbident(void) 3568 { 3569 struct nfsclclient *clp; 3570 int matched; 3571 static u_int32_t nextcbident = 0; 3572 static int haswrapped = 0; 3573 3574 nextcbident++; 3575 if (nextcbident == 0) 3576 haswrapped = 1; 3577 if (haswrapped) { 3578 /* 3579 * Search the clientid list for one already using this cbident. 
3580 */ 3581 do { 3582 matched = 0; 3583 NFSLOCKCLSTATE(); 3584 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3585 if (clp->nfsc_cbident == nextcbident) { 3586 matched = 1; 3587 break; 3588 } 3589 } 3590 NFSUNLOCKCLSTATE(); 3591 if (matched == 1) 3592 nextcbident++; 3593 } while (matched); 3594 } 3595 return (nextcbident); 3596 } 3597 3598 /* 3599 * Get the mount point related to a given cbident or session and busy it. 3600 */ 3601 static mount_t 3602 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident, 3603 struct nfsclclient **clpp) 3604 { 3605 struct nfsclclient *clp; 3606 mount_t mp; 3607 int error; 3608 3609 *clpp = NULL; 3610 NFSLOCKCLSTATE(); 3611 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3612 if (minorvers == NFSV4_MINORVERSION) { 3613 if (clp->nfsc_cbident == cbident) 3614 break; 3615 } else if (!NFSBCMP(NFSMNT_MDSSESSION(clp->nfsc_nmp)-> 3616 nfsess_sessionid, sessionid, NFSX_V4SESSIONID)) 3617 break; 3618 } 3619 if (clp == NULL) { 3620 NFSUNLOCKCLSTATE(); 3621 return (NULL); 3622 } 3623 mp = clp->nfsc_nmp->nm_mountp; 3624 vfs_ref(mp); 3625 NFSUNLOCKCLSTATE(); 3626 error = vfs_busy(mp, 0); 3627 vfs_rel(mp); 3628 if (error != 0) 3629 return (NULL); 3630 *clpp = clp; 3631 return (mp); 3632 } 3633 3634 /* 3635 * Get the clientid pointer related to a given cbident. 3636 */ 3637 static struct nfsclclient * 3638 nfscl_getclnt(u_int32_t cbident) 3639 { 3640 struct nfsclclient *clp; 3641 3642 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 3643 if (clp->nfsc_cbident == cbident) 3644 break; 3645 return (clp); 3646 } 3647 3648 /* 3649 * Get the clientid pointer related to a given sessionid. 3650 */ 3651 static struct nfsclclient * 3652 nfscl_getclntsess(uint8_t *sessionid) 3653 { 3654 struct nfsclclient *clp; 3655 3656 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 3657 if (!NFSBCMP(NFSMNT_MDSSESSION(clp->nfsc_nmp)->nfsess_sessionid, 3658 sessionid, NFSX_V4SESSIONID)) 3659 break; 3660 return (clp); 3661 } 3662 3663 /* 3664 * Search for a lock conflict locally on the client. A conflict occurs if 3665 * - not same owner and overlapping byte range and at least one of them is 3666 * a write lock or this is an unlock. 
3667 */ 3668 static int 3669 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen, 3670 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp, 3671 struct nfscllock **lopp) 3672 { 3673 struct nfsclowner *owp; 3674 struct nfsclopen *op; 3675 int ret; 3676 3677 if (dp != NULL) { 3678 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); 3679 if (ret) 3680 return (ret); 3681 } 3682 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3683 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3684 if (op->nfso_fhlen == fhlen && 3685 !NFSBCMP(op->nfso_fh, fhp, fhlen)) { 3686 ret = nfscl_checkconflict(&op->nfso_lock, nlop, 3687 own, lopp); 3688 if (ret) 3689 return (ret); 3690 } 3691 } 3692 } 3693 return (0); 3694 } 3695 3696 static int 3697 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, 3698 u_int8_t *own, struct nfscllock **lopp) 3699 { 3700 struct nfscllockowner *lp; 3701 struct nfscllock *lop; 3702 3703 LIST_FOREACH(lp, lhp, nfsl_list) { 3704 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { 3705 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3706 if (lop->nfslo_first >= nlop->nfslo_end) 3707 break; 3708 if (lop->nfslo_end <= nlop->nfslo_first) 3709 continue; 3710 if (lop->nfslo_type == F_WRLCK || 3711 nlop->nfslo_type == F_WRLCK || 3712 nlop->nfslo_type == F_UNLCK) { 3713 if (lopp != NULL) 3714 *lopp = lop; 3715 return (NFSERR_DENIED); 3716 } 3717 } 3718 } 3719 } 3720 return (0); 3721 } 3722 3723 /* 3724 * Check for a local conflicting lock. 3725 */ 3726 APPLESTATIC int 3727 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, 3728 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags) 3729 { 3730 struct nfscllock *lop, nlck; 3731 struct nfscldeleg *dp; 3732 struct nfsnode *np; 3733 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 3734 int error; 3735 3736 nlck.nfslo_type = fl->l_type; 3737 nlck.nfslo_first = off; 3738 if (len == NFS64BITSSET) { 3739 nlck.nfslo_end = NFS64BITSSET; 3740 } else { 3741 nlck.nfslo_end = off + len; 3742 if (nlck.nfslo_end <= nlck.nfslo_first) 3743 return (NFSERR_INVAL); 3744 } 3745 np = VTONFS(vp); 3746 nfscl_filllockowner(id, own, flags); 3747 NFSLOCKCLSTATE(); 3748 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3749 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 3750 &nlck, own, dp, &lop); 3751 if (error != 0) { 3752 fl->l_whence = SEEK_SET; 3753 fl->l_start = lop->nfslo_first; 3754 if (lop->nfslo_end == NFS64BITSSET) 3755 fl->l_len = 0; 3756 else 3757 fl->l_len = lop->nfslo_end - lop->nfslo_first; 3758 fl->l_pid = (pid_t)0; 3759 fl->l_type = lop->nfslo_type; 3760 error = -1; /* no RPC required */ 3761 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || 3762 fl->l_type == F_RDLCK)) { 3763 /* 3764 * The delegation ensures that there isn't a conflicting 3765 * lock on the server, so return -1 to indicate an RPC 3766 * isn't required. 3767 */ 3768 fl->l_type = F_UNLCK; 3769 error = -1; 3770 } 3771 NFSUNLOCKCLSTATE(); 3772 return (error); 3773 } 3774 3775 /* 3776 * Handle Recall of a delegation. 3777 * The clp must be exclusive locked when this is called. 
3778 */ 3779 static int 3780 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, 3781 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p, 3782 int called_from_renewthread) 3783 { 3784 struct nfsclowner *owp, *lowp, *nowp; 3785 struct nfsclopen *op, *lop; 3786 struct nfscllockowner *lp; 3787 struct nfscllock *lckp; 3788 struct nfsnode *np; 3789 int error = 0, ret, gotvp = 0; 3790 3791 if (vp == NULL) { 3792 /* 3793 * First, get a vnode for the file. This is needed to do RPCs. 3794 */ 3795 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, 3796 dp->nfsdl_fhlen, p, &np); 3797 if (ret) { 3798 /* 3799 * File isn't open, so nothing to move over to the 3800 * server. 3801 */ 3802 return (0); 3803 } 3804 vp = NFSTOV(np); 3805 gotvp = 1; 3806 } else { 3807 np = VTONFS(vp); 3808 } 3809 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; 3810 3811 /* 3812 * Ok, if it's a write delegation, flush data to the server, so 3813 * that close/open consistency is retained. 3814 */ 3815 ret = 0; 3816 NFSLOCKNODE(np); 3817 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { 3818 np->n_flag |= NDELEGRECALL; 3819 NFSUNLOCKNODE(np); 3820 ret = ncl_flush(vp, MNT_WAIT, cred, p, 1, 3821 called_from_renewthread); 3822 NFSLOCKNODE(np); 3823 np->n_flag &= ~NDELEGRECALL; 3824 } 3825 NFSINVALATTRCACHE(np); 3826 NFSUNLOCKNODE(np); 3827 if (ret == EIO && called_from_renewthread != 0) { 3828 /* 3829 * If the flush failed with EIO for the renew thread, 3830 * return now, so that the dirty buffer will be flushed 3831 * later. 3832 */ 3833 if (gotvp != 0) 3834 vrele(vp); 3835 return (ret); 3836 } 3837 3838 /* 3839 * Now, for each openowner with opens issued locally, move them 3840 * over to state against the server. 3841 */ 3842 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) { 3843 lop = LIST_FIRST(&lowp->nfsow_open); 3844 if (lop != NULL) { 3845 if (LIST_NEXT(lop, nfso_list) != NULL) 3846 panic("nfsdlg mult opens"); 3847 /* 3848 * Look for the same openowner against the server. 3849 */ 3850 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3851 if (!NFSBCMP(lowp->nfsow_owner, 3852 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 3853 newnfs_copycred(&dp->nfsdl_cred, cred); 3854 ret = nfscl_moveopen(vp, clp, nmp, lop, 3855 owp, dp, cred, p); 3856 if (ret == NFSERR_STALECLIENTID || 3857 ret == NFSERR_STALEDONTRECOVER || 3858 ret == NFSERR_BADSESSION) { 3859 if (gotvp) 3860 vrele(vp); 3861 return (ret); 3862 } 3863 if (ret) { 3864 nfscl_freeopen(lop, 1); 3865 if (!error) 3866 error = ret; 3867 } 3868 break; 3869 } 3870 } 3871 3872 /* 3873 * If no openowner found, create one and get an open 3874 * for it. 3875 */ 3876 if (owp == NULL) { 3877 MALLOC(nowp, struct nfsclowner *, 3878 sizeof (struct nfsclowner), M_NFSCLOWNER, 3879 M_WAITOK); 3880 nfscl_newopen(clp, NULL, &owp, &nowp, &op, 3881 NULL, lowp->nfsow_owner, dp->nfsdl_fh, 3882 dp->nfsdl_fhlen, NULL); 3883 newnfs_copycred(&dp->nfsdl_cred, cred); 3884 ret = nfscl_moveopen(vp, clp, nmp, lop, 3885 owp, dp, cred, p); 3886 if (ret) { 3887 nfscl_freeopenowner(owp, 0); 3888 if (ret == NFSERR_STALECLIENTID || 3889 ret == NFSERR_STALEDONTRECOVER || 3890 ret == NFSERR_BADSESSION) { 3891 if (gotvp) 3892 vrele(vp); 3893 return (ret); 3894 } 3895 if (ret) { 3896 nfscl_freeopen(lop, 1); 3897 if (!error) 3898 error = ret; 3899 } 3900 } 3901 } 3902 } 3903 } 3904 3905 /* 3906 * Now, get byte range locks for any locks done locally. 
3907 */ 3908 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 3909 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) { 3910 newnfs_copycred(&dp->nfsdl_cred, cred); 3911 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p); 3912 if (ret == NFSERR_STALESTATEID || 3913 ret == NFSERR_STALEDONTRECOVER || 3914 ret == NFSERR_STALECLIENTID || 3915 ret == NFSERR_BADSESSION) { 3916 if (gotvp) 3917 vrele(vp); 3918 return (ret); 3919 } 3920 if (ret && !error) 3921 error = ret; 3922 } 3923 } 3924 if (gotvp) 3925 vrele(vp); 3926 return (error); 3927 } 3928 3929 /* 3930 * Move a locally issued open over to an owner on the state list. 3931 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and 3932 * returns with it unlocked. 3933 */ 3934 static int 3935 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 3936 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp, 3937 struct ucred *cred, NFSPROC_T *p) 3938 { 3939 struct nfsclopen *op, *nop; 3940 struct nfscldeleg *ndp; 3941 struct nfsnode *np; 3942 int error = 0, newone; 3943 3944 /* 3945 * First, look for an appropriate open. If found, just increment the 3946 * opencnt in it. 3947 */ 3948 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3949 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode && 3950 op->nfso_fhlen == lop->nfso_fhlen && 3951 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) { 3952 op->nfso_opencnt += lop->nfso_opencnt; 3953 nfscl_freeopen(lop, 1); 3954 return (0); 3955 } 3956 } 3957 3958 /* No appropriate open, so we have to do one against the server. */ 3959 np = VTONFS(vp); 3960 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) + 3961 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK); 3962 newone = 0; 3963 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner, 3964 lop->nfso_fh, lop->nfso_fhlen, &newone); 3965 ndp = dp; 3966 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen, 3967 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op, 3968 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p); 3969 if (error) { 3970 if (newone) 3971 nfscl_freeopen(op, 0); 3972 } else { 3973 if (newone) 3974 newnfs_copyincred(cred, &op->nfso_cred); 3975 op->nfso_mode |= lop->nfso_mode; 3976 op->nfso_opencnt += lop->nfso_opencnt; 3977 nfscl_freeopen(lop, 1); 3978 } 3979 if (nop != NULL) 3980 FREE((caddr_t)nop, M_NFSCLOPEN); 3981 if (ndp != NULL) { 3982 /* 3983 * What should I do with the returned delegation, since the 3984 * delegation is being recalled? For now, just printf and 3985 * throw it away. 3986 */ 3987 printf("Moveopen returned deleg\n"); 3988 FREE((caddr_t)ndp, M_NFSCLDELEG); 3989 } 3990 return (error); 3991 } 3992 3993 /* 3994 * Recall all delegations on this client. 3995 */ 3996 static void 3997 nfscl_totalrecall(struct nfsclclient *clp) 3998 { 3999 struct nfscldeleg *dp; 4000 4001 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 4002 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) 4003 dp->nfsdl_flags |= NFSCLDL_RECALL; 4004 } 4005 } 4006 4007 /* 4008 * Relock byte ranges. Called for delegation recall and state expiry.
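 * For example, a local lock covering the half-open range [nfslo_first, nfslo_end) is re-issued with offset nfslo_first and length (nfslo_end - nfslo_first).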
4009 */ 4010 static int 4011 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 4012 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred, 4013 NFSPROC_T *p) 4014 { 4015 struct nfscllockowner *nlp; 4016 struct nfsfh *nfhp; 4017 u_int64_t off, len; 4018 u_int32_t clidrev = 0; 4019 int error, newone, donelocally; 4020 4021 off = lop->nfslo_first; 4022 len = lop->nfslo_end - lop->nfslo_first; 4023 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p, 4024 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner, 4025 lp->nfsl_openowner, &nlp, &newone, &donelocally); 4026 if (error || donelocally) 4027 return (error); 4028 if (nmp->nm_clp != NULL) 4029 clidrev = nmp->nm_clp->nfsc_clientidrev; 4030 else 4031 clidrev = 0; 4032 nfhp = VTONFS(vp)->n_fhp; 4033 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh, 4034 nfhp->nfh_len, nlp, newone, 0, off, 4035 len, lop->nfslo_type, cred, p); 4036 if (error) 4037 nfscl_freelockowner(nlp, 0); 4038 return (error); 4039 } 4040 4041 /* 4042 * Called to re-open a file. Basically get a vnode for the file handle 4043 * and then call nfsrpc_openrpc() to do the rest. 4044 */ 4045 static int 4046 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, 4047 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp, 4048 struct ucred *cred, NFSPROC_T *p) 4049 { 4050 struct nfsnode *np; 4051 vnode_t vp; 4052 int error; 4053 4054 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np); 4055 if (error) 4056 return (error); 4057 vp = NFSTOV(np); 4058 if (np->n_v4 != NULL) { 4059 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, 4060 np->n_v4->n4_fhlen, fhp, fhlen, mode, op, 4061 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0, 4062 cred, p); 4063 } else { 4064 error = EINVAL; 4065 } 4066 vrele(vp); 4067 return (error); 4068 } 4069 4070 /* 4071 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while 4072 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials 4073 * fail. 4074 */ 4075 static int 4076 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4077 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, 4078 u_int8_t *name, int namelen, struct nfscldeleg **ndpp, 4079 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) 4080 { 4081 int error; 4082 4083 do { 4084 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, 4085 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 4086 0, 0); 4087 if (error == NFSERR_DELAY) 4088 (void) nfs_catnap(PZERO, error, "nfstryop"); 4089 } while (error == NFSERR_DELAY); 4090 if (error == EAUTH || error == EACCES) { 4091 /* Try again using system credentials */ 4092 newnfs_setroot(cred); 4093 do { 4094 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, 4095 newfhlen, mode, op, name, namelen, ndpp, reclaim, 4096 delegtype, cred, p, 1, 0); 4097 if (error == NFSERR_DELAY) 4098 (void) nfs_catnap(PZERO, error, "nfstryop"); 4099 } while (error == NFSERR_DELAY); 4100 } 4101 return (error); 4102 } 4103 4104 /* 4105 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns 4106 * NFSERR_DELAY. Also, retry with system credentials, if the provided 4107 * credentials don't work.
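 * (The same pattern, doing the RPC, napping via nfs_catnap() on NFSERR_DELAY and retrying, then falling back to system credentials on EAUTH/EACCES, is used by nfscl_tryopen() above and by nfscl_trydelegreturn() and nfscl_tryclose() below.)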
4108 */ 4109 static int 4110 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, 4111 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim, 4112 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p) 4113 { 4114 struct nfsrv_descript nfsd, *nd = &nfsd; 4115 int error; 4116 4117 do { 4118 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, 4119 reclaim, off, len, type, cred, p, 0); 4120 if (!error && nd->nd_repstat == NFSERR_DELAY) 4121 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 4122 "nfstrylck"); 4123 } while (!error && nd->nd_repstat == NFSERR_DELAY); 4124 if (!error) 4125 error = nd->nd_repstat; 4126 if (error == EAUTH || error == EACCES) { 4127 /* Try again using root credentials */ 4128 newnfs_setroot(cred); 4129 do { 4130 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, 4131 newone, reclaim, off, len, type, cred, p, 1); 4132 if (!error && nd->nd_repstat == NFSERR_DELAY) 4133 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 4134 "nfstrylck"); 4135 } while (!error && nd->nd_repstat == NFSERR_DELAY); 4136 if (!error) 4137 error = nd->nd_repstat; 4138 } 4139 return (error); 4140 } 4141 4142 /* 4143 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(), 4144 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 4145 * credentials fail. 4146 */ 4147 static int 4148 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred, 4149 struct nfsmount *nmp, NFSPROC_T *p) 4150 { 4151 int error; 4152 4153 do { 4154 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0); 4155 if (error == NFSERR_DELAY) 4156 (void) nfs_catnap(PZERO, error, "nfstrydp"); 4157 } while (error == NFSERR_DELAY); 4158 if (error == EAUTH || error == EACCES) { 4159 /* Try again using system credentials */ 4160 newnfs_setroot(cred); 4161 do { 4162 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1); 4163 if (error == NFSERR_DELAY) 4164 (void) nfs_catnap(PZERO, error, "nfstrydp"); 4165 } while (error == NFSERR_DELAY); 4166 } 4167 return (error); 4168 } 4169 4170 /* 4171 * Try a close against the server. Just call nfsrpc_closerpc(), 4172 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 4173 * credentials fail. 4174 */ 4175 APPLESTATIC int 4176 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred, 4177 struct nfsmount *nmp, NFSPROC_T *p) 4178 { 4179 struct nfsrv_descript nfsd, *nd = &nfsd; 4180 int error; 4181 4182 do { 4183 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0); 4184 if (error == NFSERR_DELAY) 4185 (void) nfs_catnap(PZERO, error, "nfstrycl"); 4186 } while (error == NFSERR_DELAY); 4187 if (error == EAUTH || error == EACCES) { 4188 /* Try again using system credentials */ 4189 newnfs_setroot(cred); 4190 do { 4191 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1); 4192 if (error == NFSERR_DELAY) 4193 (void) nfs_catnap(PZERO, error, "nfstrycl"); 4194 } while (error == NFSERR_DELAY); 4195 } 4196 return (error); 4197 } 4198 4199 /* 4200 * Decide if a delegation on a file permits close without flushing writes 4201 * to the server. This might be a big performance win in some environments. 4202 * (Not useful until the client does caching on local stable storage.)
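 * The check below returns 0 (no flush needed) only when a write delegation is held that is not being recalled or returned and, for mounts that require strict RFC 3530 conformance, the file size is within the delegation's nfsdl_sizelimit.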
4203 */ 4204 APPLESTATIC int 4205 nfscl_mustflush(vnode_t vp) 4206 { 4207 struct nfsclclient *clp; 4208 struct nfscldeleg *dp; 4209 struct nfsnode *np; 4210 struct nfsmount *nmp; 4211 4212 np = VTONFS(vp); 4213 nmp = VFSTONFS(vnode_mount(vp)); 4214 if (!NFSHASNFSV4(nmp)) 4215 return (1); 4216 NFSLOCKCLSTATE(); 4217 clp = nfscl_findcl(nmp); 4218 if (clp == NULL) { 4219 NFSUNLOCKCLSTATE(); 4220 return (1); 4221 } 4222 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4223 if (dp != NULL && (dp->nfsdl_flags & 4224 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 4225 NFSCLDL_WRITE && 4226 (dp->nfsdl_sizelimit >= np->n_size || 4227 !NFSHASSTRICT3530(nmp))) { 4228 NFSUNLOCKCLSTATE(); 4229 return (0); 4230 } 4231 NFSUNLOCKCLSTATE(); 4232 return (1); 4233 } 4234 4235 /* 4236 * See if a (write) delegation exists for this file. 4237 */ 4238 APPLESTATIC int 4239 nfscl_nodeleg(vnode_t vp, int writedeleg) 4240 { 4241 struct nfsclclient *clp; 4242 struct nfscldeleg *dp; 4243 struct nfsnode *np; 4244 struct nfsmount *nmp; 4245 4246 np = VTONFS(vp); 4247 nmp = VFSTONFS(vnode_mount(vp)); 4248 if (!NFSHASNFSV4(nmp)) 4249 return (1); 4250 NFSLOCKCLSTATE(); 4251 clp = nfscl_findcl(nmp); 4252 if (clp == NULL) { 4253 NFSUNLOCKCLSTATE(); 4254 return (1); 4255 } 4256 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4257 if (dp != NULL && 4258 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 && 4259 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) == 4260 NFSCLDL_WRITE)) { 4261 NFSUNLOCKCLSTATE(); 4262 return (0); 4263 } 4264 NFSUNLOCKCLSTATE(); 4265 return (1); 4266 } 4267 4268 /* 4269 * Look for an associated delegation that should be DelegReturned. 4270 */ 4271 APPLESTATIC int 4272 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp) 4273 { 4274 struct nfsclclient *clp; 4275 struct nfscldeleg *dp; 4276 struct nfsclowner *owp; 4277 struct nfscllockowner *lp; 4278 struct nfsmount *nmp; 4279 struct ucred *cred; 4280 struct nfsnode *np; 4281 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4282 4283 nmp = VFSTONFS(vnode_mount(vp)); 4284 np = VTONFS(vp); 4285 NFSLOCKCLSTATE(); 4286 /* 4287 * Loop around waiting for: 4288 * - outstanding I/O operations on delegations to complete 4289 * - for a delegation on vp that has state, lock the client and 4290 * do a recall 4291 * - return delegation with no state 4292 */ 4293 while (1) { 4294 clp = nfscl_findcl(nmp); 4295 if (clp == NULL) { 4296 NFSUNLOCKCLSTATE(); 4297 return (retcnt); 4298 } 4299 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4300 np->n_fhp->nfh_len); 4301 if (dp != NULL) { 4302 /* 4303 * Wait for outstanding I/O ops to be done. 
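 * (nfslock_usecnt counts I/O operations still in progress on the delegation; sleep until it drains to zero before proceeding.)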
4304 */ 4305 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4306 if (igotlock) { 4307 nfsv4_unlock(&clp->nfsc_lock, 0); 4308 igotlock = 0; 4309 } 4310 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4311 (void) nfsmsleep(&dp->nfsdl_rwlock, 4312 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4313 continue; 4314 } 4315 needsrecall = 0; 4316 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4317 if (!LIST_EMPTY(&owp->nfsow_open)) { 4318 needsrecall = 1; 4319 break; 4320 } 4321 } 4322 if (!needsrecall) { 4323 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4324 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4325 needsrecall = 1; 4326 break; 4327 } 4328 } 4329 } 4330 if (needsrecall && !triedrecall) { 4331 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4332 islept = 0; 4333 while (!igotlock) { 4334 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4335 &islept, NFSCLSTATEMUTEXPTR, NULL); 4336 if (islept) 4337 break; 4338 } 4339 if (islept) 4340 continue; 4341 NFSUNLOCKCLSTATE(); 4342 cred = newnfs_getcred(); 4343 newnfs_copycred(&dp->nfsdl_cred, cred); 4344 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0); 4345 NFSFREECRED(cred); 4346 triedrecall = 1; 4347 NFSLOCKCLSTATE(); 4348 nfsv4_unlock(&clp->nfsc_lock, 0); 4349 igotlock = 0; 4350 continue; 4351 } 4352 *stp = dp->nfsdl_stateid; 4353 retcnt = 1; 4354 nfscl_cleandeleg(dp); 4355 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4356 } 4357 if (igotlock) 4358 nfsv4_unlock(&clp->nfsc_lock, 0); 4359 NFSUNLOCKCLSTATE(); 4360 return (retcnt); 4361 } 4362 } 4363 4364 /* 4365 * Look for associated delegation(s) that should be DelegReturned. 4366 */ 4367 APPLESTATIC int 4368 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp, 4369 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p) 4370 { 4371 struct nfsclclient *clp; 4372 struct nfscldeleg *dp; 4373 struct nfsclowner *owp; 4374 struct nfscllockowner *lp; 4375 struct nfsmount *nmp; 4376 struct ucred *cred; 4377 struct nfsnode *np; 4378 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4379 4380 nmp = VFSTONFS(vnode_mount(fvp)); 4381 *gotfdp = 0; 4382 *gottdp = 0; 4383 NFSLOCKCLSTATE(); 4384 /* 4385 * Loop around waiting for: 4386 * - outstanding I/O operations on delegations to complete 4387 * - for a delegation on fvp that has state, lock the client and 4388 * do a recall 4389 * - return delegation(s) with no state. 4390 */ 4391 while (1) { 4392 clp = nfscl_findcl(nmp); 4393 if (clp == NULL) { 4394 NFSUNLOCKCLSTATE(); 4395 return (retcnt); 4396 } 4397 np = VTONFS(fvp); 4398 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4399 np->n_fhp->nfh_len); 4400 if (dp != NULL && *gotfdp == 0) { 4401 /* 4402 * Wait for outstanding I/O ops to be done. 
4403 */ 4404 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4405 if (igotlock) { 4406 nfsv4_unlock(&clp->nfsc_lock, 0); 4407 igotlock = 0; 4408 } 4409 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4410 (void) nfsmsleep(&dp->nfsdl_rwlock, 4411 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4412 continue; 4413 } 4414 needsrecall = 0; 4415 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4416 if (!LIST_EMPTY(&owp->nfsow_open)) { 4417 needsrecall = 1; 4418 break; 4419 } 4420 } 4421 if (!needsrecall) { 4422 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4423 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4424 needsrecall = 1; 4425 break; 4426 } 4427 } 4428 } 4429 if (needsrecall && !triedrecall) { 4430 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4431 islept = 0; 4432 while (!igotlock) { 4433 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4434 &islept, NFSCLSTATEMUTEXPTR, NULL); 4435 if (islept) 4436 break; 4437 } 4438 if (islept) 4439 continue; 4440 NFSUNLOCKCLSTATE(); 4441 cred = newnfs_getcred(); 4442 newnfs_copycred(&dp->nfsdl_cred, cred); 4443 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0); 4444 NFSFREECRED(cred); 4445 triedrecall = 1; 4446 NFSLOCKCLSTATE(); 4447 nfsv4_unlock(&clp->nfsc_lock, 0); 4448 igotlock = 0; 4449 continue; 4450 } 4451 *fstp = dp->nfsdl_stateid; 4452 retcnt++; 4453 *gotfdp = 1; 4454 nfscl_cleandeleg(dp); 4455 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4456 } 4457 if (igotlock) { 4458 nfsv4_unlock(&clp->nfsc_lock, 0); 4459 igotlock = 0; 4460 } 4461 if (tvp != NULL) { 4462 np = VTONFS(tvp); 4463 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4464 np->n_fhp->nfh_len); 4465 if (dp != NULL && *gottdp == 0) { 4466 /* 4467 * Wait for outstanding I/O ops to be done. 4468 */ 4469 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4470 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4471 (void) nfsmsleep(&dp->nfsdl_rwlock, 4472 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4473 continue; 4474 } 4475 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4476 if (!LIST_EMPTY(&owp->nfsow_open)) { 4477 NFSUNLOCKCLSTATE(); 4478 return (retcnt); 4479 } 4480 } 4481 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4482 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4483 NFSUNLOCKCLSTATE(); 4484 return (retcnt); 4485 } 4486 } 4487 *tstp = dp->nfsdl_stateid; 4488 retcnt++; 4489 *gottdp = 1; 4490 nfscl_cleandeleg(dp); 4491 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4492 } 4493 } 4494 NFSUNLOCKCLSTATE(); 4495 return (retcnt); 4496 } 4497 } 4498 4499 /* 4500 * Get a reference on the clientid associated with the mount point. 4501 * Return 1 if success, 0 otherwise. 4502 */ 4503 APPLESTATIC int 4504 nfscl_getref(struct nfsmount *nmp) 4505 { 4506 struct nfsclclient *clp; 4507 4508 NFSLOCKCLSTATE(); 4509 clp = nfscl_findcl(nmp); 4510 if (clp == NULL) { 4511 NFSUNLOCKCLSTATE(); 4512 return (0); 4513 } 4514 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL); 4515 NFSUNLOCKCLSTATE(); 4516 return (1); 4517 } 4518 4519 /* 4520 * Release a reference on a clientid acquired with the above call. 4521 */ 4522 APPLESTATIC void 4523 nfscl_relref(struct nfsmount *nmp) 4524 { 4525 struct nfsclclient *clp; 4526 4527 NFSLOCKCLSTATE(); 4528 clp = nfscl_findcl(nmp); 4529 if (clp == NULL) { 4530 NFSUNLOCKCLSTATE(); 4531 return; 4532 } 4533 nfsv4_relref(&clp->nfsc_lock); 4534 NFSUNLOCKCLSTATE(); 4535 } 4536 4537 /* 4538 * Save the size attribute in the delegation, since the nfsnode 4539 * is going away. 
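 * The saved size is handed back to a newly allocated nfsnode by nfscl_newnode() below.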
4540 */ 4541 APPLESTATIC void 4542 nfscl_reclaimnode(vnode_t vp) 4543 { 4544 struct nfsclclient *clp; 4545 struct nfscldeleg *dp; 4546 struct nfsnode *np = VTONFS(vp); 4547 struct nfsmount *nmp; 4548 4549 nmp = VFSTONFS(vnode_mount(vp)); 4550 if (!NFSHASNFSV4(nmp)) 4551 return; 4552 NFSLOCKCLSTATE(); 4553 clp = nfscl_findcl(nmp); 4554 if (clp == NULL) { 4555 NFSUNLOCKCLSTATE(); 4556 return; 4557 } 4558 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4559 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 4560 dp->nfsdl_size = np->n_size; 4561 NFSUNLOCKCLSTATE(); 4562 } 4563 4564 /* 4565 * Restore the size attribute saved in the delegation, since this is a 4566 * newly allocated nfsnode. 4567 */ 4568 APPLESTATIC void 4569 nfscl_newnode(vnode_t vp) 4570 { 4571 struct nfsclclient *clp; 4572 struct nfscldeleg *dp; 4573 struct nfsnode *np = VTONFS(vp); 4574 struct nfsmount *nmp; 4575 4576 nmp = VFSTONFS(vnode_mount(vp)); 4577 if (!NFSHASNFSV4(nmp)) 4578 return; 4579 NFSLOCKCLSTATE(); 4580 clp = nfscl_findcl(nmp); 4581 if (clp == NULL) { 4582 NFSUNLOCKCLSTATE(); 4583 return; 4584 } 4585 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4586 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 4587 np->n_size = dp->nfsdl_size; 4588 NFSUNLOCKCLSTATE(); 4589 } 4590 4591 /* 4592 * If there is a valid write delegation for this file, set the modtime 4593 * to the local clock time. 4594 */ 4595 APPLESTATIC void 4596 nfscl_delegmodtime(vnode_t vp) 4597 { 4598 struct nfsclclient *clp; 4599 struct nfscldeleg *dp; 4600 struct nfsnode *np = VTONFS(vp); 4601 struct nfsmount *nmp; 4602 4603 nmp = VFSTONFS(vnode_mount(vp)); 4604 if (!NFSHASNFSV4(nmp)) 4605 return; 4606 NFSLOCKCLSTATE(); 4607 clp = nfscl_findcl(nmp); 4608 if (clp == NULL) { 4609 NFSUNLOCKCLSTATE(); 4610 return; 4611 } 4612 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4613 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) { 4614 NFSGETNANOTIME(&dp->nfsdl_modtime); 4615 dp->nfsdl_flags |= NFSCLDL_MODTIMESET; 4616 } 4617 NFSUNLOCKCLSTATE(); 4618 } 4619 4620 /* 4621 * If there is a valid write delegation for this file with a modtime set, 4622 * put that modtime in mtime.
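 * (The modtime is set by nfscl_delegmodtime() above and is only valid while NFSCLDL_MODTIMESET is set in nfsdl_flags.)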
4623 */ 4624 APPLESTATIC void 4625 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime) 4626 { 4627 struct nfsclclient *clp; 4628 struct nfscldeleg *dp; 4629 struct nfsnode *np = VTONFS(vp); 4630 struct nfsmount *nmp; 4631 4632 nmp = VFSTONFS(vnode_mount(vp)); 4633 if (!NFSHASNFSV4(nmp)) 4634 return; 4635 NFSLOCKCLSTATE(); 4636 clp = nfscl_findcl(nmp); 4637 if (clp == NULL) { 4638 NFSUNLOCKCLSTATE(); 4639 return; 4640 } 4641 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4642 if (dp != NULL && 4643 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) == 4644 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) 4645 *mtime = dp->nfsdl_modtime; 4646 NFSUNLOCKCLSTATE(); 4647 } 4648 4649 static int 4650 nfscl_errmap(struct nfsrv_descript *nd) 4651 { 4652 short *defaulterrp, *errp; 4653 4654 if (!nd->nd_repstat) 4655 return (0); 4656 if (nd->nd_procnum == NFSPROC_NOOP) 4657 return (txdr_unsigned(nd->nd_repstat & 0xffff)); 4658 if (nd->nd_repstat == EBADRPC) 4659 return (txdr_unsigned(NFSERR_BADXDR)); 4660 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH || 4661 nd->nd_repstat == NFSERR_OPILLEGAL) 4662 return (txdr_unsigned(nd->nd_repstat)); 4663 if (nd->nd_procnum < NFSV4OP_CBNOPS) 4664 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum]; 4665 else 4666 return (txdr_unsigned(nd->nd_repstat)); 4667 while (*++errp) 4668 if (*errp == (short)nd->nd_repstat) 4669 return (txdr_unsigned(nd->nd_repstat)); 4670 return (txdr_unsigned(*defaulterrp)); 4671 } 4672 4673 /* 4674 * Called to find/add a layout to a client. 4675 * This function returns the layout with a refcnt (shared lock) upon 4676 * success (returns 0) or with no lock/refcnt on the layout when an 4677 * error is returned. 4678 * If a layout is passed in via lypp, it is exclusively locked. 4679 */ 4680 APPLESTATIC int 4681 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4682 nfsv4stateid_t *stateidp, int retonclose, 4683 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp, 4684 struct ucred *cred, NFSPROC_T *p) 4685 { 4686 struct nfsclclient *clp; 4687 struct nfscllayout *lyp, *tlyp; 4688 struct nfsclflayout *flp; 4689 struct nfsnode *np = VTONFS(vp); 4690 mount_t mp; 4691 int layout_passed_in; 4692 4693 mp = nmp->nm_mountp; 4694 layout_passed_in = 1; 4695 tlyp = NULL; 4696 lyp = *lypp; 4697 if (lyp == NULL) { 4698 layout_passed_in = 0; 4699 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT, 4700 M_WAITOK | M_ZERO); 4701 } 4702 4703 NFSLOCKCLSTATE(); 4704 clp = nmp->nm_clp; 4705 if (clp == NULL) { 4706 if (layout_passed_in != 0) 4707 nfsv4_unlock(&lyp->nfsly_lock, 0); 4708 NFSUNLOCKCLSTATE(); 4709 if (tlyp != NULL) 4710 free(tlyp, M_NFSLAYOUT); 4711 return (EPERM); 4712 } 4713 if (lyp == NULL) { 4714 /* 4715 * Although no lyp was passed in, another thread might have 4716 * allocated one. If one is found, just increment its ref 4717 * count and return it. 4718 */ 4719 lyp = nfscl_findlayout(clp, fhp, fhlen); 4720 if (lyp == NULL) { 4721 lyp = tlyp; 4722 tlyp = NULL; 4723 lyp->nfsly_stateid.seqid = stateidp->seqid; 4724 lyp->nfsly_stateid.other[0] = stateidp->other[0]; 4725 lyp->nfsly_stateid.other[1] = stateidp->other[1]; 4726 lyp->nfsly_stateid.other[2] = stateidp->other[2]; 4727 lyp->nfsly_lastbyte = 0; 4728 LIST_INIT(&lyp->nfsly_flayread); 4729 LIST_INIT(&lyp->nfsly_flayrw); 4730 LIST_INIT(&lyp->nfsly_recall); 4731 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0]; 4732 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1]; 4733 lyp->nfsly_clp = clp; 4734 lyp->nfsly_flags = (retonclose != 0) ?
4735 (NFSLY_FILES | NFSLY_RETONCLOSE) : NFSLY_FILES; 4736 lyp->nfsly_fhlen = fhlen; 4737 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen); 4738 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4739 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp, 4740 nfsly_hash); 4741 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4742 nfscl_layoutcnt++; 4743 } else { 4744 if (retonclose != 0) 4745 lyp->nfsly_flags |= NFSLY_RETONCLOSE; 4746 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); 4747 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4748 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4749 } 4750 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 4751 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 4752 NFSUNLOCKCLSTATE(); 4753 if (tlyp != NULL) 4754 free(tlyp, M_NFSLAYOUT); 4755 return (EPERM); 4756 } 4757 *lypp = lyp; 4758 } else 4759 lyp->nfsly_stateid.seqid = stateidp->seqid; 4760 4761 /* Merge the new list of File Layouts into the list. */ 4762 flp = LIST_FIRST(fhlp); 4763 if (flp != NULL) { 4764 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ) 4765 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp); 4766 else 4767 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp); 4768 } 4769 if (layout_passed_in != 0) 4770 nfsv4_unlock(&lyp->nfsly_lock, 1); 4771 NFSUNLOCKCLSTATE(); 4772 if (tlyp != NULL) 4773 free(tlyp, M_NFSLAYOUT); 4774 return (0); 4775 } 4776 4777 /* 4778 * Search for a layout by MDS file handle. 4779 * If one is found, it is returned with a refcnt (shared lock) when 4780 * retflpp is returned non-NULL, or exclusively locked when retflpp is 4781 * returned NULL. 4782 */ 4783 struct nfscllayout * 4784 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen, 4785 uint64_t off, struct nfsclflayout **retflpp, int *recalledp) 4786 { 4787 struct nfscllayout *lyp; 4788 mount_t mp; 4789 int error, igotlock; 4790 4791 mp = clp->nfsc_nmp->nm_mountp; 4792 *recalledp = 0; 4793 *retflpp = NULL; 4794 NFSLOCKCLSTATE(); 4795 lyp = nfscl_findlayout(clp, fhp, fhlen); 4796 if (lyp != NULL) { 4797 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { 4798 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); 4799 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4800 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4801 error = nfscl_findlayoutforio(lyp, off, 4802 NFSV4OPEN_ACCESSREAD, retflpp); 4803 if (error == 0) 4804 nfsv4_getref(&lyp->nfsly_lock, NULL, 4805 NFSCLSTATEMUTEXPTR, mp); 4806 else { 4807 do { 4808 igotlock = nfsv4_lock(&lyp->nfsly_lock, 4809 1, NULL, NFSCLSTATEMUTEXPTR, mp); 4810 } while (igotlock == 0 && 4811 (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0); 4812 *retflpp = NULL; 4813 } 4814 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 4815 lyp = NULL; 4816 *recalledp = 1; 4817 } 4818 } else { 4819 lyp = NULL; 4820 *recalledp = 1; 4821 } 4822 } 4823 NFSUNLOCKCLSTATE(); 4824 return (lyp); 4825 } 4826 4827 /* 4828 * Search for a layout by MDS file handle. If one is found that is marked 4829 * "return on close", delete it, since it should now be forgotten. 4830 */ 4831 static void 4832 nfscl_retoncloselayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen) 4833 { 4834 struct nfscllayout *lyp; 4835 4836 tryagain: 4837 lyp = nfscl_findlayout(clp, fhp, fhlen); 4838 if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) { 4839 /* 4840 * Wait for outstanding I/O ops to be done.
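 * (Unlike the delegation cases above, this also waits until the layout's lock word is clear, with no holder or waiter, since the layout is about to be freed.)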
4841 */ 4842 if (lyp->nfsly_lock.nfslock_usecnt != 0 || 4843 lyp->nfsly_lock.nfslock_lock != 0) { 4844 lyp->nfsly_lock.nfslock_lock |= NFSV4LOCK_WANTED; 4845 (void)mtx_sleep(&lyp->nfsly_lock, 4846 NFSCLSTATEMUTEXPTR, PZERO, "nfslyc", 0); 4847 goto tryagain; 4848 } 4849 nfscl_freelayout(lyp); 4850 } 4851 } 4852 4853 /* 4854 * Dereference a layout. 4855 */ 4856 void 4857 nfscl_rellayout(struct nfscllayout *lyp, int exclocked) 4858 { 4859 4860 NFSLOCKCLSTATE(); 4861 if (exclocked != 0) 4862 nfsv4_unlock(&lyp->nfsly_lock, 0); 4863 else 4864 nfsv4_relref(&lyp->nfsly_lock); 4865 NFSUNLOCKCLSTATE(); 4866 } 4867 4868 /* 4869 * Search for a devinfo by deviceid. If one is found, return it after 4870 * acquiring a reference count on it. 4871 */ 4872 struct nfscldevinfo * 4873 nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid, 4874 struct nfscldevinfo *dip) 4875 { 4876 4877 NFSLOCKCLSTATE(); 4878 if (dip == NULL) 4879 dip = nfscl_finddevinfo(clp, deviceid); 4880 if (dip != NULL) 4881 dip->nfsdi_refcnt++; 4882 NFSUNLOCKCLSTATE(); 4883 return (dip); 4884 } 4885 4886 /* 4887 * Dereference a devinfo structure. 4888 */ 4889 static void 4890 nfscl_reldevinfo_locked(struct nfscldevinfo *dip) 4891 { 4892 4893 dip->nfsdi_refcnt--; 4894 if (dip->nfsdi_refcnt == 0) 4895 wakeup(&dip->nfsdi_refcnt); 4896 } 4897 4898 /* 4899 * Dereference a devinfo structure. 4900 */ 4901 void 4902 nfscl_reldevinfo(struct nfscldevinfo *dip) 4903 { 4904 4905 NFSLOCKCLSTATE(); 4906 nfscl_reldevinfo_locked(dip); 4907 NFSUNLOCKCLSTATE(); 4908 } 4909 4910 /* 4911 * Find a layout for this file handle. Return NULL upon failure. 4912 */ 4913 static struct nfscllayout * 4914 nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) 4915 { 4916 struct nfscllayout *lyp; 4917 4918 LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash) 4919 if (lyp->nfsly_fhlen == fhlen && 4920 !NFSBCMP(lyp->nfsly_fh, fhp, fhlen)) 4921 break; 4922 return (lyp); 4923 } 4924 4925 /* 4926 * Find a devinfo for this deviceid. Return NULL upon failure. 4927 */ 4928 static struct nfscldevinfo * 4929 nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid) 4930 { 4931 struct nfscldevinfo *dip; 4932 4933 LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list) 4934 if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID) 4935 == 0) 4936 break; 4937 return (dip); 4938 } 4939 4940 /* 4941 * Merge the new file layout list into the main one, maintaining it in 4942 * increasing offset order. 4943 */ 4944 static void 4945 nfscl_mergeflayouts(struct nfsclflayouthead *fhlp, 4946 struct nfsclflayouthead *newfhlp) 4947 { 4948 struct nfsclflayout *flp, *nflp, *prevflp, *tflp; 4949 4950 flp = LIST_FIRST(fhlp); 4951 prevflp = NULL; 4952 LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) { 4953 while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) { 4954 prevflp = flp; 4955 flp = LIST_NEXT(flp, nfsfl_list); 4956 } 4957 if (prevflp == NULL) 4958 LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list); 4959 else 4960 LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list); 4961 prevflp = nflp; 4962 } 4963 } 4964 4965 /* 4966 * Add this nfscldevinfo to the client, if it doesn't already exist. 4967 * This function consumes the structure pointed at by dip, if not NULL. 
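 * That is, dip is either linked into the client's devinfo list or freed here, so the caller must not reference it after this call.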
4968 */ 4969 APPLESTATIC int 4970 nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, 4971 struct nfsclflayout *flp) 4972 { 4973 struct nfsclclient *clp; 4974 struct nfscldevinfo *tdip; 4975 4976 NFSLOCKCLSTATE(); 4977 clp = nmp->nm_clp; 4978 if (clp == NULL) { 4979 NFSUNLOCKCLSTATE(); 4980 if (dip != NULL) 4981 free(dip, M_NFSDEVINFO); 4982 return (ENODEV); 4983 } 4984 tdip = nfscl_finddevinfo(clp, flp->nfsfl_dev); 4985 if (tdip != NULL) { 4986 tdip->nfsdi_layoutrefs++; 4987 flp->nfsfl_devp = tdip; 4988 nfscl_reldevinfo_locked(tdip); 4989 NFSUNLOCKCLSTATE(); 4990 if (dip != NULL) 4991 free(dip, M_NFSDEVINFO); 4992 return (0); 4993 } 4994 if (dip != NULL) { 4995 LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list); 4996 dip->nfsdi_layoutrefs = 1; 4997 flp->nfsfl_devp = dip; 4998 } 4999 NFSUNLOCKCLSTATE(); 5000 if (dip == NULL) 5001 return (ENODEV); 5002 return (0); 5003 } 5004 5005 /* 5006 * Free up a layout structure and associated file layout structure(s). 5007 */ 5008 APPLESTATIC void 5009 nfscl_freelayout(struct nfscllayout *layp) 5010 { 5011 struct nfsclflayout *flp, *nflp; 5012 struct nfsclrecalllayout *rp, *nrp; 5013 5014 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) { 5015 LIST_REMOVE(flp, nfsfl_list); 5016 nfscl_freeflayout(flp); 5017 } 5018 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) { 5019 LIST_REMOVE(flp, nfsfl_list); 5020 nfscl_freeflayout(flp); 5021 } 5022 LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) { 5023 LIST_REMOVE(rp, nfsrecly_list); 5024 free(rp, M_NFSLAYRECALL); 5025 } 5026 nfscl_layoutcnt--; 5027 free(layp, M_NFSLAYOUT); 5028 } 5029 5030 /* 5031 * Free up a file layout structure. 5032 */ 5033 APPLESTATIC void 5034 nfscl_freeflayout(struct nfsclflayout *flp) 5035 { 5036 int i; 5037 5038 for (i = 0; i < flp->nfsfl_fhcnt; i++) 5039 free(flp->nfsfl_fh[i], M_NFSFH); 5040 if (flp->nfsfl_devp != NULL) 5041 flp->nfsfl_devp->nfsdi_layoutrefs--; 5042 free(flp, M_NFSFLAYOUT); 5043 } 5044 5045 /* 5046 * Free up a file layout devinfo structure. 5047 */ 5048 APPLESTATIC void 5049 nfscl_freedevinfo(struct nfscldevinfo *dip) 5050 { 5051 5052 free(dip, M_NFSDEVINFO); 5053 } 5054 5055 /* 5056 * Mark any layouts that match as recalled. 5057 */ 5058 static int 5059 nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode, 5060 uint64_t off, uint64_t len, uint32_t stateseqid, 5061 struct nfsclrecalllayout *recallp) 5062 { 5063 struct nfsclrecalllayout *rp, *orp; 5064 5065 recallp->nfsrecly_recalltype = recalltype; 5066 recallp->nfsrecly_iomode = iomode; 5067 recallp->nfsrecly_stateseqid = stateseqid; 5068 recallp->nfsrecly_off = off; 5069 recallp->nfsrecly_len = len; 5070 /* 5071 * Order the list as file returns first, followed by fsid and any 5072 * returns, both in increasing stateseqid order. 5073 * Note that the seqids wrap around, so 1 is after 0xffffffff. 5074 * (I'm not sure this is correct because I find RFC5661 confusing 5075 * on this, but hopefully it will work ok.)
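 * For example, once the seqid wraps, nfscl_seq(0xffffffff, 1) returns 1, so a recall with stateseqid 1 is ordered after one with stateseqid 0xffffffff.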
5076 */ 5077 orp = NULL; 5078 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5079 orp = rp; 5080 if ((recalltype == NFSLAYOUTRETURN_FILE && 5081 (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE || 5082 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) || 5083 (recalltype != NFSLAYOUTRETURN_FILE && 5084 rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE && 5085 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) { 5086 LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list); 5087 break; 5088 } 5089 } 5090 if (rp == NULL) { 5091 if (orp == NULL) 5092 LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp, 5093 nfsrecly_list); 5094 else 5095 LIST_INSERT_AFTER(orp, recallp, nfsrecly_list); 5096 } 5097 lyp->nfsly_flags |= NFSLY_RECALL; 5098 return (0); 5099 } 5100 5101 /* 5102 * Compare the two seqids for ordering. The trick is that the seqids can 5103 * wrap around from 0xffffffff->0, so check for the cases where one 5104 * has wrapped around. 5105 * Return 1 if seqid1 comes before seqid2, 0 otherwise. 5106 */ 5107 static int 5108 nfscl_seq(uint32_t seqid1, uint32_t seqid2) 5109 { 5110 5111 if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff) 5112 /* seqid2 has wrapped around. */ 5113 return (0); 5114 if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff) 5115 /* seqid1 has wrapped around. */ 5116 return (1); 5117 if (seqid1 <= seqid2) 5118 return (1); 5119 return (0); 5120 } 5121 5122 /* 5123 * Do a layout return for each of the recalls. 5124 */ 5125 static void 5126 nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp, 5127 struct ucred *cred, NFSPROC_T *p) 5128 { 5129 struct nfsclrecalllayout *rp; 5130 nfsv4stateid_t stateid; 5131 5132 NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER); 5133 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5134 stateid.seqid = rp->nfsrecly_stateseqid; 5135 (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh, 5136 lyp->nfsly_fhlen, 0, NFSLAYOUT_NFSV4_1_FILES, 5137 rp->nfsrecly_iomode, rp->nfsrecly_recalltype, 5138 rp->nfsrecly_off, rp->nfsrecly_len, 5139 &stateid, 0, NULL, cred, p, NULL); 5140 } 5141 } 5142 5143 /* 5144 * Do the layout commit for a file layout. 5145 */ 5146 static void 5147 nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp, 5148 struct ucred *cred, NFSPROC_T *p) 5149 { 5150 int error; 5151 5152 error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh, lyp->nfsly_fhlen, 5153 0, 0, 0, lyp->nfsly_lastbyte, &lyp->nfsly_stateid, 5154 NFSLAYOUT_NFSV4_1_FILES, 0, NULL, cred, p, NULL); 5155 if (error == NFSERR_NOTSUPP) { 5156 /* If the server doesn't want it, don't bother doing it. */ 5157 NFSLOCKMNT(nmp); 5158 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; 5159 NFSUNLOCKMNT(nmp); 5160 } 5161 } 5162 5163 /* 5164 * Commit all layouts for a file (vnode). 
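 * The loop below drops the state lock around each LayoutCommit RPC and then re-checks NFSLY_WRITTEN, so commits are retried until the flag stays clear.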
5165 */ 5166 int 5167 nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p) 5168 { 5169 struct nfsclclient *clp; 5170 struct nfscllayout *lyp; 5171 struct nfsnode *np = VTONFS(vp); 5172 mount_t mp; 5173 struct nfsmount *nmp; 5174 5175 mp = vnode_mount(vp); 5176 nmp = VFSTONFS(mp); 5177 if (NFSHASNOLAYOUTCOMMIT(nmp)) 5178 return (0); 5179 NFSLOCKCLSTATE(); 5180 clp = nmp->nm_clp; 5181 if (clp == NULL) { 5182 NFSUNLOCKCLSTATE(); 5183 return (EPERM); 5184 } 5185 lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5186 if (lyp == NULL) { 5187 NFSUNLOCKCLSTATE(); 5188 return (EPERM); 5189 } 5190 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 5191 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 5192 NFSUNLOCKCLSTATE(); 5193 return (EPERM); 5194 } 5195 tryagain: 5196 if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 5197 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 5198 NFSUNLOCKCLSTATE(); 5199 NFSCL_DEBUG(4, "do layoutcommit2\n"); 5200 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p); 5201 NFSLOCKCLSTATE(); 5202 goto tryagain; 5203 } 5204 nfsv4_relref(&lyp->nfsly_lock); 5205 NFSUNLOCKCLSTATE(); 5206 return (0); 5207 } 5208 5209
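
/*
 * The seqid ordering above is subtle, so an illustrative, self-contained
 * userland sketch of the nfscl_seq() comparison follows. It is kept under
 * "#if 0" so it is never compiled into the kernel; the demo_seq and main
 * names are hypothetical and not part of this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Same logic as nfscl_seq(): returns 1 if seqid1 orders before seqid2. */
static int
demo_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		return (0);	/* seqid2 has wrapped around. */
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		return (1);	/* seqid1 has wrapped around. */
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}

int
main(void)
{

	printf("%d\n", demo_seq(5, 6));			/* 1: 5 orders before 6 */
	printf("%d\n", demo_seq(0xffffffffU, 1U));	/* 1: 1 is after the wrap */
	printf("%d\n", demo_seq(1U, 0xffffffffU));	/* 0: 0xffffffff is before 1 */
	return (0);
}
#endif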