/*-
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners:
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
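
/*
 * Illustrative sketch (assumed usage, not from the original source):
 * a typical Open path uses the functions below roughly as follows.
 * Error handling and the actual Open RPC are elided and "candelete"
 * is a hypothetical flag for whether the open may be discarded on error.
 *
 *	struct nfsclowner *owp;
 *	struct nfsclopen *op;
 *	int newone, ret;
 *
 *	error = nfscl_open(vp, nfhp, fhlen, amode, 1, cred, p, &owp, &op,
 *	    &newone, &ret, 1);
 *	if (error == 0 && ret == NFSCLOPEN_DOOPEN)
 *		... do the Open RPC against the server ...
 *	nfscl_openrelease(op, error, candelete);
 */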

#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstats newnfsstats;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
#endif	/* !APPLEKEXT */

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(struct nfsclclient *, uint8_t *, int);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
	(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
APPLESTATIC int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
		MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	if (ret != 0) {
		FREE((caddr_t)nowp, M_NFSCLOWNER);
		if (nop != NULL)
			FREE((caddr_t)nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	nfscl_filllockowner(p->td_proc, own, F_POSIX);
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    newonep);

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 */
	if (lockit)
		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		FREE((caddr_t)nowp, M_NFSCLOWNER);
	if (nop != NULL)
		FREE((caddr_t)nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}
	return (0);
}
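
/*
 * Added summary of nfscl_open() above (not in the original source):
 * on success, *retp is set to one of
 *	NFSCLOPEN_OK      - a usable Open already exists;
 *	NFSCLOPEN_DOOPEN  - the mode bits were expanded, so the caller
 *			    must do an Open RPC against the server;
 *	NFSCLOPEN_SETCRED - a new local open was created on a delegation,
 *			    so the caller should save credentials in the
 *			    open for a later server Open.
 * When "lockit" is non-zero, the openowner is returned exclusively
 * locked and must be released via nfscl_ownerrelease() or
 * nfscl_openrelease().
 */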

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			newnfsstats.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			newnfsstats.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				newnfsstats.cllocalopens++;
			} else {
				newnfsstats.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}

/*
 * Called to find/add a delegation to a client.
 */
APPLESTATIC int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		FREE((caddr_t)dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		newnfsstats.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * Delegation already exists, what do we do if a new one??
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			FREE((caddr_t)dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all
 * zeros.
 */
APPLESTATIC int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error, done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
		    mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			stateidp->seqid = lp->nfsl_stateid.seqid;
			stateidp->other[0] = lp->nfsl_stateid.other[0];
			stateidp->other[1] = lp->nfsl_stateid.other[1];
			stateidp->other[2] = lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		done = 0;
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (!done && owp != NULL) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
				    (mode & op->nfso_mode) == mode) {
					done = 1;
					break;
				}
			}
			if (!done)
				owp = LIST_NEXT(owp, nfsow_list);
		}
		if (!done) {
			NFSUNLOCKCLSTATE();
			return (ENOENT);
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}
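
/*
 * Added note (summary of nfscl_getstateid() above, not in the original
 * source): the stateid for an I/O op is chosen in decreasing order of
 * preference:
 *	1. a delegation stateid, when a usable delegation is held;
 *	2. a lock stateid for the process's lockowner (not for a DS);
 *	3. the open stateid, falling back to any Open with a compatible
 *	   mode when no OpenOwner in the parentage tree matches.
 * A caller might look like (assumed usage; nfscl_lockderef() is defined
 * later in the full file):
 *
 *	nfsv4stateid_t st;
 *	void *lckp = NULL;
 *
 *	error = nfscl_getstateid(vp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
 *	    NFSV4OPEN_ACCESSREAD, 0, cred, p, &st, &lckp);
 *	... do the I/O with "st", then release lckp, if it was set ...
 */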

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
    struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfscllockowner *lp;
	int keep_looping;

	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = 1;
	/* Search the client list */
	owp = LIST_FIRST(ohp);
	while (owp != NULL && keep_looping != 0) {
		/* and look for the correct open */
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL && keep_looping != 0) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
			    (op->nfso_mode & mode) == mode) {
				if (lpp != NULL) {
					/* Now look for a matching lockowner. */
					LIST_FOREACH(lp, &op->nfso_lock,
					    nfsl_list) {
						if (!NFSBCMP(lp->nfsl_owner,
						    lockown,
						    NFSV4CL_LOCKNAMELEN)) {
							*lpp = lp;
							rop = op;
							keep_looping = 0;
							break;
						}
					}
				}
				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
				    openown, NFSV4CL_LOCKNAMELEN)) {
					rop = op;
					if (lpp == NULL)
						keep_looping = 0;
				}
				if (rop2 == NULL)
					rop2 = op;
			}
			op = LIST_NEXT(op, nfso_list);
		}
		owp = LIST_NEXT(owp, nfsow_list);
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
APPLESTATIC void
nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
    __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (!unlocked)
		nfscl_lockunlock(&owp->nfsow_rwlock);
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
APPLESTATIC void
nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * The start_renewthread argument tells nfscl_getcl() to start a renew
 * thread if this creates a new clp.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
APPLESTATIC int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    int start_renewthread, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		MALLOC(newclp, struct nfsclclient *,
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		if (start_renewthread != 0)
			nfscl_start_renewthread(clp);
	} else {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0)
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (!igotlock)
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0 && (mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for MNTK_UNMOUNTF and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}

/*
 * Get a reference to a clientid and return it, if valid.
 */
APPLESTATIC struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
APPLESTATIC void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Called when wanting to lock a byte region.
 */
APPLESTATIC int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nlp, struct nfscllockowner *,
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	MALLOC(otherlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	MALLOC(nlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	}
	if (error) {
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
		FREE((caddr_t)otherlop, M_NFSCLLOCK);
		FREE((caddr_t)nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(&clp->nfsc_owner,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
		FREE((caddr_t)otherlop, M_NFSCLLOCK);
		FREE((caddr_t)nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			newnfsstats.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			newnfsstats.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads within the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		FREE((caddr_t)nlop, M_NFSCLLOCK);
	if (otherlop)
		FREE((caddr_t)otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
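
/*
 * Illustrative sketch (assumed usage, not from the original source):
 * a byte range lock is typically taken and released as
 *
 *	error = nfscl_getbytelock(vp, off, len, F_WRLCK, cred, p, NULL, 0,
 *	    id, flags, NULL, NULL, &lp, &newone, &donelocally);
 *	if (error == 0 && !donelocally)
 *		... do the Lock RPC against the server ...
 *	nfscl_lockrelease(lp, error, candelete);
 *
 * where "candelete" is a hypothetical flag saying whether the lockowner
 * may be discarded on error.
 */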

/*
 * Called to unlock a byte range, for LockU.
 */
APPLESTATIC int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			FREE((caddr_t)nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		MALLOC(other_lop, struct nfscllock *,
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh,
			    op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						fnd = 1;
						break;
					}
				}
				if (fnd)
					break;
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads within the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		FREE((caddr_t)nlop, M_NFSCLLOCK);
	if (other_lop)
		FREE((caddr_t)other_lop, M_NFSCLLOCK);
	return (0);
}

/*
 * Release all lockowners marked in progress for this process and file.
 */
APPLESTATIC void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh,
			    op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(
						    &lp->nfsl_rwlock);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}
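
/*
 * Illustrative sketch (assumed caller, not from this file): before doing
 * a LockU, a caller such as the advisory lock vnode op can use
 * nfscl_checkwritelocked() below to decide whether dirty buffers must
 * be flushed first:
 *
 *	if (nfscl_checkwritelocked(vp, fl, cred, p, id, flags))
 *		... flush the vnode's dirty buffers before the unlock ...
 */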

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
APPLESTATIC int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (!NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN))
					break;
			}
			if (lp != NULL) {
				LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
					if (lop->nfslo_first >= end)
						break;
					if (lop->nfslo_end <= off)
						continue;
					if (lop->nfslo_type == F_WRLCK) {
						nfscl_clrelease(clp);
						NFSUNLOCKCLSTATE();
						return (1);
					}
				}
			}
		}
	    }
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Release a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Free up an open structure and any associated byte range lock structures.
 */
APPLESTATIC void
nfscl_freeopen(struct nfsclopen *op, int local)
{

	LIST_REMOVE(op, nfso_list);
	nfscl_freealllocks(&op->nfso_lock, local);
	FREE((caddr_t)op, M_NFSCLOPEN);
	if (local)
		newnfsstats.cllocalopens--;
	else
		newnfsstats.clopens--;
}

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
{
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
			panic("nfscllckw");
		nfscl_freelockowner(lp, local);
	}
}

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {
			mustdelete = 1;
			break;
		}
	}

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		dp = NULL;
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
		if (error) {
			mustdelete = 1;
			if (dp != NULL) {
				FREE((caddr_t)dp, M_NFSCLDELEG);
				dp = NULL;
			}
		}
		if (dp != NULL)
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);
	}

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
	if (mustdelete)
		nfscl_freeopen(op, 0);
	return (mustdelete);
}

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{

	LIST_REMOVE(owp, nfsow_list);
	FREE((caddr_t)owp, M_NFSCLOWNER);
	if (local)
		newnfsstats.cllocalopenowners--;
	else
		newnfsstats.clopenowners--;
}

/*
 * Free up a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
	struct nfscllock *lop, *nlop;

	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	}
	FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
	if (local)
		newnfsstats.cllocallockowners--;
	else
		newnfsstats.cllockowners--;
}

/*
 * Free up a byte range lock structure.
 */
APPLESTATIC void
nfscl_freelock(struct nfscllock *lop, int local)
{

	LIST_REMOVE(lop, nfslo_list);
	FREE((caddr_t)lop, M_NFSCLLOCK);
	if (local)
		newnfsstats.cllocallocks--;
	else
		newnfsstats.cllocks--;
}

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1);
		}
		nfscl_freeopenowner(owp, 1);
	}
	nfscl_freealllocks(&dp->nfsdl_lock, 1);
}

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
{

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	FREE((caddr_t)dp, M_NFSCLDELEG);
	newnfsstats.cldelegates--;
	nfscl_delegcnt--;
}
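
/*
 * Added note (summary of the free routines above, not in the original
 * source): the "local" argument is non-zero when the structure being
 * freed was issued locally against a delegation rather than acquired
 * from the server, so the matching newnfsstats.cllocal* counter is
 * decremented instead of the server-side one.
 */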

/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
	    ndp = TAILQ_NEXT(dp, nfsdl_list);
	    owp = LIST_FIRST(&dp->nfsdl_owner);
	    while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
		    if (LIST_NEXT(op, nfso_list) != NULL)
			panic("nfsclexp");
		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
			    NFSV4CL_LOCKNAMELEN))
			    break;
		    }
		    if (towp != NULL) {
			/* Merge opens in */
			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
			    if (top->nfso_fhlen == op->nfso_fhlen &&
				!NFSBCMP(top->nfso_fh, op->nfso_fh,
				op->nfso_fhlen)) {
				top->nfso_mode |= op->nfso_mode;
				top->nfso_opencnt += op->nfso_opencnt;
				break;
			    }
			}
			if (top == NULL) {
			    /* Just add the open to the owner list */
			    LIST_REMOVE(op, nfso_list);
			    op->nfso_own = towp;
			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
			    newnfsstats.cllocalopens--;
			    newnfsstats.clopens++;
			}
		    } else {
			/* Just add the openowner to the client list */
			LIST_REMOVE(owp, nfsow_list);
			owp->nfsow_clp = clp;
			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
			newnfsstats.cllocalopenowners--;
			newnfsstats.clopenowners++;
			newnfsstats.cllocalopens--;
			newnfsstats.clopens++;
		    }
		}
		owp = nowp;
	    }
	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
		printed = 1;
		printf("nfsv4 expired locks lost\n");
	    }
	    nfscl_cleandeleg(dp);
	    nfscl_freedeleg(&clp->nfsc_deleg, dp);
	    dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
	    panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}

/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock &
				    NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
		}
		owp = nowp;
	}
}

/*
 * Find open/lock owners for processes that have exited.
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;

	NFSPROCLISTLOCK();
	NFSLOCKCLSTATE();
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
			}
		}
		if (nfscl_procdoesntexist(owp->nfsow_owner))
			nfscl_cleanup_common(clp, owp->nfsow_owner);
	}
	NFSUNLOCKCLSTATE();
	NFSPROCLISTUNLOCK();
}

/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
static void
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
{
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;
	int fnd_it;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)
		return;

	fnd_it = 0;
	mylfhp = NULL;
	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
		    lfhp->nfslfh_len))
			mylfhp = lfhp;
		LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
			if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
			    NFSV4CL_LOCKNAMELEN))
				fnd_it = 1;
	}
	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
		return;

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		    M_NOWAIT);
		if (mylfhp == NULL)
			return;
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	}
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
}

static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
APPLESTATIC void
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct ucred *cred;
	int igotlock;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * http://docs.FreeBSD.org/cgi/
	 *     mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
	NFSLOCKCLSTATE();
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		fake_global++;
		NFSUNLOCKCLSTATE();
		NFSLOCKCLSTATE();
	}

	clp = nmp->nm_clp;
	if (clp != NULL) {
		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
			panic("nfscl umount");

		/*
		 * First, handshake with the nfscl renew thread, to terminate
		 * it.
		 */
		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
			    "nfsclumnt", hz);

		/*
		 * Now, get the exclusive lock on the client state, so
		 * that no uses of the state are still in progress.
		 */
		do {
			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} while (!igotlock);
		NFSUNLOCKCLSTATE();

		/*
		 * Free up all the state. It will expire on the server, but
		 * maybe we should do a SetClientId/SetClientIdConfirm so
		 * the server throws it away?
		 */
		LIST_REMOVE(clp, nfsc_list);
		nfscl_delegreturnall(clp, p);
		cred = newnfs_getcred();
		if (NFSHASNFSV4N(nmp)) {
			(void)nfsrpc_destroysession(nmp, clp, cred, p);
			(void)nfsrpc_destroyclient(nmp, clp, cred, p);
		} else
			(void)nfsrpc_setclient(nmp, clp, 0, cred, p);
		nfscl_cleanclient(clp);
		nmp->nm_clp = NULL;
		NFSFREECRED(cred);
		free(clp, M_NFSCLCLIENT);
	} else
		NFSUNLOCKCLSTATE();
}
1897 */ 1898 NFSLOCKCLSTATE(); 1899 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 1900 do { 1901 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 1902 NFSCLSTATEMUTEXPTR, NULL); 1903 } while (!igotlock); 1904 NFSUNLOCKCLSTATE(); 1905 1906 nmp = clp->nfsc_nmp; 1907 if (nmp == NULL) 1908 panic("nfscl recover"); 1909 1910 /* 1911 * For now, just get rid of all layouts. There may be a need 1912 * to do LayoutCommit Ops with reclaim == true later. 1913 */ 1914 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) 1915 nfscl_freelayout(lyp); 1916 TAILQ_INIT(&clp->nfsc_layout); 1917 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++) 1918 LIST_INIT(&clp->nfsc_layouthash[i]); 1919 1920 trycnt = 5; 1921 do { 1922 error = nfsrpc_setclient(nmp, clp, 1, cred, p); 1923 } while ((error == NFSERR_STALECLIENTID || 1924 error == NFSERR_BADSESSION || 1925 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 1926 if (error) { 1927 nfscl_cleanclient(clp); 1928 NFSLOCKCLSTATE(); 1929 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | 1930 NFSCLFLAGS_RECOVER | NFSCLFLAGS_RECVRINPROG); 1931 wakeup(&clp->nfsc_flags); 1932 nfsv4_unlock(&clp->nfsc_lock, 0); 1933 NFSUNLOCKCLSTATE(); 1934 return; 1935 } 1936 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 1937 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 1938 1939 /* 1940 * Mark requests already queued on the server, so that they don't 1941 * initiate another recovery cycle. Any requests already in the 1942 * queue that handle state information will have the old stale 1943 * clientid/stateid and will get a NFSERR_STALESTATEID, 1944 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server. 1945 * This will be translated to NFSERR_STALEDONTRECOVER when 1946 * R_DONTRECOVER is set. 1947 */ 1948 NFSLOCKREQ(); 1949 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) { 1950 if (rep->r_nmp == nmp) 1951 rep->r_flags |= R_DONTRECOVER; 1952 } 1953 NFSUNLOCKREQ(); 1954 1955 /* 1956 * Now, mark all delegations "need reclaim". 1957 */ 1958 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) 1959 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM; 1960 1961 TAILQ_INIT(&extra_deleg); 1962 LIST_INIT(&extra_open); 1963 /* 1964 * Now traverse the state lists, doing Open and Lock Reclaims. 
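 * In outline (a summary of the code that follows): each open is
 * re-established with a reclaim open via nfscl_tryopen() and, when a
 * delegation marked NFSCLDL_NEEDRECLAIM matches the open's file handle
 * and mode, the matching delegation type is requested so that the
 * delegation is reclaimed along with the open. Byte range locks are
 * then reclaimed via nfscl_trylock(). State that cannot be reclaimed
 * (e.g. once the server replies NFSERR_NOGRACE) is freed.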
 */
	tcred = newnfs_getcred();
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
	    nowp = LIST_NEXT(owp, nfsow_list);
	    owp->nfsow_seqid = 0;
	    op = LIST_FIRST(&owp->nfsow_open);
	    while (op != NULL) {
		nop = LIST_NEXT(op, nfso_list);
		if (error != NFSERR_NOGRACE) {
		    /* Search for a delegation to reclaim with the open */
		    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
			if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
			    continue;
			if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
			    mode = NFSV4OPEN_ACCESSWRITE;
			    delegtype = NFSV4OPEN_DELEGATEWRITE;
			} else {
			    mode = NFSV4OPEN_ACCESSREAD;
			    delegtype = NFSV4OPEN_DELEGATEREAD;
			}
			if ((op->nfso_mode & mode) == mode &&
			    op->nfso_fhlen == dp->nfsdl_fhlen &&
			    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
			    break;
		    }
		    ndp = dp;
		    if (dp == NULL)
			delegtype = NFSV4OPEN_DELEGATENONE;
		    newnfs_copycred(&op->nfso_cred, tcred);
		    error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
			op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
			op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
			tcred, p);
		    if (!error) {
			/* Handle any replied delegation */
			if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
			    || NFSMNT_RDONLY(nmp->nm_mountp))) {
			    if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
				mode = NFSV4OPEN_ACCESSWRITE;
			    else
				mode = NFSV4OPEN_ACCESSREAD;
			    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
				if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
				    continue;
				if ((op->nfso_mode & mode) == mode &&
				    op->nfso_fhlen == dp->nfsdl_fhlen &&
				    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
				    op->nfso_fhlen)) {
				    dp->nfsdl_stateid = ndp->nfsdl_stateid;
				    dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
				    dp->nfsdl_ace = ndp->nfsdl_ace;
				    dp->nfsdl_change = ndp->nfsdl_change;
				    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
				    if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
					dp->nfsdl_flags |= NFSCLDL_RECALL;
				    FREE((caddr_t)ndp, M_NFSCLDELEG);
				    ndp = NULL;
				    break;
				}
			    }
			}
			if (ndp != NULL)
			    TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);

			/* and reclaim all byte range locks */
			lp = LIST_FIRST(&op->nfso_lock);
			while (lp != NULL) {
			    nlp = LIST_NEXT(lp, nfsl_list);
			    lp->nfsl_seqid = 0;
			    firstlock = 1;
			    lop = LIST_FIRST(&lp->nfsl_lock);
			    while (lop != NULL) {
				nlop = LIST_NEXT(lop, nfslo_list);
				if (lop->nfslo_end == NFS64BITSSET)
				    len = NFS64BITSSET;
				else
				    len = lop->nfslo_end - lop->nfslo_first;
				if (error != NFSERR_NOGRACE)
				    error = nfscl_trylock(nmp, NULL,
					op->nfso_fh, op->nfso_fhlen, lp,
					firstlock, 1, lop->nfslo_first, len,
					lop->nfslo_type, tcred, p);
				if (error != 0)
				    nfscl_freelock(lop, 0);
				else
				    firstlock = 0;
				lop = nlop;
			    }
			    /* If no locks, but a lockowner, just delete it. */
			    if (LIST_EMPTY(&lp->nfsl_lock))
				nfscl_freelockowner(lp, 0);
			    lp = nlp;
			}
		    } else {
			nfscl_freeopen(op, 0);
		    }
		}
		op = nop;
	    }
	    owp = nowp;
	}

	/*
	 * Now, try to get any delegations not yet reclaimed by cobbling
	 * together an appropriate open.
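	 * A delegation still needs reclaiming here when no open matching
	 * its file handle and mode was found above. The code below
	 * allocates a single special openowner (named "RECLAIMDELEG") and
	 * does one reclaim open per such delegation, just to get the
	 * delegation itself back.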
	 */
	nowp = NULL;
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
	    ndp = TAILQ_NEXT(dp, nfsdl_list);
	    if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
		if (nowp == NULL) {
		    MALLOC(nowp, struct nfsclowner *,
			sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
		    /*
		     * Name must be as long as the largest possible
		     * NFSV4CL_LOCKNAMELEN (12 for now).
		     */
		    NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
			NFSV4CL_LOCKNAMELEN);
		    LIST_INIT(&nowp->nfsow_open);
		    nowp->nfsow_clp = clp;
		    nowp->nfsow_seqid = 0;
		    nowp->nfsow_defunct = 0;
		    nfscl_lockinit(&nowp->nfsow_rwlock);
		}
		nop = NULL;
		if (error != NFSERR_NOGRACE) {
		    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
			dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
		    nop->nfso_own = nowp;
		    if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
			nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
			delegtype = NFSV4OPEN_DELEGATEWRITE;
		    } else {
			nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
			delegtype = NFSV4OPEN_DELEGATEREAD;
		    }
		    nop->nfso_opencnt = 0;
		    nop->nfso_posixlock = 1;
		    nop->nfso_fhlen = dp->nfsdl_fhlen;
		    NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
		    LIST_INIT(&nop->nfso_lock);
		    nop->nfso_stateid.seqid = 0;
		    nop->nfso_stateid.other[0] = 0;
		    nop->nfso_stateid.other[1] = 0;
		    nop->nfso_stateid.other[2] = 0;
		    newnfs_copycred(&dp->nfsdl_cred, tcred);
		    newnfs_copyincred(tcred, &nop->nfso_cred);
		    tdp = NULL;
		    error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
			nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
			nop->nfso_mode, nop, NULL, 0, &tdp, 1,
			delegtype, tcred, p);
		    if (tdp != NULL) {
			if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
			    mode = NFSV4OPEN_ACCESSWRITE;
			else
			    mode = NFSV4OPEN_ACCESSREAD;
			if ((nop->nfso_mode & mode) == mode &&
			    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
			    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
			    nop->nfso_fhlen)) {
			    dp->nfsdl_stateid = tdp->nfsdl_stateid;
			    dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
			    dp->nfsdl_ace = tdp->nfsdl_ace;
			    dp->nfsdl_change = tdp->nfsdl_change;
			    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
			    if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
				dp->nfsdl_flags |= NFSCLDL_RECALL;
			    FREE((caddr_t)tdp, M_NFSCLDELEG);
			} else {
			    TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
			}
		    }
		}
		if (error) {
		    if (nop != NULL)
			FREE((caddr_t)nop, M_NFSCLOPEN);
		    /*
		     * Couldn't reclaim it, so throw the state
		     * away. Ouch!!
		     */
		    nfscl_cleandeleg(dp);
		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
		} else {
		    LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
		}
	    }
	    dp = ndp;
	}

	/*
	 * Now, get rid of extra Opens and Delegations.
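	 * These are ones the server handed back during reclaim that do
	 * not correspond to any state cached locally. Each extra open is
	 * closed, napping and retrying while the server is still in its
	 * grace period (NFSERR_GRACE), and each extra delegation is
	 * returned.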
2160 */ 2161 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { 2162 do { 2163 newnfs_copycred(&op->nfso_cred, tcred); 2164 error = nfscl_tryclose(op, tcred, nmp, p); 2165 if (error == NFSERR_GRACE) 2166 (void) nfs_catnap(PZERO, error, "nfsexcls"); 2167 } while (error == NFSERR_GRACE); 2168 LIST_REMOVE(op, nfso_list); 2169 FREE((caddr_t)op, M_NFSCLOPEN); 2170 } 2171 if (nowp != NULL) 2172 FREE((caddr_t)nowp, M_NFSCLOWNER); 2173 2174 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { 2175 do { 2176 newnfs_copycred(&dp->nfsdl_cred, tcred); 2177 error = nfscl_trydelegreturn(dp, tcred, nmp, p); 2178 if (error == NFSERR_GRACE) 2179 (void) nfs_catnap(PZERO, error, "nfsexdlg"); 2180 } while (error == NFSERR_GRACE); 2181 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); 2182 FREE((caddr_t)dp, M_NFSCLDELEG); 2183 } 2184 2185 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */ 2186 if (NFSHASNFSV4N(nmp)) 2187 (void)nfsrpc_reclaimcomplete(nmp, cred, p); 2188 2189 NFSLOCKCLSTATE(); 2190 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; 2191 wakeup(&clp->nfsc_flags); 2192 nfsv4_unlock(&clp->nfsc_lock, 0); 2193 NFSUNLOCKCLSTATE(); 2194 NFSFREECRED(tcred); 2195 } 2196 2197 /* 2198 * This function is called when a server replies with NFSERR_EXPIRED. 2199 * It deletes all state for the client and does a fresh SetClientId/confirm. 2200 * XXX Someday it should post a signal to the process(es) that hold the 2201 * state, so they know that lock state has been lost. 2202 */ 2203 APPLESTATIC int 2204 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) 2205 { 2206 struct nfsmount *nmp; 2207 struct ucred *cred; 2208 int igotlock = 0, error, trycnt; 2209 2210 /* 2211 * If the clientid has gone away or a new SetClientid has already 2212 * been done, just return ok. 2213 */ 2214 if (clp == NULL || clidrev != clp->nfsc_clientidrev) 2215 return (0); 2216 2217 /* 2218 * First, lock the client structure, so everyone else will 2219 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so 2220 * that only one thread does the work. 2221 */ 2222 NFSLOCKCLSTATE(); 2223 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; 2224 do { 2225 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2226 NFSCLSTATEMUTEXPTR, NULL); 2227 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); 2228 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { 2229 if (igotlock) 2230 nfsv4_unlock(&clp->nfsc_lock, 0); 2231 NFSUNLOCKCLSTATE(); 2232 return (0); 2233 } 2234 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2235 NFSUNLOCKCLSTATE(); 2236 2237 nmp = clp->nfsc_nmp; 2238 if (nmp == NULL) 2239 panic("nfscl expired"); 2240 cred = newnfs_getcred(); 2241 trycnt = 5; 2242 do { 2243 error = nfsrpc_setclient(nmp, clp, 0, cred, p); 2244 } while ((error == NFSERR_STALECLIENTID || 2245 error == NFSERR_BADSESSION || 2246 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2247 if (error) { 2248 /* 2249 * Clear out any state. 2250 */ 2251 nfscl_cleanclient(clp); 2252 NFSLOCKCLSTATE(); 2253 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | 2254 NFSCLFLAGS_RECOVER); 2255 } else { 2256 /* 2257 * Expire the state for the client. 
2258 */ 2259 nfscl_expireclient(clp, nmp, cred, p); 2260 NFSLOCKCLSTATE(); 2261 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2262 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2263 } 2264 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); 2265 wakeup(&clp->nfsc_flags); 2266 nfsv4_unlock(&clp->nfsc_lock, 0); 2267 NFSUNLOCKCLSTATE(); 2268 NFSFREECRED(cred); 2269 return (error); 2270 } 2271 2272 /* 2273 * This function inserts a lock in the list after insert_lop. 2274 */ 2275 static void 2276 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, 2277 struct nfscllock *insert_lop, int local) 2278 { 2279 2280 if ((struct nfscllockowner *)insert_lop == lp) 2281 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); 2282 else 2283 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); 2284 if (local) 2285 newnfsstats.cllocallocks++; 2286 else 2287 newnfsstats.cllocks++; 2288 } 2289 2290 /* 2291 * This function updates the locking for a lock owner and given file. It 2292 * maintains a list of lock ranges ordered on increasing file offset that 2293 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 2294 * It always adds new_lop to the list and sometimes uses the one pointed 2295 * at by other_lopp. 2296 * Returns 1 if the locks were modified, 0 otherwise. 2297 */ 2298 static int 2299 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, 2300 struct nfscllock **other_lopp, int local) 2301 { 2302 struct nfscllock *new_lop = *new_lopp; 2303 struct nfscllock *lop, *tlop, *ilop; 2304 struct nfscllock *other_lop; 2305 int unlock = 0, modified = 0; 2306 u_int64_t tmp; 2307 2308 /* 2309 * Work down the list until the lock is merged. 2310 */ 2311 if (new_lop->nfslo_type == F_UNLCK) 2312 unlock = 1; 2313 ilop = (struct nfscllock *)lp; 2314 lop = LIST_FIRST(&lp->nfsl_lock); 2315 while (lop != NULL) { 2316 /* 2317 * Only check locks for this file that aren't before the start of 2318 * new lock's range. 2319 */ 2320 if (lop->nfslo_end >= new_lop->nfslo_first) { 2321 if (new_lop->nfslo_end < lop->nfslo_first) { 2322 /* 2323 * If the new lock ends before the start of the 2324 * current lock's range, no merge, just insert 2325 * the new lock. 2326 */ 2327 break; 2328 } 2329 if (new_lop->nfslo_type == lop->nfslo_type || 2330 (new_lop->nfslo_first <= lop->nfslo_first && 2331 new_lop->nfslo_end >= lop->nfslo_end)) { 2332 /* 2333 * This lock can be absorbed by the new lock/unlock. 2334 * This happens when it covers the entire range 2335 * of the old lock or is contiguous 2336 * with the old lock and is of the same type or an 2337 * unlock. 2338 */ 2339 if (new_lop->nfslo_type != lop->nfslo_type || 2340 new_lop->nfslo_first != lop->nfslo_first || 2341 new_lop->nfslo_end != lop->nfslo_end) 2342 modified = 1; 2343 if (lop->nfslo_first < new_lop->nfslo_first) 2344 new_lop->nfslo_first = lop->nfslo_first; 2345 if (lop->nfslo_end > new_lop->nfslo_end) 2346 new_lop->nfslo_end = lop->nfslo_end; 2347 tlop = lop; 2348 lop = LIST_NEXT(lop, nfslo_list); 2349 nfscl_freelock(tlop, local); 2350 continue; 2351 } 2352 2353 /* 2354 * All these cases are for contiguous locks that are not the 2355 * same type, so they can't be merged. 2356 */ 2357 if (new_lop->nfslo_first <= lop->nfslo_first) { 2358 /* 2359 * This case is where the new lock overlaps with the 2360 * first part of the old lock. Move the start of the 2361 * old lock to just past the end of the new lock. The 2362 * new lock will be inserted in front of the old, since 2363 * ilop hasn't been updated. 
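 * For example (hypothetical ranges): with an old write lock over
 * [20, 100) and a new read lock over [0, 50), the old lock is trimmed
 * to [50, 100) and the new lock [0, 50) ends up in front of it.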
(We are done now.) 2364 */ 2365 if (lop->nfslo_first != new_lop->nfslo_end) { 2366 lop->nfslo_first = new_lop->nfslo_end; 2367 modified = 1; 2368 } 2369 break; 2370 } 2371 if (new_lop->nfslo_end >= lop->nfslo_end) { 2372 /* 2373 * This case is where the new lock overlaps with the 2374 * end of the old lock's range. Move the old lock's 2375 * end to just before the new lock's first and insert 2376 * the new lock after the old lock. 2377 * Might not be done yet, since the new lock could 2378 * overlap further locks with higher ranges. 2379 */ 2380 if (lop->nfslo_end != new_lop->nfslo_first) { 2381 lop->nfslo_end = new_lop->nfslo_first; 2382 modified = 1; 2383 } 2384 ilop = lop; 2385 lop = LIST_NEXT(lop, nfslo_list); 2386 continue; 2387 } 2388 /* 2389 * The final case is where the new lock's range is in the 2390 * middle of the current lock's and splits the current lock 2391 * up. Use *other_lopp to handle the second part of the 2392 * split old lock range. (We are done now.) 2393 * For unlock, we use new_lop as other_lop and tmp, since 2394 * other_lop and new_lop are the same for this case. 2395 * We noted the unlock case above, so we don't need 2396 * new_lop->nfslo_type any longer. 2397 */ 2398 tmp = new_lop->nfslo_first; 2399 if (unlock) { 2400 other_lop = new_lop; 2401 *new_lopp = NULL; 2402 } else { 2403 other_lop = *other_lopp; 2404 *other_lopp = NULL; 2405 } 2406 other_lop->nfslo_first = new_lop->nfslo_end; 2407 other_lop->nfslo_end = lop->nfslo_end; 2408 other_lop->nfslo_type = lop->nfslo_type; 2409 lop->nfslo_end = tmp; 2410 nfscl_insertlock(lp, other_lop, lop, local); 2411 ilop = lop; 2412 modified = 1; 2413 break; 2414 } 2415 ilop = lop; 2416 lop = LIST_NEXT(lop, nfslo_list); 2417 if (lop == NULL) 2418 break; 2419 } 2420 2421 /* 2422 * Insert the new lock in the list at the appropriate place. 2423 */ 2424 if (!unlock) { 2425 nfscl_insertlock(lp, new_lop, ilop, local); 2426 *new_lopp = NULL; 2427 modified = 1; 2428 } 2429 return (modified); 2430 } 2431 2432 /* 2433 * This function must be run as a kernel thread. 2434 * It does Renew Ops and recovery, when required. 2435 */ 2436 APPLESTATIC void 2437 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) 2438 { 2439 struct nfsclowner *owp, *nowp; 2440 struct nfsclopen *op; 2441 struct nfscllockowner *lp, *nlp; 2442 struct nfscldeleghead dh; 2443 struct nfscldeleg *dp, *ndp; 2444 struct ucred *cred; 2445 u_int32_t clidrev; 2446 int error, cbpathdown, islept, igotlock, ret, clearok; 2447 uint32_t recover_done_time = 0; 2448 time_t mytime; 2449 static time_t prevsec = 0; 2450 struct nfscllockownerfh *lfhp, *nlfhp; 2451 struct nfscllockownerfhhead lfh; 2452 struct nfscllayout *lyp, *nlyp; 2453 struct nfscldevinfo *dip, *ndip; 2454 struct nfscllayouthead rlh; 2455 struct nfsclrecalllayout *recallp; 2456 struct nfsclds *dsp; 2457 2458 cred = newnfs_getcred(); 2459 NFSLOCKCLSTATE(); 2460 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; 2461 NFSUNLOCKCLSTATE(); 2462 for(;;) { 2463 newnfs_setroot(cred); 2464 cbpathdown = 0; 2465 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { 2466 /* 2467 * Only allow one recover within 1/2 of the lease 2468 * duration (nfsc_renew). 
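			 * That is, once a recovery completes,
			 * recover_done_time prevents another one from
			 * starting for at least nfsc_renew seconds.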
2469 */ 2470 if (recover_done_time < NFSD_MONOSEC) { 2471 recover_done_time = NFSD_MONOSEC + 2472 clp->nfsc_renew; 2473 nfscl_recover(clp, cred, p); 2474 } else { 2475 NFSLOCKCLSTATE(); 2476 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2477 NFSUNLOCKCLSTATE(); 2478 } 2479 } 2480 if (clp->nfsc_expire <= NFSD_MONOSEC && 2481 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { 2482 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; 2483 clidrev = clp->nfsc_clientidrev; 2484 error = nfsrpc_renew(clp, 2485 TAILQ_FIRST(&clp->nfsc_nmp->nm_sess), cred, p); 2486 if (error == NFSERR_CBPATHDOWN) 2487 cbpathdown = 1; 2488 else if (error == NFSERR_STALECLIENTID || 2489 error == NFSERR_BADSESSION) { 2490 NFSLOCKCLSTATE(); 2491 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2492 NFSUNLOCKCLSTATE(); 2493 } else if (error == NFSERR_EXPIRED) 2494 (void) nfscl_hasexpired(clp, clidrev, p); 2495 } 2496 2497 /* Do renews for any DS sessions. */ 2498 checkdsrenew: 2499 NFSLOCKMNT(clp->nfsc_nmp); 2500 /* Skip first entry, since the MDS is handled above. */ 2501 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess); 2502 if (dsp != NULL) 2503 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2504 while (dsp != NULL) { 2505 if (dsp->nfsclds_expire <= NFSD_MONOSEC) { 2506 dsp->nfsclds_expire = NFSD_MONOSEC + 2507 clp->nfsc_renew; 2508 NFSUNLOCKMNT(clp->nfsc_nmp); 2509 (void)nfsrpc_renew(clp, dsp, cred, p); 2510 goto checkdsrenew; 2511 } 2512 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2513 } 2514 NFSUNLOCKMNT(clp->nfsc_nmp); 2515 2516 TAILQ_INIT(&dh); 2517 NFSLOCKCLSTATE(); 2518 if (cbpathdown) 2519 /* It's a Total Recall! */ 2520 nfscl_totalrecall(clp); 2521 2522 /* 2523 * Now, handle defunct owners. 2524 */ 2525 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 2526 if (LIST_EMPTY(&owp->nfsow_open)) { 2527 if (owp->nfsow_defunct != 0) 2528 nfscl_freeopenowner(owp, 0); 2529 } 2530 } 2531 2532 /* 2533 * Do the recall on any delegations. To avoid trouble, always 2534 * come back up here after having slept. 2535 */ 2536 igotlock = 0; 2537 tryagain: 2538 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2539 while (dp != NULL) { 2540 ndp = TAILQ_NEXT(dp, nfsdl_list); 2541 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { 2542 /* 2543 * Wait for outstanding I/O ops to be done. 2544 */ 2545 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 2546 if (igotlock) { 2547 nfsv4_unlock(&clp->nfsc_lock, 0); 2548 igotlock = 0; 2549 } 2550 dp->nfsdl_rwlock.nfslock_lock |= 2551 NFSV4LOCK_WANTED; 2552 (void) nfsmsleep(&dp->nfsdl_rwlock, 2553 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", 2554 NULL); 2555 goto tryagain; 2556 } 2557 while (!igotlock) { 2558 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 2559 &islept, NFSCLSTATEMUTEXPTR, NULL); 2560 if (islept) 2561 goto tryagain; 2562 } 2563 NFSUNLOCKCLSTATE(); 2564 newnfs_copycred(&dp->nfsdl_cred, cred); 2565 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, 2566 NULL, cred, p, 1); 2567 if (!ret) { 2568 nfscl_cleandeleg(dp); 2569 TAILQ_REMOVE(&clp->nfsc_deleg, dp, 2570 nfsdl_list); 2571 LIST_REMOVE(dp, nfsdl_hash); 2572 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2573 nfscl_delegcnt--; 2574 newnfsstats.cldelegates--; 2575 } 2576 NFSLOCKCLSTATE(); 2577 } 2578 dp = ndp; 2579 } 2580 2581 /* 2582 * Clear out old delegations, if we are above the high water 2583 * mark. Only clear out ones with no state related to them. 2584 * The tailq list is in LRU order. 
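		 * A delegation is only discarded here when it is not
		 * locked or in use, has no opens or byte range locks
		 * hanging off of it and is not being recalled, returned
		 * or reclaimed (see the NFSCLDL_* flag tests below).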
2585 */ 2586 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); 2587 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { 2588 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); 2589 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && 2590 dp->nfsdl_rwlock.nfslock_lock == 0 && 2591 dp->nfsdl_timestamp < NFSD_MONOSEC && 2592 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | 2593 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { 2594 clearok = 1; 2595 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2596 op = LIST_FIRST(&owp->nfsow_open); 2597 if (op != NULL) { 2598 clearok = 0; 2599 break; 2600 } 2601 } 2602 if (clearok) { 2603 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 2604 if (!LIST_EMPTY(&lp->nfsl_lock)) { 2605 clearok = 0; 2606 break; 2607 } 2608 } 2609 } 2610 if (clearok) { 2611 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); 2612 LIST_REMOVE(dp, nfsdl_hash); 2613 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2614 nfscl_delegcnt--; 2615 newnfsstats.cldelegates--; 2616 } 2617 } 2618 dp = ndp; 2619 } 2620 if (igotlock) 2621 nfsv4_unlock(&clp->nfsc_lock, 0); 2622 2623 /* 2624 * Do the recall on any layouts. To avoid trouble, always 2625 * come back up here after having slept. 2626 */ 2627 TAILQ_INIT(&rlh); 2628 tryagain2: 2629 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) { 2630 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) { 2631 /* 2632 * Wait for outstanding I/O ops to be done. 2633 */ 2634 if (lyp->nfsly_lock.nfslock_usecnt > 0 || 2635 (lyp->nfsly_lock.nfslock_lock & 2636 NFSV4LOCK_LOCK) != 0) { 2637 lyp->nfsly_lock.nfslock_lock |= 2638 NFSV4LOCK_WANTED; 2639 (void)nfsmsleep(&lyp->nfsly_lock, 2640 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp", 2641 NULL); 2642 goto tryagain2; 2643 } 2644 /* Move the layout to the recall list. */ 2645 TAILQ_REMOVE(&clp->nfsc_layout, lyp, 2646 nfsly_list); 2647 LIST_REMOVE(lyp, nfsly_hash); 2648 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list); 2649 2650 /* Handle any layout commits. */ 2651 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) && 2652 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 2653 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 2654 NFSUNLOCKCLSTATE(); 2655 NFSCL_DEBUG(3, "do layoutcommit\n"); 2656 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, 2657 cred, p); 2658 NFSLOCKCLSTATE(); 2659 goto tryagain2; 2660 } 2661 } 2662 } 2663 2664 /* Now, look for stale layouts. */ 2665 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead); 2666 while (lyp != NULL) { 2667 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list); 2668 if (lyp->nfsly_timestamp < NFSD_MONOSEC && 2669 (lyp->nfsly_flags & NFSLY_RECALL) == 0 && 2670 lyp->nfsly_lock.nfslock_usecnt == 0 && 2671 lyp->nfsly_lock.nfslock_lock == 0) { 2672 NFSCL_DEBUG(4, "ret stale lay=%d\n", 2673 nfscl_layoutcnt); 2674 recallp = malloc(sizeof(*recallp), 2675 M_NFSLAYRECALL, M_NOWAIT); 2676 if (recallp == NULL) 2677 break; 2678 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, 2679 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX, 2680 lyp->nfsly_stateid.seqid, recallp); 2681 } 2682 lyp = nlyp; 2683 } 2684 2685 /* 2686 * Free up any unreferenced device info structures. 2687 */ 2688 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) { 2689 if (dip->nfsdi_layoutrefs == 0 && 2690 dip->nfsdi_refcnt == 0) { 2691 NFSCL_DEBUG(4, "freeing devinfo\n"); 2692 LIST_REMOVE(dip, nfsdi_list); 2693 nfscl_freedevinfo(dip); 2694 } 2695 } 2696 NFSUNLOCKCLSTATE(); 2697 2698 /* Do layout return(s), as required. 
*/ 2699 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) { 2700 TAILQ_REMOVE(&rlh, lyp, nfsly_list); 2701 NFSCL_DEBUG(4, "ret layout\n"); 2702 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p); 2703 nfscl_freelayout(lyp); 2704 } 2705 2706 /* 2707 * Delegreturn any delegations cleaned out or recalled. 2708 */ 2709 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { 2710 newnfs_copycred(&dp->nfsdl_cred, cred); 2711 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 2712 TAILQ_REMOVE(&dh, dp, nfsdl_list); 2713 FREE((caddr_t)dp, M_NFSCLDELEG); 2714 } 2715 2716 SLIST_INIT(&lfh); 2717 /* 2718 * Call nfscl_cleanupkext() once per second to check for 2719 * open/lock owners where the process has exited. 2720 */ 2721 mytime = NFSD_MONOSEC; 2722 if (prevsec != mytime) { 2723 prevsec = mytime; 2724 nfscl_cleanupkext(clp, &lfh); 2725 } 2726 2727 /* 2728 * Do a ReleaseLockOwner for all lock owners where the 2729 * associated process no longer exists, as found by 2730 * nfscl_cleanupkext(). 2731 */ 2732 newnfs_setroot(cred); 2733 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) { 2734 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list, 2735 nlp) { 2736 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp, 2737 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred, 2738 p); 2739 nfscl_freelockowner(lp, 0); 2740 } 2741 free(lfhp, M_TEMP); 2742 } 2743 SLIST_INIT(&lfh); 2744 2745 NFSLOCKCLSTATE(); 2746 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) 2747 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", 2748 hz); 2749 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { 2750 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; 2751 NFSUNLOCKCLSTATE(); 2752 NFSFREECRED(cred); 2753 wakeup((caddr_t)clp); 2754 return; 2755 } 2756 NFSUNLOCKCLSTATE(); 2757 } 2758 } 2759 2760 /* 2761 * Initiate state recovery. Called when NFSERR_STALECLIENTID, 2762 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received. 2763 */ 2764 APPLESTATIC void 2765 nfscl_initiate_recovery(struct nfsclclient *clp) 2766 { 2767 2768 if (clp == NULL) 2769 return; 2770 NFSLOCKCLSTATE(); 2771 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2772 NFSUNLOCKCLSTATE(); 2773 wakeup((caddr_t)clp); 2774 } 2775 2776 /* 2777 * Dump out the state stuff for debugging. 
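 * The openowner, opens, lockowner and locks arguments select which of
 * the corresponding list entries get printed.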
2778 */ 2779 APPLESTATIC void 2780 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, 2781 int lockowner, int locks) 2782 { 2783 struct nfsclclient *clp; 2784 struct nfsclowner *owp; 2785 struct nfsclopen *op; 2786 struct nfscllockowner *lp; 2787 struct nfscllock *lop; 2788 struct nfscldeleg *dp; 2789 2790 clp = nmp->nm_clp; 2791 if (clp == NULL) { 2792 printf("nfscl dumpstate NULL clp\n"); 2793 return; 2794 } 2795 NFSLOCKCLSTATE(); 2796 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2797 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2798 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2799 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2800 owp->nfsow_owner[0], owp->nfsow_owner[1], 2801 owp->nfsow_owner[2], owp->nfsow_owner[3], 2802 owp->nfsow_seqid); 2803 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2804 if (opens) 2805 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2806 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2807 op->nfso_stateid.other[2], op->nfso_opencnt, 2808 op->nfso_fh[12]); 2809 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2810 if (lockowner) 2811 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2812 lp->nfsl_owner[0], lp->nfsl_owner[1], 2813 lp->nfsl_owner[2], lp->nfsl_owner[3], 2814 lp->nfsl_seqid, 2815 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2816 lp->nfsl_stateid.other[2]); 2817 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2818 if (locks) 2819 #ifdef __FreeBSD__ 2820 printf("lck typ=%d fst=%ju end=%ju\n", 2821 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2822 (intmax_t)lop->nfslo_end); 2823 #else 2824 printf("lck typ=%d fst=%qd end=%qd\n", 2825 lop->nfslo_type, lop->nfslo_first, 2826 lop->nfslo_end); 2827 #endif 2828 } 2829 } 2830 } 2831 } 2832 } 2833 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2834 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2835 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2836 owp->nfsow_owner[0], owp->nfsow_owner[1], 2837 owp->nfsow_owner[2], owp->nfsow_owner[3], 2838 owp->nfsow_seqid); 2839 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2840 if (opens) 2841 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2842 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2843 op->nfso_stateid.other[2], op->nfso_opencnt, 2844 op->nfso_fh[12]); 2845 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2846 if (lockowner) 2847 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2848 lp->nfsl_owner[0], lp->nfsl_owner[1], 2849 lp->nfsl_owner[2], lp->nfsl_owner[3], 2850 lp->nfsl_seqid, 2851 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2852 lp->nfsl_stateid.other[2]); 2853 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2854 if (locks) 2855 #ifdef __FreeBSD__ 2856 printf("lck typ=%d fst=%ju end=%ju\n", 2857 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2858 (intmax_t)lop->nfslo_end); 2859 #else 2860 printf("lck typ=%d fst=%qd end=%qd\n", 2861 lop->nfslo_type, lop->nfslo_first, 2862 lop->nfslo_end); 2863 #endif 2864 } 2865 } 2866 } 2867 } 2868 NFSUNLOCKCLSTATE(); 2869 } 2870 2871 /* 2872 * Check for duplicate open owners and opens. 2873 * (Only used as a diagnostic aid.) 
2874 */ 2875 APPLESTATIC void 2876 nfscl_dupopen(vnode_t vp, int dupopens) 2877 { 2878 struct nfsclclient *clp; 2879 struct nfsclowner *owp, *owp2; 2880 struct nfsclopen *op, *op2; 2881 struct nfsfh *nfhp; 2882 2883 clp = VFSTONFS(vnode_mount(vp))->nm_clp; 2884 if (clp == NULL) { 2885 printf("nfscl dupopen NULL clp\n"); 2886 return; 2887 } 2888 nfhp = VTONFS(vp)->n_fhp; 2889 NFSLOCKCLSTATE(); 2890 2891 /* 2892 * First, search for duplicate owners. 2893 * These should never happen! 2894 */ 2895 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2896 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2897 if (owp != owp2 && 2898 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, 2899 NFSV4CL_LOCKNAMELEN)) { 2900 NFSUNLOCKCLSTATE(); 2901 printf("DUP OWNER\n"); 2902 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); 2903 return; 2904 } 2905 } 2906 } 2907 2908 /* 2909 * Now, search for duplicate stateids. 2910 * These shouldn't happen, either. 2911 */ 2912 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2913 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 2914 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2915 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2916 if (op != op2 && 2917 (op->nfso_stateid.other[0] != 0 || 2918 op->nfso_stateid.other[1] != 0 || 2919 op->nfso_stateid.other[2] != 0) && 2920 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && 2921 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && 2922 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { 2923 NFSUNLOCKCLSTATE(); 2924 printf("DUP STATEID\n"); 2925 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 2926 0); 2927 return; 2928 } 2929 } 2930 } 2931 } 2932 } 2933 2934 /* 2935 * Now search for duplicate opens. 2936 * Duplicate opens for the same owner 2937 * should never occur. Other duplicates are 2938 * possible and are checked for if "dupopens" 2939 * is true. 2940 */ 2941 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2942 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 2943 if (nfhp->nfh_len == op2->nfso_fhlen && 2944 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { 2945 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2946 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2947 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && 2948 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && 2949 (!NFSBCMP(op->nfso_own->nfsow_owner, 2950 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || 2951 dupopens)) { 2952 if (!NFSBCMP(op->nfso_own->nfsow_owner, 2953 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 2954 NFSUNLOCKCLSTATE(); 2955 printf("BADDUP OPEN\n"); 2956 } else { 2957 NFSUNLOCKCLSTATE(); 2958 printf("DUP OPEN\n"); 2959 } 2960 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 2961 0, 0); 2962 return; 2963 } 2964 } 2965 } 2966 } 2967 } 2968 } 2969 NFSUNLOCKCLSTATE(); 2970 } 2971 2972 /* 2973 * During close, find an open that needs to be dereferenced and 2974 * dereference it. If there are no more opens for this file, 2975 * log a message to that effect. 2976 * Opens aren't actually Close'd until VOP_INACTIVE() is performed 2977 * on the file's vnode. 2978 * This is the safe way, since it is difficult to identify 2979 * which open the close is for and I/O can be performed after the 2980 * close(2) system call when a file is mmap'd. 2981 * If it returns 0 for success, there will be a referenced 2982 * clp returned via clpp. 
2983 */ 2984 APPLESTATIC int 2985 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) 2986 { 2987 struct nfsclclient *clp; 2988 struct nfsclowner *owp; 2989 struct nfsclopen *op; 2990 struct nfscldeleg *dp; 2991 struct nfsfh *nfhp; 2992 int error, notdecr; 2993 2994 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); 2995 if (error) 2996 return (error); 2997 *clpp = clp; 2998 2999 nfhp = VTONFS(vp)->n_fhp; 3000 notdecr = 1; 3001 NFSLOCKCLSTATE(); 3002 /* 3003 * First, look for one under a delegation that was locally issued 3004 * and just decrement the opencnt for it. Since all my Opens against 3005 * the server are DENY_NONE, I don't see a problem with hanging 3006 * onto them. (It is much easier to use one of the extant Opens 3007 * that I already have on the server when a Delegation is recalled 3008 * than to do fresh Opens.) Someday, I might need to rethink this, but. 3009 */ 3010 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3011 if (dp != NULL) { 3012 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 3013 op = LIST_FIRST(&owp->nfsow_open); 3014 if (op != NULL) { 3015 /* 3016 * Since a delegation is for a file, there 3017 * should never be more than one open for 3018 * each openowner. 3019 */ 3020 if (LIST_NEXT(op, nfso_list) != NULL) 3021 panic("nfscdeleg opens"); 3022 if (notdecr && op->nfso_opencnt > 0) { 3023 notdecr = 0; 3024 op->nfso_opencnt--; 3025 break; 3026 } 3027 } 3028 } 3029 } 3030 3031 /* Now process the opens against the server. */ 3032 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3033 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3034 if (op->nfso_fhlen == nfhp->nfh_len && 3035 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3036 nfhp->nfh_len)) { 3037 /* Found an open, decrement cnt if possible */ 3038 if (notdecr && op->nfso_opencnt > 0) { 3039 notdecr = 0; 3040 op->nfso_opencnt--; 3041 } 3042 /* 3043 * There are more opens, so just return. 3044 */ 3045 if (op->nfso_opencnt > 0) { 3046 NFSUNLOCKCLSTATE(); 3047 return (0); 3048 } 3049 } 3050 } 3051 } 3052 NFSUNLOCKCLSTATE(); 3053 if (notdecr) 3054 printf("nfscl: never fnd open\n"); 3055 return (0); 3056 } 3057 3058 APPLESTATIC int 3059 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) 3060 { 3061 struct nfsclclient *clp; 3062 struct nfsclowner *owp, *nowp; 3063 struct nfsclopen *op; 3064 struct nfscldeleg *dp; 3065 struct nfsfh *nfhp; 3066 int error; 3067 3068 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); 3069 if (error) 3070 return (error); 3071 *clpp = clp; 3072 3073 nfhp = VTONFS(vp)->n_fhp; 3074 NFSLOCKCLSTATE(); 3075 /* 3076 * First get rid of the local Open structures, which should be no 3077 * longer in use. 3078 */ 3079 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3080 if (dp != NULL) { 3081 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 3082 op = LIST_FIRST(&owp->nfsow_open); 3083 if (op != NULL) { 3084 KASSERT((op->nfso_opencnt == 0), 3085 ("nfscl: bad open cnt on deleg")); 3086 nfscl_freeopen(op, 1); 3087 } 3088 nfscl_freeopenowner(owp, 1); 3089 } 3090 } 3091 3092 /* Return any layouts marked return on close. */ 3093 nfscl_retoncloselayout(clp, nfhp->nfh_fh, nfhp->nfh_len); 3094 3095 /* Now process the opens against the server. */ 3096 lookformore: 3097 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3098 op = LIST_FIRST(&owp->nfsow_open); 3099 while (op != NULL) { 3100 if (op->nfso_fhlen == nfhp->nfh_len && 3101 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3102 nfhp->nfh_len)) { 3103 /* Found an open, close it. 
*/ 3104 KASSERT((op->nfso_opencnt == 0), 3105 ("nfscl: bad open cnt on server")); 3106 NFSUNLOCKCLSTATE(); 3107 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op, 3108 p); 3109 NFSLOCKCLSTATE(); 3110 goto lookformore; 3111 } 3112 op = LIST_NEXT(op, nfso_list); 3113 } 3114 } 3115 NFSUNLOCKCLSTATE(); 3116 return (0); 3117 } 3118 3119 /* 3120 * Return all delegations on this client. 3121 * (Must be called with client sleep lock.) 3122 */ 3123 static void 3124 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) 3125 { 3126 struct nfscldeleg *dp, *ndp; 3127 struct ucred *cred; 3128 3129 cred = newnfs_getcred(); 3130 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { 3131 nfscl_cleandeleg(dp); 3132 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3133 nfscl_freedeleg(&clp->nfsc_deleg, dp); 3134 } 3135 NFSFREECRED(cred); 3136 } 3137 3138 /* 3139 * Do a callback RPC. 3140 */ 3141 APPLESTATIC void 3142 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) 3143 { 3144 int clist, gotseq_ok, i, j, k, op, rcalls; 3145 u_int32_t *tl; 3146 struct nfsclclient *clp; 3147 struct nfscldeleg *dp = NULL; 3148 int numops, taglen = -1, error = 0, trunc; 3149 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident; 3150 u_char tag[NFSV4_SMALLSTR + 1], *tagstr; 3151 vnode_t vp = NULL; 3152 struct nfsnode *np; 3153 struct vattr va; 3154 struct nfsfh *nfhp; 3155 mount_t mp; 3156 nfsattrbit_t attrbits, rattrbits; 3157 nfsv4stateid_t stateid; 3158 uint32_t seqid, slotid = 0, highslot, cachethis; 3159 uint8_t sessionid[NFSX_V4SESSIONID]; 3160 struct mbuf *rep; 3161 struct nfscllayout *lyp; 3162 uint64_t filesid[2], len, off; 3163 int changed, gotone, laytype, recalltype; 3164 uint32_t iomode; 3165 struct nfsclrecalllayout *recallp = NULL; 3166 3167 gotseq_ok = 0; 3168 nfsrvd_rephead(nd); 3169 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3170 taglen = fxdr_unsigned(int, *tl); 3171 if (taglen < 0) { 3172 error = EBADRPC; 3173 goto nfsmout; 3174 } 3175 if (taglen <= NFSV4_SMALLSTR) 3176 tagstr = tag; 3177 else 3178 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); 3179 error = nfsrv_mtostr(nd, tagstr, taglen); 3180 if (error) { 3181 if (taglen > NFSV4_SMALLSTR) 3182 free(tagstr, M_TEMP); 3183 taglen = -1; 3184 goto nfsmout; 3185 } 3186 (void) nfsm_strtom(nd, tag, taglen); 3187 if (taglen > NFSV4_SMALLSTR) { 3188 free(tagstr, M_TEMP); 3189 } 3190 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); 3191 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3192 minorvers = fxdr_unsigned(u_int32_t, *tl++); 3193 if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION) 3194 nd->nd_repstat = NFSERR_MINORVERMISMATCH; 3195 cbident = fxdr_unsigned(u_int32_t, *tl++); 3196 if (nd->nd_repstat) 3197 numops = 0; 3198 else 3199 numops = fxdr_unsigned(int, *tl); 3200 /* 3201 * Loop around doing the sub ops. 
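 * The status of each sub op is XDR encoded into the reply as it is
 * processed. On the first failure the loop exits early and retops,
 * patched into the reply via retopsp, tells the peer how many ops
 * were actually performed.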
3202 */ 3203 for (i = 0; i < numops; i++) { 3204 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3205 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); 3206 *repp++ = *tl; 3207 op = fxdr_unsigned(int, *tl); 3208 if (op < NFSV4OP_CBGETATTR || 3209 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) || 3210 (op > NFSV4OP_CBNOTIFYDEVID && 3211 minorvers == NFSV41_MINORVERSION)) { 3212 nd->nd_repstat = NFSERR_OPILLEGAL; 3213 *repp = nfscl_errmap(nd, minorvers); 3214 retops++; 3215 break; 3216 } 3217 nd->nd_procnum = op; 3218 if (op < NFSV4OP_CBNOPS) 3219 newnfsstats.cbrpccnt[nd->nd_procnum]++; 3220 switch (op) { 3221 case NFSV4OP_CBGETATTR: 3222 NFSCL_DEBUG(4, "cbgetattr\n"); 3223 mp = NULL; 3224 vp = NULL; 3225 error = nfsm_getfh(nd, &nfhp); 3226 if (!error) 3227 error = nfsrv_getattrbits(nd, &attrbits, 3228 NULL, NULL); 3229 if (error == 0 && i == 0 && 3230 minorvers != NFSV4_MINORVERSION) 3231 error = NFSERR_OPNOTINSESS; 3232 if (!error) { 3233 mp = nfscl_getmnt(minorvers, sessionid, cbident, 3234 &clp); 3235 if (mp == NULL) 3236 error = NFSERR_SERVERFAULT; 3237 } 3238 if (!error) { 3239 error = nfscl_ngetreopen(mp, nfhp->nfh_fh, 3240 nfhp->nfh_len, p, &np); 3241 if (!error) 3242 vp = NFSTOV(np); 3243 } 3244 if (!error) { 3245 NFSZERO_ATTRBIT(&rattrbits); 3246 NFSLOCKCLSTATE(); 3247 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3248 nfhp->nfh_len); 3249 if (dp != NULL) { 3250 if (NFSISSET_ATTRBIT(&attrbits, 3251 NFSATTRBIT_SIZE)) { 3252 if (vp != NULL) 3253 va.va_size = np->n_size; 3254 else 3255 va.va_size = 3256 dp->nfsdl_size; 3257 NFSSETBIT_ATTRBIT(&rattrbits, 3258 NFSATTRBIT_SIZE); 3259 } 3260 if (NFSISSET_ATTRBIT(&attrbits, 3261 NFSATTRBIT_CHANGE)) { 3262 va.va_filerev = 3263 dp->nfsdl_change; 3264 if (vp == NULL || 3265 (np->n_flag & NDELEGMOD)) 3266 va.va_filerev++; 3267 NFSSETBIT_ATTRBIT(&rattrbits, 3268 NFSATTRBIT_CHANGE); 3269 } 3270 } else 3271 error = NFSERR_SERVERFAULT; 3272 NFSUNLOCKCLSTATE(); 3273 } 3274 if (vp != NULL) 3275 vrele(vp); 3276 if (mp != NULL) 3277 vfs_unbusy(mp); 3278 if (nfhp != NULL) 3279 FREE((caddr_t)nfhp, M_NFSFH); 3280 if (!error) 3281 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, 3282 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0, 3283 (uint64_t)0); 3284 break; 3285 case NFSV4OP_CBRECALL: 3286 NFSCL_DEBUG(4, "cbrecall\n"); 3287 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3288 NFSX_UNSIGNED); 3289 stateid.seqid = *tl++; 3290 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, 3291 NFSX_STATEIDOTHER); 3292 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 3293 trunc = fxdr_unsigned(int, *tl); 3294 error = nfsm_getfh(nd, &nfhp); 3295 if (error == 0 && i == 0 && 3296 minorvers != NFSV4_MINORVERSION) 3297 error = NFSERR_OPNOTINSESS; 3298 if (!error) { 3299 NFSLOCKCLSTATE(); 3300 if (minorvers == NFSV4_MINORVERSION) 3301 clp = nfscl_getclnt(cbident); 3302 else 3303 clp = nfscl_getclntsess(sessionid); 3304 if (clp != NULL) { 3305 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3306 nfhp->nfh_len); 3307 if (dp != NULL && (dp->nfsdl_flags & 3308 NFSCLDL_DELEGRET) == 0) { 3309 dp->nfsdl_flags |= 3310 NFSCLDL_RECALL; 3311 wakeup((caddr_t)clp); 3312 } 3313 } else { 3314 error = NFSERR_SERVERFAULT; 3315 } 3316 NFSUNLOCKCLSTATE(); 3317 } 3318 if (nfhp != NULL) 3319 FREE((caddr_t)nfhp, M_NFSFH); 3320 break; 3321 case NFSV4OP_CBLAYOUTRECALL: 3322 NFSCL_DEBUG(4, "cblayrec\n"); 3323 nfhp = NULL; 3324 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED); 3325 laytype = fxdr_unsigned(int, *tl++); 3326 iomode = fxdr_unsigned(uint32_t, *tl++); 3327 if (newnfs_true == *tl++) 3328 changed = 1; 3329 else 
3330 changed = 0; 3331 recalltype = fxdr_unsigned(int, *tl); 3332 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, 3333 M_WAITOK); 3334 if (laytype != NFSLAYOUT_NFSV4_1_FILES) 3335 error = NFSERR_NOMATCHLAYOUT; 3336 else if (recalltype == NFSLAYOUTRETURN_FILE) { 3337 error = nfsm_getfh(nd, &nfhp); 3338 NFSCL_DEBUG(4, "retfile getfh=%d\n", error); 3339 if (error != 0) 3340 goto nfsmout; 3341 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER + 3342 NFSX_STATEID); 3343 off = fxdr_hyper(tl); tl += 2; 3344 len = fxdr_hyper(tl); tl += 2; 3345 stateid.seqid = fxdr_unsigned(uint32_t, *tl++); 3346 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER); 3347 if (minorvers == NFSV4_MINORVERSION) 3348 error = NFSERR_NOTSUPP; 3349 else if (i == 0) 3350 error = NFSERR_OPNOTINSESS; 3351 if (error == 0) { 3352 NFSLOCKCLSTATE(); 3353 clp = nfscl_getclntsess(sessionid); 3354 NFSCL_DEBUG(4, "cbly clp=%p\n", clp); 3355 if (clp != NULL) { 3356 lyp = nfscl_findlayout(clp, 3357 nfhp->nfh_fh, 3358 nfhp->nfh_len); 3359 NFSCL_DEBUG(4, "cblyp=%p\n", 3360 lyp); 3361 if (lyp != NULL && 3362 (lyp->nfsly_flags & 3363 NFSLY_FILES) != 0 && 3364 !NFSBCMP(stateid.other, 3365 lyp->nfsly_stateid.other, 3366 NFSX_STATEIDOTHER)) { 3367 error = 3368 nfscl_layoutrecall( 3369 recalltype, 3370 lyp, iomode, off, 3371 len, stateid.seqid, 3372 recallp); 3373 recallp = NULL; 3374 wakeup(clp); 3375 NFSCL_DEBUG(4, 3376 "aft layrcal=%d\n", 3377 error); 3378 } else 3379 error = 3380 NFSERR_NOMATCHLAYOUT; 3381 } else 3382 error = NFSERR_NOMATCHLAYOUT; 3383 NFSUNLOCKCLSTATE(); 3384 } 3385 free(nfhp, M_NFSFH); 3386 } else if (recalltype == NFSLAYOUTRETURN_FSID) { 3387 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER); 3388 filesid[0] = fxdr_hyper(tl); tl += 2; 3389 filesid[1] = fxdr_hyper(tl); tl += 2; 3390 gotone = 0; 3391 NFSLOCKCLSTATE(); 3392 clp = nfscl_getclntsess(sessionid); 3393 if (clp != NULL) { 3394 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3395 nfsly_list) { 3396 if (lyp->nfsly_filesid[0] == 3397 filesid[0] && 3398 lyp->nfsly_filesid[1] == 3399 filesid[1]) { 3400 error = 3401 nfscl_layoutrecall( 3402 recalltype, 3403 lyp, iomode, 0, 3404 UINT64_MAX, 3405 lyp->nfsly_stateid.seqid, 3406 recallp); 3407 recallp = NULL; 3408 gotone = 1; 3409 } 3410 } 3411 if (gotone != 0) 3412 wakeup(clp); 3413 else 3414 error = NFSERR_NOMATCHLAYOUT; 3415 } else 3416 error = NFSERR_NOMATCHLAYOUT; 3417 NFSUNLOCKCLSTATE(); 3418 } else if (recalltype == NFSLAYOUTRETURN_ALL) { 3419 gotone = 0; 3420 NFSLOCKCLSTATE(); 3421 clp = nfscl_getclntsess(sessionid); 3422 if (clp != NULL) { 3423 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3424 nfsly_list) { 3425 error = nfscl_layoutrecall( 3426 recalltype, lyp, iomode, 0, 3427 UINT64_MAX, 3428 lyp->nfsly_stateid.seqid, 3429 recallp); 3430 recallp = NULL; 3431 gotone = 1; 3432 } 3433 if (gotone != 0) 3434 wakeup(clp); 3435 else 3436 error = NFSERR_NOMATCHLAYOUT; 3437 } else 3438 error = NFSERR_NOMATCHLAYOUT; 3439 NFSUNLOCKCLSTATE(); 3440 } else 3441 error = NFSERR_NOMATCHLAYOUT; 3442 if (recallp != NULL) { 3443 free(recallp, M_NFSLAYRECALL); 3444 recallp = NULL; 3445 } 3446 break; 3447 case NFSV4OP_CBSEQUENCE: 3448 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3449 5 * NFSX_UNSIGNED); 3450 bcopy(tl, sessionid, NFSX_V4SESSIONID); 3451 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3452 seqid = fxdr_unsigned(uint32_t, *tl++); 3453 slotid = fxdr_unsigned(uint32_t, *tl++); 3454 highslot = fxdr_unsigned(uint32_t, *tl++); 3455 cachethis = *tl++; 3456 /* Throw away the referring call stuff. 
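			 * (It is dissected below only to advance past
			 * it in the request.)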
*/ 3457 clist = fxdr_unsigned(int, *tl); 3458 for (j = 0; j < clist; j++) { 3459 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3460 NFSX_UNSIGNED); 3461 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3462 rcalls = fxdr_unsigned(int, *tl); 3463 for (k = 0; k < rcalls; k++) { 3464 NFSM_DISSECT(tl, uint32_t *, 3465 2 * NFSX_UNSIGNED); 3466 } 3467 } 3468 NFSLOCKCLSTATE(); 3469 if (i == 0) { 3470 clp = nfscl_getclntsess(sessionid); 3471 if (clp == NULL) 3472 error = NFSERR_SERVERFAULT; 3473 } else 3474 error = NFSERR_SEQUENCEPOS; 3475 if (error == 0) 3476 error = nfsv4_seqsession(seqid, slotid, 3477 highslot, 3478 NFSMNT_MDSSESSION(clp->nfsc_nmp)-> 3479 nfsess_cbslots, &rep, 3480 NFSMNT_MDSSESSION(clp->nfsc_nmp)-> 3481 nfsess_backslots); 3482 NFSUNLOCKCLSTATE(); 3483 if (error == 0) { 3484 gotseq_ok = 1; 3485 if (rep != NULL) { 3486 NFSCL_DEBUG(4, "Got cbretry\n"); 3487 m_freem(nd->nd_mreq); 3488 nd->nd_mreq = rep; 3489 rep = NULL; 3490 goto out; 3491 } 3492 NFSM_BUILD(tl, uint32_t *, 3493 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED); 3494 bcopy(sessionid, tl, NFSX_V4SESSIONID); 3495 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3496 *tl++ = txdr_unsigned(seqid); 3497 *tl++ = txdr_unsigned(slotid); 3498 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1); 3499 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1); 3500 } 3501 break; 3502 default: 3503 if (i == 0 && minorvers == NFSV41_MINORVERSION) 3504 error = NFSERR_OPNOTINSESS; 3505 else { 3506 NFSCL_DEBUG(1, "unsupp callback %d\n", op); 3507 error = NFSERR_NOTSUPP; 3508 } 3509 break; 3510 }; 3511 if (error) { 3512 if (error == EBADRPC || error == NFSERR_BADXDR) { 3513 nd->nd_repstat = NFSERR_BADXDR; 3514 } else { 3515 nd->nd_repstat = error; 3516 } 3517 error = 0; 3518 } 3519 retops++; 3520 if (nd->nd_repstat) { 3521 *repp = nfscl_errmap(nd, minorvers); 3522 break; 3523 } else 3524 *repp = 0; /* NFS4_OK */ 3525 } 3526 nfsmout: 3527 if (recallp != NULL) 3528 free(recallp, M_NFSLAYRECALL); 3529 if (error) { 3530 if (error == EBADRPC || error == NFSERR_BADXDR) 3531 nd->nd_repstat = NFSERR_BADXDR; 3532 else 3533 printf("nfsv4 comperr1=%d\n", error); 3534 } 3535 if (taglen == -1) { 3536 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 3537 *tl++ = 0; 3538 *tl = 0; 3539 } else { 3540 *retopsp = txdr_unsigned(retops); 3541 } 3542 *nd->nd_errp = nfscl_errmap(nd, minorvers); 3543 out: 3544 if (gotseq_ok != 0) { 3545 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK); 3546 NFSLOCKCLSTATE(); 3547 clp = nfscl_getclntsess(sessionid); 3548 if (clp != NULL) { 3549 nfsv4_seqsess_cacherep(slotid, 3550 NFSMNT_MDSSESSION(clp->nfsc_nmp)->nfsess_cbslots, 3551 rep); 3552 NFSUNLOCKCLSTATE(); 3553 } else { 3554 NFSUNLOCKCLSTATE(); 3555 m_freem(rep); 3556 } 3557 } 3558 } 3559 3560 /* 3561 * Generate the next cbident value. Basically just increment a static value 3562 * and then check that it isn't already in the list, if it has wrapped around. 3563 */ 3564 static u_int32_t 3565 nfscl_nextcbident(void) 3566 { 3567 struct nfsclclient *clp; 3568 int matched; 3569 static u_int32_t nextcbident = 0; 3570 static int haswrapped = 0; 3571 3572 nextcbident++; 3573 if (nextcbident == 0) 3574 haswrapped = 1; 3575 if (haswrapped) { 3576 /* 3577 * Search the clientid list for one already using this cbident. 
3578 */ 3579 do { 3580 matched = 0; 3581 NFSLOCKCLSTATE(); 3582 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3583 if (clp->nfsc_cbident == nextcbident) { 3584 matched = 1; 3585 break; 3586 } 3587 } 3588 NFSUNLOCKCLSTATE(); 3589 if (matched == 1) 3590 nextcbident++; 3591 } while (matched); 3592 } 3593 return (nextcbident); 3594 } 3595 3596 /* 3597 * Get the mount point related to a given cbident or session and busy it. 3598 */ 3599 static mount_t 3600 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident, 3601 struct nfsclclient **clpp) 3602 { 3603 struct nfsclclient *clp; 3604 mount_t mp; 3605 int error; 3606 3607 *clpp = NULL; 3608 NFSLOCKCLSTATE(); 3609 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3610 if (minorvers == NFSV4_MINORVERSION) { 3611 if (clp->nfsc_cbident == cbident) 3612 break; 3613 } else if (!NFSBCMP(NFSMNT_MDSSESSION(clp->nfsc_nmp)-> 3614 nfsess_sessionid, sessionid, NFSX_V4SESSIONID)) 3615 break; 3616 } 3617 if (clp == NULL) { 3618 NFSUNLOCKCLSTATE(); 3619 return (NULL); 3620 } 3621 mp = clp->nfsc_nmp->nm_mountp; 3622 vfs_ref(mp); 3623 NFSUNLOCKCLSTATE(); 3624 error = vfs_busy(mp, 0); 3625 vfs_rel(mp); 3626 if (error != 0) 3627 return (NULL); 3628 *clpp = clp; 3629 return (mp); 3630 } 3631 3632 /* 3633 * Get the clientid pointer related to a given cbident. 3634 */ 3635 static struct nfsclclient * 3636 nfscl_getclnt(u_int32_t cbident) 3637 { 3638 struct nfsclclient *clp; 3639 3640 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 3641 if (clp->nfsc_cbident == cbident) 3642 break; 3643 return (clp); 3644 } 3645 3646 /* 3647 * Get the clientid pointer related to a given sessionid. 3648 */ 3649 static struct nfsclclient * 3650 nfscl_getclntsess(uint8_t *sessionid) 3651 { 3652 struct nfsclclient *clp; 3653 3654 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 3655 if (!NFSBCMP(NFSMNT_MDSSESSION(clp->nfsc_nmp)->nfsess_sessionid, 3656 sessionid, NFSX_V4SESSIONID)) 3657 break; 3658 return (clp); 3659 } 3660 3661 /* 3662 * Search for a lock conflict locally on the client. A conflict occurs if 3663 * - not same owner and overlapping byte range and at least one of them is 3664 * a write lock or this is an unlock. 
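 * For example (hypothetical ranges): a write lock held for [0, 10)
 * conflicts with a read lock requested by a different owner for
 * [5, 15), whereas two read locks never conflict with each other.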
3665 */ 3666 static int 3667 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen, 3668 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp, 3669 struct nfscllock **lopp) 3670 { 3671 struct nfsclowner *owp; 3672 struct nfsclopen *op; 3673 int ret; 3674 3675 if (dp != NULL) { 3676 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); 3677 if (ret) 3678 return (ret); 3679 } 3680 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3681 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3682 if (op->nfso_fhlen == fhlen && 3683 !NFSBCMP(op->nfso_fh, fhp, fhlen)) { 3684 ret = nfscl_checkconflict(&op->nfso_lock, nlop, 3685 own, lopp); 3686 if (ret) 3687 return (ret); 3688 } 3689 } 3690 } 3691 return (0); 3692 } 3693 3694 static int 3695 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, 3696 u_int8_t *own, struct nfscllock **lopp) 3697 { 3698 struct nfscllockowner *lp; 3699 struct nfscllock *lop; 3700 3701 LIST_FOREACH(lp, lhp, nfsl_list) { 3702 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { 3703 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3704 if (lop->nfslo_first >= nlop->nfslo_end) 3705 break; 3706 if (lop->nfslo_end <= nlop->nfslo_first) 3707 continue; 3708 if (lop->nfslo_type == F_WRLCK || 3709 nlop->nfslo_type == F_WRLCK || 3710 nlop->nfslo_type == F_UNLCK) { 3711 if (lopp != NULL) 3712 *lopp = lop; 3713 return (NFSERR_DENIED); 3714 } 3715 } 3716 } 3717 } 3718 return (0); 3719 } 3720 3721 /* 3722 * Check for a local conflicting lock. 3723 */ 3724 APPLESTATIC int 3725 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, 3726 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags) 3727 { 3728 struct nfscllock *lop, nlck; 3729 struct nfscldeleg *dp; 3730 struct nfsnode *np; 3731 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 3732 int error; 3733 3734 nlck.nfslo_type = fl->l_type; 3735 nlck.nfslo_first = off; 3736 if (len == NFS64BITSSET) { 3737 nlck.nfslo_end = NFS64BITSSET; 3738 } else { 3739 nlck.nfslo_end = off + len; 3740 if (nlck.nfslo_end <= nlck.nfslo_first) 3741 return (NFSERR_INVAL); 3742 } 3743 np = VTONFS(vp); 3744 nfscl_filllockowner(id, own, flags); 3745 NFSLOCKCLSTATE(); 3746 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3747 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 3748 &nlck, own, dp, &lop); 3749 if (error != 0) { 3750 fl->l_whence = SEEK_SET; 3751 fl->l_start = lop->nfslo_first; 3752 if (lop->nfslo_end == NFS64BITSSET) 3753 fl->l_len = 0; 3754 else 3755 fl->l_len = lop->nfslo_end - lop->nfslo_first; 3756 fl->l_pid = (pid_t)0; 3757 fl->l_type = lop->nfslo_type; 3758 error = -1; /* no RPC required */ 3759 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || 3760 fl->l_type == F_RDLCK)) { 3761 /* 3762 * The delegation ensures that there isn't a conflicting 3763 * lock on the server, so return -1 to indicate an RPC 3764 * isn't required. 3765 */ 3766 fl->l_type = F_UNLCK; 3767 error = -1; 3768 } 3769 NFSUNLOCKCLSTATE(); 3770 return (error); 3771 } 3772 3773 /* 3774 * Handle Recall of a delegation. 3775 * The clp must be exclusive locked when this is called. 
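 * In outline (a summary of the code below): get a vnode for the file
 * if the caller didn't supply one, flush dirty data to the server when
 * a write delegation is being recalled, move any locally issued opens
 * over to opens against the server and then re-acquire the byte range
 * locks that were done locally.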
3776 */ 3777 static int 3778 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, 3779 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p, 3780 int called_from_renewthread) 3781 { 3782 struct nfsclowner *owp, *lowp, *nowp; 3783 struct nfsclopen *op, *lop; 3784 struct nfscllockowner *lp; 3785 struct nfscllock *lckp; 3786 struct nfsnode *np; 3787 int error = 0, ret, gotvp = 0; 3788 3789 if (vp == NULL) { 3790 /* 3791 * First, get a vnode for the file. This is needed to do RPCs. 3792 */ 3793 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, 3794 dp->nfsdl_fhlen, p, &np); 3795 if (ret) { 3796 /* 3797 * File isn't open, so nothing to move over to the 3798 * server. 3799 */ 3800 return (0); 3801 } 3802 vp = NFSTOV(np); 3803 gotvp = 1; 3804 } else { 3805 np = VTONFS(vp); 3806 } 3807 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; 3808 3809 /* 3810 * Ok, if it's a write delegation, flush data to the server, so 3811 * that close/open consistency is retained. 3812 */ 3813 ret = 0; 3814 NFSLOCKNODE(np); 3815 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { 3816 np->n_flag |= NDELEGRECALL; 3817 NFSUNLOCKNODE(np); 3818 ret = ncl_flush(vp, MNT_WAIT, cred, p, 1, 3819 called_from_renewthread); 3820 NFSLOCKNODE(np); 3821 np->n_flag &= ~NDELEGRECALL; 3822 } 3823 NFSINVALATTRCACHE(np); 3824 NFSUNLOCKNODE(np); 3825 if (ret == EIO && called_from_renewthread != 0) { 3826 /* 3827 * If the flush failed with EIO for the renew thread, 3828 * return now, so that the dirty buffer will be flushed 3829 * later. 3830 */ 3831 if (gotvp != 0) 3832 vrele(vp); 3833 return (ret); 3834 } 3835 3836 /* 3837 * Now, for each openowner with opens issued locally, move them 3838 * over to state against the server. 3839 */ 3840 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) { 3841 lop = LIST_FIRST(&lowp->nfsow_open); 3842 if (lop != NULL) { 3843 if (LIST_NEXT(lop, nfso_list) != NULL) 3844 panic("nfsdlg mult opens"); 3845 /* 3846 * Look for the same openowner against the server. 3847 */ 3848 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3849 if (!NFSBCMP(lowp->nfsow_owner, 3850 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 3851 newnfs_copycred(&dp->nfsdl_cred, cred); 3852 ret = nfscl_moveopen(vp, clp, nmp, lop, 3853 owp, dp, cred, p); 3854 if (ret == NFSERR_STALECLIENTID || 3855 ret == NFSERR_STALEDONTRECOVER || 3856 ret == NFSERR_BADSESSION) { 3857 if (gotvp) 3858 vrele(vp); 3859 return (ret); 3860 } 3861 if (ret) { 3862 nfscl_freeopen(lop, 1); 3863 if (!error) 3864 error = ret; 3865 } 3866 break; 3867 } 3868 } 3869 3870 /* 3871 * If no openowner found, create one and get an open 3872 * for it. 3873 */ 3874 if (owp == NULL) { 3875 MALLOC(nowp, struct nfsclowner *, 3876 sizeof (struct nfsclowner), M_NFSCLOWNER, 3877 M_WAITOK); 3878 nfscl_newopen(clp, NULL, &owp, &nowp, &op, 3879 NULL, lowp->nfsow_owner, dp->nfsdl_fh, 3880 dp->nfsdl_fhlen, NULL); 3881 newnfs_copycred(&dp->nfsdl_cred, cred); 3882 ret = nfscl_moveopen(vp, clp, nmp, lop, 3883 owp, dp, cred, p); 3884 if (ret) { 3885 nfscl_freeopenowner(owp, 0); 3886 if (ret == NFSERR_STALECLIENTID || 3887 ret == NFSERR_STALEDONTRECOVER || 3888 ret == NFSERR_BADSESSION) { 3889 if (gotvp) 3890 vrele(vp); 3891 return (ret); 3892 } 3893 if (ret) { 3894 nfscl_freeopen(lop, 1); 3895 if (!error) 3896 error = ret; 3897 } 3898 } 3899 } 3900 } 3901 } 3902 3903 /* 3904 * Now, get byte range locks for any locks done locally. 
3905 */ 3906 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 3907 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) { 3908 newnfs_copycred(&dp->nfsdl_cred, cred); 3909 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p); 3910 if (ret == NFSERR_STALESTATEID || 3911 ret == NFSERR_STALEDONTRECOVER || 3912 ret == NFSERR_STALECLIENTID || 3913 ret == NFSERR_BADSESSION) { 3914 if (gotvp) 3915 vrele(vp); 3916 return (ret); 3917 } 3918 if (ret && !error) 3919 error = ret; 3920 } 3921 } 3922 if (gotvp) 3923 vrele(vp); 3924 return (error); 3925 } 3926 3927 /* 3928 * Move a locally issued open over to an owner on the state list. 3929 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and 3930 * returns with it unlocked. 3931 */ 3932 static int 3933 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 3934 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp, 3935 struct ucred *cred, NFSPROC_T *p) 3936 { 3937 struct nfsclopen *op, *nop; 3938 struct nfscldeleg *ndp; 3939 struct nfsnode *np; 3940 int error = 0, newone; 3941 3942 /* 3943 * First, look for an appropriate open. If found, just increment the 3944 * opencnt in it. 3945 */ 3946 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3947 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode && 3948 op->nfso_fhlen == lop->nfso_fhlen && 3949 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) { 3950 op->nfso_opencnt += lop->nfso_opencnt; 3951 nfscl_freeopen(lop, 1); 3952 return (0); 3953 } 3954 } 3955 3956 /* No appropriate open, so we have to do one against the server. */ 3957 np = VTONFS(vp); 3958 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) + 3959 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK); 3960 newone = 0; 3961 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner, 3962 lop->nfso_fh, lop->nfso_fhlen, &newone); 3963 ndp = dp; 3964 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen, 3965 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op, 3966 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p); 3967 if (error) { 3968 if (newone) 3969 nfscl_freeopen(op, 0); 3970 } else { 3971 if (newone) 3972 newnfs_copyincred(cred, &op->nfso_cred); 3973 op->nfso_mode |= lop->nfso_mode; 3974 op->nfso_opencnt += lop->nfso_opencnt; 3975 nfscl_freeopen(lop, 1); 3976 } 3977 if (nop != NULL) 3978 FREE((caddr_t)nop, M_NFSCLOPEN); 3979 if (ndp != NULL) { 3980 /* 3981 * What should I do with the returned delegation, since the 3982 * delegation is being recalled? For now, just printf and 3983 * throw it away. 3984 */ 3985 printf("Moveopen returned deleg\n"); 3986 FREE((caddr_t)ndp, M_NFSCLDELEG); 3987 } 3988 return (error); 3989 } 3990 3991 /* 3992 * Recall all delegations on this client. 3993 */ 3994 static void 3995 nfscl_totalrecall(struct nfsclclient *clp) 3996 { 3997 struct nfscldeleg *dp; 3998 3999 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 4000 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) 4001 dp->nfsdl_flags |= NFSCLDL_RECALL; 4002 } 4003 } 4004 4005 /* 4006 * Relock byte ranges. Called for delegation recall and state expiry.
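 * An outline of the two steps performed by this function:
 *	1. nfscl_getbytelock() re-creates the lock locally and reports,
 *	   via donelocally, whether a LOCK RPC is still required.
 *	2. If it is, nfscl_trylock() issues the RPC, retrying on
 *	   NFSERR_DELAY and falling back to system credentials.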
4007 */ 4008 static int 4009 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 4010 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred, 4011 NFSPROC_T *p) 4012 { 4013 struct nfscllockowner *nlp; 4014 struct nfsfh *nfhp; 4015 u_int64_t off, len; 4016 u_int32_t clidrev = 0; 4017 int error, newone, donelocally; 4018 4019 off = lop->nfslo_first; 4020 len = lop->nfslo_end - lop->nfslo_first; 4021 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p, 4022 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner, 4023 lp->nfsl_openowner, &nlp, &newone, &donelocally); 4024 if (error || donelocally) 4025 return (error); 4026 if (nmp->nm_clp != NULL) 4027 clidrev = nmp->nm_clp->nfsc_clientidrev; 4028 else 4029 clidrev = 0; 4030 nfhp = VTONFS(vp)->n_fhp; 4031 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh, 4032 nfhp->nfh_len, nlp, newone, 0, off, 4033 len, lop->nfslo_type, cred, p); 4034 if (error) 4035 nfscl_freelockowner(nlp, 0); 4036 return (error); 4037 } 4038 4039 /* 4040 * Called to re-open a file. Basically get a vnode for the file handle 4041 * and then call nfsrpc_openrpc() to do the rest. 4042 */ 4043 static int 4044 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, 4045 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp, 4046 struct ucred *cred, NFSPROC_T *p) 4047 { 4048 struct nfsnode *np; 4049 vnode_t vp; 4050 int error; 4051 4052 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np); 4053 if (error) 4054 return (error); 4055 vp = NFSTOV(np); 4056 if (np->n_v4 != NULL) { 4057 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, 4058 np->n_v4->n4_fhlen, fhp, fhlen, mode, op, 4059 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0, 4060 cred, p); 4061 } else { 4062 error = EINVAL; 4063 } 4064 vrele(vp); 4065 return (error); 4066 } 4067 4068 /* 4069 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while 4070 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials 4071 * fail. 4072 */ 4073 static int 4074 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4075 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, 4076 u_int8_t *name, int namelen, struct nfscldeleg **ndpp, 4077 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) 4078 { 4079 int error; 4080 4081 do { 4082 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, 4083 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 4084 0, 0); 4085 if (error == NFSERR_DELAY) 4086 (void) nfs_catnap(PZERO, error, "nfstryop"); 4087 } while (error == NFSERR_DELAY); 4088 if (error == EAUTH || error == EACCES) { 4089 /* Try again using system credentials */ 4090 newnfs_setroot(cred); 4091 do { 4092 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, 4093 newfhlen, mode, op, name, namelen, ndpp, reclaim, 4094 delegtype, cred, p, 1, 0); 4095 if (error == NFSERR_DELAY) 4096 (void) nfs_catnap(PZERO, error, "nfstryop"); 4097 } while (error == NFSERR_DELAY); 4098 } 4099 return (error); 4100 } 4101 4102 /* 4103 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns 4104 * NFSERR_DELAY. Also, retry with system credentials, if the provided 4105 * cred doesn't work.
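 * Note: for the EAUTH/EACCES fallback, newnfs_setroot() rewrites the
 * passed-in credentials in place, so callers should not expect them
 * to be preserved across this call.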
4106 */ 4107 static int 4108 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, 4109 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim, 4110 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p) 4111 { 4112 struct nfsrv_descript nfsd, *nd = &nfsd; 4113 int error; 4114 4115 do { 4116 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, 4117 reclaim, off, len, type, cred, p, 0); 4118 if (!error && nd->nd_repstat == NFSERR_DELAY) 4119 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 4120 "nfstrylck"); 4121 } while (!error && nd->nd_repstat == NFSERR_DELAY); 4122 if (!error) 4123 error = nd->nd_repstat; 4124 if (error == EAUTH || error == EACCES) { 4125 /* Try again using root credentials */ 4126 newnfs_setroot(cred); 4127 do { 4128 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, 4129 newone, reclaim, off, len, type, cred, p, 1); 4130 if (!error && nd->nd_repstat == NFSERR_DELAY) 4131 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 4132 "nfstrylck"); 4133 } while (!error && nd->nd_repstat == NFSERR_DELAY); 4134 if (!error) 4135 error = nd->nd_repstat; 4136 } 4137 return (error); 4138 } 4139 4140 /* 4141 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(), 4142 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 4143 * credentials fail. 4144 */ 4145 static int 4146 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred, 4147 struct nfsmount *nmp, NFSPROC_T *p) 4148 { 4149 int error; 4150 4151 do { 4152 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0); 4153 if (error == NFSERR_DELAY) 4154 (void) nfs_catnap(PZERO, error, "nfstrydp"); 4155 } while (error == NFSERR_DELAY); 4156 if (error == EAUTH || error == EACCES) { 4157 /* Try again using system credentials */ 4158 newnfs_setroot(cred); 4159 do { 4160 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1); 4161 if (error == NFSERR_DELAY) 4162 (void) nfs_catnap(PZERO, error, "nfstrydp"); 4163 } while (error == NFSERR_DELAY); 4164 } 4165 return (error); 4166 } 4167 4168 /* 4169 * Try a close against the server. Just call nfsrpc_closerpc(), 4170 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 4171 * credentials fail. 4172 */ 4173 APPLESTATIC int 4174 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred, 4175 struct nfsmount *nmp, NFSPROC_T *p) 4176 { 4177 struct nfsrv_descript nfsd, *nd = &nfsd; 4178 int error; 4179 4180 do { 4181 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0); 4182 if (error == NFSERR_DELAY) 4183 (void) nfs_catnap(PZERO, error, "nfstrycl"); 4184 } while (error == NFSERR_DELAY); 4185 if (error == EAUTH || error == EACCES) { 4186 /* Try again using system credentials */ 4187 newnfs_setroot(cred); 4188 do { 4189 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1); 4190 if (error == NFSERR_DELAY) 4191 (void) nfs_catnap(PZERO, error, "nfstrycl"); 4192 } while (error == NFSERR_DELAY); 4193 } 4194 return (error); 4195 } 4196 4197 /* 4198 * Decide if a delegation on a file permits close without flushing writes 4199 * to the server. This might be a big performance win in some environments. 4200 * (Not useful until the client does caching on local stable storage.) 
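 *
 * Illustrative use only (not taken from this file): a close path
 * might combine it with ncl_flush() as follows:
 *
 *	if (nfscl_mustflush(vp))
 *		(void) ncl_flush(vp, MNT_WAIT, cred, p, 1, 0);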
4201 */ 4202 APPLESTATIC int 4203 nfscl_mustflush(vnode_t vp) 4204 { 4205 struct nfsclclient *clp; 4206 struct nfscldeleg *dp; 4207 struct nfsnode *np; 4208 struct nfsmount *nmp; 4209 4210 np = VTONFS(vp); 4211 nmp = VFSTONFS(vnode_mount(vp)); 4212 if (!NFSHASNFSV4(nmp)) 4213 return (1); 4214 NFSLOCKCLSTATE(); 4215 clp = nfscl_findcl(nmp); 4216 if (clp == NULL) { 4217 NFSUNLOCKCLSTATE(); 4218 return (1); 4219 } 4220 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4221 if (dp != NULL && (dp->nfsdl_flags & 4222 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 4223 NFSCLDL_WRITE && 4224 (dp->nfsdl_sizelimit >= np->n_size || 4225 !NFSHASSTRICT3530(nmp))) { 4226 NFSUNLOCKCLSTATE(); 4227 return (0); 4228 } 4229 NFSUNLOCKCLSTATE(); 4230 return (1); 4231 } 4232 4233 /* 4234 * See if a (write) delegation exists for this file. 4235 */ 4236 APPLESTATIC int 4237 nfscl_nodeleg(vnode_t vp, int writedeleg) 4238 { 4239 struct nfsclclient *clp; 4240 struct nfscldeleg *dp; 4241 struct nfsnode *np; 4242 struct nfsmount *nmp; 4243 4244 np = VTONFS(vp); 4245 nmp = VFSTONFS(vnode_mount(vp)); 4246 if (!NFSHASNFSV4(nmp)) 4247 return (1); 4248 NFSLOCKCLSTATE(); 4249 clp = nfscl_findcl(nmp); 4250 if (clp == NULL) { 4251 NFSUNLOCKCLSTATE(); 4252 return (1); 4253 } 4254 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4255 if (dp != NULL && 4256 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 && 4257 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) == 4258 NFSCLDL_WRITE)) { 4259 NFSUNLOCKCLSTATE(); 4260 return (0); 4261 } 4262 NFSUNLOCKCLSTATE(); 4263 return (1); 4264 } 4265 4266 /* 4267 * Look for an associated delegation that should be DelegReturned. 4268 */ 4269 APPLESTATIC int 4270 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp) 4271 { 4272 struct nfsclclient *clp; 4273 struct nfscldeleg *dp; 4274 struct nfsclowner *owp; 4275 struct nfscllockowner *lp; 4276 struct nfsmount *nmp; 4277 struct ucred *cred; 4278 struct nfsnode *np; 4279 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4280 4281 nmp = VFSTONFS(vnode_mount(vp)); 4282 np = VTONFS(vp); 4283 NFSLOCKCLSTATE(); 4284 /* 4285 * Loop around waiting for: 4286 * - outstanding I/O operations on delegations to complete 4287 * - for a delegation on vp that has state, lock the client and 4288 * do a recall 4289 * - return delegation with no state 4290 */ 4291 while (1) { 4292 clp = nfscl_findcl(nmp); 4293 if (clp == NULL) { 4294 NFSUNLOCKCLSTATE(); 4295 return (retcnt); 4296 } 4297 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4298 np->n_fhp->nfh_len); 4299 if (dp != NULL) { 4300 /* 4301 * Wait for outstanding I/O ops to be done. 
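 * The delegation's rwlock use count is non-zero while I/O that
 * depends on the delegation is in progress; NFSV4LOCK_WANTED
 * requests a wakeup when the count drains, and the msleep below
 * releases the state mutex while waiting.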
4302 */ 4303 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4304 if (igotlock) { 4305 nfsv4_unlock(&clp->nfsc_lock, 0); 4306 igotlock = 0; 4307 } 4308 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4309 (void) nfsmsleep(&dp->nfsdl_rwlock, 4310 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4311 continue; 4312 } 4313 needsrecall = 0; 4314 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4315 if (!LIST_EMPTY(&owp->nfsow_open)) { 4316 needsrecall = 1; 4317 break; 4318 } 4319 } 4320 if (!needsrecall) { 4321 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4322 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4323 needsrecall = 1; 4324 break; 4325 } 4326 } 4327 } 4328 if (needsrecall && !triedrecall) { 4329 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4330 islept = 0; 4331 while (!igotlock) { 4332 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4333 &islept, NFSCLSTATEMUTEXPTR, NULL); 4334 if (islept) 4335 break; 4336 } 4337 if (islept) 4338 continue; 4339 NFSUNLOCKCLSTATE(); 4340 cred = newnfs_getcred(); 4341 newnfs_copycred(&dp->nfsdl_cred, cred); 4342 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0); 4343 NFSFREECRED(cred); 4344 triedrecall = 1; 4345 NFSLOCKCLSTATE(); 4346 nfsv4_unlock(&clp->nfsc_lock, 0); 4347 igotlock = 0; 4348 continue; 4349 } 4350 *stp = dp->nfsdl_stateid; 4351 retcnt = 1; 4352 nfscl_cleandeleg(dp); 4353 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4354 } 4355 if (igotlock) 4356 nfsv4_unlock(&clp->nfsc_lock, 0); 4357 NFSUNLOCKCLSTATE(); 4358 return (retcnt); 4359 } 4360 } 4361 4362 /* 4363 * Look for associated delegation(s) that should be DelegReturned. 4364 */ 4365 APPLESTATIC int 4366 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp, 4367 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p) 4368 { 4369 struct nfsclclient *clp; 4370 struct nfscldeleg *dp; 4371 struct nfsclowner *owp; 4372 struct nfscllockowner *lp; 4373 struct nfsmount *nmp; 4374 struct ucred *cred; 4375 struct nfsnode *np; 4376 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4377 4378 nmp = VFSTONFS(vnode_mount(fvp)); 4379 *gotfdp = 0; 4380 *gottdp = 0; 4381 NFSLOCKCLSTATE(); 4382 /* 4383 * Loop around waiting for: 4384 * - outstanding I/O operations on delegations to complete 4385 * - for a delegation on fvp that has state, lock the client and 4386 * do a recall 4387 * - return delegation(s) with no state. 4388 */ 4389 while (1) { 4390 clp = nfscl_findcl(nmp); 4391 if (clp == NULL) { 4392 NFSUNLOCKCLSTATE(); 4393 return (retcnt); 4394 } 4395 np = VTONFS(fvp); 4396 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4397 np->n_fhp->nfh_len); 4398 if (dp != NULL && *gotfdp == 0) { 4399 /* 4400 * Wait for outstanding I/O ops to be done. 
4401 */ 4402 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4403 if (igotlock) { 4404 nfsv4_unlock(&clp->nfsc_lock, 0); 4405 igotlock = 0; 4406 } 4407 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4408 (void) nfsmsleep(&dp->nfsdl_rwlock, 4409 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4410 continue; 4411 } 4412 needsrecall = 0; 4413 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4414 if (!LIST_EMPTY(&owp->nfsow_open)) { 4415 needsrecall = 1; 4416 break; 4417 } 4418 } 4419 if (!needsrecall) { 4420 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4421 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4422 needsrecall = 1; 4423 break; 4424 } 4425 } 4426 } 4427 if (needsrecall && !triedrecall) { 4428 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4429 islept = 0; 4430 while (!igotlock) { 4431 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4432 &islept, NFSCLSTATEMUTEXPTR, NULL); 4433 if (islept) 4434 break; 4435 } 4436 if (islept) 4437 continue; 4438 NFSUNLOCKCLSTATE(); 4439 cred = newnfs_getcred(); 4440 newnfs_copycred(&dp->nfsdl_cred, cred); 4441 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0); 4442 NFSFREECRED(cred); 4443 triedrecall = 1; 4444 NFSLOCKCLSTATE(); 4445 nfsv4_unlock(&clp->nfsc_lock, 0); 4446 igotlock = 0; 4447 continue; 4448 } 4449 *fstp = dp->nfsdl_stateid; 4450 retcnt++; 4451 *gotfdp = 1; 4452 nfscl_cleandeleg(dp); 4453 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4454 } 4455 if (igotlock) { 4456 nfsv4_unlock(&clp->nfsc_lock, 0); 4457 igotlock = 0; 4458 } 4459 if (tvp != NULL) { 4460 np = VTONFS(tvp); 4461 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4462 np->n_fhp->nfh_len); 4463 if (dp != NULL && *gottdp == 0) { 4464 /* 4465 * Wait for outstanding I/O ops to be done. 4466 */ 4467 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4468 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4469 (void) nfsmsleep(&dp->nfsdl_rwlock, 4470 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4471 continue; 4472 } 4473 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4474 if (!LIST_EMPTY(&owp->nfsow_open)) { 4475 NFSUNLOCKCLSTATE(); 4476 return (retcnt); 4477 } 4478 } 4479 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4480 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4481 NFSUNLOCKCLSTATE(); 4482 return (retcnt); 4483 } 4484 } 4485 *tstp = dp->nfsdl_stateid; 4486 retcnt++; 4487 *gottdp = 1; 4488 nfscl_cleandeleg(dp); 4489 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4490 } 4491 } 4492 NFSUNLOCKCLSTATE(); 4493 return (retcnt); 4494 } 4495 } 4496 4497 /* 4498 * Get a reference on the clientid associated with the mount point. 4499 * Return 1 if success, 0 otherwise. 4500 */ 4501 APPLESTATIC int 4502 nfscl_getref(struct nfsmount *nmp) 4503 { 4504 struct nfsclclient *clp; 4505 4506 NFSLOCKCLSTATE(); 4507 clp = nfscl_findcl(nmp); 4508 if (clp == NULL) { 4509 NFSUNLOCKCLSTATE(); 4510 return (0); 4511 } 4512 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL); 4513 NFSUNLOCKCLSTATE(); 4514 return (1); 4515 } 4516 4517 /* 4518 * Release a reference on a clientid acquired with the above call. 4519 */ 4520 APPLESTATIC void 4521 nfscl_relref(struct nfsmount *nmp) 4522 { 4523 struct nfsclclient *clp; 4524 4525 NFSLOCKCLSTATE(); 4526 clp = nfscl_findcl(nmp); 4527 if (clp == NULL) { 4528 NFSUNLOCKCLSTATE(); 4529 return; 4530 } 4531 nfsv4_relref(&clp->nfsc_lock); 4532 NFSUNLOCKCLSTATE(); 4533 } 4534 4535 /* 4536 * Save the size attribute in the delegation, since the nfsnode 4537 * is going away. 
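 * The saved size is handed back by nfscl_newnode() below when a new
 * nfsnode is allocated for the file, so the size cached under a write
 * delegation survives vnode recycling.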
4538 */ 4539 APPLESTATIC void 4540 nfscl_reclaimnode(vnode_t vp) 4541 { 4542 struct nfsclclient *clp; 4543 struct nfscldeleg *dp; 4544 struct nfsnode *np = VTONFS(vp); 4545 struct nfsmount *nmp; 4546 4547 nmp = VFSTONFS(vnode_mount(vp)); 4548 if (!NFSHASNFSV4(nmp)) 4549 return; 4550 NFSLOCKCLSTATE(); 4551 clp = nfscl_findcl(nmp); 4552 if (clp == NULL) { 4553 NFSUNLOCKCLSTATE(); 4554 return; 4555 } 4556 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4557 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 4558 dp->nfsdl_size = np->n_size; 4559 NFSUNLOCKCLSTATE(); 4560 } 4561 4562 /* 4563 * Get the saved size attribute in the delegation, since it is a 4564 * newly allocated nfsnode. 4565 */ 4566 APPLESTATIC void 4567 nfscl_newnode(vnode_t vp) 4568 { 4569 struct nfsclclient *clp; 4570 struct nfscldeleg *dp; 4571 struct nfsnode *np = VTONFS(vp); 4572 struct nfsmount *nmp; 4573 4574 nmp = VFSTONFS(vnode_mount(vp)); 4575 if (!NFSHASNFSV4(nmp)) 4576 return; 4577 NFSLOCKCLSTATE(); 4578 clp = nfscl_findcl(nmp); 4579 if (clp == NULL) { 4580 NFSUNLOCKCLSTATE(); 4581 return; 4582 } 4583 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4584 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 4585 np->n_size = dp->nfsdl_size; 4586 NFSUNLOCKCLSTATE(); 4587 } 4588 4589 /* 4590 * If there is a valid write delegation for this file, set the modtime 4591 * to the local clock time. 4592 */ 4593 APPLESTATIC void 4594 nfscl_delegmodtime(vnode_t vp) 4595 { 4596 struct nfsclclient *clp; 4597 struct nfscldeleg *dp; 4598 struct nfsnode *np = VTONFS(vp); 4599 struct nfsmount *nmp; 4600 4601 nmp = VFSTONFS(vnode_mount(vp)); 4602 if (!NFSHASNFSV4(nmp)) 4603 return; 4604 NFSLOCKCLSTATE(); 4605 clp = nfscl_findcl(nmp); 4606 if (clp == NULL) { 4607 NFSUNLOCKCLSTATE(); 4608 return; 4609 } 4610 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4611 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) { 4612 nanotime(&dp->nfsdl_modtime); 4613 dp->nfsdl_flags |= NFSCLDL_MODTIMESET; 4614 } 4615 NFSUNLOCKCLSTATE(); 4616 } 4617 4618 /* 4619 * If there is a valid write delegation for this file with a modtime set, 4620 * put that modtime in mtime. 4621 */ 4622 APPLESTATIC void 4623 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime) 4624 { 4625 struct nfsclclient *clp; 4626 struct nfscldeleg *dp; 4627 struct nfsnode *np = VTONFS(vp); 4628 struct nfsmount *nmp; 4629 4630 nmp = VFSTONFS(vnode_mount(vp)); 4631 if (!NFSHASNFSV4(nmp)) 4632 return; 4633 NFSLOCKCLSTATE(); 4634 clp = nfscl_findcl(nmp); 4635 if (clp == NULL) { 4636 NFSUNLOCKCLSTATE(); 4637 return; 4638 } 4639 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4640 if (dp != NULL && 4641 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) == 4642 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) 4643 *mtime = dp->nfsdl_modtime; 4644 NFSUNLOCKCLSTATE(); 4645 } 4646 4647 static int 4648 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers) 4649 { 4650 short *defaulterrp, *errp; 4651 4652 if (!nd->nd_repstat) 4653 return (0); 4654 if (nd->nd_procnum == NFSPROC_NOOP) 4655 return (txdr_unsigned(nd->nd_repstat & 0xffff)); 4656 if (nd->nd_repstat == EBADRPC) 4657 return (txdr_unsigned(NFSERR_BADXDR)); 4658 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH || 4659 nd->nd_repstat == NFSERR_OPILLEGAL) 4660 return (txdr_unsigned(nd->nd_repstat)); 4661 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 && 4662 minorvers > NFSV4_MINORVERSION) { 4663 /* NFSv4.n error. 
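 * Errors in this numeric range for minor versions greater than
 * NFSV4_MINORVERSION are passed back unmapped rather than run
 * through the callback error maps below.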
*/ 4664 return (txdr_unsigned(nd->nd_repstat)); 4665 } 4666 if (nd->nd_procnum < NFSV4OP_CBNOPS) 4667 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum]; 4668 else 4669 return (txdr_unsigned(nd->nd_repstat)); 4670 while (*++errp) 4671 if (*errp == (short)nd->nd_repstat) 4672 return (txdr_unsigned(nd->nd_repstat)); 4673 return (txdr_unsigned(*defaulterrp)); 4674 } 4675 4676 /* 4677 * Called to find/add a layout to a client. 4678 * This function returns the layout with a refcnt (shared lock) upon 4679 * success (returns 0) or with no lock/refcnt on the layout when an 4680 * error is returned. 4681 * If a layout is passed in via lypp, it is locked (exclusively locked). 4682 */ 4683 APPLESTATIC int 4684 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4685 nfsv4stateid_t *stateidp, int retonclose, 4686 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp, 4687 struct ucred *cred, NFSPROC_T *p) 4688 { 4689 struct nfsclclient *clp; 4690 struct nfscllayout *lyp, *tlyp; 4691 struct nfsclflayout *flp; 4692 struct nfsnode *np = VTONFS(vp); 4693 mount_t mp; 4694 int layout_passed_in; 4695 4696 mp = nmp->nm_mountp; 4697 layout_passed_in = 1; 4698 tlyp = NULL; 4699 lyp = *lypp; 4700 if (lyp == NULL) { 4701 layout_passed_in = 0; 4702 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT, 4703 M_WAITOK | M_ZERO); 4704 } 4705 4706 NFSLOCKCLSTATE(); 4707 clp = nmp->nm_clp; 4708 if (clp == NULL) { 4709 if (layout_passed_in != 0) 4710 nfsv4_unlock(&lyp->nfsly_lock, 0); 4711 NFSUNLOCKCLSTATE(); 4712 if (tlyp != NULL) 4713 free(tlyp, M_NFSLAYOUT); 4714 return (EPERM); 4715 } 4716 if (lyp == NULL) { 4717 /* 4718 * Although no lyp was passed in, another thread might have 4719 * allocated one. If one is found, just increment its ref 4720 * count and return it. 4721 */ 4722 lyp = nfscl_findlayout(clp, fhp, fhlen); 4723 if (lyp == NULL) { 4724 lyp = tlyp; 4725 tlyp = NULL; 4726 lyp->nfsly_stateid.seqid = stateidp->seqid; 4727 lyp->nfsly_stateid.other[0] = stateidp->other[0]; 4728 lyp->nfsly_stateid.other[1] = stateidp->other[1]; 4729 lyp->nfsly_stateid.other[2] = stateidp->other[2]; 4730 lyp->nfsly_lastbyte = 0; 4731 LIST_INIT(&lyp->nfsly_flayread); 4732 LIST_INIT(&lyp->nfsly_flayrw); 4733 LIST_INIT(&lyp->nfsly_recall); 4734 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0]; 4735 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1]; 4736 lyp->nfsly_clp = clp; 4737 lyp->nfsly_flags = (retonclose != 0) ? 4738 (NFSLY_FILES | NFSLY_RETONCLOSE) : NFSLY_FILES; 4739 lyp->nfsly_fhlen = fhlen; 4740 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen); 4741 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4742 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp, 4743 nfsly_hash); 4744 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4745 nfscl_layoutcnt++; 4746 } else { 4747 if (retonclose != 0) 4748 lyp->nfsly_flags |= NFSLY_RETONCLOSE; 4749 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); 4750 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4751 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4752 } 4753 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 4754 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 4755 NFSUNLOCKCLSTATE(); 4756 if (tlyp != NULL) 4757 free(tlyp, M_NFSLAYOUT); 4758 return (EPERM); 4759 } 4760 *lypp = lyp; 4761 } else 4762 lyp->nfsly_stateid.seqid = stateidp->seqid; 4763 4764 /* Merge the new list of File Layouts into the list.
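 * Read layouts and read/write layouts are kept on separate lists
 * (nfsly_flayread and nfsly_flayrw); the iomode of the first entry
 * determines which list the batch is merged into.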
*/ 4765 flp = LIST_FIRST(fhlp); 4766 if (flp != NULL) { 4767 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ) 4768 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp); 4769 else 4770 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp); 4771 } 4772 if (layout_passed_in != 0) 4773 nfsv4_unlock(&lyp->nfsly_lock, 1); 4774 NFSUNLOCKCLSTATE(); 4775 if (tlyp != NULL) 4776 free(tlyp, M_NFSLAYOUT); 4777 return (0); 4778 } 4779 4780 /* 4781 * Search for a layout by MDS file handle. 4782 * If one is found, it is returned with a refcnt (shared lock) iff 4783 * retflpp returned non-NULL and locked (exclusive locked) iff retflpp is 4784 * returned NULL. 4785 */ 4786 struct nfscllayout * 4787 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen, 4788 uint64_t off, struct nfsclflayout **retflpp, int *recalledp) 4789 { 4790 struct nfscllayout *lyp; 4791 mount_t mp; 4792 int error, igotlock; 4793 4794 mp = clp->nfsc_nmp->nm_mountp; 4795 *recalledp = 0; 4796 *retflpp = NULL; 4797 NFSLOCKCLSTATE(); 4798 lyp = nfscl_findlayout(clp, fhp, fhlen); 4799 if (lyp != NULL) { 4800 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { 4801 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); 4802 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4803 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4804 error = nfscl_findlayoutforio(lyp, off, 4805 NFSV4OPEN_ACCESSREAD, retflpp); 4806 if (error == 0) 4807 nfsv4_getref(&lyp->nfsly_lock, NULL, 4808 NFSCLSTATEMUTEXPTR, mp); 4809 else { 4810 do { 4811 igotlock = nfsv4_lock(&lyp->nfsly_lock, 4812 1, NULL, NFSCLSTATEMUTEXPTR, mp); 4813 } while (igotlock == 0 && 4814 (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0); 4815 *retflpp = NULL; 4816 } 4817 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 4818 lyp = NULL; 4819 *recalledp = 1; 4820 } 4821 } else { 4822 lyp = NULL; 4823 *recalledp = 1; 4824 } 4825 } 4826 NFSUNLOCKCLSTATE(); 4827 return (lyp); 4828 } 4829 4830 /* 4831 * Search for a layout by MDS file handle. If one is found that is marked 4832 * "return on close", delete it, since it should now be forgotten. 4833 */ 4834 static void 4835 nfscl_retoncloselayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen) 4836 { 4837 struct nfscllayout *lyp; 4838 4839 tryagain: 4840 lyp = nfscl_findlayout(clp, fhp, fhlen); 4841 if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) { 4842 /* 4843 * Wait for outstanding I/O ops to be done. 4844 */ 4845 if (lyp->nfsly_lock.nfslock_usecnt != 0 || 4846 lyp->nfsly_lock.nfslock_lock != 0) { 4847 lyp->nfsly_lock.nfslock_lock |= NFSV4LOCK_WANTED; 4848 (void)mtx_sleep(&lyp->nfsly_lock, 4849 NFSCLSTATEMUTEXPTR, PZERO, "nfslyc", 0); 4850 goto tryagain; 4851 } 4852 nfscl_freelayout(lyp); 4853 } 4854 } 4855 4856 /* 4857 * Dereference a layout. 4858 */ 4859 void 4860 nfscl_rellayout(struct nfscllayout *lyp, int exclocked) 4861 { 4862 4863 NFSLOCKCLSTATE(); 4864 if (exclocked != 0) 4865 nfsv4_unlock(&lyp->nfsly_lock, 0); 4866 else 4867 nfsv4_relref(&lyp->nfsly_lock); 4868 NFSUNLOCKCLSTATE(); 4869 } 4870 4871 /* 4872 * Search for a devinfo by deviceid. If one is found, return it after 4873 * acquiring a reference count on it. 4874 */ 4875 struct nfscldevinfo * 4876 nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid, 4877 struct nfscldevinfo *dip) 4878 { 4879 4880 NFSLOCKCLSTATE(); 4881 if (dip == NULL) 4882 dip = nfscl_finddevinfo(clp, deviceid); 4883 if (dip != NULL) 4884 dip->nfsdi_refcnt++; 4885 NFSUNLOCKCLSTATE(); 4886 return (dip); 4887 } 4888 4889 /* 4890 * Dereference a devinfo structure. 
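 * This variant must be called with the NFS client state mutex held;
 * when the reference count drains to zero, a wakeup() is done for any
 * thread sleeping on the count.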
4891 */ 4892 static void 4893 nfscl_reldevinfo_locked(struct nfscldevinfo *dip) 4894 { 4895 4896 dip->nfsdi_refcnt--; 4897 if (dip->nfsdi_refcnt == 0) 4898 wakeup(&dip->nfsdi_refcnt); 4899 } 4900 4901 /* 4902 * Dereference a devinfo structure. 4903 */ 4904 void 4905 nfscl_reldevinfo(struct nfscldevinfo *dip) 4906 { 4907 4908 NFSLOCKCLSTATE(); 4909 nfscl_reldevinfo_locked(dip); 4910 NFSUNLOCKCLSTATE(); 4911 } 4912 4913 /* 4914 * Find a layout for this file handle. Return NULL upon failure. 4915 */ 4916 static struct nfscllayout * 4917 nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) 4918 { 4919 struct nfscllayout *lyp; 4920 4921 LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash) 4922 if (lyp->nfsly_fhlen == fhlen && 4923 !NFSBCMP(lyp->nfsly_fh, fhp, fhlen)) 4924 break; 4925 return (lyp); 4926 } 4927 4928 /* 4929 * Find a devinfo for this deviceid. Return NULL upon failure. 4930 */ 4931 static struct nfscldevinfo * 4932 nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid) 4933 { 4934 struct nfscldevinfo *dip; 4935 4936 LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list) 4937 if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID) 4938 == 0) 4939 break; 4940 return (dip); 4941 } 4942 4943 /* 4944 * Merge the new file layout list into the main one, maintaining it in 4945 * increasing offset order. 4946 */ 4947 static void 4948 nfscl_mergeflayouts(struct nfsclflayouthead *fhlp, 4949 struct nfsclflayouthead *newfhlp) 4950 { 4951 struct nfsclflayout *flp, *nflp, *prevflp, *tflp; 4952 4953 flp = LIST_FIRST(fhlp); 4954 prevflp = NULL; 4955 LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) { 4956 while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) { 4957 prevflp = flp; 4958 flp = LIST_NEXT(flp, nfsfl_list); 4959 } 4960 if (prevflp == NULL) 4961 LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list); 4962 else 4963 LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list); 4964 prevflp = nflp; 4965 } 4966 } 4967 4968 /* 4969 * Add this nfscldevinfo to the client, if it doesn't already exist. 4970 * This function consumes the structure pointed at by dip, if not NULL. 4971 */ 4972 APPLESTATIC int 4973 nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, 4974 struct nfsclflayout *flp) 4975 { 4976 struct nfsclclient *clp; 4977 struct nfscldevinfo *tdip; 4978 4979 NFSLOCKCLSTATE(); 4980 clp = nmp->nm_clp; 4981 if (clp == NULL) { 4982 NFSUNLOCKCLSTATE(); 4983 if (dip != NULL) 4984 free(dip, M_NFSDEVINFO); 4985 return (ENODEV); 4986 } 4987 tdip = nfscl_finddevinfo(clp, flp->nfsfl_dev); 4988 if (tdip != NULL) { 4989 tdip->nfsdi_layoutrefs++; 4990 flp->nfsfl_devp = tdip; 4991 nfscl_reldevinfo_locked(tdip); 4992 NFSUNLOCKCLSTATE(); 4993 if (dip != NULL) 4994 free(dip, M_NFSDEVINFO); 4995 return (0); 4996 } 4997 if (dip != NULL) { 4998 LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list); 4999 dip->nfsdi_layoutrefs = 1; 5000 flp->nfsfl_devp = dip; 5001 } 5002 NFSUNLOCKCLSTATE(); 5003 if (dip == NULL) 5004 return (ENODEV); 5005 return (0); 5006 } 5007 5008 /* 5009 * Free up a layout structure and associated file layout structure(s). 
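 * All read and read/write file layouts, along with any queued recall
 * entries, are released as well. The caller is expected to hold the
 * NFS client state mutex, as nfscl_retoncloselayout() above does.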
5010 */ 5011 APPLESTATIC void 5012 nfscl_freelayout(struct nfscllayout *layp) 5013 { 5014 struct nfsclflayout *flp, *nflp; 5015 struct nfsclrecalllayout *rp, *nrp; 5016 5017 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) { 5018 LIST_REMOVE(flp, nfsfl_list); 5019 nfscl_freeflayout(flp); 5020 } 5021 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) { 5022 LIST_REMOVE(flp, nfsfl_list); 5023 nfscl_freeflayout(flp); 5024 } 5025 LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) { 5026 LIST_REMOVE(rp, nfsrecly_list); 5027 free(rp, M_NFSLAYRECALL); 5028 } 5029 nfscl_layoutcnt--; 5030 free(layp, M_NFSLAYOUT); 5031 } 5032 5033 /* 5034 * Free up a file layout structure. 5035 */ 5036 APPLESTATIC void 5037 nfscl_freeflayout(struct nfsclflayout *flp) 5038 { 5039 int i; 5040 5041 for (i = 0; i < flp->nfsfl_fhcnt; i++) 5042 free(flp->nfsfl_fh[i], M_NFSFH); 5043 if (flp->nfsfl_devp != NULL) 5044 flp->nfsfl_devp->nfsdi_layoutrefs--; 5045 free(flp, M_NFSFLAYOUT); 5046 } 5047 5048 /* 5049 * Free up a file layout devinfo structure. 5050 */ 5051 APPLESTATIC void 5052 nfscl_freedevinfo(struct nfscldevinfo *dip) 5053 { 5054 5055 free(dip, M_NFSDEVINFO); 5056 } 5057 5058 /* 5059 * Mark any layouts that match as recalled. 5060 */ 5061 static int 5062 nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode, 5063 uint64_t off, uint64_t len, uint32_t stateseqid, 5064 struct nfsclrecalllayout *recallp) 5065 { 5066 struct nfsclrecalllayout *rp, *orp; 5067 5068 recallp->nfsrecly_recalltype = recalltype; 5069 recallp->nfsrecly_iomode = iomode; 5070 recallp->nfsrecly_stateseqid = stateseqid; 5071 recallp->nfsrecly_off = off; 5072 recallp->nfsrecly_len = len; 5073 /* 5074 * Order the list as file returns first, followed by fsid and any 5075 * returns, both in increasing stateseqid order. 5076 * Note that the seqids wrap around, so 1 is after 0xffffffff. 5077 * (I'm not sure this is correct because I find RFC5661 confusing 5078 * on this, but hopefully it will work ok.) 5079 */ 5080 orp = NULL; 5081 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5082 orp = rp; 5083 if ((recalltype == NFSLAYOUTRETURN_FILE && 5084 (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE || 5085 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) || 5086 (recalltype != NFSLAYOUTRETURN_FILE && 5087 rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE && 5088 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) { 5089 LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list); 5090 break; 5091 } 5092 } 5093 if (rp == NULL) { 5094 if (orp == NULL) 5095 LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp, 5096 nfsrecly_list); 5097 else 5098 LIST_INSERT_AFTER(orp, recallp, nfsrecly_list); 5099 } 5100 lyp->nfsly_flags |= NFSLY_RECALL; 5101 return (0); 5102 } 5103 5104 /* 5105 * Compare the two seqids for ordering. The trick is that the seqids can 5106 * wrap around from 0xffffffff->0, so check for the cases where one 5107 * has wrapped around. 5108 * Return 1 if seqid1 comes before seqid2, 0 otherwise. 5109 */ 5110 static int 5111 nfscl_seq(uint32_t seqid1, uint32_t seqid2) 5112 { 5113 5114 if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff) 5115 /* seqid2 has wrapped around. */ 5116 return (0); 5117 if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff) 5118 /* seqid1 has wrapped around. */ 5119 return (1); 5120 if (seqid1 <= seqid2) 5121 return (1); 5122 return (0); 5123 } 5124 5125 /* 5126 * Do a layout return for each of the recalls. 
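 * One LayoutReturn RPC is issued per queued recall entry, using the
 * layout stateid's "other" field combined with the seqid that was
 * recorded when the recall arrived.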
5127 */ 5128 static void 5129 nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp, 5130 struct ucred *cred, NFSPROC_T *p) 5131 { 5132 struct nfsclrecalllayout *rp; 5133 nfsv4stateid_t stateid; 5134 5135 NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER); 5136 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5137 stateid.seqid = rp->nfsrecly_stateseqid; 5138 (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh, 5139 lyp->nfsly_fhlen, 0, NFSLAYOUT_NFSV4_1_FILES, 5140 rp->nfsrecly_iomode, rp->nfsrecly_recalltype, 5141 rp->nfsrecly_off, rp->nfsrecly_len, 5142 &stateid, 0, NULL, cred, p, NULL); 5143 } 5144 } 5145 5146 /* 5147 * Do the layout commit for a file layout. 5148 */ 5149 static void 5150 nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp, 5151 struct ucred *cred, NFSPROC_T *p) 5152 { 5153 struct nfsclflayout *flp; 5154 uint64_t len; 5155 int error; 5156 5157 LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) { 5158 if (flp->nfsfl_off <= lyp->nfsly_lastbyte) { 5159 len = flp->nfsfl_end - flp->nfsfl_off; 5160 error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh, 5161 lyp->nfsly_fhlen, 0, flp->nfsfl_off, len, 5162 lyp->nfsly_lastbyte, &lyp->nfsly_stateid, 5163 NFSLAYOUT_NFSV4_1_FILES, 0, NULL, cred, p, NULL); 5164 NFSCL_DEBUG(4, "layoutcommit err=%d\n", error); 5165 if (error == NFSERR_NOTSUPP) { 5166 /* If not supported, don't bother doing it. */ 5167 NFSLOCKMNT(nmp); 5168 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; 5169 NFSUNLOCKMNT(nmp); 5170 break; 5171 } 5172 } 5173 } 5174 } 5175 5176 /* 5177 * Commit all layouts for a file (vnode). 5178 */ 5179 int 5180 nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p) 5181 { 5182 struct nfsclclient *clp; 5183 struct nfscllayout *lyp; 5184 struct nfsnode *np = VTONFS(vp); 5185 mount_t mp; 5186 struct nfsmount *nmp; 5187 5188 mp = vnode_mount(vp); 5189 nmp = VFSTONFS(mp); 5190 if (NFSHASNOLAYOUTCOMMIT(nmp)) 5191 return (0); 5192 NFSLOCKCLSTATE(); 5193 clp = nmp->nm_clp; 5194 if (clp == NULL) { 5195 NFSUNLOCKCLSTATE(); 5196 return (EPERM); 5197 } 5198 lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5199 if (lyp == NULL) { 5200 NFSUNLOCKCLSTATE(); 5201 return (EPERM); 5202 } 5203 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 5204 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 5205 NFSUNLOCKCLSTATE(); 5206 return (EPERM); 5207 } 5208 tryagain: 5209 if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 5210 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 5211 NFSUNLOCKCLSTATE(); 5212 NFSCL_DEBUG(4, "do layoutcommit2\n"); 5213 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p); 5214 NFSLOCKCLSTATE(); 5215 goto tryagain; 5216 } 5217 nfsv4_relref(&lyp->nfsly_lock); 5218 NFSUNLOCKCLSTATE(); 5219 return (0); 5220 } 5221 5222
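/*
 * Worked example for the nfscl_seq() ordering above (illustrative):
 * nfscl_seq(0xffffffff, 1) returns 1, since a seqid of 1 is treated
 * as coming after 0xffffffff (the difference 0xfffffffe >= 0x7fffffff
 * indicates a wrap), whereas nfscl_seq(1, 0xffffffff) returns 0.
 */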