/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes.
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */

#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, struct nfsclopenhash *,
    u_int8_t *, int, u_int8_t *, u_int8_t *, u_int32_t,
    struct nfscllockowner **, struct nfsclopen **);
static bool nfscl_checkown(struct nfsclowner *, struct nfsclopen *, uint8_t *,
    uint8_t *, struct nfscllockowner **, struct nfsclopen **,
    struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_unlinkopen(struct nfsclopen *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
    NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *,
    struct nfscldeleghead *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **, struct nfscllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
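/*
 * Illustrative sketch (not part of the build): the header comment above
 * maps both OpenOwners and LockOwners 1-to-1 from a Posix pid.
 * Conceptually, an owner name is just the pid packed into a fixed-size
 * NFSV4CL_LOCKNAMELEN byte buffer; the real encoding is done by
 * nfscl_filllockowner(), which also handles the "oneopenown" and flock(2)
 * cases.  A minimal sketch of the idea, with a hypothetical helper name:
 *
 *	void
 *	example_fillowner(pid_t pid, u_int8_t own[NFSV4CL_LOCKNAMELEN])
 *	{
 *		memset(own, 0, NFSV4CL_LOCKNAMELEN);
 *		memcpy(own, &pid, sizeof(pid));	 // owner name derived from pid
 *	}
 *
 * Since only one pid ever uses a given owner name, operations on that
 * owner are naturally serialized, as described above.
 */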
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t, struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t, struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t, u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t, u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *,
    bool);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int,
    vnode_t *);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
    (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit, bool firstref)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL) {
		nop = malloc(sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
		nop->nfso_hash.le_prev = NULL;
	}
	ret = nfscl_getcl(vp->v_mount, cred, p, false, firstref, &clp);
	if (ret != 0) {
		free(nowp, M_NFSCLOWNER);
		if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	/* For NFSv4.1/4.2 and this option, use a single open_owner. */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
		nfscl_filllockowner(NULL, own, F_POSIX);
	else
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    cred, newonep);

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock.  The close operations still use an
	 * exclusive lock for this case.
	 */
	if (lockit != 0) {
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
			/*
			 * Get a shared lock on the OpenOwner, but first
			 * wait for any pending exclusive lock, so that the
			 * exclusive locker gets priority.
			 */
			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
			nfsv4_getref(&owp->nfsow_rwlock, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} else
			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	}
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	return (0);
}

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
			} else {
				LIST_INSERT_HEAD(NFSCLOPENHASH(clp, fhp, fhlen),
				    nop, nfso_hash);
				nfsstatsv1.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}
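/*
 * A minimal usage sketch (assumptions, not built): a caller performing an
 * open would typically pair nfscl_open() with nfscl_openrelease(), doing
 * the over-the-wire Open RPC only when NFSCLOPEN_DOOPEN is returned.
 * Error handling is elided for brevity:
 *
 *	struct nfsclowner *owp;
 *	struct nfsclopen *op;
 *	int newone, ret;
 *
 *	nfscl_open(vp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, amode, 1,
 *	    cred, p, &owp, &op, &newone, &ret, 1, true);
 *	if (ret == NFSCLOPEN_DOOPEN) {
 *		// do the Open RPC against the server here
 *	}
 *	nfscl_openrelease(nmp, op, error, newone);
 */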
/*
 * Called to find/add a delegation to a client.
 */
int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;
	struct nfsmount *nmp;

	KASSERT(mp != NULL, ("nfscl_deleg: mp NULL"));
	nmp = VFSTONFS(mp);
	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		nfscl_trydelegreturn(dp, cred, nmp, p);
		free(dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/*
	 * Since a delegation might be added to the mount,
	 * set NFSMNTP_DELEGISSUED now.  If a delegation already
	 * exists, setting this flag again is harmless.
	 */
	NFSLOCKMNT(nmp);
	nmp->nm_privflag |= NFSMNTP_DELEGISSUED;
	NFSUNLOCKMNT(nmp);

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * Delegation already exists, what do we do if a new one??
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			free(dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclopen *op = NULL, *top;
	struct nfsclopenhash *oph;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	struct nfscred ncr;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], lockown[NFSV4CL_LOCKNAMELEN];
	int error;
	bool done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vp->v_type != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);

	/*
	 * For "oneopenown" mounts, first check for a cached open in the
	 * NFS vnode, that can be used as a stateid.  This can only be
	 * done if no delegations have been issued to the mount and no
	 * byte range file locking has been done for the file.
	 */
	if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp) && fords == 0) {
		NFSLOCKMNT(nmp);
		NFSLOCKNODE(np);
		if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0 &&
		    (np->n_flag & NMIGHTBELOCKED) == 0 &&
		    np->n_openstateid != NULL) {
			stateidp->seqid = 0;
			stateidp->other[0] =
			    np->n_openstateid->nfso_stateid.other[0];
			stateidp->other[1] =
			    np->n_openstateid->nfso_stateid.other[1];
			stateidp->other[2] =
			    np->n_openstateid->nfso_stateid.other[2];
			NFSUNLOCKNODE(np);
			NFSUNLOCKMNT(nmp);
			return (0);
		}
		NFSUNLOCKNODE(np);
		NFSUNLOCKMNT(nmp);
	}

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				if (NFSHASNFSV4N(nmp))
					stateidp->seqid = 0;
				else
					stateidp->seqid =
					    dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, lockown, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(NULL, clp->nfsc_openhash, nfhp, fhlen,
		    own, lockown, mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			if (NFSHASNFSV4N(nmp))
				stateidp->seqid = 0;
			else
				stateidp->seqid = lp->nfsl_stateid.seqid;
			stateidp->other[0] =
			    lp->nfsl_stateid.other[0];
			stateidp->other[1] =
			    lp->nfsl_stateid.other[1];
			stateidp->other[2] =
			    lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		top = NULL;
		done = false;
		oph = NFSCLOPENHASH(clp, nfhp, fhlen);
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
				if (top == NULL && (op->nfso_mode &
				    NFSV4OPEN_ACCESSWRITE) != 0 &&
				    (mode & NFSV4OPEN_ACCESSREAD) != 0)
					top = op;
				if ((mode & op->nfso_mode) == mode) {
					/* LRU order the hash list. */
					LIST_REMOVE(op, nfso_hash);
					LIST_INSERT_HEAD(oph, op, nfso_hash);
					done = true;
					break;
				}
			}
		}
		if (!done) {
			NFSCL_DEBUG(2, "openmode top=%p\n", top);
			if (top == NULL || NFSHASOPENMODE(nmp)) {
				NFSUNLOCKCLSTATE();
				return (ENOENT);
			} else
				op = top;
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			memcpy(&ncr, &op->nfso_cred, sizeof(ncr));
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	if (NFSHASNFSV4N(nmp))
		stateidp->seqid = 0;
	else
		stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	if (p == NULL)
		newnfs_copycred(&ncr, cred);
	return (0);
}

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, struct nfsclopenhash *ohashp,
    u_int8_t *nfhp, int fhlen, u_int8_t *openown, u_int8_t *lockown,
    u_int32_t mode, struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfsclopenhash *oph;
	bool keep_looping;

	KASSERT(ohp == NULL || ohashp == NULL, ("nfscl_getopen: "
	    "only one of ohp and ohashp can be set"));
	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned.  There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = true;
	/* Search the client list */
	if (ohashp == NULL) {
		/* Search the local opens on the delegation. */
		LIST_FOREACH(owp, ohp, nfsow_list) {
			/* and look for the correct open */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
				    && (op->nfso_mode & mode) == mode)
					keep_looping = nfscl_checkown(owp, op,
					    openown, lockown, lpp, &rop, &rop2);
				if (!keep_looping)
					break;
			}
			if (!keep_looping)
				break;
		}
	} else {
		/* Search for matching opens on the hash list. */
		oph = &ohashp[NFSCLOPENHASHFUNC(nfhp, fhlen)];
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode)
				keep_looping = nfscl_checkown(op->nfso_own, op,
				    openown, lockown, lpp, &rop, &rop2);
			if (!keep_looping) {
				/* LRU order the hash list. */
				LIST_REMOVE(op, nfso_hash);
				LIST_INSERT_HEAD(oph, op, nfso_hash);
				break;
			}
		}
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}
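/*
 * A short summary sketch (illustrative only): nfscl_getstateid() above
 * selects a stateid for I/O in decreasing order of preference:
 *
 *	1. delegation stateid - if a usable delegation is held
 *	2. lock stateid       - if the process owns a byte range lock
 *	3. open stateid       - otherwise
 *
 * so a caller doing a read might look like the hypothetical snippet
 * below, where a non-NULL "lckp" must later be dereferenced (the client
 * code uses nfscl_lockderef() for this).  Error handling elided:
 *
 *	nfsv4stateid_t st;
 *	void *lckp = NULL;
 *
 *	error = nfscl_getstateid(vp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
 *	    NFSV4OPEN_ACCESSREAD, 0, cred, p, &st, &lckp);
 */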
/* Check for an owner match. */
static bool
nfscl_checkown(struct nfsclowner *owp, struct nfsclopen *op, uint8_t *openown,
    uint8_t *lockown, struct nfscllockowner **lpp, struct nfsclopen **ropp,
    struct nfsclopen **ropp2)
{
	struct nfscllockowner *lp;
	bool keep_looping;

	keep_looping = true;
	if (lpp != NULL) {
		/* Now look for a matching lockowner. */
		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, lockown,
			    NFSV4CL_LOCKNAMELEN)) {
				*lpp = lp;
				*ropp = op;
				return (false);
			}
		}
	}
	if (*ropp == NULL && !NFSBCMP(owp->nfsow_owner, openown,
	    NFSV4CL_LOCKNAMELEN)) {
		*ropp = op;
		if (lpp == NULL)
			keep_looping = false;
	}
	if (*ropp2 == NULL)
		*ropp2 = op;
	return (keep_looping);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
void
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (unlocked == 0) {
		if (NFSHASONEOPENOWN(nmp))
			nfsv4_relref(&owp->nfsow_rwlock);
		else
			nfscl_lockunlock(&owp->nfsow_rwlock);
	}
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
void
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
    int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
	else
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0, true);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    bool tryminvers, bool firstref, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		newclp = malloc(
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now.  For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLOPENHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_openhash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
	} else {
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0) {
		/*
		 * Call nfsv4_lock() with "iwantlock == 0" on the firstref so
		 * that it will wait for a pending exclusive lock request.
		 * This gives the exclusive lock request priority over this
		 * shared lock request.
		 * An exclusive lock on nfsc_lock is used mainly for server
		 * crash recoveries and delegation recalls.
		 */
		if (firstref)
			nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR,
			    mp);
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	}
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			} else if (error == NFSERR_MINORVERMISMATCH &&
			    tryminvers) {
				if (nmp->nm_minorvers > 0)
					nmp->nm_minorvers--;
				else
					tryminvers = false;
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0) ||
		    (error == NFSERR_MINORVERMISMATCH && tryminvers));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}

/*
 * Get a reference to a clientid and return it, if valid.
 */
struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}
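/*
 * Illustrative sketch of the shared vs. exclusive locking convention used
 * on nfsc_lock and nfsow_rwlock above (not compiled).  A shared user first
 * waits for any pending exclusive request, then takes a reference, so that
 * recovery (the exclusive locker) cannot be starved:
 *
 *	NFSLOCKCLSTATE();
 *	nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
 *	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
 *	NFSUNLOCKCLSTATE();
 *	... use the client state ...
 *	NFSLOCKCLSTATE();
 *	nfsv4_relref(&clp->nfsc_lock);
 *	NFSUNLOCKCLSTATE();
 *
 * The exclusive path instead loops on nfsv4_lock(..., 1, ...) until it
 * returns non-zero, as nfscl_umount() below does.
 */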
/*
 * Called when wanting to lock a byte region.
 */
int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlp = malloc(
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	otherlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vp->v_mount, cred, p, false, true,
			    &clp);
	}
	if (error) {
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, openown, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, NULL, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(NULL, clp->nfsc_openhash,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(NULL, clp->nfsc_openhash,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		free(nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (otherlop)
		free(otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
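/*
 * A small worked example (illustrative only) of the byte range convention
 * used by nfscl_getbytelock() and nfscl_relbytelock(): ranges are
 * half-open [nfslo_first, nfslo_end), and NFS64BITSSET for "len" means
 * lock-to-EOF:
 *
 *	off = 100, len = 50           -> nfslo_first = 100, nfslo_end = 150
 *	off = 100, len = NFS64BITSSET -> nfslo_first = 100,
 *	                                 nfslo_end = NFS64BITSSET
 *	off = 100, len = 0            -> rejected with NFSERR_INVAL, since
 *	                                 the end would not exceed the start
 */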
1344 */ 1345 lp = NULL; 1346 fnd = 0; 1347 LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh, 1348 np->n_fhp->nfh_len), nfso_hash) { 1349 if (op->nfso_fhlen == np->n_fhp->nfh_len && 1350 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { 1351 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 1352 if (lp->nfsl_inprog == NULL && 1353 !NFSBCMP(lp->nfsl_owner, own, 1354 NFSV4CL_LOCKNAMELEN)) { 1355 fnd = 1; 1356 break; 1357 } 1358 } 1359 } 1360 if (fnd) 1361 break; 1362 } 1363 1364 if (lp != NULL) { 1365 ret = nfscl_updatelock(lp, &nlop, NULL, 0); 1366 if (ret) 1367 *dorpcp = 1; 1368 /* 1369 * Serial modifications on the lock owner for multiple 1370 * threads for the same process using a read/write lock. 1371 */ 1372 lp->nfsl_inprog = p; 1373 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR); 1374 *lpp = lp; 1375 } 1376 NFSUNLOCKCLSTATE(); 1377 if (nlop) 1378 free(nlop, M_NFSCLLOCK); 1379 if (other_lop) 1380 free(other_lop, M_NFSCLLOCK); 1381 return (0); 1382 } 1383 1384 /* 1385 * Release all lockowners marked in progess for this process and file. 1386 */ 1387 void 1388 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p, 1389 void *id, int flags) 1390 { 1391 struct nfsclopen *op; 1392 struct nfscllockowner *lp; 1393 struct nfsnode *np; 1394 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 1395 1396 np = VTONFS(vp); 1397 nfscl_filllockowner(id, own, flags); 1398 NFSLOCKCLSTATE(); 1399 LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh, 1400 np->n_fhp->nfh_len), nfso_hash) { 1401 if (op->nfso_fhlen == np->n_fhp->nfh_len && 1402 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { 1403 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 1404 if (lp->nfsl_inprog == p && 1405 !NFSBCMP(lp->nfsl_owner, own, 1406 NFSV4CL_LOCKNAMELEN)) { 1407 lp->nfsl_inprog = NULL; 1408 nfscl_lockunlock(&lp->nfsl_rwlock); 1409 } 1410 } 1411 } 1412 } 1413 nfscl_clrelease(clp); 1414 NFSUNLOCKCLSTATE(); 1415 } 1416 1417 /* 1418 * Called to find out if any bytes within the byte range specified are 1419 * write locked by the calling process. Used to determine if flushing 1420 * is required before a LockU. 1421 * If in doubt, return 1, so the flush will occur. 1422 */ 1423 int 1424 nfscl_checkwritelocked(vnode_t vp, struct flock *fl, 1425 struct ucred *cred, NFSPROC_T *p, void *id, int flags) 1426 { 1427 struct nfscllockowner *lp; 1428 struct nfsclopen *op; 1429 struct nfsclclient *clp; 1430 struct nfscllock *lop; 1431 struct nfscldeleg *dp; 1432 struct nfsnode *np; 1433 u_int64_t off, end; 1434 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 1435 int error = 0; 1436 1437 np = VTONFS(vp); 1438 switch (fl->l_whence) { 1439 case SEEK_SET: 1440 case SEEK_CUR: 1441 /* 1442 * Caller is responsible for adding any necessary offset 1443 * when SEEK_CUR is used. 1444 */ 1445 off = fl->l_start; 1446 break; 1447 case SEEK_END: 1448 off = np->n_size + fl->l_start; 1449 break; 1450 default: 1451 return (1); 1452 } 1453 if (fl->l_len != 0) { 1454 end = off + fl->l_len; 1455 if (end < off) 1456 return (1); 1457 } else { 1458 end = NFS64BITSSET; 1459 } 1460 1461 error = nfscl_getcl(vp->v_mount, cred, p, false, true, &clp); 1462 if (error) 1463 return (1); 1464 nfscl_filllockowner(id, own, flags); 1465 NFSLOCKCLSTATE(); 1466 1467 /* 1468 * First check the delegation locks. 1469 */ 1470 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 1471 if (dp != NULL) { 1472 /* No need to flush if it is a write delegation. 
*/ 1473 if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0) { 1474 nfscl_clrelease(clp); 1475 NFSUNLOCKCLSTATE(); 1476 return (0); 1477 } 1478 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 1479 if (!NFSBCMP(lp->nfsl_owner, own, 1480 NFSV4CL_LOCKNAMELEN)) 1481 break; 1482 } 1483 if (lp != NULL) { 1484 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 1485 if (lop->nfslo_first >= end) 1486 break; 1487 if (lop->nfslo_end <= off) 1488 continue; 1489 if (lop->nfslo_type == F_WRLCK) { 1490 nfscl_clrelease(clp); 1491 NFSUNLOCKCLSTATE(); 1492 return (1); 1493 } 1494 } 1495 } 1496 } 1497 1498 /* 1499 * Now, check state against the server. 1500 */ 1501 LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh, 1502 np->n_fhp->nfh_len), nfso_hash) { 1503 if (op->nfso_fhlen == np->n_fhp->nfh_len && 1504 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { 1505 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 1506 if (!NFSBCMP(lp->nfsl_owner, own, 1507 NFSV4CL_LOCKNAMELEN)) 1508 break; 1509 } 1510 if (lp != NULL) { 1511 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 1512 if (lop->nfslo_first >= end) 1513 break; 1514 if (lop->nfslo_end <= off) 1515 continue; 1516 if (lop->nfslo_type == F_WRLCK) { 1517 nfscl_clrelease(clp); 1518 NFSUNLOCKCLSTATE(); 1519 return (1); 1520 } 1521 } 1522 } 1523 } 1524 } 1525 nfscl_clrelease(clp); 1526 NFSUNLOCKCLSTATE(); 1527 return (0); 1528 } 1529 1530 /* 1531 * Release a byte range lock owner structure. 1532 */ 1533 void 1534 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete) 1535 { 1536 struct nfsclclient *clp; 1537 1538 if (lp == NULL) 1539 return; 1540 NFSLOCKCLSTATE(); 1541 clp = lp->nfsl_open->nfso_own->nfsow_clp; 1542 if (error != 0 && candelete && 1543 (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0) 1544 nfscl_freelockowner(lp, 0); 1545 else 1546 nfscl_lockunlock(&lp->nfsl_rwlock); 1547 nfscl_clrelease(clp); 1548 NFSUNLOCKCLSTATE(); 1549 } 1550 1551 /* 1552 * Unlink the open structure. 1553 */ 1554 static void 1555 nfscl_unlinkopen(struct nfsclopen *op) 1556 { 1557 1558 LIST_REMOVE(op, nfso_list); 1559 if (op->nfso_hash.le_prev != NULL) 1560 LIST_REMOVE(op, nfso_hash); 1561 } 1562 1563 /* 1564 * Free up an open structure and any associated byte range lock structures. 1565 */ 1566 void 1567 nfscl_freeopen(struct nfsclopen *op, int local, bool unlink) 1568 { 1569 1570 if (unlink) 1571 nfscl_unlinkopen(op); 1572 nfscl_freealllocks(&op->nfso_lock, local); 1573 free(op, M_NFSCLOPEN); 1574 if (local) 1575 nfsstatsv1.cllocalopens--; 1576 else 1577 nfsstatsv1.clopens--; 1578 } 1579 1580 /* 1581 * Free up all lock owners and associated locks. 1582 */ 1583 static void 1584 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local) 1585 { 1586 struct nfscllockowner *lp, *nlp; 1587 1588 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) { 1589 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED)) 1590 panic("nfscllckw"); 1591 nfscl_freelockowner(lp, local); 1592 } 1593 } 1594 1595 /* 1596 * Called for an Open when NFSERR_EXPIRED is received from the server. 1597 * If there are no byte range locks nor a Share Deny lost, try to do a 1598 * fresh Open. Otherwise, free the open. 1599 */ 1600 static int 1601 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op, 1602 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p) 1603 { 1604 struct nfscllockowner *lp; 1605 struct nfscldeleg *dp; 1606 int mustdelete = 0, error; 1607 1608 /* 1609 * Look for any byte range lock(s). 
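/*
 * A note on the iteration style above (illustrative, per queue(3)):
 * nfscl_freelockowner() unlinks "lp" from the list, so a plain
 * LIST_FOREACH() would dereference freed memory when advancing.  The
 * _SAFE variant caches the next element before the body runs:
 *
 *	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
 *		// "lp" may be unlinked and freed here...
 *		nfscl_freelockowner(lp, local);
 *		// ...because the iterator already saved "nlp"
 *	}
 */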
1610 */ 1611 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 1612 if (!LIST_EMPTY(&lp->nfsl_lock)) { 1613 mustdelete = 1; 1614 break; 1615 } 1616 } 1617 1618 /* 1619 * If no byte range lock(s) nor a Share deny, try to re-open. 1620 */ 1621 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) { 1622 newnfs_copycred(&op->nfso_cred, cred); 1623 dp = NULL; 1624 error = nfsrpc_reopen(nmp, op->nfso_fh, 1625 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p); 1626 if (error) { 1627 mustdelete = 1; 1628 if (dp != NULL) { 1629 free(dp, M_NFSCLDELEG); 1630 dp = NULL; 1631 } 1632 } 1633 if (dp != NULL) 1634 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh, 1635 op->nfso_fhlen, cred, p, &dp); 1636 } 1637 1638 /* 1639 * If a byte range lock or Share deny or couldn't re-open, free it. 1640 */ 1641 if (mustdelete) 1642 nfscl_freeopen(op, 0, true); 1643 return (mustdelete); 1644 } 1645 1646 /* 1647 * Free up an open owner structure. 1648 */ 1649 static void 1650 nfscl_freeopenowner(struct nfsclowner *owp, int local) 1651 { 1652 int owned; 1653 1654 /* 1655 * Make sure the NFSCLSTATE mutex is held, to avoid races with 1656 * calls in nfscl_renewthread() that do not hold a reference 1657 * count on the nfsclclient and just the mutex. 1658 * The mutex will not be held for calls done with the exclusive 1659 * nfsclclient lock held, in particular, nfscl_hasexpired() 1660 * and nfscl_recalldeleg() might do this. 1661 */ 1662 owned = mtx_owned(NFSCLSTATEMUTEXPTR); 1663 if (owned == 0) 1664 NFSLOCKCLSTATE(); 1665 LIST_REMOVE(owp, nfsow_list); 1666 if (owned == 0) 1667 NFSUNLOCKCLSTATE(); 1668 free(owp, M_NFSCLOWNER); 1669 if (local) 1670 nfsstatsv1.cllocalopenowners--; 1671 else 1672 nfsstatsv1.clopenowners--; 1673 } 1674 1675 /* 1676 * Free up a byte range lock owner structure. 1677 */ 1678 void 1679 nfscl_freelockowner(struct nfscllockowner *lp, int local) 1680 { 1681 struct nfscllock *lop, *nlop; 1682 int owned; 1683 1684 /* 1685 * Make sure the NFSCLSTATE mutex is held, to avoid races with 1686 * calls in nfscl_renewthread() that do not hold a reference 1687 * count on the nfsclclient and just the mutex. 1688 * The mutex will not be held for calls done with the exclusive 1689 * nfsclclient lock held, in particular, nfscl_hasexpired() 1690 * and nfscl_recalldeleg() might do this. 1691 */ 1692 owned = mtx_owned(NFSCLSTATEMUTEXPTR); 1693 if (owned == 0) 1694 NFSLOCKCLSTATE(); 1695 LIST_REMOVE(lp, nfsl_list); 1696 if (owned == 0) 1697 NFSUNLOCKCLSTATE(); 1698 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) { 1699 nfscl_freelock(lop, local); 1700 } 1701 free(lp, M_NFSCLLOCKOWNER); 1702 if (local) 1703 nfsstatsv1.cllocallockowners--; 1704 else 1705 nfsstatsv1.cllockowners--; 1706 } 1707 1708 /* 1709 * Free up a byte range lock structure. 1710 */ 1711 void 1712 nfscl_freelock(struct nfscllock *lop, int local) 1713 { 1714 1715 LIST_REMOVE(lop, nfslo_list); 1716 free(lop, M_NFSCLLOCK); 1717 if (local) 1718 nfsstatsv1.cllocallocks--; 1719 else 1720 nfsstatsv1.cllocks--; 1721 } 1722 1723 /* 1724 * Clean out the state related to a delegation. 
1725 */ 1726 static void 1727 nfscl_cleandeleg(struct nfscldeleg *dp) 1728 { 1729 struct nfsclowner *owp, *nowp; 1730 struct nfsclopen *op; 1731 1732 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 1733 op = LIST_FIRST(&owp->nfsow_open); 1734 if (op != NULL) { 1735 if (LIST_NEXT(op, nfso_list) != NULL) 1736 panic("nfscleandel"); 1737 nfscl_freeopen(op, 1, true); 1738 } 1739 nfscl_freeopenowner(owp, 1); 1740 } 1741 nfscl_freealllocks(&dp->nfsdl_lock, 1); 1742 } 1743 1744 /* 1745 * Free a delegation. 1746 */ 1747 static void 1748 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp, bool freeit) 1749 { 1750 1751 TAILQ_REMOVE(hdp, dp, nfsdl_list); 1752 LIST_REMOVE(dp, nfsdl_hash); 1753 if (freeit) 1754 free(dp, M_NFSCLDELEG); 1755 nfsstatsv1.cldelegates--; 1756 nfscl_delegcnt--; 1757 } 1758 1759 /* 1760 * Free up all state related to this client structure. 1761 */ 1762 static void 1763 nfscl_cleanclient(struct nfsclclient *clp) 1764 { 1765 struct nfsclowner *owp, *nowp; 1766 struct nfsclopen *op, *nop; 1767 struct nfscllayout *lyp, *nlyp; 1768 struct nfscldevinfo *dip, *ndip; 1769 1770 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) 1771 nfscl_freelayout(lyp); 1772 1773 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) 1774 nfscl_freedevinfo(dip); 1775 1776 /* Now, all the OpenOwners, etc. */ 1777 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 1778 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) { 1779 nfscl_freeopen(op, 0, true); 1780 } 1781 nfscl_freeopenowner(owp, 0); 1782 } 1783 } 1784 1785 /* 1786 * Called when an NFSERR_EXPIRED is received from the server. 1787 */ 1788 static void 1789 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp, 1790 struct ucred *cred, NFSPROC_T *p) 1791 { 1792 struct nfsclowner *owp, *nowp, *towp; 1793 struct nfsclopen *op, *nop, *top; 1794 struct nfscldeleg *dp, *ndp; 1795 int ret, printed = 0; 1796 1797 /* 1798 * First, merge locally issued Opens into the list for the server. 
1799 */ 1800 dp = TAILQ_FIRST(&clp->nfsc_deleg); 1801 while (dp != NULL) { 1802 ndp = TAILQ_NEXT(dp, nfsdl_list); 1803 owp = LIST_FIRST(&dp->nfsdl_owner); 1804 while (owp != NULL) { 1805 nowp = LIST_NEXT(owp, nfsow_list); 1806 op = LIST_FIRST(&owp->nfsow_open); 1807 if (op != NULL) { 1808 if (LIST_NEXT(op, nfso_list) != NULL) 1809 panic("nfsclexp"); 1810 LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) { 1811 if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner, 1812 NFSV4CL_LOCKNAMELEN)) 1813 break; 1814 } 1815 if (towp != NULL) { 1816 /* Merge opens in */ 1817 LIST_FOREACH(top, &towp->nfsow_open, nfso_list) { 1818 if (top->nfso_fhlen == op->nfso_fhlen && 1819 !NFSBCMP(top->nfso_fh, op->nfso_fh, 1820 op->nfso_fhlen)) { 1821 top->nfso_mode |= op->nfso_mode; 1822 top->nfso_opencnt += op->nfso_opencnt; 1823 break; 1824 } 1825 } 1826 if (top == NULL) { 1827 /* Just add the open to the owner list */ 1828 LIST_REMOVE(op, nfso_list); 1829 op->nfso_own = towp; 1830 LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list); 1831 LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh, 1832 op->nfso_fhlen), op, nfso_hash); 1833 nfsstatsv1.cllocalopens--; 1834 nfsstatsv1.clopens++; 1835 } 1836 } else { 1837 /* Just add the openowner to the client list */ 1838 LIST_REMOVE(owp, nfsow_list); 1839 owp->nfsow_clp = clp; 1840 LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list); 1841 LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh, 1842 op->nfso_fhlen), op, nfso_hash); 1843 nfsstatsv1.cllocalopenowners--; 1844 nfsstatsv1.clopenowners++; 1845 nfsstatsv1.cllocalopens--; 1846 nfsstatsv1.clopens++; 1847 } 1848 } 1849 owp = nowp; 1850 } 1851 if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) { 1852 printed = 1; 1853 printf("nfsv4 expired locks lost\n"); 1854 } 1855 nfscl_cleandeleg(dp); 1856 nfscl_freedeleg(&clp->nfsc_deleg, dp, true); 1857 dp = ndp; 1858 } 1859 if (!TAILQ_EMPTY(&clp->nfsc_deleg)) 1860 panic("nfsclexp"); 1861 1862 /* 1863 * Now, try and reopen against the server. 1864 */ 1865 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 1866 owp->nfsow_seqid = 0; 1867 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) { 1868 ret = nfscl_expireopen(clp, op, nmp, cred, p); 1869 if (ret && !printed) { 1870 printed = 1; 1871 printf("nfsv4 expired locks lost\n"); 1872 } 1873 } 1874 if (LIST_EMPTY(&owp->nfsow_open)) 1875 nfscl_freeopenowner(owp, 0); 1876 } 1877 } 1878 1879 /* 1880 * This function must be called after the process represented by "own" has 1881 * exited. Must be called with CLSTATE lock held. 1882 */ 1883 static void 1884 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own) 1885 { 1886 struct nfsclowner *owp, *nowp; 1887 struct nfscllockowner *lp; 1888 struct nfscldeleg *dp; 1889 1890 /* First, get rid of local locks on delegations. */ 1891 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 1892 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 1893 if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { 1894 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED)) 1895 panic("nfscllckw"); 1896 nfscl_freelockowner(lp, 1); 1897 break; 1898 } 1899 } 1900 } 1901 owp = LIST_FIRST(&clp->nfsc_owner); 1902 while (owp != NULL) { 1903 nowp = LIST_NEXT(owp, nfsow_list); 1904 if (!NFSBCMP(owp->nfsow_owner, own, 1905 NFSV4CL_LOCKNAMELEN)) { 1906 /* 1907 * If there are children that haven't closed the 1908 * file descriptors yet, the opens will still be 1909 * here. For that case, let the renew thread clear 1910 * out the OpenOwner later. 
1911 */ 1912 if (LIST_EMPTY(&owp->nfsow_open)) 1913 nfscl_freeopenowner(owp, 0); 1914 else 1915 owp->nfsow_defunct = 1; 1916 break; 1917 } 1918 owp = nowp; 1919 } 1920 } 1921 1922 /* 1923 * Find open/lock owners for processes that have exited. 1924 */ 1925 static void 1926 nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp) 1927 { 1928 struct nfsclowner *owp, *nowp; 1929 struct nfsclopen *op; 1930 struct nfscllockowner *lp, *nlp; 1931 struct nfscldeleg *dp; 1932 uint8_t own[NFSV4CL_LOCKNAMELEN]; 1933 1934 /* 1935 * All the pidhash locks must be acquired, since they are sx locks 1936 * and must be acquired before the mutexes. The pid(s) that will 1937 * be used aren't known yet, so all the locks need to be acquired. 1938 * Fortunately, this function is only performed once/sec. 1939 */ 1940 pidhash_slockall(); 1941 NFSLOCKCLSTATE(); 1942 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 1943 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 1944 LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) { 1945 if (LIST_EMPTY(&lp->nfsl_lock)) 1946 nfscl_emptylockowner(lp, lhp); 1947 } 1948 } 1949 if (nfscl_procdoesntexist(owp->nfsow_owner)) { 1950 memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN); 1951 nfscl_cleanup_common(clp, own); 1952 } 1953 } 1954 1955 /* 1956 * For the single open_owner case, these lock owners need to be 1957 * checked to see if they still exist separately. 1958 * This is because nfscl_procdoesntexist() never returns true for 1959 * the single open_owner so that the above doesn't ever call 1960 * nfscl_cleanup_common(). 1961 */ 1962 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 1963 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) { 1964 if (nfscl_procdoesntexist(lp->nfsl_owner)) { 1965 memcpy(own, lp->nfsl_owner, 1966 NFSV4CL_LOCKNAMELEN); 1967 nfscl_cleanup_common(clp, own); 1968 } 1969 } 1970 } 1971 NFSUNLOCKCLSTATE(); 1972 pidhash_sunlockall(); 1973 } 1974 1975 /* 1976 * Take the empty lock owner and move it to the local lhp list if the 1977 * associated process no longer exists. 1978 */ 1979 static void 1980 nfscl_emptylockowner(struct nfscllockowner *lp, 1981 struct nfscllockownerfhhead *lhp) 1982 { 1983 struct nfscllockownerfh *lfhp, *mylfhp; 1984 struct nfscllockowner *nlp; 1985 int fnd_it; 1986 1987 /* If not a Posix lock owner, just return. */ 1988 if ((lp->nfsl_lockflags & F_POSIX) == 0) 1989 return; 1990 1991 fnd_it = 0; 1992 mylfhp = NULL; 1993 /* 1994 * First, search to see if this lock owner is already in the list. 1995 * If it is, then the associated process no longer exists. 1996 */ 1997 SLIST_FOREACH(lfhp, lhp, nfslfh_list) { 1998 if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen && 1999 !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh, 2000 lfhp->nfslfh_len)) 2001 mylfhp = lfhp; 2002 LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list) 2003 if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner, 2004 NFSV4CL_LOCKNAMELEN)) 2005 fnd_it = 1; 2006 } 2007 /* If not found, check if process still exists. */ 2008 if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0) 2009 return; 2010 2011 /* Move the lock owner over to the local list. 
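 * Entries on the lhp list are grouped by file handle in
 * nfscllockownerfh structures, so that the renew thread can later do
 * one ReleaseLockOwner RPC for each lock owner using the correct
 * file handle.  The M_NOWAIT allocation below can fail, but since
 * this scan runs about once per second, simply returning and
 * retrying on a later pass is sufficient.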
*/ 2012 if (mylfhp == NULL) { 2013 mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP, 2014 M_NOWAIT); 2015 if (mylfhp == NULL) 2016 return; 2017 mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen; 2018 NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh, 2019 mylfhp->nfslfh_len); 2020 LIST_INIT(&mylfhp->nfslfh_lock); 2021 SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list); 2022 } 2023 LIST_REMOVE(lp, nfsl_list); 2024 LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list); 2025 } 2026 2027 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */ 2028 /* 2029 * Called from nfs umount to free up the clientid. 2030 */ 2031 void 2032 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p, struct nfscldeleghead *dhp) 2033 { 2034 struct nfsclclient *clp; 2035 struct ucred *cred; 2036 int igotlock; 2037 2038 /* 2039 * For the case that matters, this is the thread that set 2040 * MNTK_UNMOUNTF, so it will see it set. The code that follows is 2041 * done to ensure that any thread executing nfscl_getcl() after 2042 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the 2043 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following 2044 * explanation, courtesy of Alan Cox. 2045 * What follows is a snippet from Alan Cox's email at: 2046 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw 2047 * 2048 * 1. Set MNTK_UNMOUNTF 2049 * 2. Acquire a standard FreeBSD mutex "m". 2050 * 3. Update some data structures. 2051 * 4. Release mutex "m". 2052 * 2053 * Then, other threads that acquire "m" after step 4 has occurred will 2054 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to 2055 * step 2 may or may not see MNTK_UNMOUNTF as set. 2056 */ 2057 NFSLOCKCLSTATE(); 2058 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 2059 fake_global++; 2060 NFSUNLOCKCLSTATE(); 2061 NFSLOCKCLSTATE(); 2062 } 2063 2064 clp = nmp->nm_clp; 2065 if (clp != NULL) { 2066 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0) 2067 panic("nfscl umount"); 2068 2069 /* 2070 * First, handshake with the nfscl renew thread, to terminate 2071 * it. 2072 */ 2073 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT; 2074 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD) 2075 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, 2076 "nfsclumnt", hz); 2077 2078 /* 2079 * Now, get the exclusive lock on the client state, so 2080 * that no uses of the state are still in progress. 2081 */ 2082 do { 2083 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2084 NFSCLSTATEMUTEXPTR, NULL); 2085 } while (!igotlock); 2086 NFSUNLOCKCLSTATE(); 2087 2088 /* 2089 * Free up all the state. It will expire on the server, but 2090 * maybe we should do a SetClientId/SetClientIdConfirm so 2091 * the server throws it away? 2092 */ 2093 LIST_REMOVE(clp, nfsc_list); 2094 nfscl_delegreturnall(clp, p, dhp); 2095 cred = newnfs_getcred(); 2096 if (NFSHASNFSV4N(nmp)) { 2097 nfsrpc_destroysession(nmp, NULL, cred, p); 2098 nfsrpc_destroyclient(nmp, clp, cred, p); 2099 } else 2100 nfsrpc_setclient(nmp, clp, 0, NULL, cred, p); 2101 nfscl_cleanclient(clp); 2102 nmp->nm_clp = NULL; 2103 NFSFREECRED(cred); 2104 free(clp, M_NFSCLCLIENT); 2105 } else 2106 NFSUNLOCKCLSTATE(); 2107 } 2108 2109 /* 2110 * This function is called when a server replies with NFSERR_STALECLIENTID 2111 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists, 2112 * doing Opens and Locks with reclaim. If these fail, it deletes the 2113 * corresponding state. 
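 * In outline, the recovery below proceeds as follows:
 * 1. Acquire the exclusive client lock, so no new state operations
 *    can start.
 * 2. Throw away all layouts. (No LayoutCommit with reclaim is
 *    attempted yet.)
 * 3. Retry nfsrpc_setclient() to acquire a fresh clientid/session.
 * 4. Re-Open every open and re-Lock every byte range with
 *    reclaim == true, reclaiming delegations where possible.
 * 5. Close/DelegReturn anything acquired in excess and, for
 *    NFSv4.1/4.2, do a ReclaimComplete.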
2114 */ 2115 static void 2116 nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred, 2117 NFSPROC_T *p) 2118 { 2119 struct nfsclowner *owp, *nowp; 2120 struct nfsclopen *op, *nop; 2121 struct nfscllockowner *lp, *nlp; 2122 struct nfscllock *lop, *nlop; 2123 struct nfscldeleg *dp, *ndp, *tdp; 2124 struct nfsmount *nmp; 2125 struct ucred *tcred; 2126 struct nfsclopenhead extra_open; 2127 struct nfscldeleghead extra_deleg; 2128 struct nfsreq *rep; 2129 u_int64_t len; 2130 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode; 2131 int i, igotlock = 0, error, trycnt, firstlock; 2132 struct nfscllayout *lyp, *nlyp; 2133 bool recovered_one; 2134 2135 /* 2136 * First, lock the client structure, so everyone else will 2137 * block when trying to use state. 2138 */ 2139 NFSLOCKCLSTATE(); 2140 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2141 do { 2142 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2143 NFSCLSTATEMUTEXPTR, NULL); 2144 } while (!igotlock); 2145 NFSUNLOCKCLSTATE(); 2146 2147 nmp = clp->nfsc_nmp; 2148 if (nmp == NULL) 2149 panic("nfscl recover"); 2150 2151 /* 2152 * For now, just get rid of all layouts. There may be a need 2153 * to do LayoutCommit Ops with reclaim == true later. 2154 */ 2155 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) 2156 nfscl_freelayout(lyp); 2157 TAILQ_INIT(&clp->nfsc_layout); 2158 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++) 2159 LIST_INIT(&clp->nfsc_layouthash[i]); 2160 2161 trycnt = 5; 2162 tcred = NULL; 2163 do { 2164 error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p); 2165 } while ((error == NFSERR_STALECLIENTID || 2166 error == NFSERR_BADSESSION || 2167 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2168 if (error) { 2169 NFSLOCKCLSTATE(); 2170 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER | 2171 NFSCLFLAGS_RECVRINPROG); 2172 wakeup(&clp->nfsc_flags); 2173 nfsv4_unlock(&clp->nfsc_lock, 0); 2174 NFSUNLOCKCLSTATE(); 2175 return; 2176 } 2177 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2178 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2179 2180 /* 2181 * Mark requests already queued on the server, so that they don't 2182 * initiate another recovery cycle. Any requests already in the 2183 * queue that handle state information will have the old stale 2184 * clientid/stateid and will get a NFSERR_STALESTATEID, 2185 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server. 2186 * This will be translated to NFSERR_STALEDONTRECOVER when 2187 * R_DONTRECOVER is set. 2188 */ 2189 NFSLOCKREQ(); 2190 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) { 2191 if (rep->r_nmp == nmp) 2192 rep->r_flags |= R_DONTRECOVER; 2193 } 2194 NFSUNLOCKREQ(); 2195 2196 /* 2197 * If nfsrpc_setclient() returns *retokp == true, 2198 * no more recovery is needed. 2199 */ 2200 if (*retokp) 2201 goto out; 2202 2203 /* 2204 * Now, mark all delegations "need reclaim". 2205 */ 2206 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) 2207 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM; 2208 2209 TAILQ_INIT(&extra_deleg); 2210 LIST_INIT(&extra_open); 2211 /* 2212 * Now traverse the state lists, doing Open and Lock Reclaims. 
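 * Note that a successful reclaim Open may also hand back a
 * delegation.  When that happens, the reply is matched against the
 * client's existing delegations (by file handle and access mode) so
 * that the new stateid and related fields can be copied into the
 * existing structure; a reply that matches no existing delegation is
 * queued on extra_deleg, to be returned once recovery is done.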
2213 */ 2214 tcred = newnfs_getcred(); 2215 recovered_one = false; 2216 owp = LIST_FIRST(&clp->nfsc_owner); 2217 while (owp != NULL) { 2218 nowp = LIST_NEXT(owp, nfsow_list); 2219 owp->nfsow_seqid = 0; 2220 op = LIST_FIRST(&owp->nfsow_open); 2221 while (op != NULL) { 2222 nop = LIST_NEXT(op, nfso_list); 2223 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) { 2224 /* Search for a delegation to reclaim with the open */ 2225 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2226 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) 2227 continue; 2228 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { 2229 mode = NFSV4OPEN_ACCESSWRITE; 2230 delegtype = NFSV4OPEN_DELEGATEWRITE; 2231 } else { 2232 mode = NFSV4OPEN_ACCESSREAD; 2233 delegtype = NFSV4OPEN_DELEGATEREAD; 2234 } 2235 if ((op->nfso_mode & mode) == mode && 2236 op->nfso_fhlen == dp->nfsdl_fhlen && 2237 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen)) 2238 break; 2239 } 2240 ndp = dp; 2241 if (dp == NULL) 2242 delegtype = NFSV4OPEN_DELEGATENONE; 2243 newnfs_copycred(&op->nfso_cred, tcred); 2244 error = nfscl_tryopen(nmp, NULL, op->nfso_fh, 2245 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen, 2246 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype, 2247 tcred, p); 2248 if (!error) { 2249 recovered_one = true; 2250 /* Handle any replied delegation */ 2251 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE) 2252 || NFSMNT_RDONLY(nmp->nm_mountp))) { 2253 if ((ndp->nfsdl_flags & NFSCLDL_WRITE)) 2254 mode = NFSV4OPEN_ACCESSWRITE; 2255 else 2256 mode = NFSV4OPEN_ACCESSREAD; 2257 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2258 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) 2259 continue; 2260 if ((op->nfso_mode & mode) == mode && 2261 op->nfso_fhlen == dp->nfsdl_fhlen && 2262 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, 2263 op->nfso_fhlen)) { 2264 dp->nfsdl_stateid = ndp->nfsdl_stateid; 2265 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit; 2266 dp->nfsdl_ace = ndp->nfsdl_ace; 2267 dp->nfsdl_change = ndp->nfsdl_change; 2268 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; 2269 if ((ndp->nfsdl_flags & NFSCLDL_RECALL)) 2270 dp->nfsdl_flags |= NFSCLDL_RECALL; 2271 free(ndp, M_NFSCLDELEG); 2272 ndp = NULL; 2273 break; 2274 } 2275 } 2276 } 2277 if (ndp != NULL) 2278 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list); 2279 2280 /* and reclaim all byte range locks */ 2281 lp = LIST_FIRST(&op->nfso_lock); 2282 while (lp != NULL) { 2283 nlp = LIST_NEXT(lp, nfsl_list); 2284 lp->nfsl_seqid = 0; 2285 firstlock = 1; 2286 lop = LIST_FIRST(&lp->nfsl_lock); 2287 while (lop != NULL) { 2288 nlop = LIST_NEXT(lop, nfslo_list); 2289 if (lop->nfslo_end == NFS64BITSSET) 2290 len = NFS64BITSSET; 2291 else 2292 len = lop->nfslo_end - lop->nfslo_first; 2293 error = nfscl_trylock(nmp, NULL, 2294 op->nfso_fh, op->nfso_fhlen, lp, 2295 firstlock, 1, lop->nfslo_first, len, 2296 lop->nfslo_type, tcred, p); 2297 if (error != 0) 2298 nfscl_freelock(lop, 0); 2299 else 2300 firstlock = 0; 2301 lop = nlop; 2302 } 2303 /* If no locks, but a lockowner, just delete it. */ 2304 if (LIST_EMPTY(&lp->nfsl_lock)) 2305 nfscl_freelockowner(lp, 0); 2306 lp = nlp; 2307 } 2308 } else if (error == NFSERR_NOGRACE && !recovered_one && 2309 NFSHASNFSV4N(nmp)) { 2310 /* 2311 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will 2312 * actually end up here, since the client will do 2313 * a recovery for NFSERR_BADSESSION, but will get 2314 * an NFSERR_NOGRACE reply for the first "reclaim" 2315 * attempt. 
2316 * So, call nfscl_expireclient() to recover the
2317 * opens as best we can and then do a reclaim
2318 * complete and return.
2319 */
2320 nfsrpc_reclaimcomplete(nmp, cred, p);
2321 nfscl_expireclient(clp, nmp, tcred, p);
2322 goto out;
2323 }
2324 }
2325 if (error != 0 && error != NFSERR_BADSESSION)
2326 nfscl_freeopen(op, 0, true);
2327 op = nop;
2328 }
2329 owp = nowp;
2330 }
2331
2332 /*
2333 * Now, try and get any delegations not yet reclaimed by cobbling
2334 * together an appropriate open.
2335 */
2336 nowp = NULL;
2337 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2338 while (dp != NULL) {
2339 ndp = TAILQ_NEXT(dp, nfsdl_list);
2340 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2341 if (nowp == NULL) {
2342 nowp = malloc(
2343 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2344 /*
2345 * Name must be as long as the largest possible
2346 * NFSV4CL_LOCKNAMELEN. 12 for now.
2347 */
2348 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2349 NFSV4CL_LOCKNAMELEN);
2350 LIST_INIT(&nowp->nfsow_open);
2351 nowp->nfsow_clp = clp;
2352 nowp->nfsow_seqid = 0;
2353 nowp->nfsow_defunct = 0;
2354 nfscl_lockinit(&nowp->nfsow_rwlock);
2355 }
2356 nop = NULL;
2357 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2358 nop = malloc(sizeof (struct nfsclopen) +
2359 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2360 nop->nfso_own = nowp;
2361 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2362 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2363 delegtype = NFSV4OPEN_DELEGATEWRITE;
2364 } else {
2365 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2366 delegtype = NFSV4OPEN_DELEGATEREAD;
2367 }
2368 nop->nfso_opencnt = 0;
2369 nop->nfso_posixlock = 1;
2370 nop->nfso_fhlen = dp->nfsdl_fhlen;
2371 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2372 LIST_INIT(&nop->nfso_lock);
2373 nop->nfso_stateid.seqid = 0;
2374 nop->nfso_stateid.other[0] = 0;
2375 nop->nfso_stateid.other[1] = 0;
2376 nop->nfso_stateid.other[2] = 0;
2377 newnfs_copycred(&dp->nfsdl_cred, tcred);
2378 newnfs_copyincred(tcred, &nop->nfso_cred);
2379 tdp = NULL;
2380 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2381 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2382 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2383 delegtype, tcred, p);
2384 if (tdp != NULL) {
2385 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2386 mode = NFSV4OPEN_ACCESSWRITE;
2387 else
2388 mode = NFSV4OPEN_ACCESSREAD;
2389 if ((nop->nfso_mode & mode) == mode &&
2390 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2391 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2392 nop->nfso_fhlen)) {
2393 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2394 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2395 dp->nfsdl_ace = tdp->nfsdl_ace;
2396 dp->nfsdl_change = tdp->nfsdl_change;
2397 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2398 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2399 dp->nfsdl_flags |= NFSCLDL_RECALL;
2400 free(tdp, M_NFSCLDELEG);
2401 } else {
2402 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2403 }
2404 }
2405 }
2406 if (error) {
2407 if (nop != NULL)
2408 free(nop, M_NFSCLOPEN);
2409 if (error == NFSERR_NOGRACE && !recovered_one &&
2410 NFSHASNFSV4N(nmp)) {
2411 /*
2412 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
2413 * actually end up here, since the client will do
2414 * a recovery for NFSERR_BADSESSION, but will get
2415 * an NFSERR_NOGRACE reply for the first "reclaim"
2416 * attempt.
2417 * So, call nfscl_expireclient() to recover the
2418 * opens as best we can and then do a reclaim
2419 * complete and return.
2420 */ 2421 nfsrpc_reclaimcomplete(nmp, cred, p); 2422 nfscl_expireclient(clp, nmp, tcred, p); 2423 free(nowp, M_NFSCLOWNER); 2424 goto out; 2425 } 2426 /* 2427 * Couldn't reclaim it, so throw the state 2428 * away. Ouch!! 2429 */ 2430 nfscl_cleandeleg(dp); 2431 nfscl_freedeleg(&clp->nfsc_deleg, dp, true); 2432 } else { 2433 recovered_one = true; 2434 LIST_INSERT_HEAD(&extra_open, nop, nfso_list); 2435 } 2436 } 2437 dp = ndp; 2438 } 2439 2440 /* 2441 * Now, get rid of extra Opens and Delegations. 2442 */ 2443 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { 2444 do { 2445 newnfs_copycred(&op->nfso_cred, tcred); 2446 error = nfscl_tryclose(op, tcred, nmp, p, true); 2447 if (error == NFSERR_GRACE) 2448 (void) nfs_catnap(PZERO, error, "nfsexcls"); 2449 } while (error == NFSERR_GRACE); 2450 LIST_REMOVE(op, nfso_list); 2451 free(op, M_NFSCLOPEN); 2452 } 2453 if (nowp != NULL) 2454 free(nowp, M_NFSCLOWNER); 2455 2456 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { 2457 do { 2458 newnfs_copycred(&dp->nfsdl_cred, tcred); 2459 error = nfscl_trydelegreturn(dp, tcred, nmp, p); 2460 if (error == NFSERR_GRACE) 2461 (void) nfs_catnap(PZERO, error, "nfsexdlg"); 2462 } while (error == NFSERR_GRACE); 2463 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); 2464 free(dp, M_NFSCLDELEG); 2465 } 2466 2467 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */ 2468 if (NFSHASNFSV4N(nmp)) 2469 (void)nfsrpc_reclaimcomplete(nmp, cred, p); 2470 2471 out: 2472 NFSLOCKCLSTATE(); 2473 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; 2474 wakeup(&clp->nfsc_flags); 2475 nfsv4_unlock(&clp->nfsc_lock, 0); 2476 NFSUNLOCKCLSTATE(); 2477 if (tcred != NULL) 2478 NFSFREECRED(tcred); 2479 } 2480 2481 /* 2482 * This function is called when a server replies with NFSERR_EXPIRED. 2483 * It deletes all state for the client and does a fresh SetClientId/confirm. 2484 * XXX Someday it should post a signal to the process(es) that hold the 2485 * state, so they know that lock state has been lost. 2486 */ 2487 int 2488 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) 2489 { 2490 struct nfsmount *nmp; 2491 struct ucred *cred; 2492 int igotlock = 0, error, trycnt; 2493 2494 /* 2495 * If the clientid has gone away or a new SetClientid has already 2496 * been done, just return ok. 2497 */ 2498 if (clp == NULL || clidrev != clp->nfsc_clientidrev) 2499 return (0); 2500 2501 /* 2502 * First, lock the client structure, so everyone else will 2503 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so 2504 * that only one thread does the work. 2505 */ 2506 NFSLOCKCLSTATE(); 2507 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; 2508 do { 2509 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2510 NFSCLSTATEMUTEXPTR, NULL); 2511 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); 2512 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { 2513 if (igotlock) 2514 nfsv4_unlock(&clp->nfsc_lock, 0); 2515 NFSUNLOCKCLSTATE(); 2516 return (0); 2517 } 2518 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2519 NFSUNLOCKCLSTATE(); 2520 2521 nmp = clp->nfsc_nmp; 2522 if (nmp == NULL) 2523 panic("nfscl expired"); 2524 cred = newnfs_getcred(); 2525 trycnt = 5; 2526 do { 2527 error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p); 2528 } while ((error == NFSERR_STALECLIENTID || 2529 error == NFSERR_BADSESSION || 2530 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2531 if (error) { 2532 NFSLOCKCLSTATE(); 2533 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2534 } else { 2535 /* 2536 * Expire the state for the client. 
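 * (NFSCLFLAGS_EXPIREIT ensures only one thread does the expiry; a
 * thread that loses the race sees the flag already cleared above,
 * drops the lock if it acquired it and returns 0.)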
2537 */ 2538 nfscl_expireclient(clp, nmp, cred, p); 2539 NFSLOCKCLSTATE(); 2540 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2541 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2542 } 2543 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); 2544 wakeup(&clp->nfsc_flags); 2545 nfsv4_unlock(&clp->nfsc_lock, 0); 2546 NFSUNLOCKCLSTATE(); 2547 NFSFREECRED(cred); 2548 return (error); 2549 } 2550 2551 /* 2552 * This function inserts a lock in the list after insert_lop. 2553 */ 2554 static void 2555 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, 2556 struct nfscllock *insert_lop, int local) 2557 { 2558 2559 if ((struct nfscllockowner *)insert_lop == lp) 2560 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); 2561 else 2562 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); 2563 if (local) 2564 nfsstatsv1.cllocallocks++; 2565 else 2566 nfsstatsv1.cllocks++; 2567 } 2568 2569 /* 2570 * This function updates the locking for a lock owner and given file. It 2571 * maintains a list of lock ranges ordered on increasing file offset that 2572 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 2573 * It always adds new_lop to the list and sometimes uses the one pointed 2574 * at by other_lopp. 2575 * Returns 1 if the locks were modified, 0 otherwise. 2576 */ 2577 static int 2578 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, 2579 struct nfscllock **other_lopp, int local) 2580 { 2581 struct nfscllock *new_lop = *new_lopp; 2582 struct nfscllock *lop, *tlop, *ilop; 2583 struct nfscllock *other_lop; 2584 int unlock = 0, modified = 0; 2585 u_int64_t tmp; 2586 2587 /* 2588 * Work down the list until the lock is merged. 2589 */ 2590 if (new_lop->nfslo_type == F_UNLCK) 2591 unlock = 1; 2592 ilop = (struct nfscllock *)lp; 2593 lop = LIST_FIRST(&lp->nfsl_lock); 2594 while (lop != NULL) { 2595 /* 2596 * Only check locks for this file that aren't before the start of 2597 * new lock's range. 2598 */ 2599 if (lop->nfslo_end >= new_lop->nfslo_first) { 2600 if (new_lop->nfslo_end < lop->nfslo_first) { 2601 /* 2602 * If the new lock ends before the start of the 2603 * current lock's range, no merge, just insert 2604 * the new lock. 2605 */ 2606 break; 2607 } 2608 if (new_lop->nfslo_type == lop->nfslo_type || 2609 (new_lop->nfslo_first <= lop->nfslo_first && 2610 new_lop->nfslo_end >= lop->nfslo_end)) { 2611 /* 2612 * This lock can be absorbed by the new lock/unlock. 2613 * This happens when it covers the entire range 2614 * of the old lock or is contiguous 2615 * with the old lock and is of the same type or an 2616 * unlock. 2617 */ 2618 if (new_lop->nfslo_type != lop->nfslo_type || 2619 new_lop->nfslo_first != lop->nfslo_first || 2620 new_lop->nfslo_end != lop->nfslo_end) 2621 modified = 1; 2622 if (lop->nfslo_first < new_lop->nfslo_first) 2623 new_lop->nfslo_first = lop->nfslo_first; 2624 if (lop->nfslo_end > new_lop->nfslo_end) 2625 new_lop->nfslo_end = lop->nfslo_end; 2626 tlop = lop; 2627 lop = LIST_NEXT(lop, nfslo_list); 2628 nfscl_freelock(tlop, local); 2629 continue; 2630 } 2631 2632 /* 2633 * All these cases are for contiguous locks that are not the 2634 * same type, so they can't be merged. 2635 */ 2636 if (new_lop->nfslo_first <= lop->nfslo_first) { 2637 /* 2638 * This case is where the new lock overlaps with the 2639 * first part of the old lock. Move the start of the 2640 * old lock to just past the end of the new lock. The 2641 * new lock will be inserted in front of the old, since 2642 * ilop hasn't been updated. 
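 * For example, if the old lock is a write lock over bytes 0-99 and
 * the new lock is a read lock over bytes 0-49, the old lock becomes
 * a write lock over bytes 50-99 and the read lock is inserted in
 * front of it.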
(We are done now.) 2643 */ 2644 if (lop->nfslo_first != new_lop->nfslo_end) { 2645 lop->nfslo_first = new_lop->nfslo_end; 2646 modified = 1; 2647 } 2648 break; 2649 } 2650 if (new_lop->nfslo_end >= lop->nfslo_end) { 2651 /* 2652 * This case is where the new lock overlaps with the 2653 * end of the old lock's range. Move the old lock's 2654 * end to just before the new lock's first and insert 2655 * the new lock after the old lock. 2656 * Might not be done yet, since the new lock could 2657 * overlap further locks with higher ranges. 2658 */ 2659 if (lop->nfslo_end != new_lop->nfslo_first) { 2660 lop->nfslo_end = new_lop->nfslo_first; 2661 modified = 1; 2662 } 2663 ilop = lop; 2664 lop = LIST_NEXT(lop, nfslo_list); 2665 continue; 2666 } 2667 /* 2668 * The final case is where the new lock's range is in the 2669 * middle of the current lock's and splits the current lock 2670 * up. Use *other_lopp to handle the second part of the 2671 * split old lock range. (We are done now.) 2672 * For unlock, we use new_lop as other_lop and tmp, since 2673 * other_lop and new_lop are the same for this case. 2674 * We noted the unlock case above, so we don't need 2675 * new_lop->nfslo_type any longer. 2676 */ 2677 tmp = new_lop->nfslo_first; 2678 if (unlock) { 2679 other_lop = new_lop; 2680 *new_lopp = NULL; 2681 } else { 2682 other_lop = *other_lopp; 2683 *other_lopp = NULL; 2684 } 2685 other_lop->nfslo_first = new_lop->nfslo_end; 2686 other_lop->nfslo_end = lop->nfslo_end; 2687 other_lop->nfslo_type = lop->nfslo_type; 2688 lop->nfslo_end = tmp; 2689 nfscl_insertlock(lp, other_lop, lop, local); 2690 ilop = lop; 2691 modified = 1; 2692 break; 2693 } 2694 ilop = lop; 2695 lop = LIST_NEXT(lop, nfslo_list); 2696 if (lop == NULL) 2697 break; 2698 } 2699 2700 /* 2701 * Insert the new lock in the list at the appropriate place. 2702 */ 2703 if (!unlock) { 2704 nfscl_insertlock(lp, new_lop, ilop, local); 2705 *new_lopp = NULL; 2706 modified = 1; 2707 } 2708 return (modified); 2709 } 2710 2711 /* 2712 * This function must be run as a kernel thread. 2713 * It does Renew Ops and recovery, when required. 2714 */ 2715 void 2716 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) 2717 { 2718 struct nfsclowner *owp, *nowp; 2719 struct nfsclopen *op; 2720 struct nfscllockowner *lp, *nlp; 2721 struct nfscldeleghead dh; 2722 struct nfscldeleg *dp, *ndp; 2723 struct ucred *cred; 2724 u_int32_t clidrev; 2725 int error, cbpathdown, islept, igotlock, ret, clearok; 2726 uint32_t recover_done_time = 0; 2727 time_t mytime; 2728 static time_t prevsec = 0; 2729 struct nfscllockownerfh *lfhp, *nlfhp; 2730 struct nfscllockownerfhhead lfh; 2731 struct nfscllayout *lyp, *nlyp; 2732 struct nfscldevinfo *dip, *ndip; 2733 struct nfscllayouthead rlh; 2734 struct nfsclrecalllayout *recallp; 2735 struct nfsclds *dsp; 2736 bool retok; 2737 struct mount *mp; 2738 vnode_t vp; 2739 2740 cred = newnfs_getcred(); 2741 NFSLOCKCLSTATE(); 2742 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; 2743 mp = clp->nfsc_nmp->nm_mountp; 2744 NFSUNLOCKCLSTATE(); 2745 for(;;) { 2746 newnfs_setroot(cred); 2747 cbpathdown = 0; 2748 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { 2749 /* 2750 * Only allow one full recover within 1/2 of the lease 2751 * duration (nfsc_renew). 2752 * retok is value/result. If passed in set to true, 2753 * it indicates only a CreateSession operation should 2754 * be attempted. 2755 * If it is returned true, it indicates that the 2756 * recovery only required a CreateSession. 
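 * In other words, at most one full Open/Lock reclaim pass is done
 * per nfsc_renew (1/2 lease) interval; within that interval, a
 * recovery triggered by NFSERR_BADSESSION is handled by a
 * CreateSession operation only.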
2757 */ 2758 retok = true; 2759 if (recover_done_time < NFSD_MONOSEC) { 2760 recover_done_time = NFSD_MONOSEC + 2761 clp->nfsc_renew; 2762 retok = false; 2763 } 2764 NFSCL_DEBUG(1, "Doing recovery, only " 2765 "createsession=%d\n", retok); 2766 nfscl_recover(clp, &retok, cred, p); 2767 } 2768 if (clp->nfsc_expire <= NFSD_MONOSEC && 2769 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { 2770 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; 2771 clidrev = clp->nfsc_clientidrev; 2772 error = nfsrpc_renew(clp, NULL, cred, p); 2773 if (error == NFSERR_CBPATHDOWN) 2774 cbpathdown = 1; 2775 else if (error == NFSERR_STALECLIENTID) { 2776 NFSLOCKCLSTATE(); 2777 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2778 NFSUNLOCKCLSTATE(); 2779 } else if (error == NFSERR_EXPIRED) 2780 (void) nfscl_hasexpired(clp, clidrev, p); 2781 } 2782 2783 checkdsrenew: 2784 if (NFSHASNFSV4N(clp->nfsc_nmp)) { 2785 /* Do renews for any DS sessions. */ 2786 NFSLOCKMNT(clp->nfsc_nmp); 2787 /* Skip first entry, since the MDS is handled above. */ 2788 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess); 2789 if (dsp != NULL) 2790 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2791 while (dsp != NULL) { 2792 if (dsp->nfsclds_expire <= NFSD_MONOSEC && 2793 dsp->nfsclds_sess.nfsess_defunct == 0) { 2794 dsp->nfsclds_expire = NFSD_MONOSEC + 2795 clp->nfsc_renew; 2796 NFSUNLOCKMNT(clp->nfsc_nmp); 2797 (void)nfsrpc_renew(clp, dsp, cred, p); 2798 goto checkdsrenew; 2799 } 2800 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2801 } 2802 NFSUNLOCKMNT(clp->nfsc_nmp); 2803 } 2804 2805 TAILQ_INIT(&dh); 2806 NFSLOCKCLSTATE(); 2807 if (cbpathdown) 2808 /* It's a Total Recall! */ 2809 nfscl_totalrecall(clp); 2810 2811 /* 2812 * Now, handle defunct owners. 2813 */ 2814 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 2815 if (LIST_EMPTY(&owp->nfsow_open)) { 2816 if (owp->nfsow_defunct != 0) 2817 nfscl_freeopenowner(owp, 0); 2818 } 2819 } 2820 2821 /* 2822 * Do the recall on any delegations. To avoid trouble, always 2823 * come back up here after having slept. 2824 */ 2825 igotlock = 0; 2826 tryagain: 2827 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2828 while (dp != NULL) { 2829 ndp = TAILQ_NEXT(dp, nfsdl_list); 2830 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { 2831 /* 2832 * Wait for outstanding I/O ops to be done. 2833 */ 2834 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 2835 if (igotlock) { 2836 nfsv4_unlock(&clp->nfsc_lock, 0); 2837 igotlock = 0; 2838 } 2839 dp->nfsdl_rwlock.nfslock_lock |= 2840 NFSV4LOCK_WANTED; 2841 msleep(&dp->nfsdl_rwlock, 2842 NFSCLSTATEMUTEXPTR, PVFS, "nfscld", 2843 5 * hz); 2844 if (NFSCL_FORCEDISM(mp)) 2845 goto terminate; 2846 goto tryagain; 2847 } 2848 while (!igotlock) { 2849 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 2850 &islept, NFSCLSTATEMUTEXPTR, mp); 2851 if (igotlock == 0 && NFSCL_FORCEDISM(mp)) 2852 goto terminate; 2853 if (islept) 2854 goto tryagain; 2855 } 2856 NFSUNLOCKCLSTATE(); 2857 newnfs_copycred(&dp->nfsdl_cred, cred); 2858 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, 2859 NULL, cred, p, 1, &vp); 2860 if (!ret) { 2861 nfscl_cleandeleg(dp); 2862 TAILQ_REMOVE(&clp->nfsc_deleg, dp, 2863 nfsdl_list); 2864 LIST_REMOVE(dp, nfsdl_hash); 2865 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2866 nfscl_delegcnt--; 2867 nfsstatsv1.cldelegates--; 2868 } 2869 NFSLOCKCLSTATE(); 2870 /* 2871 * The nfsc_lock must be released before doing 2872 * vrele(), since it might call nfs_inactive(). 
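 * (nfs_inactive() can Close the file and return a delegation, which
 * can require acquiring the client state lock again, so holding
 * nfsc_lock across the vrele() could deadlock.)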
2873 * For the unlikely case where the vnode failed 2874 * to be acquired by nfscl_recalldeleg(), a 2875 * VOP_RECLAIM() should be in progress and it 2876 * will return the delegation. 2877 */ 2878 nfsv4_unlock(&clp->nfsc_lock, 0); 2879 igotlock = 0; 2880 if (vp != NULL) { 2881 NFSUNLOCKCLSTATE(); 2882 vrele(vp); 2883 NFSLOCKCLSTATE(); 2884 } 2885 goto tryagain; 2886 } 2887 dp = ndp; 2888 } 2889 2890 /* 2891 * Clear out old delegations, if we are above the high water 2892 * mark. Only clear out ones with no state related to them. 2893 * The tailq list is in LRU order. 2894 */ 2895 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); 2896 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { 2897 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); 2898 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && 2899 dp->nfsdl_rwlock.nfslock_lock == 0 && 2900 dp->nfsdl_timestamp < NFSD_MONOSEC && 2901 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | 2902 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { 2903 clearok = 1; 2904 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2905 op = LIST_FIRST(&owp->nfsow_open); 2906 if (op != NULL) { 2907 clearok = 0; 2908 break; 2909 } 2910 } 2911 if (clearok) { 2912 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 2913 if (!LIST_EMPTY(&lp->nfsl_lock)) { 2914 clearok = 0; 2915 break; 2916 } 2917 } 2918 } 2919 if (clearok) { 2920 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); 2921 LIST_REMOVE(dp, nfsdl_hash); 2922 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2923 nfscl_delegcnt--; 2924 nfsstatsv1.cldelegates--; 2925 } 2926 } 2927 dp = ndp; 2928 } 2929 if (igotlock) 2930 nfsv4_unlock(&clp->nfsc_lock, 0); 2931 2932 /* 2933 * Do the recall on any layouts. To avoid trouble, always 2934 * come back up here after having slept. 2935 */ 2936 TAILQ_INIT(&rlh); 2937 tryagain2: 2938 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) { 2939 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) { 2940 /* 2941 * Wait for outstanding I/O ops to be done. 2942 */ 2943 if (lyp->nfsly_lock.nfslock_usecnt > 0 || 2944 (lyp->nfsly_lock.nfslock_lock & 2945 NFSV4LOCK_LOCK) != 0) { 2946 lyp->nfsly_lock.nfslock_lock |= 2947 NFSV4LOCK_WANTED; 2948 msleep(&lyp->nfsly_lock.nfslock_lock, 2949 NFSCLSTATEMUTEXPTR, PVFS, "nfslyp", 2950 5 * hz); 2951 if (NFSCL_FORCEDISM(mp)) 2952 goto terminate; 2953 goto tryagain2; 2954 } 2955 /* Move the layout to the recall list. */ 2956 TAILQ_REMOVE(&clp->nfsc_layout, lyp, 2957 nfsly_list); 2958 LIST_REMOVE(lyp, nfsly_hash); 2959 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list); 2960 2961 /* Handle any layout commits. */ 2962 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) && 2963 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 2964 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 2965 NFSUNLOCKCLSTATE(); 2966 NFSCL_DEBUG(3, "do layoutcommit\n"); 2967 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, 2968 cred, p); 2969 NFSLOCKCLSTATE(); 2970 goto tryagain2; 2971 } 2972 } 2973 } 2974 2975 /* Now, look for stale layouts. 
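 * A layout is considered stale when it is unreferenced and unlocked,
 * its timestamp has passed and it is not already being recalled or
 * held for return-on-close.  Such a layout is scheduled for a
 * LayoutReturn of the whole file range (0 to UINT64_MAX); the
 * M_NOWAIT allocation of the recall structure can fail, in which
 * case the scan simply stops until a later pass.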
*/ 2976 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead); 2977 while (lyp != NULL) { 2978 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list); 2979 if (lyp->nfsly_timestamp < NFSD_MONOSEC && 2980 (lyp->nfsly_flags & (NFSLY_RECALL | 2981 NFSLY_RETONCLOSE)) == 0 && 2982 lyp->nfsly_lock.nfslock_usecnt == 0 && 2983 lyp->nfsly_lock.nfslock_lock == 0) { 2984 NFSCL_DEBUG(4, "ret stale lay=%d\n", 2985 nfscl_layoutcnt); 2986 recallp = malloc(sizeof(*recallp), 2987 M_NFSLAYRECALL, M_NOWAIT); 2988 if (recallp == NULL) 2989 break; 2990 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, 2991 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX, 2992 lyp->nfsly_stateid.seqid, 0, 0, NULL, 2993 recallp); 2994 } 2995 lyp = nlyp; 2996 } 2997 2998 /* 2999 * Free up any unreferenced device info structures. 3000 */ 3001 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) { 3002 if (dip->nfsdi_layoutrefs == 0 && 3003 dip->nfsdi_refcnt == 0) { 3004 NFSCL_DEBUG(4, "freeing devinfo\n"); 3005 LIST_REMOVE(dip, nfsdi_list); 3006 nfscl_freedevinfo(dip); 3007 } 3008 } 3009 NFSUNLOCKCLSTATE(); 3010 3011 /* Do layout return(s), as required. */ 3012 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) { 3013 TAILQ_REMOVE(&rlh, lyp, nfsly_list); 3014 NFSCL_DEBUG(4, "ret layout\n"); 3015 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p); 3016 if ((lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) { 3017 NFSLOCKCLSTATE(); 3018 lyp->nfsly_flags |= NFSLY_RETURNED; 3019 wakeup(lyp); 3020 NFSUNLOCKCLSTATE(); 3021 } else 3022 nfscl_freelayout(lyp); 3023 } 3024 3025 /* 3026 * Delegreturn any delegations cleaned out or recalled. 3027 */ 3028 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { 3029 newnfs_copycred(&dp->nfsdl_cred, cred); 3030 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3031 TAILQ_REMOVE(&dh, dp, nfsdl_list); 3032 free(dp, M_NFSCLDELEG); 3033 } 3034 3035 SLIST_INIT(&lfh); 3036 /* 3037 * Call nfscl_cleanupkext() once per second to check for 3038 * open/lock owners where the process has exited. 3039 */ 3040 mytime = NFSD_MONOSEC; 3041 if (prevsec != mytime) { 3042 prevsec = mytime; 3043 nfscl_cleanupkext(clp, &lfh); 3044 } 3045 3046 /* 3047 * Do a ReleaseLockOwner for all lock owners where the 3048 * associated process no longer exists, as found by 3049 * nfscl_cleanupkext(). 3050 */ 3051 newnfs_setroot(cred); 3052 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) { 3053 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list, 3054 nlp) { 3055 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp, 3056 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred, 3057 p); 3058 nfscl_freelockowner(lp, 0); 3059 } 3060 free(lfhp, M_TEMP); 3061 } 3062 SLIST_INIT(&lfh); 3063 3064 NFSLOCKCLSTATE(); 3065 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) 3066 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", 3067 hz); 3068 terminate: 3069 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { 3070 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; 3071 NFSUNLOCKCLSTATE(); 3072 NFSFREECRED(cred); 3073 wakeup((caddr_t)clp); 3074 return; 3075 } 3076 NFSUNLOCKCLSTATE(); 3077 } 3078 } 3079 3080 /* 3081 * Initiate state recovery. Called when NFSERR_STALECLIENTID, 3082 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received. 3083 */ 3084 void 3085 nfscl_initiate_recovery(struct nfsclclient *clp) 3086 { 3087 3088 if (clp == NULL) 3089 return; 3090 NFSLOCKCLSTATE(); 3091 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 3092 NFSUNLOCKCLSTATE(); 3093 wakeup((caddr_t)clp); 3094 } 3095 3096 /* 3097 * Dump out the state stuff for debugging. 
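 * The flag arguments select what is printed.  For example,
 *     nfscl_dumpstate(nmp, 1, 1, 0, 0);
 * prints the open owners and opens, but skips the lock owners and
 * byte range locks.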
3098 */ 3099 void 3100 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, 3101 int lockowner, int locks) 3102 { 3103 struct nfsclclient *clp; 3104 struct nfsclowner *owp; 3105 struct nfsclopen *op; 3106 struct nfscllockowner *lp; 3107 struct nfscllock *lop; 3108 struct nfscldeleg *dp; 3109 3110 clp = nmp->nm_clp; 3111 if (clp == NULL) { 3112 printf("nfscl dumpstate NULL clp\n"); 3113 return; 3114 } 3115 NFSLOCKCLSTATE(); 3116 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 3117 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 3118 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 3119 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 3120 owp->nfsow_owner[0], owp->nfsow_owner[1], 3121 owp->nfsow_owner[2], owp->nfsow_owner[3], 3122 owp->nfsow_seqid); 3123 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3124 if (opens) 3125 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 3126 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 3127 op->nfso_stateid.other[2], op->nfso_opencnt, 3128 op->nfso_fh[12]); 3129 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 3130 if (lockowner) 3131 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 3132 lp->nfsl_owner[0], lp->nfsl_owner[1], 3133 lp->nfsl_owner[2], lp->nfsl_owner[3], 3134 lp->nfsl_seqid, 3135 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 3136 lp->nfsl_stateid.other[2]); 3137 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3138 if (locks) 3139 #ifdef __FreeBSD__ 3140 printf("lck typ=%d fst=%ju end=%ju\n", 3141 lop->nfslo_type, (intmax_t)lop->nfslo_first, 3142 (intmax_t)lop->nfslo_end); 3143 #else 3144 printf("lck typ=%d fst=%qd end=%qd\n", 3145 lop->nfslo_type, lop->nfslo_first, 3146 lop->nfslo_end); 3147 #endif 3148 } 3149 } 3150 } 3151 } 3152 } 3153 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3154 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 3155 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 3156 owp->nfsow_owner[0], owp->nfsow_owner[1], 3157 owp->nfsow_owner[2], owp->nfsow_owner[3], 3158 owp->nfsow_seqid); 3159 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3160 if (opens) 3161 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 3162 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 3163 op->nfso_stateid.other[2], op->nfso_opencnt, 3164 op->nfso_fh[12]); 3165 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 3166 if (lockowner) 3167 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 3168 lp->nfsl_owner[0], lp->nfsl_owner[1], 3169 lp->nfsl_owner[2], lp->nfsl_owner[3], 3170 lp->nfsl_seqid, 3171 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 3172 lp->nfsl_stateid.other[2]); 3173 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3174 if (locks) 3175 #ifdef __FreeBSD__ 3176 printf("lck typ=%d fst=%ju end=%ju\n", 3177 lop->nfslo_type, (intmax_t)lop->nfslo_first, 3178 (intmax_t)lop->nfslo_end); 3179 #else 3180 printf("lck typ=%d fst=%qd end=%qd\n", 3181 lop->nfslo_type, lop->nfslo_first, 3182 lop->nfslo_end); 3183 #endif 3184 } 3185 } 3186 } 3187 } 3188 NFSUNLOCKCLSTATE(); 3189 } 3190 3191 /* 3192 * Check for duplicate open owners and opens. 3193 * (Only used as a diagnostic aid.) 
3194 */ 3195 void 3196 nfscl_dupopen(vnode_t vp, int dupopens) 3197 { 3198 struct nfsclclient *clp; 3199 struct nfsclowner *owp, *owp2; 3200 struct nfsclopen *op, *op2; 3201 struct nfsfh *nfhp; 3202 3203 clp = VFSTONFS(vp->v_mount)->nm_clp; 3204 if (clp == NULL) { 3205 printf("nfscl dupopen NULL clp\n"); 3206 return; 3207 } 3208 nfhp = VTONFS(vp)->n_fhp; 3209 NFSLOCKCLSTATE(); 3210 3211 /* 3212 * First, search for duplicate owners. 3213 * These should never happen! 3214 */ 3215 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3216 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3217 if (owp != owp2 && 3218 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, 3219 NFSV4CL_LOCKNAMELEN)) { 3220 NFSUNLOCKCLSTATE(); 3221 printf("DUP OWNER\n"); 3222 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0); 3223 return; 3224 } 3225 } 3226 } 3227 3228 /* 3229 * Now, search for duplicate stateids. 3230 * These shouldn't happen, either. 3231 */ 3232 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3233 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 3234 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3235 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3236 if (op != op2 && 3237 (op->nfso_stateid.other[0] != 0 || 3238 op->nfso_stateid.other[1] != 0 || 3239 op->nfso_stateid.other[2] != 0) && 3240 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && 3241 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && 3242 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { 3243 NFSUNLOCKCLSTATE(); 3244 printf("DUP STATEID\n"); 3245 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0); 3246 return; 3247 } 3248 } 3249 } 3250 } 3251 } 3252 3253 /* 3254 * Now search for duplicate opens. 3255 * Duplicate opens for the same owner 3256 * should never occur. Other duplicates are 3257 * possible and are checked for if "dupopens" 3258 * is true. 3259 */ 3260 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3261 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 3262 if (nfhp->nfh_len == op2->nfso_fhlen && 3263 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { 3264 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3265 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3266 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && 3267 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && 3268 (!NFSBCMP(op->nfso_own->nfsow_owner, 3269 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || 3270 dupopens)) { 3271 if (!NFSBCMP(op->nfso_own->nfsow_owner, 3272 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 3273 NFSUNLOCKCLSTATE(); 3274 printf("BADDUP OPEN\n"); 3275 } else { 3276 NFSUNLOCKCLSTATE(); 3277 printf("DUP OPEN\n"); 3278 } 3279 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 3280 0); 3281 return; 3282 } 3283 } 3284 } 3285 } 3286 } 3287 } 3288 NFSUNLOCKCLSTATE(); 3289 } 3290 3291 /* 3292 * During close, find an open that needs to be dereferenced and 3293 * dereference it. If there are no more opens for this file, 3294 * log a message to that effect. 3295 * Opens aren't actually Close'd until VOP_INACTIVE() is performed 3296 * on the file's vnode. 3297 * This is the safe way, since it is difficult to identify 3298 * which open the close is for and I/O can be performed after the 3299 * close(2) system call when a file is mmap'd. 3300 * If it returns 0 for success, there will be a referenced 3301 * clp returned via clpp. 
3302 */ 3303 int 3304 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) 3305 { 3306 struct nfsclclient *clp; 3307 struct nfsclowner *owp; 3308 struct nfsclopen *op; 3309 struct nfscldeleg *dp; 3310 struct nfsfh *nfhp; 3311 int error, notdecr; 3312 3313 error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp); 3314 if (error) 3315 return (error); 3316 *clpp = clp; 3317 3318 nfhp = VTONFS(vp)->n_fhp; 3319 notdecr = 1; 3320 NFSLOCKCLSTATE(); 3321 /* 3322 * First, look for one under a delegation that was locally issued 3323 * and just decrement the opencnt for it. Since all my Opens against 3324 * the server are DENY_NONE, I don't see a problem with hanging 3325 * onto them. (It is much easier to use one of the extant Opens 3326 * that I already have on the server when a Delegation is recalled 3327 * than to do fresh Opens.) Someday, I might need to rethink this, but. 3328 */ 3329 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3330 if (dp != NULL) { 3331 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 3332 op = LIST_FIRST(&owp->nfsow_open); 3333 if (op != NULL) { 3334 /* 3335 * Since a delegation is for a file, there 3336 * should never be more than one open for 3337 * each openowner. 3338 */ 3339 if (LIST_NEXT(op, nfso_list) != NULL) 3340 panic("nfscdeleg opens"); 3341 if (notdecr && op->nfso_opencnt > 0) { 3342 notdecr = 0; 3343 op->nfso_opencnt--; 3344 break; 3345 } 3346 } 3347 } 3348 } 3349 3350 /* Now process the opens against the server. */ 3351 LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len), 3352 nfso_hash) { 3353 if (op->nfso_fhlen == nfhp->nfh_len && 3354 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3355 nfhp->nfh_len)) { 3356 /* Found an open, decrement cnt if possible */ 3357 if (notdecr && op->nfso_opencnt > 0) { 3358 notdecr = 0; 3359 op->nfso_opencnt--; 3360 } 3361 /* 3362 * There are more opens, so just return. 3363 */ 3364 if (op->nfso_opencnt > 0) { 3365 NFSUNLOCKCLSTATE(); 3366 return (0); 3367 } 3368 } 3369 } 3370 NFSUNLOCKCLSTATE(); 3371 if (notdecr) 3372 printf("nfscl: never fnd open\n"); 3373 return (0); 3374 } 3375 3376 int 3377 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) 3378 { 3379 struct nfsclclient *clp; 3380 struct nfsmount *nmp; 3381 struct nfsclowner *owp, *nowp; 3382 struct nfsclopen *op, *nop; 3383 struct nfsclopenhead delayed; 3384 struct nfscldeleg *dp; 3385 struct nfsfh *nfhp; 3386 struct nfsclrecalllayout *recallp; 3387 struct nfscllayout *lyp; 3388 int error; 3389 3390 error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp); 3391 if (error) 3392 return (error); 3393 *clpp = clp; 3394 3395 nmp = VFSTONFS(vp->v_mount); 3396 nfhp = VTONFS(vp)->n_fhp; 3397 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); 3398 NFSLOCKCLSTATE(); 3399 /* 3400 * First get rid of the local Open structures, which should be no 3401 * longer in use. 3402 */ 3403 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3404 if (dp != NULL) { 3405 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 3406 op = LIST_FIRST(&owp->nfsow_open); 3407 if (op != NULL) { 3408 KASSERT((op->nfso_opencnt == 0), 3409 ("nfscl: bad open cnt on deleg")); 3410 nfscl_freeopen(op, 1, true); 3411 } 3412 nfscl_freeopenowner(owp, 1); 3413 } 3414 } 3415 3416 /* Return any layouts marked return on close. */ 3417 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp, 3418 &lyp); 3419 3420 /* Now process the opens against the server. 
*/ 3421 LIST_INIT(&delayed); 3422 lookformore: 3423 LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len), 3424 nfso_hash) { 3425 if (op->nfso_fhlen == nfhp->nfh_len && 3426 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3427 nfhp->nfh_len)) { 3428 /* Found an open, close it. */ 3429 #ifdef DIAGNOSTIC 3430 KASSERT((op->nfso_opencnt == 0), 3431 ("nfscl: bad open cnt on server (%d)", 3432 op->nfso_opencnt)); 3433 #endif 3434 NFSUNLOCKCLSTATE(); 3435 if (NFSHASNFSV4N(nmp)) 3436 error = nfsrpc_doclose(nmp, op, p, false, true); 3437 else 3438 error = nfsrpc_doclose(nmp, op, p, true, true); 3439 NFSLOCKCLSTATE(); 3440 if (error == NFSERR_DELAY) { 3441 nfscl_unlinkopen(op); 3442 op->nfso_own = NULL; 3443 LIST_INSERT_HEAD(&delayed, op, nfso_list); 3444 } 3445 goto lookformore; 3446 } 3447 } 3448 nfscl_clrelease(clp); 3449 3450 /* Now, wait for any layout that is returned upon close. */ 3451 if (lyp != NULL) { 3452 while ((lyp->nfsly_flags & NFSLY_RETURNED) == 0) { 3453 if (NFSCL_FORCEDISM(nmp->nm_mountp)) { 3454 lyp = NULL; 3455 break; 3456 } 3457 msleep(lyp, NFSCLSTATEMUTEXPTR, PZERO, "nfslroc", hz); 3458 } 3459 if (lyp != NULL) 3460 nfscl_freelayout(lyp); 3461 } 3462 3463 NFSUNLOCKCLSTATE(); 3464 /* 3465 * recallp has been set NULL by nfscl_retoncloselayout() if it was 3466 * used by the function, but calling free() with a NULL pointer is ok. 3467 */ 3468 free(recallp, M_NFSLAYRECALL); 3469 3470 /* Now, loop retrying the delayed closes. */ 3471 LIST_FOREACH_SAFE(op, &delayed, nfso_list, nop) { 3472 nfsrpc_doclose(nmp, op, p, true, false); 3473 LIST_REMOVE(op, nfso_list); 3474 nfscl_freeopen(op, 0, false); 3475 } 3476 return (0); 3477 } 3478 3479 /* 3480 * Return all delegations on this client. 3481 * (Must be called with client sleep lock.) 3482 */ 3483 static void 3484 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p, 3485 struct nfscldeleghead *dhp) 3486 { 3487 struct nfscldeleg *dp, *ndp; 3488 struct ucred *cred; 3489 3490 cred = newnfs_getcred(); 3491 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { 3492 nfscl_cleandeleg(dp); 3493 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3494 if (dhp != NULL) { 3495 nfscl_freedeleg(&clp->nfsc_deleg, dp, false); 3496 TAILQ_INSERT_HEAD(dhp, dp, nfsdl_list); 3497 } else 3498 nfscl_freedeleg(&clp->nfsc_deleg, dp, true); 3499 } 3500 NFSFREECRED(cred); 3501 } 3502 3503 /* 3504 * Return any delegation for this vp. 3505 */ 3506 void 3507 nfscl_delegreturnvp(vnode_t vp, NFSPROC_T *p) 3508 { 3509 struct nfsclclient *clp; 3510 struct nfscldeleg *dp; 3511 struct ucred *cred; 3512 struct nfsnode *np; 3513 struct nfsmount *nmp; 3514 3515 nmp = VFSTONFS(vp->v_mount); 3516 NFSLOCKMNT(nmp); 3517 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) { 3518 NFSUNLOCKMNT(nmp); 3519 return; 3520 } 3521 NFSUNLOCKMNT(nmp); 3522 np = VTONFS(vp); 3523 cred = newnfs_getcred(); 3524 dp = NULL; 3525 NFSLOCKCLSTATE(); 3526 clp = nmp->nm_clp; 3527 if (clp != NULL) 3528 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 3529 np->n_fhp->nfh_len); 3530 if (dp != NULL) { 3531 nfscl_cleandeleg(dp); 3532 nfscl_freedeleg(&clp->nfsc_deleg, dp, false); 3533 NFSUNLOCKCLSTATE(); 3534 newnfs_copycred(&dp->nfsdl_cred, cred); 3535 nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3536 free(dp, M_NFSCLDELEG); 3537 } else 3538 NFSUNLOCKCLSTATE(); 3539 NFSFREECRED(cred); 3540 } 3541 3542 /* 3543 * Do a callback RPC. 
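 * The callback compound is parsed much like a server side compound:
 * a tag, the minor version, the callback ident and an operation
 * count, followed by the operations themselves.  Only CB_GETATTR,
 * CB_RECALL, CB_LAYOUTRECALL and CB_SEQUENCE are handled here and,
 * for NFSv4.1/4.2, CB_SEQUENCE must be the first operation of the
 * compound.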
3544 */ 3545 void 3546 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) 3547 { 3548 int clist, gotseq_ok, i, j, k, op, rcalls; 3549 u_int32_t *tl; 3550 struct nfsclclient *clp; 3551 struct nfscldeleg *dp = NULL; 3552 int numops, taglen = -1, error = 0, trunc __unused; 3553 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident; 3554 u_char tag[NFSV4_SMALLSTR + 1], *tagstr; 3555 vnode_t vp = NULL; 3556 struct nfsnode *np; 3557 struct vattr va; 3558 struct nfsfh *nfhp; 3559 mount_t mp; 3560 nfsattrbit_t attrbits, rattrbits; 3561 nfsv4stateid_t stateid; 3562 uint32_t seqid, slotid = 0, highslot, cachethis __unused; 3563 uint8_t sessionid[NFSX_V4SESSIONID]; 3564 struct mbuf *rep; 3565 struct nfscllayout *lyp; 3566 uint64_t filesid[2], len, off; 3567 int changed, gotone, laytype, recalltype; 3568 uint32_t iomode; 3569 struct nfsclrecalllayout *recallp = NULL; 3570 struct nfsclsession *tsep; 3571 3572 gotseq_ok = 0; 3573 nfsrvd_rephead(nd); 3574 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3575 taglen = fxdr_unsigned(int, *tl); 3576 if (taglen < 0 || taglen > NFSV4_OPAQUELIMIT) { 3577 error = EBADRPC; 3578 taglen = -1; 3579 goto nfsmout; 3580 } 3581 if (taglen <= NFSV4_SMALLSTR) 3582 tagstr = tag; 3583 else 3584 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); 3585 error = nfsrv_mtostr(nd, tagstr, taglen); 3586 if (error) { 3587 if (taglen > NFSV4_SMALLSTR) 3588 free(tagstr, M_TEMP); 3589 taglen = -1; 3590 goto nfsmout; 3591 } 3592 (void) nfsm_strtom(nd, tag, taglen); 3593 if (taglen > NFSV4_SMALLSTR) { 3594 free(tagstr, M_TEMP); 3595 } 3596 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); 3597 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3598 minorvers = fxdr_unsigned(u_int32_t, *tl++); 3599 if (minorvers != NFSV4_MINORVERSION && 3600 minorvers != NFSV41_MINORVERSION && 3601 minorvers != NFSV42_MINORVERSION) 3602 nd->nd_repstat = NFSERR_MINORVERMISMATCH; 3603 cbident = fxdr_unsigned(u_int32_t, *tl++); 3604 if (nd->nd_repstat) 3605 numops = 0; 3606 else 3607 numops = fxdr_unsigned(int, *tl); 3608 /* 3609 * Loop around doing the sub ops. 
3610 */ 3611 for (i = 0; i < numops; i++) { 3612 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3613 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); 3614 *repp++ = *tl; 3615 op = fxdr_unsigned(int, *tl); 3616 nd->nd_procnum = op; 3617 if (i == 0 && op != NFSV4OP_CBSEQUENCE && minorvers != 3618 NFSV4_MINORVERSION) { 3619 nd->nd_repstat = NFSERR_OPNOTINSESS; 3620 *repp = nfscl_errmap(nd, minorvers); 3621 retops++; 3622 break; 3623 } 3624 if (op < NFSV4OP_CBGETATTR || 3625 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) || 3626 (op > NFSV4OP_CBNOTIFYDEVID && 3627 minorvers == NFSV41_MINORVERSION) || 3628 (op > NFSV4OP_CBOFFLOAD && 3629 minorvers == NFSV42_MINORVERSION)) { 3630 nd->nd_repstat = NFSERR_OPILLEGAL; 3631 *repp = nfscl_errmap(nd, minorvers); 3632 retops++; 3633 break; 3634 } 3635 if (op < NFSV42_CBNOPS) 3636 nfsstatsv1.cbrpccnt[nd->nd_procnum]++; 3637 switch (op) { 3638 case NFSV4OP_CBGETATTR: 3639 NFSCL_DEBUG(4, "cbgetattr\n"); 3640 mp = NULL; 3641 vp = NULL; 3642 error = nfsm_getfh(nd, &nfhp); 3643 if (!error) 3644 error = nfsrv_getattrbits(nd, &attrbits, 3645 NULL, NULL); 3646 if (!error) { 3647 mp = nfscl_getmnt(minorvers, sessionid, cbident, 3648 &clp); 3649 if (mp == NULL) 3650 error = NFSERR_SERVERFAULT; 3651 } 3652 if (!error) { 3653 error = nfscl_ngetreopen(mp, nfhp->nfh_fh, 3654 nfhp->nfh_len, p, &np); 3655 if (!error) 3656 vp = NFSTOV(np); 3657 } 3658 if (!error) { 3659 NFSZERO_ATTRBIT(&rattrbits); 3660 NFSLOCKCLSTATE(); 3661 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3662 nfhp->nfh_len); 3663 if (dp != NULL) { 3664 if (NFSISSET_ATTRBIT(&attrbits, 3665 NFSATTRBIT_SIZE)) { 3666 if (vp != NULL) 3667 va.va_size = np->n_size; 3668 else 3669 va.va_size = 3670 dp->nfsdl_size; 3671 NFSSETBIT_ATTRBIT(&rattrbits, 3672 NFSATTRBIT_SIZE); 3673 } 3674 if (NFSISSET_ATTRBIT(&attrbits, 3675 NFSATTRBIT_CHANGE)) { 3676 va.va_filerev = 3677 dp->nfsdl_change; 3678 if (vp == NULL || 3679 (np->n_flag & NDELEGMOD)) 3680 va.va_filerev++; 3681 NFSSETBIT_ATTRBIT(&rattrbits, 3682 NFSATTRBIT_CHANGE); 3683 } 3684 } else 3685 error = NFSERR_SERVERFAULT; 3686 NFSUNLOCKCLSTATE(); 3687 } 3688 if (vp != NULL) 3689 vrele(vp); 3690 if (mp != NULL) 3691 vfs_unbusy(mp); 3692 if (nfhp != NULL) 3693 free(nfhp, M_NFSFH); 3694 if (!error) 3695 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, 3696 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0, 3697 (uint64_t)0, NULL); 3698 break; 3699 case NFSV4OP_CBRECALL: 3700 NFSCL_DEBUG(4, "cbrecall\n"); 3701 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3702 NFSX_UNSIGNED); 3703 stateid.seqid = *tl++; 3704 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, 3705 NFSX_STATEIDOTHER); 3706 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 3707 trunc = fxdr_unsigned(int, *tl); 3708 error = nfsm_getfh(nd, &nfhp); 3709 if (!error) { 3710 NFSLOCKCLSTATE(); 3711 if (minorvers == NFSV4_MINORVERSION) 3712 clp = nfscl_getclnt(cbident); 3713 else 3714 clp = nfscl_getclntsess(sessionid); 3715 if (clp != NULL) { 3716 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3717 nfhp->nfh_len); 3718 if (dp != NULL && (dp->nfsdl_flags & 3719 NFSCLDL_DELEGRET) == 0) { 3720 dp->nfsdl_flags |= 3721 NFSCLDL_RECALL; 3722 wakeup((caddr_t)clp); 3723 } 3724 } else { 3725 error = NFSERR_SERVERFAULT; 3726 } 3727 NFSUNLOCKCLSTATE(); 3728 } 3729 if (nfhp != NULL) 3730 free(nfhp, M_NFSFH); 3731 break; 3732 case NFSV4OP_CBLAYOUTRECALL: 3733 NFSCL_DEBUG(4, "cblayrec\n"); 3734 nfhp = NULL; 3735 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED); 3736 laytype = fxdr_unsigned(int, *tl++); 3737 iomode = fxdr_unsigned(uint32_t, *tl++); 
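			/*
			 * The CB_LAYOUTRECALL XDR continues with a boolean
			 * "layout changed" flag and the recall type (FILE,
			 * FSID or ALL), which determines how the remaining
			 * arguments are parsed below.
			 */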
3738 if (newnfs_true == *tl++) 3739 changed = 1; 3740 else 3741 changed = 0; 3742 recalltype = fxdr_unsigned(int, *tl); 3743 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n", 3744 laytype, iomode, changed, recalltype); 3745 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, 3746 M_WAITOK); 3747 if (laytype != NFSLAYOUT_NFSV4_1_FILES && 3748 laytype != NFSLAYOUT_FLEXFILE) 3749 error = NFSERR_NOMATCHLAYOUT; 3750 else if (recalltype == NFSLAYOUTRETURN_FILE) { 3751 error = nfsm_getfh(nd, &nfhp); 3752 NFSCL_DEBUG(4, "retfile getfh=%d\n", error); 3753 if (error != 0) 3754 goto nfsmout; 3755 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER + 3756 NFSX_STATEID); 3757 off = fxdr_hyper(tl); tl += 2; 3758 len = fxdr_hyper(tl); tl += 2; 3759 stateid.seqid = fxdr_unsigned(uint32_t, *tl++); 3760 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER); 3761 if (minorvers == NFSV4_MINORVERSION) 3762 error = NFSERR_NOTSUPP; 3763 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n", 3764 (uintmax_t)off, (uintmax_t)len, 3765 stateid.seqid, error); 3766 if (error == 0) { 3767 NFSLOCKCLSTATE(); 3768 clp = nfscl_getclntsess(sessionid); 3769 NFSCL_DEBUG(4, "cbly clp=%p\n", clp); 3770 if (clp != NULL) { 3771 lyp = nfscl_findlayout(clp, 3772 nfhp->nfh_fh, 3773 nfhp->nfh_len); 3774 NFSCL_DEBUG(4, "cblyp=%p\n", 3775 lyp); 3776 if (lyp != NULL && 3777 (lyp->nfsly_flags & 3778 (NFSLY_FILES | 3779 NFSLY_FLEXFILE)) != 0 && 3780 !NFSBCMP(stateid.other, 3781 lyp->nfsly_stateid.other, 3782 NFSX_STATEIDOTHER)) { 3783 error = 3784 nfscl_layoutrecall( 3785 recalltype, 3786 lyp, iomode, off, 3787 len, stateid.seqid, 3788 0, 0, NULL, 3789 recallp); 3790 if (error == 0 && 3791 stateid.seqid > 3792 lyp->nfsly_stateid.seqid) 3793 lyp->nfsly_stateid.seqid = 3794 stateid.seqid; 3795 recallp = NULL; 3796 wakeup(clp); 3797 NFSCL_DEBUG(4, 3798 "aft layrcal=%d " 3799 "layseqid=%d\n", 3800 error, 3801 lyp->nfsly_stateid.seqid); 3802 } else 3803 error = 3804 NFSERR_NOMATCHLAYOUT; 3805 } else 3806 error = NFSERR_NOMATCHLAYOUT; 3807 NFSUNLOCKCLSTATE(); 3808 } 3809 free(nfhp, M_NFSFH); 3810 } else if (recalltype == NFSLAYOUTRETURN_FSID) { 3811 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER); 3812 filesid[0] = fxdr_hyper(tl); tl += 2; 3813 filesid[1] = fxdr_hyper(tl); tl += 2; 3814 gotone = 0; 3815 NFSLOCKCLSTATE(); 3816 clp = nfscl_getclntsess(sessionid); 3817 if (clp != NULL) { 3818 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3819 nfsly_list) { 3820 if (lyp->nfsly_filesid[0] == 3821 filesid[0] && 3822 lyp->nfsly_filesid[1] == 3823 filesid[1]) { 3824 error = 3825 nfscl_layoutrecall( 3826 recalltype, 3827 lyp, iomode, 0, 3828 UINT64_MAX, 3829 lyp->nfsly_stateid.seqid, 3830 0, 0, NULL, 3831 recallp); 3832 recallp = NULL; 3833 gotone = 1; 3834 } 3835 } 3836 if (gotone != 0) 3837 wakeup(clp); 3838 else 3839 error = NFSERR_NOMATCHLAYOUT; 3840 } else 3841 error = NFSERR_NOMATCHLAYOUT; 3842 NFSUNLOCKCLSTATE(); 3843 } else if (recalltype == NFSLAYOUTRETURN_ALL) { 3844 gotone = 0; 3845 NFSLOCKCLSTATE(); 3846 clp = nfscl_getclntsess(sessionid); 3847 if (clp != NULL) { 3848 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3849 nfsly_list) { 3850 error = nfscl_layoutrecall( 3851 recalltype, lyp, iomode, 0, 3852 UINT64_MAX, 3853 lyp->nfsly_stateid.seqid, 3854 0, 0, NULL, recallp); 3855 recallp = NULL; 3856 gotone = 1; 3857 } 3858 if (gotone != 0) 3859 wakeup(clp); 3860 else 3861 error = NFSERR_NOMATCHLAYOUT; 3862 } else 3863 error = NFSERR_NOMATCHLAYOUT; 3864 NFSUNLOCKCLSTATE(); 3865 } else 3866 error = NFSERR_NOMATCHLAYOUT; 3867 if (recallp != NULL) { 3868 free(recallp, M_NFSLAYRECALL); 
3869 recallp = NULL; 3870 } 3871 break; 3872 case NFSV4OP_CBSEQUENCE: 3873 if (i != 0) { 3874 error = NFSERR_SEQUENCEPOS; 3875 break; 3876 } 3877 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3878 5 * NFSX_UNSIGNED); 3879 bcopy(tl, sessionid, NFSX_V4SESSIONID); 3880 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3881 seqid = fxdr_unsigned(uint32_t, *tl++); 3882 slotid = fxdr_unsigned(uint32_t, *tl++); 3883 highslot = fxdr_unsigned(uint32_t, *tl++); 3884 cachethis = *tl++; 3885 /* Throw away the referring call stuff. */ 3886 clist = fxdr_unsigned(int, *tl); 3887 for (j = 0; j < clist; j++) { 3888 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3889 NFSX_UNSIGNED); 3890 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3891 rcalls = fxdr_unsigned(int, *tl); 3892 for (k = 0; k < rcalls; k++) { 3893 NFSM_DISSECT(tl, uint32_t *, 3894 2 * NFSX_UNSIGNED); 3895 } 3896 } 3897 NFSLOCKCLSTATE(); 3898 clp = nfscl_getclntsess(sessionid); 3899 if (clp == NULL) 3900 error = NFSERR_SERVERFAULT; 3901 if (error == 0) { 3902 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3903 error = nfsv4_seqsession(seqid, slotid, 3904 highslot, tsep->nfsess_cbslots, &rep, 3905 tsep->nfsess_backslots); 3906 } 3907 NFSUNLOCKCLSTATE(); 3908 if (error == 0 || error == NFSERR_REPLYFROMCACHE) { 3909 gotseq_ok = 1; 3910 if (rep != NULL) { 3911 /* 3912 * Handle a reply for a retried 3913 * callback. The reply will be 3914 * re-inserted in the session cache 3915 * by the nfsv4_seqsess_cacherep() call 3916 * after out: 3917 */ 3918 KASSERT(error == NFSERR_REPLYFROMCACHE, 3919 ("cbsequence: non-NULL rep")); 3920 NFSCL_DEBUG(4, "Got cbretry\n"); 3921 m_freem(nd->nd_mreq); 3922 nd->nd_mreq = rep; 3923 rep = NULL; 3924 goto out; 3925 } 3926 NFSM_BUILD(tl, uint32_t *, 3927 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED); 3928 bcopy(sessionid, tl, NFSX_V4SESSIONID); 3929 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3930 *tl++ = txdr_unsigned(seqid); 3931 *tl++ = txdr_unsigned(slotid); 3932 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1); 3933 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1); 3934 } 3935 break; 3936 default: 3937 if (i == 0 && minorvers != NFSV4_MINORVERSION) 3938 error = NFSERR_OPNOTINSESS; 3939 else { 3940 NFSCL_DEBUG(1, "unsupp callback %d\n", op); 3941 error = NFSERR_NOTSUPP; 3942 } 3943 break; 3944 } 3945 if (error) { 3946 if (error == EBADRPC || error == NFSERR_BADXDR) { 3947 nd->nd_repstat = NFSERR_BADXDR; 3948 } else { 3949 nd->nd_repstat = error; 3950 } 3951 error = 0; 3952 } 3953 retops++; 3954 if (nd->nd_repstat) { 3955 *repp = nfscl_errmap(nd, minorvers); 3956 break; 3957 } else 3958 *repp = 0; /* NFS4_OK */ 3959 } 3960 nfsmout: 3961 if (recallp != NULL) 3962 free(recallp, M_NFSLAYRECALL); 3963 if (error) { 3964 if (error == EBADRPC || error == NFSERR_BADXDR) 3965 nd->nd_repstat = NFSERR_BADXDR; 3966 else 3967 printf("nfsv4 comperr1=%d\n", error); 3968 } 3969 if (taglen == -1) { 3970 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 3971 *tl++ = 0; 3972 *tl = 0; 3973 } else { 3974 *retopsp = txdr_unsigned(retops); 3975 } 3976 *nd->nd_errp = nfscl_errmap(nd, minorvers); 3977 out: 3978 if (gotseq_ok != 0) { 3979 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK); 3980 NFSLOCKCLSTATE(); 3981 clp = nfscl_getclntsess(sessionid); 3982 if (clp != NULL) { 3983 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3984 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots, 3985 NFSERR_OK, &rep); 3986 NFSUNLOCKCLSTATE(); 3987 } else { 3988 NFSUNLOCKCLSTATE(); 3989 m_freem(rep); 3990 } 3991 } 3992 } 3993 3994 /* 3995 * Generate the next cbident value. 
Basically just increment a static value 3996 * and then check that it isn't already in the list, if it has wrapped around. 3997 */ 3998 static u_int32_t 3999 nfscl_nextcbident(void) 4000 { 4001 struct nfsclclient *clp; 4002 int matched; 4003 static u_int32_t nextcbident = 0; 4004 static int haswrapped = 0; 4005 4006 nextcbident++; 4007 if (nextcbident == 0) 4008 haswrapped = 1; 4009 if (haswrapped) { 4010 /* 4011 * Search the clientid list for one already using this cbident. 4012 */ 4013 do { 4014 matched = 0; 4015 NFSLOCKCLSTATE(); 4016 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 4017 if (clp->nfsc_cbident == nextcbident) { 4018 matched = 1; 4019 break; 4020 } 4021 } 4022 NFSUNLOCKCLSTATE(); 4023 if (matched == 1) 4024 nextcbident++; 4025 } while (matched); 4026 } 4027 return (nextcbident); 4028 } 4029 4030 /* 4031 * Get the mount point related to a given cbident or session and busy it. 4032 */ 4033 static mount_t 4034 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident, 4035 struct nfsclclient **clpp) 4036 { 4037 struct nfsclclient *clp; 4038 mount_t mp; 4039 int error; 4040 struct nfsclsession *tsep; 4041 4042 *clpp = NULL; 4043 NFSLOCKCLSTATE(); 4044 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 4045 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 4046 if (minorvers == NFSV4_MINORVERSION) { 4047 if (clp->nfsc_cbident == cbident) 4048 break; 4049 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, 4050 NFSX_V4SESSIONID)) 4051 break; 4052 } 4053 if (clp == NULL) { 4054 NFSUNLOCKCLSTATE(); 4055 return (NULL); 4056 } 4057 mp = clp->nfsc_nmp->nm_mountp; 4058 vfs_ref(mp); 4059 NFSUNLOCKCLSTATE(); 4060 error = vfs_busy(mp, 0); 4061 vfs_rel(mp); 4062 if (error != 0) 4063 return (NULL); 4064 *clpp = clp; 4065 return (mp); 4066 } 4067 4068 /* 4069 * Get the clientid pointer related to a given cbident. 4070 */ 4071 static struct nfsclclient * 4072 nfscl_getclnt(u_int32_t cbident) 4073 { 4074 struct nfsclclient *clp; 4075 4076 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 4077 if (clp->nfsc_cbident == cbident) 4078 break; 4079 return (clp); 4080 } 4081 4082 /* 4083 * Get the clientid pointer related to a given sessionid. 4084 */ 4085 static struct nfsclclient * 4086 nfscl_getclntsess(uint8_t *sessionid) 4087 { 4088 struct nfsclclient *clp; 4089 struct nfsclsession *tsep; 4090 4091 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 4092 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 4093 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, 4094 NFSX_V4SESSIONID)) 4095 break; 4096 } 4097 return (clp); 4098 } 4099 4100 /* 4101 * Search for a lock conflict locally on the client. A conflict occurs if 4102 * - not same owner and overlapping byte range and at least one of them is 4103 * a write lock or this is an unlock. 
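 * For example, a write lock request by one owner for bytes 50-149
 * conflicts with a read lock held by a different owner on bytes 0-99,
 * but a read lock request for the same range does not.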
4104  */
4105 static int
4106 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
4107     struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
4108     struct nfscllock **lopp)
4109 {
4110 	struct nfsclopen *op;
4111 	int ret;
4112 
4113 	if (dp != NULL) {
4114 		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
4115 		if (ret)
4116 			return (ret);
4117 	}
4118 	LIST_FOREACH(op, NFSCLOPENHASH(clp, fhp, fhlen), nfso_hash) {
4119 		if (op->nfso_fhlen == fhlen &&
4120 		    !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
4121 			ret = nfscl_checkconflict(&op->nfso_lock, nlop,
4122 			    own, lopp);
4123 			if (ret)
4124 				return (ret);
4125 		}
4126 	}
4127 	return (0);
4128 }
4129 
4130 static int
4131 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
4132     u_int8_t *own, struct nfscllock **lopp)
4133 {
4134 	struct nfscllockowner *lp;
4135 	struct nfscllock *lop;
4136 
4137 	LIST_FOREACH(lp, lhp, nfsl_list) {
4138 		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
4139 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
4140 				if (lop->nfslo_first >= nlop->nfslo_end)
4141 					break;
4142 				if (lop->nfslo_end <= nlop->nfslo_first)
4143 					continue;
4144 				if (lop->nfslo_type == F_WRLCK ||
4145 				    nlop->nfslo_type == F_WRLCK ||
4146 				    nlop->nfslo_type == F_UNLCK) {
4147 					if (lopp != NULL)
4148 						*lopp = lop;
4149 					return (NFSERR_DENIED);
4150 				}
4151 			}
4152 		}
4153 	}
4154 	return (0);
4155 }
4156 
4157 /*
4158  * Check for a local conflicting lock.
4159  */
4160 int
4161 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
4162     u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
4163 {
4164 	struct nfscllock *lop, nlck;
4165 	struct nfscldeleg *dp;
4166 	struct nfsnode *np;
4167 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
4168 	int error;
4169 
4170 	nlck.nfslo_type = fl->l_type;
4171 	nlck.nfslo_first = off;
4172 	if (len == NFS64BITSSET) {
4173 		nlck.nfslo_end = NFS64BITSSET;
4174 	} else {
4175 		nlck.nfslo_end = off + len;
4176 		if (nlck.nfslo_end <= nlck.nfslo_first)
4177 			return (NFSERR_INVAL);
4178 	}
4179 	np = VTONFS(vp);
4180 	nfscl_filllockowner(id, own, flags);
4181 	NFSLOCKCLSTATE();
4182 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4183 	error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
4184 	    &nlck, own, dp, &lop);
4185 	if (error != 0) {
4186 		fl->l_whence = SEEK_SET;
4187 		fl->l_start = lop->nfslo_first;
4188 		if (lop->nfslo_end == NFS64BITSSET)
4189 			fl->l_len = 0;
4190 		else
4191 			fl->l_len = lop->nfslo_end - lop->nfslo_first;
4192 		fl->l_pid = (pid_t)0;
4193 		fl->l_type = lop->nfslo_type;
4194 		error = -1;			/* no RPC required */
4195 	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
4196 	    fl->l_type == F_RDLCK)) {
4197 		/*
4198 		 * The delegation ensures that there isn't a conflicting
4199 		 * lock on the server, so return -1 to indicate an RPC
4200 		 * isn't required.
4201 		 */
4202 		fl->l_type = F_UNLCK;
4203 		error = -1;
4204 	}
4205 	NFSUNLOCKCLSTATE();
4206 	return (error);
4207 }
4208 
4209 /*
4210  * Handle Recall of a delegation.
4211  * The clp must be exclusively locked when this is called.
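 * Recall handling: dirty data is flushed to the server for a write
 * delegation, and any opens and byte range locks issued locally under
 * the delegation are re-established against the server before the
 * delegation is returned.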
4212 */ 4213 static int 4214 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, 4215 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p, 4216 int called_from_renewthread, vnode_t *vpp) 4217 { 4218 struct nfsclowner *owp, *lowp, *nowp; 4219 struct nfsclopen *op, *lop; 4220 struct nfscllockowner *lp; 4221 struct nfscllock *lckp; 4222 struct nfsnode *np; 4223 int error = 0, ret; 4224 4225 if (vp == NULL) { 4226 KASSERT(vpp != NULL, ("nfscl_recalldeleg: vpp NULL")); 4227 *vpp = NULL; 4228 /* 4229 * First, get a vnode for the file. This is needed to do RPCs. 4230 */ 4231 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, 4232 dp->nfsdl_fhlen, p, &np); 4233 if (ret) { 4234 /* 4235 * File isn't open, so nothing to move over to the 4236 * server. 4237 */ 4238 return (0); 4239 } 4240 vp = NFSTOV(np); 4241 *vpp = vp; 4242 } else { 4243 np = VTONFS(vp); 4244 } 4245 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; 4246 4247 /* 4248 * Ok, if it's a write delegation, flush data to the server, so 4249 * that close/open consistency is retained. 4250 */ 4251 ret = 0; 4252 NFSLOCKNODE(np); 4253 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { 4254 np->n_flag |= NDELEGRECALL; 4255 NFSUNLOCKNODE(np); 4256 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread); 4257 NFSLOCKNODE(np); 4258 np->n_flag &= ~NDELEGRECALL; 4259 } 4260 NFSINVALATTRCACHE(np); 4261 NFSUNLOCKNODE(np); 4262 if (ret == EIO && called_from_renewthread != 0) { 4263 /* 4264 * If the flush failed with EIO for the renew thread, 4265 * return now, so that the dirty buffer will be flushed 4266 * later. 4267 */ 4268 return (ret); 4269 } 4270 4271 /* 4272 * Now, for each openowner with opens issued locally, move them 4273 * over to state against the server. 4274 */ 4275 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) { 4276 lop = LIST_FIRST(&lowp->nfsow_open); 4277 if (lop != NULL) { 4278 if (LIST_NEXT(lop, nfso_list) != NULL) 4279 panic("nfsdlg mult opens"); 4280 /* 4281 * Look for the same openowner against the server. 4282 */ 4283 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 4284 if (!NFSBCMP(lowp->nfsow_owner, 4285 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 4286 newnfs_copycred(&dp->nfsdl_cred, cred); 4287 ret = nfscl_moveopen(vp, clp, nmp, lop, 4288 owp, dp, cred, p); 4289 if (ret == NFSERR_STALECLIENTID || 4290 ret == NFSERR_STALEDONTRECOVER || 4291 ret == NFSERR_BADSESSION) 4292 return (ret); 4293 if (ret) { 4294 nfscl_freeopen(lop, 1, true); 4295 if (!error) 4296 error = ret; 4297 } 4298 break; 4299 } 4300 } 4301 4302 /* 4303 * If no openowner found, create one and get an open 4304 * for it. 4305 */ 4306 if (owp == NULL) { 4307 nowp = malloc( 4308 sizeof (struct nfsclowner), M_NFSCLOWNER, 4309 M_WAITOK); 4310 nfscl_newopen(clp, NULL, &owp, &nowp, &op, 4311 NULL, lowp->nfsow_owner, dp->nfsdl_fh, 4312 dp->nfsdl_fhlen, NULL, NULL); 4313 newnfs_copycred(&dp->nfsdl_cred, cred); 4314 ret = nfscl_moveopen(vp, clp, nmp, lop, 4315 owp, dp, cred, p); 4316 if (ret) { 4317 nfscl_freeopenowner(owp, 0); 4318 if (ret == NFSERR_STALECLIENTID || 4319 ret == NFSERR_STALEDONTRECOVER || 4320 ret == NFSERR_BADSESSION) 4321 return (ret); 4322 if (ret) { 4323 nfscl_freeopen(lop, 1, true); 4324 if (!error) 4325 error = ret; 4326 } 4327 } 4328 } 4329 } 4330 } 4331 4332 /* 4333 * Now, get byte range locks for any locks done locally. 
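	 * (Each lock recorded under the delegation is re-acquired from
	 * the server via nfscl_relock() below.)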
4334 	 */
4335 	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4336 		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4337 			newnfs_copycred(&dp->nfsdl_cred, cred);
4338 			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4339 			if (ret == NFSERR_STALESTATEID ||
4340 			    ret == NFSERR_STALEDONTRECOVER ||
4341 			    ret == NFSERR_STALECLIENTID ||
4342 			    ret == NFSERR_BADSESSION)
4343 				return (ret);
4344 			if (ret && !error)
4345 				error = ret;
4346 		}
4347 	}
4348 	return (error);
4349 }
4350 
4351 /*
4352  * Move a locally issued open over to an owner on the state list.
4353  * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
4354  * returns with it unlocked.
4355  */
4356 static int
4357 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4358     struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4359     struct ucred *cred, NFSPROC_T *p)
4360 {
4361 	struct nfsclopen *op, *nop;
4362 	struct nfscldeleg *ndp;
4363 	struct nfsnode *np;
4364 	int error = 0, newone;
4365 
4366 	/*
4367 	 * First, look for an appropriate open. If found, just increment the
4368 	 * opencnt in it.
4369 	 */
4370 	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4371 		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4372 		    op->nfso_fhlen == lop->nfso_fhlen &&
4373 		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4374 			op->nfso_opencnt += lop->nfso_opencnt;
4375 			nfscl_freeopen(lop, 1, true);
4376 			return (0);
4377 		}
4378 	}
4379 
4380 	/* No appropriate open, so we have to do one against the server. */
4381 	np = VTONFS(vp);
4382 	nop = malloc(sizeof (struct nfsclopen) +
4383 	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4384 	nop->nfso_hash.le_prev = NULL;
4385 	newone = 0;
4386 	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4387 	    lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4388 	ndp = dp;
4389 	if (NFSHASNFSV4N(nmp))
4390 		error = nfscl_tryopen(nmp, vp, lop->nfso_fh, lop->nfso_fhlen,
4391 		    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4392 		    NULL, 0, &ndp, 0, 0, cred, p);
4393 	else
4394 		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4395 		    np->n_v4->n4_fhlen, lop->nfso_fh, lop->nfso_fhlen,
4396 		    lop->nfso_mode, op, NFS4NODENAME(np->n_v4),
4397 		    np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4398 	if (error) {
4399 		if (newone)
4400 			nfscl_freeopen(op, 0, true);
4401 	} else {
4402 		op->nfso_mode |= lop->nfso_mode;
4403 		op->nfso_opencnt += lop->nfso_opencnt;
4404 		nfscl_freeopen(lop, 1, true);
4405 	}
4406 	if (nop != NULL)
4407 		free(nop, M_NFSCLOPEN);
4408 	if (ndp != NULL) {
4409 		/*
4410 		 * What should I do with the returned delegation, since the
4411 		 * delegation is being recalled? For now, just printf and
4412 		 * throw it away.
4413 		 */
4414 		printf("Moveopen returned deleg\n");
4415 		free(ndp, M_NFSCLDELEG);
4416 	}
4417 	return (error);
4418 }
4419 
4420 /*
4421  * Recall all delegations on this client.
4422  */
4423 static void
4424 nfscl_totalrecall(struct nfsclclient *clp)
4425 {
4426 	struct nfscldeleg *dp;
4427 
4428 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4429 		if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4430 			dp->nfsdl_flags |= NFSCLDL_RECALL;
4431 	}
4432 }
4433 
4434 /*
4435  * Relock byte ranges. Called for delegation recall and state expiry.
4436 */ 4437 static int 4438 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 4439 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred, 4440 NFSPROC_T *p) 4441 { 4442 struct nfscllockowner *nlp; 4443 struct nfsfh *nfhp; 4444 struct nfsnode *np; 4445 u_int64_t off, len; 4446 int error, newone, donelocally; 4447 4448 if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp)) { 4449 np = VTONFS(vp); 4450 NFSLOCKNODE(np); 4451 np->n_flag |= NMIGHTBELOCKED; 4452 NFSUNLOCKNODE(np); 4453 } 4454 4455 off = lop->nfslo_first; 4456 len = lop->nfslo_end - lop->nfslo_first; 4457 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p, 4458 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner, 4459 lp->nfsl_openowner, &nlp, &newone, &donelocally); 4460 if (error || donelocally) 4461 return (error); 4462 nfhp = VTONFS(vp)->n_fhp; 4463 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh, 4464 nfhp->nfh_len, nlp, newone, 0, off, 4465 len, lop->nfslo_type, cred, p); 4466 if (error) 4467 nfscl_freelockowner(nlp, 0); 4468 return (error); 4469 } 4470 4471 /* 4472 * Called to re-open a file. Basically get a vnode for the file handle 4473 * and then call nfsrpc_openrpc() to do the rest. 4474 */ 4475 static int 4476 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, 4477 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp, 4478 struct ucred *cred, NFSPROC_T *p) 4479 { 4480 struct nfsnode *np; 4481 vnode_t vp; 4482 int error; 4483 4484 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np); 4485 if (error) 4486 return (error); 4487 vp = NFSTOV(np); 4488 if (NFSHASNFSV4N(nmp)) 4489 error = nfscl_tryopen(nmp, vp, fhp, fhlen, fhp, fhlen, mode, op, 4490 NULL, 0, dpp, 0, 0, cred, p); 4491 else if (np->n_v4 != NULL) 4492 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, 4493 np->n_v4->n4_fhlen, fhp, fhlen, mode, op, 4494 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0, 4495 cred, p); 4496 else 4497 error = EINVAL; 4498 vrele(vp); 4499 return (error); 4500 } 4501 4502 /* 4503 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while 4504 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials 4505 * fail. 4506 */ 4507 static int 4508 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4509 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, 4510 u_int8_t *name, int namelen, struct nfscldeleg **ndpp, 4511 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) 4512 { 4513 int error; 4514 struct nfscldeleg *dp; 4515 4516 dp = *ndpp; 4517 do { 4518 *ndpp = dp; /* *ndpp needs to be set for retries. */ 4519 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, 4520 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 4521 0, 0); 4522 if (error == NFSERR_DELAY) 4523 (void) nfs_catnap(PZERO, error, "nfstryop"); 4524 } while (error == NFSERR_DELAY); 4525 if (error == EAUTH || error == EACCES) { 4526 /* Try again using system credentials */ 4527 newnfs_setroot(cred); 4528 do { 4529 *ndpp = dp; /* *ndpp needs to be set for retries. */ 4530 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, 4531 newfhlen, mode, op, name, namelen, ndpp, reclaim, 4532 delegtype, cred, p, 1, 0); 4533 if (error == NFSERR_DELAY) 4534 (void) nfs_catnap(PZERO, error, "nfstryop"); 4535 } while (error == NFSERR_DELAY); 4536 } 4537 return (error); 4538 } 4539 4540 /* 4541 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns 4542 * NFSERR_DELAY. 
Also, retry with system credentials, if the provided
4543  * credentials fail.
4544  */
4545 static int
4546 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4547     int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4548     u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4549 {
4550 	struct nfsrv_descript nfsd, *nd = &nfsd;
4551 	int error;
4552 
4553 	do {
4554 		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4555 		    reclaim, off, len, type, cred, p, 0);
4556 		if (!error && nd->nd_repstat == NFSERR_DELAY)
4557 			(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4558 			    "nfstrylck");
4559 	} while (!error && nd->nd_repstat == NFSERR_DELAY);
4560 	if (!error)
4561 		error = nd->nd_repstat;
4562 	if (error == EAUTH || error == EACCES) {
4563 		/* Try again using root credentials */
4564 		newnfs_setroot(cred);
4565 		do {
4566 			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4567 			    newone, reclaim, off, len, type, cred, p, 1);
4568 			if (!error && nd->nd_repstat == NFSERR_DELAY)
4569 				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4570 				    "nfstrylck");
4571 		} while (!error && nd->nd_repstat == NFSERR_DELAY);
4572 		if (!error)
4573 			error = nd->nd_repstat;
4574 	}
4575 	return (error);
4576 }
4577 
4578 /*
4579  * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4580  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
4581  * credentials fail.
4582  */
4583 int
4584 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4585     struct nfsmount *nmp, NFSPROC_T *p)
4586 {
4587 	int error;
4588 
4589 	do {
4590 		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4591 		if (error == NFSERR_DELAY)
4592 			(void) nfs_catnap(PZERO, error, "nfstrydp");
4593 	} while (error == NFSERR_DELAY);
4594 	if (error == EAUTH || error == EACCES) {
4595 		/* Try again using system credentials */
4596 		newnfs_setroot(cred);
4597 		do {
4598 			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4599 			if (error == NFSERR_DELAY)
4600 				(void) nfs_catnap(PZERO, error, "nfstrydp");
4601 		} while (error == NFSERR_DELAY);
4602 	}
4603 	return (error);
4604 }
4605 
4606 /*
4607  * Try a close against the server. Just call nfsrpc_closerpc(),
4608  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
4609  * credentials fail.
4610  */
4611 int
4612 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4613     struct nfsmount *nmp, NFSPROC_T *p, bool loop_on_delayed)
4614 {
4615 	struct nfsrv_descript nfsd, *nd = &nfsd;
4616 	int error;
4617 
4618 	do {
4619 		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4620 		if (loop_on_delayed && error == NFSERR_DELAY)
4621 			(void) nfs_catnap(PZERO, error, "nfstrycl");
4622 	} while (loop_on_delayed && error == NFSERR_DELAY);
4623 	if (error == EAUTH || error == EACCES) {
4624 		/* Try again using system credentials */
4625 		newnfs_setroot(cred);
4626 		do {
4627 			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4628 			if (loop_on_delayed && error == NFSERR_DELAY)
4629 				(void) nfs_catnap(PZERO, error, "nfstrycl");
4630 		} while (loop_on_delayed && error == NFSERR_DELAY);
4631 	}
4632 	return (error);
4633 }
4634 
4635 /*
4636  * Decide if a delegation on a file permits close without flushing writes
4637  * to the server. This might be a big performance win in some environments.
4638  * (Not useful until the client does caching on local stable storage.)
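 * In short, the flush can only be avoided when a write delegation is
 * held that is not being recalled or returned and, for "strict RFC 3530"
 * mounts, the delegation's size limit covers the file's current size.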
4639 */ 4640 int 4641 nfscl_mustflush(vnode_t vp) 4642 { 4643 struct nfsclclient *clp; 4644 struct nfscldeleg *dp; 4645 struct nfsnode *np; 4646 struct nfsmount *nmp; 4647 4648 np = VTONFS(vp); 4649 nmp = VFSTONFS(vp->v_mount); 4650 if (!NFSHASNFSV4(nmp)) 4651 return (1); 4652 NFSLOCKMNT(nmp); 4653 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) { 4654 NFSUNLOCKMNT(nmp); 4655 return (1); 4656 } 4657 NFSUNLOCKMNT(nmp); 4658 NFSLOCKCLSTATE(); 4659 clp = nfscl_findcl(nmp); 4660 if (clp == NULL) { 4661 NFSUNLOCKCLSTATE(); 4662 return (1); 4663 } 4664 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4665 if (dp != NULL && (dp->nfsdl_flags & 4666 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 4667 NFSCLDL_WRITE && 4668 (dp->nfsdl_sizelimit >= np->n_size || 4669 !NFSHASSTRICT3530(nmp))) { 4670 NFSUNLOCKCLSTATE(); 4671 return (0); 4672 } 4673 NFSUNLOCKCLSTATE(); 4674 return (1); 4675 } 4676 4677 /* 4678 * See if a (write) delegation exists for this file. 4679 */ 4680 int 4681 nfscl_nodeleg(vnode_t vp, int writedeleg) 4682 { 4683 struct nfsclclient *clp; 4684 struct nfscldeleg *dp; 4685 struct nfsnode *np; 4686 struct nfsmount *nmp; 4687 4688 np = VTONFS(vp); 4689 nmp = VFSTONFS(vp->v_mount); 4690 if (!NFSHASNFSV4(nmp)) 4691 return (1); 4692 NFSLOCKMNT(nmp); 4693 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) { 4694 NFSUNLOCKMNT(nmp); 4695 return (1); 4696 } 4697 NFSUNLOCKMNT(nmp); 4698 NFSLOCKCLSTATE(); 4699 clp = nfscl_findcl(nmp); 4700 if (clp == NULL) { 4701 NFSUNLOCKCLSTATE(); 4702 return (1); 4703 } 4704 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4705 if (dp != NULL && 4706 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 && 4707 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) == 4708 NFSCLDL_WRITE)) { 4709 NFSUNLOCKCLSTATE(); 4710 return (0); 4711 } 4712 NFSUNLOCKCLSTATE(); 4713 return (1); 4714 } 4715 4716 /* 4717 * Look for an associated delegation that should be DelegReturned. 4718 */ 4719 int 4720 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp) 4721 { 4722 struct nfsclclient *clp; 4723 struct nfscldeleg *dp; 4724 struct nfsclowner *owp; 4725 struct nfscllockowner *lp; 4726 struct nfsmount *nmp; 4727 struct mount *mp; 4728 struct ucred *cred; 4729 struct nfsnode *np; 4730 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4731 4732 nmp = VFSTONFS(vp->v_mount); 4733 if (NFSHASPNFS(nmp)) 4734 return (retcnt); 4735 NFSLOCKMNT(nmp); 4736 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) { 4737 NFSUNLOCKMNT(nmp); 4738 return (retcnt); 4739 } 4740 NFSUNLOCKMNT(nmp); 4741 np = VTONFS(vp); 4742 mp = nmp->nm_mountp; 4743 NFSLOCKCLSTATE(); 4744 /* 4745 * Loop around waiting for: 4746 * - outstanding I/O operations on delegations to complete 4747 * - for a delegation on vp that has state, lock the client and 4748 * do a recall 4749 * - return delegation with no state 4750 */ 4751 while (1) { 4752 clp = nfscl_findcl(nmp); 4753 if (clp == NULL) { 4754 NFSUNLOCKCLSTATE(); 4755 return (retcnt); 4756 } 4757 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4758 np->n_fhp->nfh_len); 4759 if (dp != NULL) { 4760 /* 4761 * Wait for outstanding I/O ops to be done. 
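			 * (The nfsdl_rwlock use count is non-zero while I/O
			 * issued under the delegation is still in progress;
			 * sleep until it drains so the delegation is not
			 * recalled mid-I/O.)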
4762 */ 4763 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4764 if (igotlock) { 4765 nfsv4_unlock(&clp->nfsc_lock, 0); 4766 igotlock = 0; 4767 } 4768 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4769 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, 4770 "nfscld", hz); 4771 if (NFSCL_FORCEDISM(mp)) { 4772 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET; 4773 NFSUNLOCKCLSTATE(); 4774 return (0); 4775 } 4776 continue; 4777 } 4778 needsrecall = 0; 4779 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4780 if (!LIST_EMPTY(&owp->nfsow_open)) { 4781 needsrecall = 1; 4782 break; 4783 } 4784 } 4785 if (!needsrecall) { 4786 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4787 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4788 needsrecall = 1; 4789 break; 4790 } 4791 } 4792 } 4793 if (needsrecall && !triedrecall) { 4794 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4795 islept = 0; 4796 while (!igotlock) { 4797 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4798 &islept, NFSCLSTATEMUTEXPTR, mp); 4799 if (NFSCL_FORCEDISM(mp)) { 4800 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET; 4801 if (igotlock) 4802 nfsv4_unlock(&clp->nfsc_lock, 0); 4803 NFSUNLOCKCLSTATE(); 4804 return (0); 4805 } 4806 if (islept) 4807 break; 4808 } 4809 if (islept) 4810 continue; 4811 NFSUNLOCKCLSTATE(); 4812 cred = newnfs_getcred(); 4813 newnfs_copycred(&dp->nfsdl_cred, cred); 4814 nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0, NULL); 4815 NFSFREECRED(cred); 4816 triedrecall = 1; 4817 NFSLOCKCLSTATE(); 4818 nfsv4_unlock(&clp->nfsc_lock, 0); 4819 igotlock = 0; 4820 continue; 4821 } 4822 *stp = dp->nfsdl_stateid; 4823 retcnt = 1; 4824 nfscl_cleandeleg(dp); 4825 nfscl_freedeleg(&clp->nfsc_deleg, dp, true); 4826 } 4827 if (igotlock) 4828 nfsv4_unlock(&clp->nfsc_lock, 0); 4829 NFSUNLOCKCLSTATE(); 4830 return (retcnt); 4831 } 4832 } 4833 4834 /* 4835 * Look for associated delegation(s) that should be DelegReturned. 4836 */ 4837 int 4838 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp, 4839 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p) 4840 { 4841 struct nfsclclient *clp; 4842 struct nfscldeleg *dp; 4843 struct nfsclowner *owp; 4844 struct nfscllockowner *lp; 4845 struct nfsmount *nmp; 4846 struct mount *mp; 4847 struct ucred *cred; 4848 struct nfsnode *np; 4849 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4850 4851 nmp = VFSTONFS(fvp->v_mount); 4852 *gotfdp = 0; 4853 *gottdp = 0; 4854 if (NFSHASPNFS(nmp)) 4855 return (retcnt); 4856 NFSLOCKMNT(nmp); 4857 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) { 4858 NFSUNLOCKMNT(nmp); 4859 return (retcnt); 4860 } 4861 NFSUNLOCKMNT(nmp); 4862 mp = nmp->nm_mountp; 4863 NFSLOCKCLSTATE(); 4864 /* 4865 * Loop around waiting for: 4866 * - outstanding I/O operations on delegations to complete 4867 * - for a delegation on fvp that has state, lock the client and 4868 * do a recall 4869 * - return delegation(s) with no state. 4870 */ 4871 while (1) { 4872 clp = nfscl_findcl(nmp); 4873 if (clp == NULL) { 4874 NFSUNLOCKCLSTATE(); 4875 return (retcnt); 4876 } 4877 np = VTONFS(fvp); 4878 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4879 np->n_fhp->nfh_len); 4880 if (dp != NULL && *gotfdp == 0) { 4881 /* 4882 * Wait for outstanding I/O ops to be done. 
4883 */ 4884 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4885 if (igotlock) { 4886 nfsv4_unlock(&clp->nfsc_lock, 0); 4887 igotlock = 0; 4888 } 4889 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4890 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, 4891 "nfscld", hz); 4892 if (NFSCL_FORCEDISM(mp)) { 4893 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET; 4894 NFSUNLOCKCLSTATE(); 4895 *gotfdp = 0; 4896 *gottdp = 0; 4897 return (0); 4898 } 4899 continue; 4900 } 4901 needsrecall = 0; 4902 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4903 if (!LIST_EMPTY(&owp->nfsow_open)) { 4904 needsrecall = 1; 4905 break; 4906 } 4907 } 4908 if (!needsrecall) { 4909 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4910 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4911 needsrecall = 1; 4912 break; 4913 } 4914 } 4915 } 4916 if (needsrecall && !triedrecall) { 4917 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4918 islept = 0; 4919 while (!igotlock) { 4920 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4921 &islept, NFSCLSTATEMUTEXPTR, mp); 4922 if (NFSCL_FORCEDISM(mp)) { 4923 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET; 4924 if (igotlock) 4925 nfsv4_unlock(&clp->nfsc_lock, 0); 4926 NFSUNLOCKCLSTATE(); 4927 *gotfdp = 0; 4928 *gottdp = 0; 4929 return (0); 4930 } 4931 if (islept) 4932 break; 4933 } 4934 if (islept) 4935 continue; 4936 NFSUNLOCKCLSTATE(); 4937 cred = newnfs_getcred(); 4938 newnfs_copycred(&dp->nfsdl_cred, cred); 4939 nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0, NULL); 4940 NFSFREECRED(cred); 4941 triedrecall = 1; 4942 NFSLOCKCLSTATE(); 4943 nfsv4_unlock(&clp->nfsc_lock, 0); 4944 igotlock = 0; 4945 continue; 4946 } 4947 *fstp = dp->nfsdl_stateid; 4948 retcnt++; 4949 *gotfdp = 1; 4950 nfscl_cleandeleg(dp); 4951 nfscl_freedeleg(&clp->nfsc_deleg, dp, true); 4952 } 4953 if (igotlock) { 4954 nfsv4_unlock(&clp->nfsc_lock, 0); 4955 igotlock = 0; 4956 } 4957 if (tvp != NULL) { 4958 np = VTONFS(tvp); 4959 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4960 np->n_fhp->nfh_len); 4961 if (dp != NULL && *gottdp == 0) { 4962 /* 4963 * Wait for outstanding I/O ops to be done. 4964 */ 4965 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4966 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4967 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO, 4968 "nfscld", hz); 4969 if (NFSCL_FORCEDISM(mp)) { 4970 NFSUNLOCKCLSTATE(); 4971 *gotfdp = 0; 4972 *gottdp = 0; 4973 return (0); 4974 } 4975 continue; 4976 } 4977 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4978 if (!LIST_EMPTY(&owp->nfsow_open)) { 4979 NFSUNLOCKCLSTATE(); 4980 return (retcnt); 4981 } 4982 } 4983 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4984 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4985 NFSUNLOCKCLSTATE(); 4986 return (retcnt); 4987 } 4988 } 4989 *tstp = dp->nfsdl_stateid; 4990 retcnt++; 4991 *gottdp = 1; 4992 nfscl_cleandeleg(dp); 4993 nfscl_freedeleg(&clp->nfsc_deleg, dp, true); 4994 } 4995 } 4996 NFSUNLOCKCLSTATE(); 4997 return (retcnt); 4998 } 4999 } 5000 5001 /* 5002 * Get a reference on the clientid associated with the mount point. 5003 * Return 1 if success, 0 otherwise. 
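 * A minimal usage sketch (hypothetical caller):
 *
 *	if (nfscl_getref(nmp) != 0) {
 *		(issue RPCs that require the clientid)
 *		nfscl_relref(nmp);
 *	}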
5004 */ 5005 int 5006 nfscl_getref(struct nfsmount *nmp) 5007 { 5008 struct nfsclclient *clp; 5009 int ret; 5010 5011 NFSLOCKCLSTATE(); 5012 clp = nfscl_findcl(nmp); 5013 if (clp == NULL) { 5014 NFSUNLOCKCLSTATE(); 5015 return (0); 5016 } 5017 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, nmp->nm_mountp); 5018 ret = 1; 5019 if (NFSCL_FORCEDISM(nmp->nm_mountp)) 5020 ret = 0; 5021 NFSUNLOCKCLSTATE(); 5022 return (ret); 5023 } 5024 5025 /* 5026 * Release a reference on a clientid acquired with the above call. 5027 */ 5028 void 5029 nfscl_relref(struct nfsmount *nmp) 5030 { 5031 struct nfsclclient *clp; 5032 5033 NFSLOCKCLSTATE(); 5034 clp = nfscl_findcl(nmp); 5035 if (clp == NULL) { 5036 NFSUNLOCKCLSTATE(); 5037 return; 5038 } 5039 nfsv4_relref(&clp->nfsc_lock); 5040 NFSUNLOCKCLSTATE(); 5041 } 5042 5043 /* 5044 * Save the size attribute in the delegation, since the nfsnode 5045 * is going away. 5046 */ 5047 void 5048 nfscl_reclaimnode(vnode_t vp) 5049 { 5050 struct nfsclclient *clp; 5051 struct nfscldeleg *dp; 5052 struct nfsnode *np = VTONFS(vp); 5053 struct nfsmount *nmp; 5054 5055 nmp = VFSTONFS(vp->v_mount); 5056 if (!NFSHASNFSV4(nmp)) 5057 return; 5058 NFSLOCKCLSTATE(); 5059 clp = nfscl_findcl(nmp); 5060 if (clp == NULL) { 5061 NFSUNLOCKCLSTATE(); 5062 return; 5063 } 5064 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5065 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 5066 dp->nfsdl_size = np->n_size; 5067 NFSUNLOCKCLSTATE(); 5068 } 5069 5070 /* 5071 * Get the saved size attribute in the delegation, since it is a 5072 * newly allocated nfsnode. 5073 */ 5074 void 5075 nfscl_newnode(vnode_t vp) 5076 { 5077 struct nfsclclient *clp; 5078 struct nfscldeleg *dp; 5079 struct nfsnode *np = VTONFS(vp); 5080 struct nfsmount *nmp; 5081 5082 nmp = VFSTONFS(vp->v_mount); 5083 if (!NFSHASNFSV4(nmp)) 5084 return; 5085 NFSLOCKCLSTATE(); 5086 clp = nfscl_findcl(nmp); 5087 if (clp == NULL) { 5088 NFSUNLOCKCLSTATE(); 5089 return; 5090 } 5091 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5092 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 5093 np->n_size = dp->nfsdl_size; 5094 NFSUNLOCKCLSTATE(); 5095 } 5096 5097 /* 5098 * If there is a valid write delegation for this file, set the modtime 5099 * to the local clock time. 5100 */ 5101 void 5102 nfscl_delegmodtime(vnode_t vp) 5103 { 5104 struct nfsclclient *clp; 5105 struct nfscldeleg *dp; 5106 struct nfsnode *np = VTONFS(vp); 5107 struct nfsmount *nmp; 5108 5109 nmp = VFSTONFS(vp->v_mount); 5110 if (!NFSHASNFSV4(nmp)) 5111 return; 5112 NFSLOCKMNT(nmp); 5113 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) { 5114 NFSUNLOCKMNT(nmp); 5115 return; 5116 } 5117 NFSUNLOCKMNT(nmp); 5118 NFSLOCKCLSTATE(); 5119 clp = nfscl_findcl(nmp); 5120 if (clp == NULL) { 5121 NFSUNLOCKCLSTATE(); 5122 return; 5123 } 5124 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5125 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) { 5126 nanotime(&dp->nfsdl_modtime); 5127 dp->nfsdl_flags |= NFSCLDL_MODTIMESET; 5128 } 5129 NFSUNLOCKCLSTATE(); 5130 } 5131 5132 /* 5133 * If there is a valid write delegation for this file with a modtime set, 5134 * put that modtime in mtime. 
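 * This is the read side of nfscl_delegmodtime() above, so that attribute
 * handling can report the locally recorded modify time while the write
 * delegation is held.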
5135  */
5136 void
5137 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
5138 {
5139 	struct nfsclclient *clp;
5140 	struct nfscldeleg *dp;
5141 	struct nfsnode *np = VTONFS(vp);
5142 	struct nfsmount *nmp;
5143 
5144 	nmp = VFSTONFS(vp->v_mount);
5145 	if (!NFSHASNFSV4(nmp))
5146 		return;
5147 	NFSLOCKMNT(nmp);
5148 	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
5149 		NFSUNLOCKMNT(nmp);
5150 		return;
5151 	}
5152 	NFSUNLOCKMNT(nmp);
5153 	NFSLOCKCLSTATE();
5154 	clp = nfscl_findcl(nmp);
5155 	if (clp == NULL) {
5156 		NFSUNLOCKCLSTATE();
5157 		return;
5158 	}
5159 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5160 	if (dp != NULL &&
5161 	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
5162 	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
5163 		*mtime = dp->nfsdl_modtime;
5164 	NFSUNLOCKCLSTATE();
5165 }
5166 
5167 static int
5168 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
5169 {
5170 	short *defaulterrp, *errp;
5171 
5172 	if (!nd->nd_repstat)
5173 		return (0);
5174 	if (nd->nd_procnum == NFSPROC_NOOP)
5175 		return (txdr_unsigned(nd->nd_repstat & 0xffff));
5176 	if (nd->nd_repstat == EBADRPC)
5177 		return (txdr_unsigned(NFSERR_BADXDR));
5178 	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
5179 	    nd->nd_repstat == NFSERR_OPILLEGAL)
5180 		return (txdr_unsigned(nd->nd_repstat));
5181 	if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
5182 	    minorvers > NFSV4_MINORVERSION) {
5183 		/* NFSv4.n error. */
5184 		return (txdr_unsigned(nd->nd_repstat));
5185 	}
5186 	if (nd->nd_procnum < NFSV4OP_CBNOPS)
5187 		errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
5188 	else
5189 		return (txdr_unsigned(nd->nd_repstat));
5190 	while (*++errp)
5191 		if (*errp == (short)nd->nd_repstat)
5192 			return (txdr_unsigned(nd->nd_repstat));
5193 	return (txdr_unsigned(*defaulterrp));
5194 }
5195 
5196 /*
5197  * Called to find/add a layout to a client.
5198  * This function returns the layout with a refcnt (shared lock) upon
5199  * success (returns 0) or with no lock/refcnt on the layout when an
5200  * error is returned.
5201  * If a layout is passed in via lypp, it is locked (exclusively locked).
5202  */
5203 int
5204 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
5205     nfsv4stateid_t *stateidp, int layouttype, int retonclose,
5206     struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
5207     struct ucred *cred, NFSPROC_T *p)
5208 {
5209 	struct nfsclclient *clp;
5210 	struct nfscllayout *lyp, *tlyp;
5211 	struct nfsclflayout *flp;
5212 	struct nfsnode *np = VTONFS(vp);
5213 	mount_t mp;
5214 	int layout_passed_in;
5215 
5216 	mp = nmp->nm_mountp;
5217 	layout_passed_in = 1;
5218 	tlyp = NULL;
5219 	lyp = *lypp;
5220 	if (lyp == NULL) {
5221 		layout_passed_in = 0;
5222 		tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
5223 		    M_WAITOK | M_ZERO);
5224 	}
5225 
5226 	NFSLOCKCLSTATE();
5227 	clp = nmp->nm_clp;
5228 	if (clp == NULL) {
5229 		if (layout_passed_in != 0)
5230 			nfsv4_unlock(&lyp->nfsly_lock, 0);
5231 		NFSUNLOCKCLSTATE();
5232 		if (tlyp != NULL)
5233 			free(tlyp, M_NFSLAYOUT);
5234 		return (EPERM);
5235 	}
5236 	if (lyp == NULL) {
5237 		/*
5238 		 * Although no lyp was passed in, another thread might have
5239 		 * allocated one. If one is found, just increment its ref
5240 		 * count and return it.
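		 * (The lookup must be redone here because the malloc()
		 * above can sleep, allowing another thread to insert a
		 * layout for this file handle first.)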
5241 		 */
5242 		lyp = nfscl_findlayout(clp, fhp, fhlen);
5243 		if (lyp == NULL) {
5244 			lyp = tlyp;
5245 			tlyp = NULL;
5246 			lyp->nfsly_stateid.seqid = stateidp->seqid;
5247 			lyp->nfsly_stateid.other[0] = stateidp->other[0];
5248 			lyp->nfsly_stateid.other[1] = stateidp->other[1];
5249 			lyp->nfsly_stateid.other[2] = stateidp->other[2];
5250 			lyp->nfsly_lastbyte = 0;
5251 			LIST_INIT(&lyp->nfsly_flayread);
5252 			LIST_INIT(&lyp->nfsly_flayrw);
5253 			LIST_INIT(&lyp->nfsly_recall);
5254 			lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
5255 			lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
5256 			lyp->nfsly_clp = clp;
5257 			if (layouttype == NFSLAYOUT_FLEXFILE)
5258 				lyp->nfsly_flags = NFSLY_FLEXFILE;
5259 			else
5260 				lyp->nfsly_flags = NFSLY_FILES;
5261 			if (retonclose != 0)
5262 				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
5263 			lyp->nfsly_fhlen = fhlen;
5264 			NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
5265 			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
5266 			LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
5267 			    nfsly_hash);
5268 			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
5269 			nfscl_layoutcnt++;
5270 			nfsstatsv1.cllayouts++;
5271 		} else {
5272 			if (retonclose != 0)
5273 				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
5274 			if (stateidp->seqid > lyp->nfsly_stateid.seqid)
5275 				lyp->nfsly_stateid.seqid = stateidp->seqid;
5276 			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
5277 			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
5278 			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
5279 		}
5280 		nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
5281 		if (NFSCL_FORCEDISM(mp)) {
5282 			NFSUNLOCKCLSTATE();
5283 			if (tlyp != NULL)
5284 				free(tlyp, M_NFSLAYOUT);
5285 			return (EPERM);
5286 		}
5287 		*lypp = lyp;
5288 	} else if (stateidp->seqid > lyp->nfsly_stateid.seqid)
5289 		lyp->nfsly_stateid.seqid = stateidp->seqid;
5290 
5291 	/* Merge the new list of File Layouts into the list. */
5292 	flp = LIST_FIRST(fhlp);
5293 	if (flp != NULL) {
5294 		if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
5295 			nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
5296 		else
5297 			nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
5298 	}
5299 	if (layout_passed_in != 0)
5300 		nfsv4_unlock(&lyp->nfsly_lock, 1);
5301 	NFSUNLOCKCLSTATE();
5302 	if (tlyp != NULL)
5303 		free(tlyp, M_NFSLAYOUT);
5304 	return (0);
5305 }
5306 
5307 /*
5308  * Search for a layout by MDS file handle.
5309  * If one is found, it is returned with a refcnt (shared lock) if
5310  * *retflpp is returned non-NULL, and exclusively locked if *retflpp is
5311  * returned NULL.
5312  */
5313 struct nfscllayout *
5314 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
5315     uint64_t off, uint32_t rwaccess, struct nfsclflayout **retflpp,
5316     int *recalledp)
5317 {
5318 	struct nfscllayout *lyp;
5319 	mount_t mp;
5320 	int error, igotlock;
5321 
5322 	mp = clp->nfsc_nmp->nm_mountp;
5323 	*recalledp = 0;
5324 	*retflpp = NULL;
5325 	NFSLOCKCLSTATE();
5326 	lyp = nfscl_findlayout(clp, fhp, fhlen);
5327 	if (lyp != NULL) {
5328 		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
5329 			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
5330 			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
5331 			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
5332 			error = nfscl_findlayoutforio(lyp, off, rwaccess,
5333 			    retflpp);
5334 			if (error == 0)
5335 				nfsv4_getref(&lyp->nfsly_lock, NULL,
5336 				    NFSCLSTATEMUTEXPTR, mp);
5337 			else {
5338 				do {
5339 					igotlock = nfsv4_lock(&lyp->nfsly_lock,
5340 					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
5341 				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
5342 				*retflpp = NULL;
5343 			}
5344 			if (NFSCL_FORCEDISM(mp)) {
5345 				lyp = NULL;
5346 				*recalledp = 1;
5347 			}
5348 		} else {
5349 			lyp = NULL;
5350 			*recalledp = 1;
5351 		}
5352 	}
5353 	NFSUNLOCKCLSTATE();
5354 	return (lyp);
5355 }
5356 
5357 /*
5358  * Search for a layout by MDS file handle. If one is found, mark it to be
5359  * recalled, if it is already marked "return on close".
5360  */
5361 static void
5362 nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
5363     int fhlen, struct nfsclrecalllayout **recallpp, struct nfscllayout **lypp)
5364 {
5365 	struct nfscllayout *lyp;
5366 	uint32_t iomode;
5367 
5368 	*lypp = NULL;
5369 	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) ||
5370 	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
5371 	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
5372 		return;
5373 	lyp = nfscl_findlayout(clp, fhp, fhlen);
5374 	if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
5375 		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
5376 			iomode = 0;
5377 			if (!LIST_EMPTY(&lyp->nfsly_flayread))
5378 				iomode |= NFSLAYOUTIOMODE_READ;
5379 			if (!LIST_EMPTY(&lyp->nfsly_flayrw))
5380 				iomode |= NFSLAYOUTIOMODE_RW;
5381 			nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
5382 			    0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
5383 			    *recallpp);
5384 			NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
5385 			*recallpp = NULL;
5386 		}
5387 
5388 		/* Now, wake up renew thread to do LayoutReturn. */
5389 		wakeup(clp);
5390 		*lypp = lyp;
5391 	}
5392 }
5393 
5394 /*
5395  * Mark the layout to be recalled and record the error.
5396  * Also, disable the dsp from further use.
5397  */
5398 void
5399 nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
5400     struct nfscllayout *lyp, struct nfsclds *dsp)
5401 {
5402 	struct nfsclrecalllayout *recallp;
5403 	uint32_t iomode;
5404 
5405 	printf("DS being disabled, error=%d\n", stat);
5406 	/* Set up the return of the layout.
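	 * (A recall entry is queued on the layout so that the renew
	 * thread performs the actual LayoutReturn.)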
*/ 5407 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); 5408 iomode = 0; 5409 NFSLOCKCLSTATE(); 5410 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { 5411 if (!LIST_EMPTY(&lyp->nfsly_flayread)) 5412 iomode |= NFSLAYOUTIOMODE_READ; 5413 if (!LIST_EMPTY(&lyp->nfsly_flayrw)) 5414 iomode |= NFSLAYOUTIOMODE_RW; 5415 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode, 5416 0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op, 5417 dp->nfsdi_deviceid, recallp); 5418 NFSUNLOCKCLSTATE(); 5419 NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode); 5420 } else { 5421 NFSUNLOCKCLSTATE(); 5422 free(recallp, M_NFSLAYRECALL); 5423 } 5424 5425 /* And shut the TCP connection down. */ 5426 nfscl_cancelreqs(dsp); 5427 } 5428 5429 /* 5430 * Cancel all RPCs for this "dsp" by closing the connection. 5431 * Also, mark the session as defunct. 5432 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and 5433 * cannot be shut down. 5434 */ 5435 void 5436 nfscl_cancelreqs(struct nfsclds *dsp) 5437 { 5438 struct __rpc_client *cl; 5439 static int non_event; 5440 5441 NFSLOCKDS(dsp); 5442 if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 && 5443 dsp->nfsclds_sockp != NULL && 5444 dsp->nfsclds_sockp->nr_client != NULL) { 5445 dsp->nfsclds_flags |= NFSCLDS_CLOSED; 5446 cl = dsp->nfsclds_sockp->nr_client; 5447 dsp->nfsclds_sess.nfsess_defunct = 1; 5448 NFSUNLOCKDS(dsp); 5449 CLNT_CLOSE(cl); 5450 /* 5451 * This 1sec sleep is done to reduce the number of reconnect 5452 * attempts made on the DS while it has failed. 5453 */ 5454 tsleep(&non_event, PVFS, "ndscls", hz); 5455 return; 5456 } 5457 NFSUNLOCKDS(dsp); 5458 } 5459 5460 /* 5461 * Dereference a layout. 5462 */ 5463 void 5464 nfscl_rellayout(struct nfscllayout *lyp, int exclocked) 5465 { 5466 5467 NFSLOCKCLSTATE(); 5468 if (exclocked != 0) 5469 nfsv4_unlock(&lyp->nfsly_lock, 0); 5470 else 5471 nfsv4_relref(&lyp->nfsly_lock); 5472 NFSUNLOCKCLSTATE(); 5473 } 5474 5475 /* 5476 * Search for a devinfo by deviceid. If one is found, return it after 5477 * acquiring a reference count on it. 5478 */ 5479 struct nfscldevinfo * 5480 nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid, 5481 struct nfscldevinfo *dip) 5482 { 5483 5484 NFSLOCKCLSTATE(); 5485 if (dip == NULL) 5486 dip = nfscl_finddevinfo(clp, deviceid); 5487 if (dip != NULL) 5488 dip->nfsdi_refcnt++; 5489 NFSUNLOCKCLSTATE(); 5490 return (dip); 5491 } 5492 5493 /* 5494 * Dereference a devinfo structure. 5495 */ 5496 static void 5497 nfscl_reldevinfo_locked(struct nfscldevinfo *dip) 5498 { 5499 5500 dip->nfsdi_refcnt--; 5501 if (dip->nfsdi_refcnt == 0) 5502 wakeup(&dip->nfsdi_refcnt); 5503 } 5504 5505 /* 5506 * Dereference a devinfo structure. 5507 */ 5508 void 5509 nfscl_reldevinfo(struct nfscldevinfo *dip) 5510 { 5511 5512 NFSLOCKCLSTATE(); 5513 nfscl_reldevinfo_locked(dip); 5514 NFSUNLOCKCLSTATE(); 5515 } 5516 5517 /* 5518 * Find a layout for this file handle. Return NULL upon failure. 5519 */ 5520 static struct nfscllayout * 5521 nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) 5522 { 5523 struct nfscllayout *lyp; 5524 5525 LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash) 5526 if (lyp->nfsly_fhlen == fhlen && 5527 !NFSBCMP(lyp->nfsly_fh, fhp, fhlen)) 5528 break; 5529 return (lyp); 5530 } 5531 5532 /* 5533 * Find a devinfo for this deviceid. Return NULL upon failure. 
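 * (Like nfscl_findlayout(), this assumes the caller holds the client
 * state mutex.)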
5534 */ 5535 static struct nfscldevinfo * 5536 nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid) 5537 { 5538 struct nfscldevinfo *dip; 5539 5540 LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list) 5541 if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID) 5542 == 0) 5543 break; 5544 return (dip); 5545 } 5546 5547 /* 5548 * Merge the new file layout list into the main one, maintaining it in 5549 * increasing offset order. 5550 */ 5551 static void 5552 nfscl_mergeflayouts(struct nfsclflayouthead *fhlp, 5553 struct nfsclflayouthead *newfhlp) 5554 { 5555 struct nfsclflayout *flp, *nflp, *prevflp, *tflp; 5556 5557 flp = LIST_FIRST(fhlp); 5558 prevflp = NULL; 5559 LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) { 5560 while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) { 5561 prevflp = flp; 5562 flp = LIST_NEXT(flp, nfsfl_list); 5563 } 5564 if (prevflp == NULL) 5565 LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list); 5566 else 5567 LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list); 5568 prevflp = nflp; 5569 } 5570 } 5571 5572 /* 5573 * Add this nfscldevinfo to the client, if it doesn't already exist. 5574 * This function consumes the structure pointed at by dip, if not NULL. 5575 */ 5576 int 5577 nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind, 5578 struct nfsclflayout *flp) 5579 { 5580 struct nfsclclient *clp; 5581 struct nfscldevinfo *tdip; 5582 uint8_t *dev; 5583 5584 NFSLOCKCLSTATE(); 5585 clp = nmp->nm_clp; 5586 if (clp == NULL) { 5587 NFSUNLOCKCLSTATE(); 5588 if (dip != NULL) 5589 free(dip, M_NFSDEVINFO); 5590 return (ENODEV); 5591 } 5592 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) 5593 dev = flp->nfsfl_dev; 5594 else 5595 dev = flp->nfsfl_ffm[ind].dev; 5596 tdip = nfscl_finddevinfo(clp, dev); 5597 if (tdip != NULL) { 5598 tdip->nfsdi_layoutrefs++; 5599 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) 5600 flp->nfsfl_devp = tdip; 5601 else 5602 flp->nfsfl_ffm[ind].devp = tdip; 5603 nfscl_reldevinfo_locked(tdip); 5604 NFSUNLOCKCLSTATE(); 5605 if (dip != NULL) 5606 free(dip, M_NFSDEVINFO); 5607 return (0); 5608 } 5609 if (dip != NULL) { 5610 LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list); 5611 dip->nfsdi_layoutrefs = 1; 5612 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) 5613 flp->nfsfl_devp = dip; 5614 else 5615 flp->nfsfl_ffm[ind].devp = dip; 5616 } 5617 NFSUNLOCKCLSTATE(); 5618 if (dip == NULL) 5619 return (ENODEV); 5620 return (0); 5621 } 5622 5623 /* 5624 * Free up a layout structure and associated file layout structure(s). 5625 */ 5626 void 5627 nfscl_freelayout(struct nfscllayout *layp) 5628 { 5629 struct nfsclflayout *flp, *nflp; 5630 struct nfsclrecalllayout *rp, *nrp; 5631 5632 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) { 5633 LIST_REMOVE(flp, nfsfl_list); 5634 nfscl_freeflayout(flp); 5635 } 5636 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) { 5637 LIST_REMOVE(flp, nfsfl_list); 5638 nfscl_freeflayout(flp); 5639 } 5640 LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) { 5641 LIST_REMOVE(rp, nfsrecly_list); 5642 free(rp, M_NFSLAYRECALL); 5643 } 5644 nfscl_layoutcnt--; 5645 nfsstatsv1.cllayouts--; 5646 free(layp, M_NFSLAYOUT); 5647 } 5648 5649 /* 5650 * Free up a file layout structure. 
5651  */
5652 void
5653 nfscl_freeflayout(struct nfsclflayout *flp)
5654 {
5655 	int i, j;
5656 
5657 	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
5658 		for (i = 0; i < flp->nfsfl_fhcnt; i++)
5659 			free(flp->nfsfl_fh[i], M_NFSFH);
5660 		if (flp->nfsfl_devp != NULL)
5661 			flp->nfsfl_devp->nfsdi_layoutrefs--;
5662 	}
5663 	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
5664 		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
5665 			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
5666 				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
5667 			if (flp->nfsfl_ffm[i].devp != NULL)
5668 				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
5669 		}
5670 	free(flp, M_NFSFLAYOUT);
5671 }
5672 
5673 /*
5674  * Free up a file layout devinfo structure.
5675  */
5676 void
5677 nfscl_freedevinfo(struct nfscldevinfo *dip)
5678 {
5679 
5680 	free(dip, M_NFSDEVINFO);
5681 }
5682 
5683 /*
5684  * Mark any layouts that match as recalled.
5685  */
5686 static int
5687 nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
5688     uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
5689     char *devid, struct nfsclrecalllayout *recallp)
5690 {
5691 	struct nfsclrecalllayout *rp, *orp;
5692 
5693 	recallp->nfsrecly_recalltype = recalltype;
5694 	recallp->nfsrecly_iomode = iomode;
5695 	recallp->nfsrecly_stateseqid = stateseqid;
5696 	recallp->nfsrecly_off = off;
5697 	recallp->nfsrecly_len = len;
5698 	recallp->nfsrecly_stat = stat;
5699 	recallp->nfsrecly_op = op;
5700 	if (devid != NULL)
5701 		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
5702 	/*
5703 	 * Order the list as file returns first, followed by fsid and all
5704 	 * returns, both in increasing stateseqid order.
5705 	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
5706 	 * (I'm not sure this is correct because I find RFC5661 confusing
5707 	 * on this, but hopefully it will work ok.)
5708 	 */
5709 	orp = NULL;
5710 	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
5711 		orp = rp;
5712 		if ((recalltype == NFSLAYOUTRETURN_FILE &&
5713 		    (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
5714 		    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
5715 		    (recalltype != NFSLAYOUTRETURN_FILE &&
5716 		    rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
5717 		    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
5718 			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
5719 			break;
5720 		}
5721 
5722 		/*
5723 		 * Put any error return on all the file returns that will
5724 		 * precede this one.
5725 		 */
5726 		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
5727 		    stat != 0 && rp->nfsrecly_stat == 0) {
5728 			rp->nfsrecly_stat = stat;
5729 			rp->nfsrecly_op = op;
5730 			if (devid != NULL)
5731 				NFSBCOPY(devid, rp->nfsrecly_devid,
5732 				    NFSX_V4DEVICEID);
5733 		}
5734 	}
5735 	if (rp == NULL) {
5736 		if (orp == NULL)
5737 			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
5738 			    nfsrecly_list);
5739 		else
5740 			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
5741 	}
5742 	lyp->nfsly_flags |= NFSLY_RECALL;
5743 	wakeup(lyp->nfsly_clp);
5744 	return (0);
5745 }
5746 
5747 /*
5748  * Compare the two seqids for ordering. The trick is that the seqids can
5749  * wrap around from 0xffffffff->0, so check for the cases where one
5750  * has wrapped around.
5751  * Return 1 if seqid1 comes before seqid2, 0 otherwise.
5752  */
5753 static int
5754 nfscl_seq(uint32_t seqid1, uint32_t seqid2)
5755 {
5756 
5757 	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
5758 		/* seqid2 has wrapped around. */
5759 		return (0);
5760 	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
5761 		/* seqid1 has wrapped around.
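	 * (For example, seqid1 == 0xfffffffe and seqid2 == 1: the counter
	 * wrapped between them, so seqid1 is treated as the earlier one
	 * and 1 is returned.)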
*/ 5762 return (1); 5763 if (seqid1 <= seqid2) 5764 return (1); 5765 return (0); 5766 } 5767 5768 /* 5769 * Do a layout return for each of the recalls. 5770 */ 5771 static void 5772 nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp, 5773 struct ucred *cred, NFSPROC_T *p) 5774 { 5775 struct nfsclrecalllayout *rp; 5776 nfsv4stateid_t stateid; 5777 int layouttype; 5778 5779 NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER); 5780 stateid.seqid = lyp->nfsly_stateid.seqid; 5781 if ((lyp->nfsly_flags & NFSLY_FILES) != 0) 5782 layouttype = NFSLAYOUT_NFSV4_1_FILES; 5783 else 5784 layouttype = NFSLAYOUT_FLEXFILE; 5785 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5786 (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh, 5787 lyp->nfsly_fhlen, 0, layouttype, 5788 rp->nfsrecly_iomode, rp->nfsrecly_recalltype, 5789 rp->nfsrecly_off, rp->nfsrecly_len, 5790 &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op, 5791 rp->nfsrecly_devid); 5792 } 5793 } 5794 5795 /* 5796 * Do the layout commit for a file layout. 5797 */ 5798 static void 5799 nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp, 5800 struct ucred *cred, NFSPROC_T *p) 5801 { 5802 struct nfsclflayout *flp; 5803 uint64_t len; 5804 int error, layouttype; 5805 5806 if ((lyp->nfsly_flags & NFSLY_FILES) != 0) 5807 layouttype = NFSLAYOUT_NFSV4_1_FILES; 5808 else 5809 layouttype = NFSLAYOUT_FLEXFILE; 5810 LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) { 5811 if (layouttype == NFSLAYOUT_FLEXFILE && 5812 (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) { 5813 NFSCL_DEBUG(4, "Flex file: no layoutcommit\n"); 5814 /* If not supported, don't bother doing it. */ 5815 NFSLOCKMNT(nmp); 5816 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; 5817 NFSUNLOCKMNT(nmp); 5818 break; 5819 } else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) { 5820 len = flp->nfsfl_end - flp->nfsfl_off; 5821 error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh, 5822 lyp->nfsly_fhlen, 0, flp->nfsfl_off, len, 5823 lyp->nfsly_lastbyte, &lyp->nfsly_stateid, 5824 layouttype, cred, p); 5825 NFSCL_DEBUG(4, "layoutcommit err=%d\n", error); 5826 if (error == NFSERR_NOTSUPP) { 5827 /* If not supported, don't bother doing it. */ 5828 NFSLOCKMNT(nmp); 5829 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; 5830 NFSUNLOCKMNT(nmp); 5831 break; 5832 } 5833 } 5834 } 5835 } 5836 5837 /* 5838 * Commit all layouts for a file (vnode). 5839 */ 5840 int 5841 nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p) 5842 { 5843 struct nfsclclient *clp; 5844 struct nfscllayout *lyp; 5845 struct nfsnode *np = VTONFS(vp); 5846 mount_t mp; 5847 struct nfsmount *nmp; 5848 5849 mp = vp->v_mount; 5850 nmp = VFSTONFS(mp); 5851 if (NFSHASNOLAYOUTCOMMIT(nmp)) 5852 return (0); 5853 NFSLOCKCLSTATE(); 5854 clp = nmp->nm_clp; 5855 if (clp == NULL) { 5856 NFSUNLOCKCLSTATE(); 5857 return (EPERM); 5858 } 5859 lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5860 if (lyp == NULL) { 5861 NFSUNLOCKCLSTATE(); 5862 return (EPERM); 5863 } 5864 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 5865 if (NFSCL_FORCEDISM(mp)) { 5866 NFSUNLOCKCLSTATE(); 5867 return (EPERM); 5868 } 5869 tryagain: 5870 if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 5871 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 5872 NFSUNLOCKCLSTATE(); 5873 NFSCL_DEBUG(4, "do layoutcommit2\n"); 5874 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p); 5875 NFSLOCKCLSTATE(); 5876 goto tryagain; 5877 } 5878 nfsv4_relref(&lyp->nfsly_lock); 5879 NFSUNLOCKCLSTATE(); 5880 return (0); 5881 } 5882
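/*
 * Note on the "tryagain" loop above: NFSLY_WRITTEN is set again when data
 * is written through the layout while the LayoutCommit RPC is in progress
 * (the client state mutex is dropped around the RPC), so the loop simply
 * performs another commit pass until the flag stays clear.
 */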