/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
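/*
 * A rough map of the data structures manipulated below (an explanatory
 * sketch added for clarity, using the list head names that appear in
 * this file; the structure definitions in the NFS client headers are
 * authoritative):
 *
 *	nfsclclient (one per clientid/mount)
 *	  +-> nfsc_owner: nfsclowner (OpenOwner, keyed on the owner string)
 *	  |     +-> nfsow_open: nfsclopen (one per file handle)
 *	  |           +-> nfso_lock: nfscllockowner (LockOwner)
 *	  |                 +-> nfsl_lock: nfscllock (byte range)
 *	  +-> nfsc_deleg / nfsc_deleghash: nfscldeleg (delegation), which
 *	      carries its own nfsdl_owner and nfsdl_lock lists for state
 *	      issued locally against the delegation
 */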

#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static bool nfscl_checkown(struct nfsclowner *, struct nfsclopen *, uint8_t *,
    uint8_t *, struct nfscllockowner **, struct nfsclopen **,
    struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
    NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t, struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t, struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t, u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t, u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
	(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
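/*
 * Illustrative calling pattern for nfscl_open() (a sketch only, not
 * copied from an actual caller; the real callers are in the vnode op
 * code):
 *
 *	error = nfscl_open(vp, nfhp, fhlen, amode, 1, cred, p, &owp,
 *	    &op, &newone, &ret, 1);
 *	if (error == 0 && ret == NFSCLOPEN_DOOPEN)
 *		... do the Open RPC against the server ...
 *	...
 *	nfscl_openrelease(nmp, op, error, candelete);
 *
 * On success the OpenOwner is returned locked (shared for the single
 * open_owner case, exclusive otherwise), so one of the release
 * functions further below must always be called afterwards.
 */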

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
		nop = malloc(sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
	if (ret != 0) {
		free(nowp, M_NFSCLOWNER);
		if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	/* For NFSv4.1/4.2 and this option, use a single open_owner. */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
		nfscl_filllockowner(NULL, own, F_POSIX);
	else
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    cred, newonep);

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock.  The close operations still use an
	 * exclusive lock for this case.
	 */
	if (lockit != 0) {
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
			/*
			 * Get a shared lock on the OpenOwner, but first
			 * wait for any pending exclusive lock, so that the
			 * exclusive locker gets priority.
			 */
			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
			nfsv4_getref(&owp->nfsow_rwlock, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} else
			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	}
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	return (0);
}

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
			} else {
				nfsstatsv1.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}

/*
 * Called to find/add a delegation to a client.
 */
int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
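/*
 * Example of the delegation lookup idiom used throughout this file
 * (a sketch; nfscl_getbytelock() and nfscl_checkwritelocked() below
 * use exactly this pattern):
 *
 *	NFSLOCKCLSTATE();
 *	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
 *	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE) != 0)
 *		... a write delegation covers this file ...
 *	NFSUNLOCKCLSTATE();
 */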
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		free(dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * A delegation already exists. If a new one was passed
		 * in, get rid of it.
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			free(dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL, *top;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error;
	bool done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
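/*
 * To summarize the stateid preference order implemented above (an
 * explanatory note, added): a delegation stateid when one covers the
 * file and mode, then a lock stateid for the calling process (never
 * for a DS), then any usable open stateid, and otherwise the special
 * all-zeros stateid along with an error return.
 */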
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
		    mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			stateidp->seqid = lp->nfsl_stateid.seqid;
			stateidp->other[0] = lp->nfsl_stateid.other[0];
			stateidp->other[1] = lp->nfsl_stateid.other[1];
			stateidp->other[2] = lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		top = NULL;
		done = false;
		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
					if (top == NULL && (op->nfso_mode &
					    NFSV4OPEN_ACCESSWRITE) != 0 &&
					    (mode & NFSV4OPEN_ACCESSREAD) != 0)
						top = op;
					if ((mode & op->nfso_mode) == mode) {
						done = true;
						break;
					}
				}
			}
			if (done)
				break;
		}
		if (!done) {
			NFSCL_DEBUG(2, "openmode top=%p\n", top);
			if (top == NULL || NFSHASOPENMODE(nmp)) {
				NFSUNLOCKCLSTATE();
				return (ENOENT);
			} else
				op = top;
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
    struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	bool keep_looping;

	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = true;
	/* Search the client list */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		/* and look for the correct open */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode)
				keep_looping = nfscl_checkown(owp, op, openown,
				    lockown, lpp, &rop, &rop2);
			if (!keep_looping)
				break;
		}
		if (!keep_looping)
			break;
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/* Check for an owner match. */
static bool
nfscl_checkown(struct nfsclowner *owp, struct nfsclopen *op, uint8_t *openown,
    uint8_t *lockown, struct nfscllockowner **lpp, struct nfsclopen **ropp,
    struct nfsclopen **ropp2)
{
	struct nfscllockowner *lp;
	bool keep_looping;

	keep_looping = true;
	if (lpp != NULL) {
		/* Now look for a matching lockowner. */
		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, lockown,
			    NFSV4CL_LOCKNAMELEN)) {
				*lpp = lp;
				*ropp = op;
				return (false);
			}
		}
	}
	if (*ropp == NULL && !NFSBCMP(owp->nfsow_owner, openown,
	    NFSV4CL_LOCKNAMELEN)) {
		*ropp = op;
		if (lpp == NULL)
			keep_looping = false;
	}
	if (*ropp2 == NULL)
		*ropp2 = op;
	return (keep_looping);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
void
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (unlocked == 0) {
		if (NFSHASONEOPENOWN(nmp))
			nfsv4_relref(&owp->nfsow_rwlock);
		else
			nfscl_lockunlock(&owp->nfsow_rwlock);
	}
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
void
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
    int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
	else
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}
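/*
 * Note on the error/candelete handling above (an explanatory note,
 * added): when the Open RPC failed and nothing else references the
 * open (nfso_opencnt == 0), the open structure is discarded here,
 * since the server never confirmed it.
 */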

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * The start_renewthread argument tells nfscl_getcl() to start a renew
 * thread if this creates a new clp.
 * It always returns *clpp with a reference count on it, unless
 * returning an error.
 */
int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    int start_renewthread, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		newclp = malloc(
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		if (start_renewthread != 0)
			nfscl_start_renewthread(clp);
	} else {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0) {
		/*
		 * Call nfsv4_lock() with "iwantlock == 0" so that it will
		 * wait for a pending exclusive lock request. This gives the
		 * exclusive lock request priority over this shared lock
		 * request.
		 * An exclusive lock on nfsc_lock is used mainly for server
		 * crash recoveries.
		 */
		nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	}
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
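/*
 * Sketch of the nfsc_lock protocol implemented by nfscl_getcl():
 * callers normally end up with only a shared reference on
 * clp->nfsc_lock, dropped later via nfscl_clrelease() or
 * nfscl_clientrelease(); the exclusive lock is held just long enough
 * to do the SetClientID/SetClientIDConfirm and is otherwise only
 * taken for server crash recovery and unmount.
 */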
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}

/*
 * Get a reference to a clientid and return it, if valid.
 */
struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Called when wanting to lock a byte region.
 */
int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlp = malloc(
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	otherlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
	}
	if (error) {
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, openown, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(&clp->nfsc_owner,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
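/*
 * Illustrative use of the byte range locking interface (a sketch,
 * not an actual caller; the advisory lock vnode op is the real one):
 *
 *	error = nfscl_getbytelock(vp, off, len, F_WRLCK, cred, p, NULL,
 *	    0, id, flags, NULL, NULL, &lp, &newone, &donelocally);
 *	if (error == 0 && !donelocally)
 *		... do the Lock RPC, then nfscl_lockrelease(lp, ...) ...
 */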
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serialize modifications to the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		free(nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (otherlop)
		free(otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}

/*
 * Called to unlock a byte range, for LockU.
 */
int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		other_lop = malloc(
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
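/*
 * When nfscl_relbytelock() returns with *dorpcp set, the caller is
 * expected to perform the LockU RPC and afterwards clear the
 * nfsl_inprog markers set above by calling nfscl_releasealllocks()
 * below (an explanatory note, added).
 */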
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh,
			    op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						fnd = 1;
						break;
					}
				}
				if (fnd)
					break;
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serialize modifications to the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (other_lop)
		free(other_lop, M_NFSCLLOCK);
	return (0);
}

/*
 * Release all lockowners marked in progress for this process and file.
 */
void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh,
			    op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(
						    &lp->nfsl_rwlock);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh,
			    op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (lp != NULL) {
					LIST_FOREACH(lop, &lp->nfsl_lock,
					    nfslo_list) {
						if (lop->nfslo_first >= end)
							break;
						if (lop->nfslo_end <= off)
							continue;
						if (lop->nfslo_type ==
						    F_WRLCK) {
							nfscl_clrelease(clp);
							NFSUNLOCKCLSTATE();
							return (1);
						}
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Release a byte range lock owner structure.
 */
void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Free up an open structure and any associated byte range lock structures.
 */
void
nfscl_freeopen(struct nfsclopen *op, int local)
{

	LIST_REMOVE(op, nfso_list);
	nfscl_freealllocks(&op->nfso_lock, local);
	free(op, M_NFSCLOPEN);
	if (local)
		nfsstatsv1.cllocalopens--;
	else
		nfsstatsv1.clopens--;
}

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
{
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
			panic("nfscllckw");
		nfscl_freelockowner(lp, local);
	}
}

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {
			mustdelete = 1;
			break;
		}
	}

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
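/*
 * Worked example for the range computation above (added for clarity):
 * with fl->l_whence == SEEK_END, fl->l_start == -4 and fl->l_len == 0
 * on a 100 byte file, off becomes 96 and end becomes NFS64BITSSET,
 * i.e. the check covers everything from 4 bytes before EOF onwards.
 */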
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		dp = NULL;
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
		if (error) {
			mustdelete = 1;
			if (dp != NULL) {
				free(dp, M_NFSCLDELEG);
				dp = NULL;
			}
		}
		if (dp != NULL)
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);
	}

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
	if (mustdelete)
		nfscl_freeopen(op, 0);
	return (mustdelete);
}

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{

	LIST_REMOVE(owp, nfsow_list);
	free(owp, M_NFSCLOWNER);
	if (local)
		nfsstatsv1.cllocalopenowners--;
	else
		nfsstatsv1.clopenowners--;
}

/*
 * Free up a byte range lock owner structure.
 */
void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
	struct nfscllock *lop, *nlop;

	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	}
	free(lp, M_NFSCLLOCKOWNER);
	if (local)
		nfsstatsv1.cllocallockowners--;
	else
		nfsstatsv1.cllockowners--;
}

/*
 * Free up a byte range lock structure.
 */
void
nfscl_freelock(struct nfscllock *lop, int local)
{

	LIST_REMOVE(lop, nfslo_list);
	free(lop, M_NFSCLLOCK);
	if (local)
		nfsstatsv1.cllocallocks--;
	else
		nfsstatsv1.cllocks--;
}

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1);
		}
		nfscl_freeopenowner(owp, 1);
	}
	nfscl_freealllocks(&dp->nfsdl_lock, 1);
}

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
{

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	free(dp, M_NFSCLDELEG);
	nfsstatsv1.cldelegates--;
	nfscl_delegcnt--;
}

/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;

	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);

	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
		nfscl_freedevinfo(dip);

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		owp = LIST_FIRST(&dp->nfsdl_owner);
		while (owp != NULL) {
			nowp = LIST_NEXT(owp, nfsow_list);
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfsclexp");
				LIST_FOREACH(towp, &clp->nfsc_owner,
				    nfsow_list) {
					if (!NFSBCMP(towp->nfsow_owner,
					    owp->nfsow_owner,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (towp != NULL) {
					/* Merge opens in */
					LIST_FOREACH(top, &towp->nfsow_open,
					    nfso_list) {
						if (top->nfso_fhlen ==
						    op->nfso_fhlen &&
						    !NFSBCMP(top->nfso_fh,
						    op->nfso_fh,
						    op->nfso_fhlen)) {
							top->nfso_mode |=
							    op->nfso_mode;
							top->nfso_opencnt +=
							    op->nfso_opencnt;
							break;
						}
					}
					if (top == NULL) {
						/*
						 * Just add the open to the
						 * owner list.
						 */
						LIST_REMOVE(op, nfso_list);
						op->nfso_own = towp;
						LIST_INSERT_HEAD(
						    &towp->nfsow_open, op,
						    nfso_list);
						nfsstatsv1.cllocalopens--;
						nfsstatsv1.clopens++;
					}
				} else {
					/*
					 * Just add the openowner to the
					 * client list.
					 */
					LIST_REMOVE(owp, nfsow_list);
					owp->nfsow_clp = clp;
					LIST_INSERT_HEAD(&clp->nfsc_owner,
					    owp, nfsow_list);
					nfsstatsv1.cllocalopenowners--;
					nfsstatsv1.clopenowners++;
					nfsstatsv1.cllocalopens--;
					nfsstatsv1.clopens++;
				}
			}
			owp = nowp;
		}
		if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
			printed = 1;
			printf("nfsv4 expired locks lost\n");
		}
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp);
		dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
		panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}

/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock &
				    NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
		}
		owp = nowp;
	}
}
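/*
 * Note: this is the "defunct" marking described in the comment at the
 * top of this file. OpenOwners that still have Opens (e.g. inherited
 * file descriptors not yet closed) are only flagged here; the renew
 * thread frees them later.
 */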
*/ 1871 if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0) 1872 return; 1873 1874 /* Move the lock owner over to the local list. */ 1875 if (mylfhp == NULL) { 1876 mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP, 1877 M_NOWAIT); 1878 if (mylfhp == NULL) 1879 return; 1880 mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen; 1881 NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh, 1882 mylfhp->nfslfh_len); 1883 LIST_INIT(&mylfhp->nfslfh_lock); 1884 SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list); 1885 } 1886 LIST_REMOVE(lp, nfsl_list); 1887 LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list); 1888 } 1889 1890 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */ 1891 /* 1892 * Called from nfs umount to free up the clientid. 1893 */ 1894 void 1895 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p) 1896 { 1897 struct nfsclclient *clp; 1898 struct ucred *cred; 1899 int igotlock; 1900 1901 /* 1902 * For the case that matters, this is the thread that set 1903 * MNTK_UNMOUNTF, so it will see it set. The code that follows is 1904 * done to ensure that any thread executing nfscl_getcl() after 1905 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the 1906 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following 1907 * explanation, courtesy of Alan Cox. 1908 * What follows is a snippet from Alan Cox's email at: 1909 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw 1910 * 1911 * 1. Set MNTK_UNMOUNTF 1912 * 2. Acquire a standard FreeBSD mutex "m". 1913 * 3. Update some data structures. 1914 * 4. Release mutex "m". 1915 * 1916 * Then, other threads that acquire "m" after step 4 has occurred will 1917 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to 1918 * step 2 may or may not see MNTK_UNMOUNTF as set. 1919 */ 1920 NFSLOCKCLSTATE(); 1921 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) { 1922 fake_global++; 1923 NFSUNLOCKCLSTATE(); 1924 NFSLOCKCLSTATE(); 1925 } 1926 1927 clp = nmp->nm_clp; 1928 if (clp != NULL) { 1929 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0) 1930 panic("nfscl umount"); 1931 1932 /* 1933 * First, handshake with the nfscl renew thread, to terminate 1934 * it. 1935 */ 1936 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT; 1937 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD) 1938 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, 1939 "nfsclumnt", hz); 1940 1941 /* 1942 * Now, get the exclusive lock on the client state, so 1943 * that no uses of the state are still in progress. 1944 */ 1945 do { 1946 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 1947 NFSCLSTATEMUTEXPTR, NULL); 1948 } while (!igotlock); 1949 NFSUNLOCKCLSTATE(); 1950 1951 /* 1952 * Free up all the state. It will expire on the server, but 1953 * maybe we should do a SetClientId/SetClientIdConfirm so 1954 * the server throws it away? 1955 */ 1956 LIST_REMOVE(clp, nfsc_list); 1957 nfscl_delegreturnall(clp, p); 1958 cred = newnfs_getcred(); 1959 if (NFSHASNFSV4N(nmp)) { 1960 (void)nfsrpc_destroysession(nmp, clp, cred, p); 1961 (void)nfsrpc_destroyclient(nmp, clp, cred, p); 1962 } else 1963 (void)nfsrpc_setclient(nmp, clp, 0, NULL, cred, p); 1964 nfscl_cleanclient(clp); 1965 nmp->nm_clp = NULL; 1966 NFSFREECRED(cred); 1967 free(clp, M_NFSCLCLIENT); 1968 } else 1969 NFSUNLOCKCLSTATE(); 1970 } 1971 1972 /* 1973 * This function is called when a server replies with NFSERR_STALECLIENTID 1974 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists, 1975 * doing Opens and Locks with reclaim. 
If these fail, it deletes the 1976 * corresponding state. 1977 */ 1978 static void 1979 nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred, 1980 NFSPROC_T *p) 1981 { 1982 struct nfsclowner *owp, *nowp; 1983 struct nfsclopen *op, *nop; 1984 struct nfscllockowner *lp, *nlp; 1985 struct nfscllock *lop, *nlop; 1986 struct nfscldeleg *dp, *ndp, *tdp; 1987 struct nfsmount *nmp; 1988 struct ucred *tcred; 1989 struct nfsclopenhead extra_open; 1990 struct nfscldeleghead extra_deleg; 1991 struct nfsreq *rep; 1992 u_int64_t len; 1993 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode; 1994 int i, igotlock = 0, error, trycnt, firstlock; 1995 struct nfscllayout *lyp, *nlyp; 1996 1997 /* 1998 * First, lock the client structure, so everyone else will 1999 * block when trying to use state. 2000 */ 2001 NFSLOCKCLSTATE(); 2002 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2003 do { 2004 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2005 NFSCLSTATEMUTEXPTR, NULL); 2006 } while (!igotlock); 2007 NFSUNLOCKCLSTATE(); 2008 2009 nmp = clp->nfsc_nmp; 2010 if (nmp == NULL) 2011 panic("nfscl recover"); 2012 2013 /* 2014 * For now, just get rid of all layouts. There may be a need 2015 * to do LayoutCommit Ops with reclaim == true later. 2016 */ 2017 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) 2018 nfscl_freelayout(lyp); 2019 TAILQ_INIT(&clp->nfsc_layout); 2020 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++) 2021 LIST_INIT(&clp->nfsc_layouthash[i]); 2022 2023 trycnt = 5; 2024 tcred = NULL; 2025 do { 2026 error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p); 2027 } while ((error == NFSERR_STALECLIENTID || 2028 error == NFSERR_BADSESSION || 2029 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2030 if (error) { 2031 NFSLOCKCLSTATE(); 2032 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER | 2033 NFSCLFLAGS_RECVRINPROG); 2034 wakeup(&clp->nfsc_flags); 2035 nfsv4_unlock(&clp->nfsc_lock, 0); 2036 NFSUNLOCKCLSTATE(); 2037 return; 2038 } 2039 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2040 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2041 2042 /* 2043 * Mark requests already queued on the server, so that they don't 2044 * initiate another recovery cycle. Any requests already in the 2045 * queue that handle state information will have the old stale 2046 * clientid/stateid and will get a NFSERR_STALESTATEID, 2047 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server. 2048 * This will be translated to NFSERR_STALEDONTRECOVER when 2049 * R_DONTRECOVER is set. 2050 */ 2051 NFSLOCKREQ(); 2052 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) { 2053 if (rep->r_nmp == nmp) 2054 rep->r_flags |= R_DONTRECOVER; 2055 } 2056 NFSUNLOCKREQ(); 2057 2058 /* 2059 * If nfsrpc_setclient() returns *retokp == true, 2060 * no more recovery is needed. 2061 */ 2062 if (*retokp) 2063 goto out; 2064 2065 /* 2066 * Now, mark all delegations "need reclaim". 2067 */ 2068 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) 2069 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM; 2070 2071 TAILQ_INIT(&extra_deleg); 2072 LIST_INIT(&extra_open); 2073 /* 2074 * Now traverse the state lists, doing Open and Lock Reclaims. 
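 *
 * In outline, the traversal that follows is (pseudocode sketch only;
 * the real code is below):
 *
 *	for each openowner owp on clp->nfsc_owner:
 *		owp->nfsow_seqid = 0
 *		for each open op under owp:
 *			find a delegation marked NFSCLDL_NEEDRECLAIM that
 *			    matches op's file handle and mode, if any
 *			nfscl_tryopen(..., reclaim == 1) for op
 *			if that succeeded:
 *				nfscl_trylock(..., reclaim == 1) for each
 *				    byte range lock held under op
 *			else:
 *				free the open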
2075 */ 2076 tcred = newnfs_getcred(); 2077 owp = LIST_FIRST(&clp->nfsc_owner); 2078 while (owp != NULL) { 2079 nowp = LIST_NEXT(owp, nfsow_list); 2080 owp->nfsow_seqid = 0; 2081 op = LIST_FIRST(&owp->nfsow_open); 2082 while (op != NULL) { 2083 nop = LIST_NEXT(op, nfso_list); 2084 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) { 2085 /* Search for a delegation to reclaim with the open */ 2086 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2087 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) 2088 continue; 2089 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { 2090 mode = NFSV4OPEN_ACCESSWRITE; 2091 delegtype = NFSV4OPEN_DELEGATEWRITE; 2092 } else { 2093 mode = NFSV4OPEN_ACCESSREAD; 2094 delegtype = NFSV4OPEN_DELEGATEREAD; 2095 } 2096 if ((op->nfso_mode & mode) == mode && 2097 op->nfso_fhlen == dp->nfsdl_fhlen && 2098 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen)) 2099 break; 2100 } 2101 ndp = dp; 2102 if (dp == NULL) 2103 delegtype = NFSV4OPEN_DELEGATENONE; 2104 newnfs_copycred(&op->nfso_cred, tcred); 2105 error = nfscl_tryopen(nmp, NULL, op->nfso_fh, 2106 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen, 2107 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype, 2108 tcred, p); 2109 if (!error) { 2110 /* Handle any replied delegation */ 2111 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE) 2112 || NFSMNT_RDONLY(nmp->nm_mountp))) { 2113 if ((ndp->nfsdl_flags & NFSCLDL_WRITE)) 2114 mode = NFSV4OPEN_ACCESSWRITE; 2115 else 2116 mode = NFSV4OPEN_ACCESSREAD; 2117 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2118 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) 2119 continue; 2120 if ((op->nfso_mode & mode) == mode && 2121 op->nfso_fhlen == dp->nfsdl_fhlen && 2122 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, 2123 op->nfso_fhlen)) { 2124 dp->nfsdl_stateid = ndp->nfsdl_stateid; 2125 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit; 2126 dp->nfsdl_ace = ndp->nfsdl_ace; 2127 dp->nfsdl_change = ndp->nfsdl_change; 2128 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; 2129 if ((ndp->nfsdl_flags & NFSCLDL_RECALL)) 2130 dp->nfsdl_flags |= NFSCLDL_RECALL; 2131 free(ndp, M_NFSCLDELEG); 2132 ndp = NULL; 2133 break; 2134 } 2135 } 2136 } 2137 if (ndp != NULL) 2138 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list); 2139 2140 /* and reclaim all byte range locks */ 2141 lp = LIST_FIRST(&op->nfso_lock); 2142 while (lp != NULL) { 2143 nlp = LIST_NEXT(lp, nfsl_list); 2144 lp->nfsl_seqid = 0; 2145 firstlock = 1; 2146 lop = LIST_FIRST(&lp->nfsl_lock); 2147 while (lop != NULL) { 2148 nlop = LIST_NEXT(lop, nfslo_list); 2149 if (lop->nfslo_end == NFS64BITSSET) 2150 len = NFS64BITSSET; 2151 else 2152 len = lop->nfslo_end - lop->nfslo_first; 2153 error = nfscl_trylock(nmp, NULL, 2154 op->nfso_fh, op->nfso_fhlen, lp, 2155 firstlock, 1, lop->nfslo_first, len, 2156 lop->nfslo_type, tcred, p); 2157 if (error != 0) 2158 nfscl_freelock(lop, 0); 2159 else 2160 firstlock = 0; 2161 lop = nlop; 2162 } 2163 /* If no locks, but a lockowner, just delete it. */ 2164 if (LIST_EMPTY(&lp->nfsl_lock)) 2165 nfscl_freelockowner(lp, 0); 2166 lp = nlp; 2167 } 2168 } 2169 } 2170 if (error != 0 && error != NFSERR_BADSESSION) 2171 nfscl_freeopen(op, 0); 2172 op = nop; 2173 } 2174 owp = nowp; 2175 } 2176 2177 /* 2178 * Now, try to get any delegations not yet reclaimed by cobbling 2179 * together an appropriate open.
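 *
 * A sketch of the cobbled open's two key steps (both statements appear
 * in full below; only the fabricated openowner name is special):
 *
 *	NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
 *	error = nfscl_tryopen(nmp, NULL, nop->nfso_fh, nop->nfso_fhlen,
 *	    nop->nfso_fh, nop->nfso_fhlen, nop->nfso_mode, nop, NULL, 0,
 *	    &tdp, 1, delegtype, tcred, p);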
2180 */ 2181 nowp = NULL; 2182 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2183 while (dp != NULL) { 2184 ndp = TAILQ_NEXT(dp, nfsdl_list); 2185 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) { 2186 if (nowp == NULL) { 2187 nowp = malloc( 2188 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK); 2189 /* 2190 * Name must be as long as the largest possible 2191 * NFSV4CL_LOCKNAMELEN (12 for now). 2192 */ 2193 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner, 2194 NFSV4CL_LOCKNAMELEN); 2195 LIST_INIT(&nowp->nfsow_open); 2196 nowp->nfsow_clp = clp; 2197 nowp->nfsow_seqid = 0; 2198 nowp->nfsow_defunct = 0; 2199 nfscl_lockinit(&nowp->nfsow_rwlock); 2200 } 2201 nop = NULL; 2202 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) { 2203 nop = malloc(sizeof (struct nfsclopen) + 2204 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK); 2205 nop->nfso_own = nowp; 2206 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { 2207 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE; 2208 delegtype = NFSV4OPEN_DELEGATEWRITE; 2209 } else { 2210 nop->nfso_mode = NFSV4OPEN_ACCESSREAD; 2211 delegtype = NFSV4OPEN_DELEGATEREAD; 2212 } 2213 nop->nfso_opencnt = 0; 2214 nop->nfso_posixlock = 1; 2215 nop->nfso_fhlen = dp->nfsdl_fhlen; 2216 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen); 2217 LIST_INIT(&nop->nfso_lock); 2218 nop->nfso_stateid.seqid = 0; 2219 nop->nfso_stateid.other[0] = 0; 2220 nop->nfso_stateid.other[1] = 0; 2221 nop->nfso_stateid.other[2] = 0; 2222 newnfs_copycred(&dp->nfsdl_cred, tcred); 2223 newnfs_copyincred(tcred, &nop->nfso_cred); 2224 tdp = NULL; 2225 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh, 2226 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen, 2227 nop->nfso_mode, nop, NULL, 0, &tdp, 1, 2228 delegtype, tcred, p); 2229 if (tdp != NULL) { 2230 if ((tdp->nfsdl_flags & NFSCLDL_WRITE)) 2231 mode = NFSV4OPEN_ACCESSWRITE; 2232 else 2233 mode = NFSV4OPEN_ACCESSREAD; 2234 if ((nop->nfso_mode & mode) == mode && 2235 nop->nfso_fhlen == tdp->nfsdl_fhlen && 2236 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh, 2237 nop->nfso_fhlen)) { 2238 dp->nfsdl_stateid = tdp->nfsdl_stateid; 2239 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit; 2240 dp->nfsdl_ace = tdp->nfsdl_ace; 2241 dp->nfsdl_change = tdp->nfsdl_change; 2242 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; 2243 if ((tdp->nfsdl_flags & NFSCLDL_RECALL)) 2244 dp->nfsdl_flags |= NFSCLDL_RECALL; 2245 free(tdp, M_NFSCLDELEG); 2246 } else { 2247 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list); 2248 } 2249 } 2250 } 2251 if (error) { 2252 if (nop != NULL) 2253 free(nop, M_NFSCLOPEN); 2254 /* 2255 * Couldn't reclaim it, so throw the state 2256 * away. Ouch!! 2257 */ 2258 nfscl_cleandeleg(dp); 2259 nfscl_freedeleg(&clp->nfsc_deleg, dp); 2260 } else { 2261 LIST_INSERT_HEAD(&extra_open, nop, nfso_list); 2262 } 2263 } 2264 dp = ndp; 2265 } 2266 2267 /* 2268 * Now, get rid of extra Opens and Delegations.
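 *
 * "Extra" state is state acquired during reclaim that has no local
 * user. Each item is handed back to the server, napping and retrying
 * while the server is still in its grace period; both loops below use
 * this pattern:
 *
 *	do {
 *		error = nfscl_tryclose(op, tcred, nmp, p);
 *		if (error == NFSERR_GRACE)
 *			(void) nfs_catnap(PZERO, error, "nfsexcls");
 *	} while (error == NFSERR_GRACE);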
2269 */ 2270 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { 2271 do { 2272 newnfs_copycred(&op->nfso_cred, tcred); 2273 error = nfscl_tryclose(op, tcred, nmp, p); 2274 if (error == NFSERR_GRACE) 2275 (void) nfs_catnap(PZERO, error, "nfsexcls"); 2276 } while (error == NFSERR_GRACE); 2277 LIST_REMOVE(op, nfso_list); 2278 free(op, M_NFSCLOPEN); 2279 } 2280 if (nowp != NULL) 2281 free(nowp, M_NFSCLOWNER); 2282 2283 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { 2284 do { 2285 newnfs_copycred(&dp->nfsdl_cred, tcred); 2286 error = nfscl_trydelegreturn(dp, tcred, nmp, p); 2287 if (error == NFSERR_GRACE) 2288 (void) nfs_catnap(PZERO, error, "nfsexdlg"); 2289 } while (error == NFSERR_GRACE); 2290 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); 2291 free(dp, M_NFSCLDELEG); 2292 } 2293 2294 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */ 2295 if (NFSHASNFSV4N(nmp)) 2296 (void)nfsrpc_reclaimcomplete(nmp, cred, p); 2297 2298 out: 2299 NFSLOCKCLSTATE(); 2300 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; 2301 wakeup(&clp->nfsc_flags); 2302 nfsv4_unlock(&clp->nfsc_lock, 0); 2303 NFSUNLOCKCLSTATE(); 2304 if (tcred != NULL) 2305 NFSFREECRED(tcred); 2306 } 2307 2308 /* 2309 * This function is called when a server replies with NFSERR_EXPIRED. 2310 * It deletes all state for the client and does a fresh SetClientId/confirm. 2311 * XXX Someday it should post a signal to the process(es) that hold the 2312 * state, so they know that lock state has been lost. 2313 */ 2314 int 2315 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) 2316 { 2317 struct nfsmount *nmp; 2318 struct ucred *cred; 2319 int igotlock = 0, error, trycnt; 2320 2321 /* 2322 * If the clientid has gone away or a new SetClientid has already 2323 * been done, just return ok. 2324 */ 2325 if (clp == NULL || clidrev != clp->nfsc_clientidrev) 2326 return (0); 2327 2328 /* 2329 * First, lock the client structure, so everyone else will 2330 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so 2331 * that only one thread does the work. 2332 */ 2333 NFSLOCKCLSTATE(); 2334 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; 2335 do { 2336 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2337 NFSCLSTATEMUTEXPTR, NULL); 2338 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); 2339 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { 2340 if (igotlock) 2341 nfsv4_unlock(&clp->nfsc_lock, 0); 2342 NFSUNLOCKCLSTATE(); 2343 return (0); 2344 } 2345 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2346 NFSUNLOCKCLSTATE(); 2347 2348 nmp = clp->nfsc_nmp; 2349 if (nmp == NULL) 2350 panic("nfscl expired"); 2351 cred = newnfs_getcred(); 2352 trycnt = 5; 2353 do { 2354 error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p); 2355 } while ((error == NFSERR_STALECLIENTID || 2356 error == NFSERR_BADSESSION || 2357 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2358 if (error) { 2359 NFSLOCKCLSTATE(); 2360 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2361 } else { 2362 /* 2363 * Expire the state for the client. 2364 */ 2365 nfscl_expireclient(clp, nmp, cred, p); 2366 NFSLOCKCLSTATE(); 2367 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2368 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2369 } 2370 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); 2371 wakeup(&clp->nfsc_flags); 2372 nfsv4_unlock(&clp->nfsc_lock, 0); 2373 NFSUNLOCKCLSTATE(); 2374 NFSFREECRED(cred); 2375 return (error); 2376 } 2377 2378 /* 2379 * This function inserts a lock in the list after insert_lop. 
2380 */ 2381 static void 2382 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, 2383 struct nfscllock *insert_lop, int local) 2384 { 2385 2386 if ((struct nfscllockowner *)insert_lop == lp) 2387 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); 2388 else 2389 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); 2390 if (local) 2391 nfsstatsv1.cllocallocks++; 2392 else 2393 nfsstatsv1.cllocks++; 2394 } 2395 2396 /* 2397 * This function updates the locking for a lock owner and given file. It 2398 * maintains a list of lock ranges ordered on increasing file offset that 2399 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 2400 * It always adds new_lop to the list and sometimes uses the one pointed 2401 * at by other_lopp. 2402 * Returns 1 if the locks were modified, 0 otherwise. 2403 */ 2404 static int 2405 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, 2406 struct nfscllock **other_lopp, int local) 2407 { 2408 struct nfscllock *new_lop = *new_lopp; 2409 struct nfscllock *lop, *tlop, *ilop; 2410 struct nfscllock *other_lop; 2411 int unlock = 0, modified = 0; 2412 u_int64_t tmp; 2413 2414 /* 2415 * Work down the list until the lock is merged. 2416 */ 2417 if (new_lop->nfslo_type == F_UNLCK) 2418 unlock = 1; 2419 ilop = (struct nfscllock *)lp; 2420 lop = LIST_FIRST(&lp->nfsl_lock); 2421 while (lop != NULL) { 2422 /* 2423 * Only check locks for this file that aren't before the start of 2424 * new lock's range. 2425 */ 2426 if (lop->nfslo_end >= new_lop->nfslo_first) { 2427 if (new_lop->nfslo_end < lop->nfslo_first) { 2428 /* 2429 * If the new lock ends before the start of the 2430 * current lock's range, no merge, just insert 2431 * the new lock. 2432 */ 2433 break; 2434 } 2435 if (new_lop->nfslo_type == lop->nfslo_type || 2436 (new_lop->nfslo_first <= lop->nfslo_first && 2437 new_lop->nfslo_end >= lop->nfslo_end)) { 2438 /* 2439 * This lock can be absorbed by the new lock/unlock. 2440 * This happens when it covers the entire range 2441 * of the old lock or is contiguous 2442 * with the old lock and is of the same type or an 2443 * unlock. 2444 */ 2445 if (new_lop->nfslo_type != lop->nfslo_type || 2446 new_lop->nfslo_first != lop->nfslo_first || 2447 new_lop->nfslo_end != lop->nfslo_end) 2448 modified = 1; 2449 if (lop->nfslo_first < new_lop->nfslo_first) 2450 new_lop->nfslo_first = lop->nfslo_first; 2451 if (lop->nfslo_end > new_lop->nfslo_end) 2452 new_lop->nfslo_end = lop->nfslo_end; 2453 tlop = lop; 2454 lop = LIST_NEXT(lop, nfslo_list); 2455 nfscl_freelock(tlop, local); 2456 continue; 2457 } 2458 2459 /* 2460 * All these cases are for contiguous locks that are not the 2461 * same type, so they can't be merged. 2462 */ 2463 if (new_lop->nfslo_first <= lop->nfslo_first) { 2464 /* 2465 * This case is where the new lock overlaps with the 2466 * first part of the old lock. Move the start of the 2467 * old lock to just past the end of the new lock. The 2468 * new lock will be inserted in front of the old, since 2469 * ilop hasn't been updated. (We are done now.) 2470 */ 2471 if (lop->nfslo_first != new_lop->nfslo_end) { 2472 lop->nfslo_first = new_lop->nfslo_end; 2473 modified = 1; 2474 } 2475 break; 2476 } 2477 if (new_lop->nfslo_end >= lop->nfslo_end) { 2478 /* 2479 * This case is where the new lock overlaps with the 2480 * end of the old lock's range. Move the old lock's 2481 * end to just before the new lock's first and insert 2482 * the new lock after the old lock. 
2483 * Might not be done yet, since the new lock could 2484 * overlap further locks with higher ranges. 2485 */ 2486 if (lop->nfslo_end != new_lop->nfslo_first) { 2487 lop->nfslo_end = new_lop->nfslo_first; 2488 modified = 1; 2489 } 2490 ilop = lop; 2491 lop = LIST_NEXT(lop, nfslo_list); 2492 continue; 2493 } 2494 /* 2495 * The final case is where the new lock's range is in the 2496 * middle of the current lock's and splits the current lock 2497 * up. Use *other_lopp to handle the second part of the 2498 * split old lock range. (We are done now.) 2499 * For unlock, we use new_lop as other_lop and tmp, since 2500 * other_lop and new_lop are the same for this case. 2501 * We noted the unlock case above, so we don't need 2502 * new_lop->nfslo_type any longer. 2503 */ 2504 tmp = new_lop->nfslo_first; 2505 if (unlock) { 2506 other_lop = new_lop; 2507 *new_lopp = NULL; 2508 } else { 2509 other_lop = *other_lopp; 2510 *other_lopp = NULL; 2511 } 2512 other_lop->nfslo_first = new_lop->nfslo_end; 2513 other_lop->nfslo_end = lop->nfslo_end; 2514 other_lop->nfslo_type = lop->nfslo_type; 2515 lop->nfslo_end = tmp; 2516 nfscl_insertlock(lp, other_lop, lop, local); 2517 ilop = lop; 2518 modified = 1; 2519 break; 2520 } 2521 ilop = lop; 2522 lop = LIST_NEXT(lop, nfslo_list); 2523 if (lop == NULL) 2524 break; 2525 } 2526 2527 /* 2528 * Insert the new lock in the list at the appropriate place. 2529 */ 2530 if (!unlock) { 2531 nfscl_insertlock(lp, new_lop, ilop, local); 2532 *new_lopp = NULL; 2533 modified = 1; 2534 } 2535 return (modified); 2536 } 2537 2538 /* 2539 * This function must be run as a kernel thread. 2540 * It does Renew Ops and recovery, when required. 2541 */ 2542 void 2543 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) 2544 { 2545 struct nfsclowner *owp, *nowp; 2546 struct nfsclopen *op; 2547 struct nfscllockowner *lp, *nlp; 2548 struct nfscldeleghead dh; 2549 struct nfscldeleg *dp, *ndp; 2550 struct ucred *cred; 2551 u_int32_t clidrev; 2552 int error, cbpathdown, islept, igotlock, ret, clearok; 2553 uint32_t recover_done_time = 0; 2554 time_t mytime; 2555 static time_t prevsec = 0; 2556 struct nfscllockownerfh *lfhp, *nlfhp; 2557 struct nfscllockownerfhhead lfh; 2558 struct nfscllayout *lyp, *nlyp; 2559 struct nfscldevinfo *dip, *ndip; 2560 struct nfscllayouthead rlh; 2561 struct nfsclrecalllayout *recallp; 2562 struct nfsclds *dsp; 2563 bool retok; 2564 struct mount *mp; 2565 2566 cred = newnfs_getcred(); 2567 NFSLOCKCLSTATE(); 2568 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; 2569 mp = clp->nfsc_nmp->nm_mountp; 2570 NFSUNLOCKCLSTATE(); 2571 for(;;) { 2572 newnfs_setroot(cred); 2573 cbpathdown = 0; 2574 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { 2575 /* 2576 * Only allow one full recover within 1/2 of the lease 2577 * duration (nfsc_renew). 2578 * retok is value/result. If passed in set to true, 2579 * it indicates only a CreateSession operation should 2580 * be attempted. 2581 * If it is returned true, it indicates that the 2582 * recovery only required a CreateSession. 
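 * To summarize the retok contract:
 *	passed in true  - only a CreateSession is attempted;
 *	passed in false - a full recovery pass is permitted;
 *	returned true   - recovery needed nothing beyond CreateSession;
 *	returned false  - a full Open/Lock reclaim pass was attempted.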
2583 */ 2584 retok = true; 2585 if (recover_done_time < NFSD_MONOSEC) { 2586 recover_done_time = NFSD_MONOSEC + 2587 clp->nfsc_renew; 2588 retok = false; 2589 } 2590 NFSCL_DEBUG(1, "Doing recovery, only " 2591 "createsession=%d\n", retok); 2592 nfscl_recover(clp, &retok, cred, p); 2593 } 2594 if (clp->nfsc_expire <= NFSD_MONOSEC && 2595 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { 2596 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; 2597 clidrev = clp->nfsc_clientidrev; 2598 error = nfsrpc_renew(clp, NULL, cred, p); 2599 if (error == NFSERR_CBPATHDOWN) 2600 cbpathdown = 1; 2601 else if (error == NFSERR_STALECLIENTID || 2602 error == NFSERR_BADSESSION) { 2603 NFSLOCKCLSTATE(); 2604 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2605 NFSUNLOCKCLSTATE(); 2606 } else if (error == NFSERR_EXPIRED) 2607 (void) nfscl_hasexpired(clp, clidrev, p); 2608 } 2609 2610 checkdsrenew: 2611 if (NFSHASNFSV4N(clp->nfsc_nmp)) { 2612 /* Do renews for any DS sessions. */ 2613 NFSLOCKMNT(clp->nfsc_nmp); 2614 /* Skip first entry, since the MDS is handled above. */ 2615 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess); 2616 if (dsp != NULL) 2617 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2618 while (dsp != NULL) { 2619 if (dsp->nfsclds_expire <= NFSD_MONOSEC && 2620 dsp->nfsclds_sess.nfsess_defunct == 0) { 2621 dsp->nfsclds_expire = NFSD_MONOSEC + 2622 clp->nfsc_renew; 2623 NFSUNLOCKMNT(clp->nfsc_nmp); 2624 (void)nfsrpc_renew(clp, dsp, cred, p); 2625 goto checkdsrenew; 2626 } 2627 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2628 } 2629 NFSUNLOCKMNT(clp->nfsc_nmp); 2630 } 2631 2632 TAILQ_INIT(&dh); 2633 NFSLOCKCLSTATE(); 2634 if (cbpathdown) 2635 /* It's a Total Recall! */ 2636 nfscl_totalrecall(clp); 2637 2638 /* 2639 * Now, handle defunct owners. 2640 */ 2641 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 2642 if (LIST_EMPTY(&owp->nfsow_open)) { 2643 if (owp->nfsow_defunct != 0) 2644 nfscl_freeopenowner(owp, 0); 2645 } 2646 } 2647 2648 /* 2649 * Do the recall on any delegations. To avoid trouble, always 2650 * come back up here after having slept. 2651 */ 2652 igotlock = 0; 2653 tryagain: 2654 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2655 while (dp != NULL) { 2656 ndp = TAILQ_NEXT(dp, nfsdl_list); 2657 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { 2658 /* 2659 * Wait for outstanding I/O ops to be done. 2660 */ 2661 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 2662 if (igotlock) { 2663 nfsv4_unlock(&clp->nfsc_lock, 0); 2664 igotlock = 0; 2665 } 2666 dp->nfsdl_rwlock.nfslock_lock |= 2667 NFSV4LOCK_WANTED; 2668 msleep(&dp->nfsdl_rwlock, 2669 NFSCLSTATEMUTEXPTR, PVFS, "nfscld", 2670 5 * hz); 2671 if (NFSCL_FORCEDISM(mp)) 2672 goto terminate; 2673 goto tryagain; 2674 } 2675 while (!igotlock) { 2676 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 2677 &islept, NFSCLSTATEMUTEXPTR, mp); 2678 if (igotlock == 0 && NFSCL_FORCEDISM(mp)) 2679 goto terminate; 2680 if (islept) 2681 goto tryagain; 2682 } 2683 NFSUNLOCKCLSTATE(); 2684 newnfs_copycred(&dp->nfsdl_cred, cred); 2685 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, 2686 NULL, cred, p, 1); 2687 if (!ret) { 2688 nfscl_cleandeleg(dp); 2689 TAILQ_REMOVE(&clp->nfsc_deleg, dp, 2690 nfsdl_list); 2691 LIST_REMOVE(dp, nfsdl_hash); 2692 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2693 nfscl_delegcnt--; 2694 nfsstatsv1.cldelegates--; 2695 } 2696 NFSLOCKCLSTATE(); 2697 } 2698 dp = ndp; 2699 } 2700 2701 /* 2702 * Clear out old delegations, if we are above the high water 2703 * mark. Only clear out ones with no state related to them. 2704 * The tailq list is in LRU order. 
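 * A delegation is only culled here when all of the following hold
 * (these are exactly the tests below): it is not in use (both the
 * rwlock use count and lock word are zero), its timestamp has passed,
 * it is not being recalled, reclaimed or returned, and no open or
 * byte range lock state still hangs off it.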
2705 */ 2706 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); 2707 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { 2708 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); 2709 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && 2710 dp->nfsdl_rwlock.nfslock_lock == 0 && 2711 dp->nfsdl_timestamp < NFSD_MONOSEC && 2712 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | 2713 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { 2714 clearok = 1; 2715 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2716 op = LIST_FIRST(&owp->nfsow_open); 2717 if (op != NULL) { 2718 clearok = 0; 2719 break; 2720 } 2721 } 2722 if (clearok) { 2723 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 2724 if (!LIST_EMPTY(&lp->nfsl_lock)) { 2725 clearok = 0; 2726 break; 2727 } 2728 } 2729 } 2730 if (clearok) { 2731 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); 2732 LIST_REMOVE(dp, nfsdl_hash); 2733 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2734 nfscl_delegcnt--; 2735 nfsstatsv1.cldelegates--; 2736 } 2737 } 2738 dp = ndp; 2739 } 2740 if (igotlock) 2741 nfsv4_unlock(&clp->nfsc_lock, 0); 2742 2743 /* 2744 * Do the recall on any layouts. To avoid trouble, always 2745 * come back up here after having slept. 2746 */ 2747 TAILQ_INIT(&rlh); 2748 tryagain2: 2749 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) { 2750 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) { 2751 /* 2752 * Wait for outstanding I/O ops to be done. 2753 */ 2754 if (lyp->nfsly_lock.nfslock_usecnt > 0 || 2755 (lyp->nfsly_lock.nfslock_lock & 2756 NFSV4LOCK_LOCK) != 0) { 2757 lyp->nfsly_lock.nfslock_lock |= 2758 NFSV4LOCK_WANTED; 2759 msleep(&lyp->nfsly_lock.nfslock_lock, 2760 NFSCLSTATEMUTEXPTR, PVFS, "nfslyp", 2761 5 * hz); 2762 if (NFSCL_FORCEDISM(mp)) 2763 goto terminate; 2764 goto tryagain2; 2765 } 2766 /* Move the layout to the recall list. */ 2767 TAILQ_REMOVE(&clp->nfsc_layout, lyp, 2768 nfsly_list); 2769 LIST_REMOVE(lyp, nfsly_hash); 2770 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list); 2771 2772 /* Handle any layout commits. */ 2773 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) && 2774 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 2775 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 2776 NFSUNLOCKCLSTATE(); 2777 NFSCL_DEBUG(3, "do layoutcommit\n"); 2778 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, 2779 cred, p); 2780 NFSLOCKCLSTATE(); 2781 goto tryagain2; 2782 } 2783 } 2784 } 2785 2786 /* Now, look for stale layouts. */ 2787 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead); 2788 while (lyp != NULL) { 2789 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list); 2790 if (lyp->nfsly_timestamp < NFSD_MONOSEC && 2791 (lyp->nfsly_flags & NFSLY_RECALL) == 0 && 2792 lyp->nfsly_lock.nfslock_usecnt == 0 && 2793 lyp->nfsly_lock.nfslock_lock == 0) { 2794 NFSCL_DEBUG(4, "ret stale lay=%d\n", 2795 nfscl_layoutcnt); 2796 recallp = malloc(sizeof(*recallp), 2797 M_NFSLAYRECALL, M_NOWAIT); 2798 if (recallp == NULL) 2799 break; 2800 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, 2801 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX, 2802 lyp->nfsly_stateid.seqid, 0, 0, NULL, 2803 recallp); 2804 } 2805 lyp = nlyp; 2806 } 2807 2808 /* 2809 * Free up any unreferenced device info structures. 2810 */ 2811 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) { 2812 if (dip->nfsdi_layoutrefs == 0 && 2813 dip->nfsdi_refcnt == 0) { 2814 NFSCL_DEBUG(4, "freeing devinfo\n"); 2815 LIST_REMOVE(dip, nfsdi_list); 2816 nfscl_freedevinfo(dip); 2817 } 2818 } 2819 NFSUNLOCKCLSTATE(); 2820 2821 /* Do layout return(s), as required. 
*/ 2822 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) { 2823 TAILQ_REMOVE(&rlh, lyp, nfsly_list); 2824 NFSCL_DEBUG(4, "ret layout\n"); 2825 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p); 2826 nfscl_freelayout(lyp); 2827 } 2828 2829 /* 2830 * Delegreturn any delegations cleaned out or recalled. 2831 */ 2832 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { 2833 newnfs_copycred(&dp->nfsdl_cred, cred); 2834 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 2835 TAILQ_REMOVE(&dh, dp, nfsdl_list); 2836 free(dp, M_NFSCLDELEG); 2837 } 2838 2839 SLIST_INIT(&lfh); 2840 /* 2841 * Call nfscl_cleanupkext() once per second to check for 2842 * open/lock owners where the process has exited. 2843 */ 2844 mytime = NFSD_MONOSEC; 2845 if (prevsec != mytime) { 2846 prevsec = mytime; 2847 nfscl_cleanupkext(clp, &lfh); 2848 } 2849 2850 /* 2851 * Do a ReleaseLockOwner for all lock owners where the 2852 * associated process no longer exists, as found by 2853 * nfscl_cleanupkext(). 2854 */ 2855 newnfs_setroot(cred); 2856 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) { 2857 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list, 2858 nlp) { 2859 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp, 2860 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred, 2861 p); 2862 nfscl_freelockowner(lp, 0); 2863 } 2864 free(lfhp, M_TEMP); 2865 } 2866 SLIST_INIT(&lfh); 2867 2868 NFSLOCKCLSTATE(); 2869 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) 2870 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", 2871 hz); 2872 terminate: 2873 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { 2874 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; 2875 NFSUNLOCKCLSTATE(); 2876 NFSFREECRED(cred); 2877 wakeup((caddr_t)clp); 2878 return; 2879 } 2880 NFSUNLOCKCLSTATE(); 2881 } 2882 } 2883 2884 /* 2885 * Initiate state recovery. Called when NFSERR_STALECLIENTID, 2886 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received. 2887 */ 2888 void 2889 nfscl_initiate_recovery(struct nfsclclient *clp) 2890 { 2891 2892 if (clp == NULL) 2893 return; 2894 NFSLOCKCLSTATE(); 2895 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2896 NFSUNLOCKCLSTATE(); 2897 wakeup((caddr_t)clp); 2898 } 2899 2900 /* 2901 * Dump out the state stuff for debugging. 
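 * Example of the output produced below (values illustrative only):
 *
 *	owner=0x12 0x0 0x0 0x2a seqid=3
 *	open st=0x1 0x5 0x9 cnt=1 fh12=0x7f
 *	lckown=0x12 0x0 0x0 0x2a seqid=1 st=0x1 0x5 0xa
 *	lck typ=1 fst=0 end=1024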
2902 */ 2903 void 2904 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, 2905 int lockowner, int locks) 2906 { 2907 struct nfsclclient *clp; 2908 struct nfsclowner *owp; 2909 struct nfsclopen *op; 2910 struct nfscllockowner *lp; 2911 struct nfscllock *lop; 2912 struct nfscldeleg *dp; 2913 2914 clp = nmp->nm_clp; 2915 if (clp == NULL) { 2916 printf("nfscl dumpstate NULL clp\n"); 2917 return; 2918 } 2919 NFSLOCKCLSTATE(); 2920 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2921 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2922 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2923 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2924 owp->nfsow_owner[0], owp->nfsow_owner[1], 2925 owp->nfsow_owner[2], owp->nfsow_owner[3], 2926 owp->nfsow_seqid); 2927 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2928 if (opens) 2929 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2930 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2931 op->nfso_stateid.other[2], op->nfso_opencnt, 2932 op->nfso_fh[12]); 2933 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2934 if (lockowner) 2935 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2936 lp->nfsl_owner[0], lp->nfsl_owner[1], 2937 lp->nfsl_owner[2], lp->nfsl_owner[3], 2938 lp->nfsl_seqid, 2939 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2940 lp->nfsl_stateid.other[2]); 2941 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2942 if (locks) 2943 #ifdef __FreeBSD__ 2944 printf("lck typ=%d fst=%ju end=%ju\n", 2945 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2946 (intmax_t)lop->nfslo_end); 2947 #else 2948 printf("lck typ=%d fst=%qd end=%qd\n", 2949 lop->nfslo_type, lop->nfslo_first, 2950 lop->nfslo_end); 2951 #endif 2952 } 2953 } 2954 } 2955 } 2956 } 2957 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2958 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2959 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2960 owp->nfsow_owner[0], owp->nfsow_owner[1], 2961 owp->nfsow_owner[2], owp->nfsow_owner[3], 2962 owp->nfsow_seqid); 2963 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2964 if (opens) 2965 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2966 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2967 op->nfso_stateid.other[2], op->nfso_opencnt, 2968 op->nfso_fh[12]); 2969 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2970 if (lockowner) 2971 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2972 lp->nfsl_owner[0], lp->nfsl_owner[1], 2973 lp->nfsl_owner[2], lp->nfsl_owner[3], 2974 lp->nfsl_seqid, 2975 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2976 lp->nfsl_stateid.other[2]); 2977 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2978 if (locks) 2979 #ifdef __FreeBSD__ 2980 printf("lck typ=%d fst=%ju end=%ju\n", 2981 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2982 (intmax_t)lop->nfslo_end); 2983 #else 2984 printf("lck typ=%d fst=%qd end=%qd\n", 2985 lop->nfslo_type, lop->nfslo_first, 2986 lop->nfslo_end); 2987 #endif 2988 } 2989 } 2990 } 2991 } 2992 NFSUNLOCKCLSTATE(); 2993 } 2994 2995 /* 2996 * Check for duplicate open owners and opens. 2997 * (Only used as a diagnostic aid.) 
2998 */ 2999 void 3000 nfscl_dupopen(vnode_t vp, int dupopens) 3001 { 3002 struct nfsclclient *clp; 3003 struct nfsclowner *owp, *owp2; 3004 struct nfsclopen *op, *op2; 3005 struct nfsfh *nfhp; 3006 3007 clp = VFSTONFS(vp->v_mount)->nm_clp; 3008 if (clp == NULL) { 3009 printf("nfscl dupopen NULL clp\n"); 3010 return; 3011 } 3012 nfhp = VTONFS(vp)->n_fhp; 3013 NFSLOCKCLSTATE(); 3014 3015 /* 3016 * First, search for duplicate owners. 3017 * These should never happen! 3018 */ 3019 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3020 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3021 if (owp != owp2 && 3022 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, 3023 NFSV4CL_LOCKNAMELEN)) { 3024 NFSUNLOCKCLSTATE(); 3025 printf("DUP OWNER\n"); 3026 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0); 3027 return; 3028 } 3029 } 3030 } 3031 3032 /* 3033 * Now, search for duplicate stateids. 3034 * These shouldn't happen, either. 3035 */ 3036 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3037 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 3038 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3039 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3040 if (op != op2 && 3041 (op->nfso_stateid.other[0] != 0 || 3042 op->nfso_stateid.other[1] != 0 || 3043 op->nfso_stateid.other[2] != 0) && 3044 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && 3045 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && 3046 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { 3047 NFSUNLOCKCLSTATE(); 3048 printf("DUP STATEID\n"); 3049 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0); 3050 return; 3051 } 3052 } 3053 } 3054 } 3055 } 3056 3057 /* 3058 * Now search for duplicate opens. 3059 * Duplicate opens for the same owner 3060 * should never occur. Other duplicates are 3061 * possible and are checked for if "dupopens" 3062 * is true. 3063 */ 3064 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3065 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 3066 if (nfhp->nfh_len == op2->nfso_fhlen && 3067 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { 3068 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3069 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3070 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && 3071 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && 3072 (!NFSBCMP(op->nfso_own->nfsow_owner, 3073 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || 3074 dupopens)) { 3075 if (!NFSBCMP(op->nfso_own->nfsow_owner, 3076 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 3077 NFSUNLOCKCLSTATE(); 3078 printf("BADDUP OPEN\n"); 3079 } else { 3080 NFSUNLOCKCLSTATE(); 3081 printf("DUP OPEN\n"); 3082 } 3083 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 3084 0); 3085 return; 3086 } 3087 } 3088 } 3089 } 3090 } 3091 } 3092 NFSUNLOCKCLSTATE(); 3093 } 3094 3095 /* 3096 * During close, find an open that needs to be dereferenced and 3097 * dereference it. If there are no more opens for this file, 3098 * log a message to that effect. 3099 * Opens aren't actually Close'd until VOP_INACTIVE() is performed 3100 * on the file's vnode. 3101 * This is the safe way, since it is difficult to identify 3102 * which open the close is for and I/O can be performed after the 3103 * close(2) system call when a file is mmap'd. 3104 * If it returns 0 for success, there will be a referenced 3105 * clp returned via clpp. 
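 * A sketch of the expected calling sequence (dropping the clp
 * reference afterwards is the caller's responsibility and is not
 * shown here):
 *
 *	error = nfscl_getclose(vp, &clp);
 *	if (error == 0) {
 *		... decide whether a Close RPC is required now ...
 *	}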
3106 */ 3107 int 3108 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) 3109 { 3110 struct nfsclclient *clp; 3111 struct nfsclowner *owp; 3112 struct nfsclopen *op; 3113 struct nfscldeleg *dp; 3114 struct nfsfh *nfhp; 3115 int error, notdecr; 3116 3117 error = nfscl_getcl(vp->v_mount, NULL, NULL, 1, &clp); 3118 if (error) 3119 return (error); 3120 *clpp = clp; 3121 3122 nfhp = VTONFS(vp)->n_fhp; 3123 notdecr = 1; 3124 NFSLOCKCLSTATE(); 3125 /* 3126 * First, look for one under a delegation that was locally issued 3127 * and just decrement the opencnt for it. Since all my Opens against 3128 * the server are DENY_NONE, I don't see a problem with hanging 3129 * onto them. (It is much easier to use one of the extant Opens 3130 * that I already have on the server when a Delegation is recalled 3131 * than to do fresh Opens.) Someday, I might need to rethink this, but. 3132 */ 3133 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3134 if (dp != NULL) { 3135 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 3136 op = LIST_FIRST(&owp->nfsow_open); 3137 if (op != NULL) { 3138 /* 3139 * Since a delegation is for a file, there 3140 * should never be more than one open for 3141 * each openowner. 3142 */ 3143 if (LIST_NEXT(op, nfso_list) != NULL) 3144 panic("nfscdeleg opens"); 3145 if (notdecr && op->nfso_opencnt > 0) { 3146 notdecr = 0; 3147 op->nfso_opencnt--; 3148 break; 3149 } 3150 } 3151 } 3152 } 3153 3154 /* Now process the opens against the server. */ 3155 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3156 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3157 if (op->nfso_fhlen == nfhp->nfh_len && 3158 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3159 nfhp->nfh_len)) { 3160 /* Found an open, decrement cnt if possible */ 3161 if (notdecr && op->nfso_opencnt > 0) { 3162 notdecr = 0; 3163 op->nfso_opencnt--; 3164 } 3165 /* 3166 * There are more opens, so just return. 3167 */ 3168 if (op->nfso_opencnt > 0) { 3169 NFSUNLOCKCLSTATE(); 3170 return (0); 3171 } 3172 } 3173 } 3174 } 3175 NFSUNLOCKCLSTATE(); 3176 if (notdecr) 3177 printf("nfscl: never fnd open\n"); 3178 return (0); 3179 } 3180 3181 int 3182 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) 3183 { 3184 struct nfsclclient *clp; 3185 struct nfsclowner *owp, *nowp; 3186 struct nfsclopen *op; 3187 struct nfscldeleg *dp; 3188 struct nfsfh *nfhp; 3189 struct nfsclrecalllayout *recallp; 3190 int error; 3191 3192 error = nfscl_getcl(vp->v_mount, NULL, NULL, 1, &clp); 3193 if (error) 3194 return (error); 3195 *clpp = clp; 3196 3197 nfhp = VTONFS(vp)->n_fhp; 3198 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); 3199 NFSLOCKCLSTATE(); 3200 /* 3201 * First get rid of the local Open structures, which should be no 3202 * longer in use. 3203 */ 3204 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3205 if (dp != NULL) { 3206 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 3207 op = LIST_FIRST(&owp->nfsow_open); 3208 if (op != NULL) { 3209 KASSERT((op->nfso_opencnt == 0), 3210 ("nfscl: bad open cnt on deleg")); 3211 nfscl_freeopen(op, 1); 3212 } 3213 nfscl_freeopenowner(owp, 1); 3214 } 3215 } 3216 3217 /* Return any layouts marked return on close. */ 3218 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp); 3219 3220 /* Now process the opens against the server. 
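 * (nfsrpc_doclose() can sleep, so the state mutex is dropped around
 * it; since the lists can change while unlocked, the scan restarts
 * from the top via the "lookformore" label after each Close.)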
*/ 3221 lookformore: 3222 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3223 op = LIST_FIRST(&owp->nfsow_open); 3224 while (op != NULL) { 3225 if (op->nfso_fhlen == nfhp->nfh_len && 3226 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3227 nfhp->nfh_len)) { 3228 /* Found an open, close it. */ 3229 #ifdef DIAGNOSTIC 3230 KASSERT((op->nfso_opencnt == 0), 3231 ("nfscl: bad open cnt on server (%d)", 3232 op->nfso_opencnt)); 3233 #endif 3234 NFSUNLOCKCLSTATE(); 3235 nfsrpc_doclose(VFSTONFS(vp->v_mount), op, p); 3236 NFSLOCKCLSTATE(); 3237 goto lookformore; 3238 } 3239 op = LIST_NEXT(op, nfso_list); 3240 } 3241 } 3242 NFSUNLOCKCLSTATE(); 3243 /* 3244 * recallp has been set NULL by nfscl_retoncloselayout() if it was 3245 * used by the function, but calling free() with a NULL pointer is ok. 3246 */ 3247 free(recallp, M_NFSLAYRECALL); 3248 return (0); 3249 } 3250 3251 /* 3252 * Return all delegations on this client. 3253 * (Must be called with client sleep lock.) 3254 */ 3255 static void 3256 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) 3257 { 3258 struct nfscldeleg *dp, *ndp; 3259 struct ucred *cred; 3260 3261 cred = newnfs_getcred(); 3262 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { 3263 nfscl_cleandeleg(dp); 3264 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3265 nfscl_freedeleg(&clp->nfsc_deleg, dp); 3266 } 3267 NFSFREECRED(cred); 3268 } 3269 3270 /* 3271 * Do a callback RPC. 3272 */ 3273 void 3274 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) 3275 { 3276 int clist, gotseq_ok, i, j, k, op, rcalls; 3277 u_int32_t *tl; 3278 struct nfsclclient *clp; 3279 struct nfscldeleg *dp = NULL; 3280 int numops, taglen = -1, error = 0, trunc __unused; 3281 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident; 3282 u_char tag[NFSV4_SMALLSTR + 1], *tagstr; 3283 vnode_t vp = NULL; 3284 struct nfsnode *np; 3285 struct vattr va; 3286 struct nfsfh *nfhp; 3287 mount_t mp; 3288 nfsattrbit_t attrbits, rattrbits; 3289 nfsv4stateid_t stateid; 3290 uint32_t seqid, slotid = 0, highslot, cachethis __unused; 3291 uint8_t sessionid[NFSX_V4SESSIONID]; 3292 struct mbuf *rep; 3293 struct nfscllayout *lyp; 3294 uint64_t filesid[2], len, off; 3295 int changed, gotone, laytype, recalltype; 3296 uint32_t iomode; 3297 struct nfsclrecalllayout *recallp = NULL; 3298 struct nfsclsession *tsep; 3299 3300 gotseq_ok = 0; 3301 nfsrvd_rephead(nd); 3302 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3303 taglen = fxdr_unsigned(int, *tl); 3304 if (taglen < 0) { 3305 error = EBADRPC; 3306 goto nfsmout; 3307 } 3308 if (taglen <= NFSV4_SMALLSTR) 3309 tagstr = tag; 3310 else 3311 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); 3312 error = nfsrv_mtostr(nd, tagstr, taglen); 3313 if (error) { 3314 if (taglen > NFSV4_SMALLSTR) 3315 free(tagstr, M_TEMP); 3316 taglen = -1; 3317 goto nfsmout; 3318 } 3319 (void) nfsm_strtom(nd, tag, taglen); 3320 if (taglen > NFSV4_SMALLSTR) { 3321 free(tagstr, M_TEMP); 3322 } 3323 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); 3324 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3325 minorvers = fxdr_unsigned(u_int32_t, *tl++); 3326 if (minorvers != NFSV4_MINORVERSION && 3327 minorvers != NFSV41_MINORVERSION && 3328 minorvers != NFSV42_MINORVERSION) 3329 nd->nd_repstat = NFSERR_MINORVERMISMATCH; 3330 cbident = fxdr_unsigned(u_int32_t, *tl++); 3331 if (nd->nd_repstat) 3332 numops = 0; 3333 else 3334 numops = fxdr_unsigned(int, *tl); 3335 /* 3336 * Loop around doing the sub ops. 
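 * Each pass decodes one operation of the callback compound. For
 * reference, the CB_COMPOUND argument layout consumed above is:
 *
 *	tag<>           - opaque, echoed back in the reply
 *	minorversion    - 0, 1 or 2
 *	callback_ident  - cbident, only meaningful for NFSv4.0
 *	numops          - count of the operations decoded by this loop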
3337 */ 3338 for (i = 0; i < numops; i++) { 3339 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3340 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); 3341 *repp++ = *tl; 3342 op = fxdr_unsigned(int, *tl); 3343 if (op < NFSV4OP_CBGETATTR || 3344 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) || 3345 (op > NFSV4OP_CBNOTIFYDEVID && 3346 minorvers == NFSV41_MINORVERSION) || 3347 (op > NFSV4OP_CBOFFLOAD && 3348 minorvers == NFSV42_MINORVERSION)) { 3349 nd->nd_repstat = NFSERR_OPILLEGAL; 3350 *repp = nfscl_errmap(nd, minorvers); 3351 retops++; 3352 break; 3353 } 3354 nd->nd_procnum = op; 3355 if (op < NFSV42_CBNOPS) 3356 nfsstatsv1.cbrpccnt[nd->nd_procnum]++; 3357 switch (op) { 3358 case NFSV4OP_CBGETATTR: 3359 NFSCL_DEBUG(4, "cbgetattr\n"); 3360 mp = NULL; 3361 vp = NULL; 3362 error = nfsm_getfh(nd, &nfhp); 3363 if (!error) 3364 error = nfsrv_getattrbits(nd, &attrbits, 3365 NULL, NULL); 3366 if (error == 0 && i == 0 && 3367 minorvers != NFSV4_MINORVERSION) 3368 error = NFSERR_OPNOTINSESS; 3369 if (!error) { 3370 mp = nfscl_getmnt(minorvers, sessionid, cbident, 3371 &clp); 3372 if (mp == NULL) 3373 error = NFSERR_SERVERFAULT; 3374 } 3375 if (!error) { 3376 error = nfscl_ngetreopen(mp, nfhp->nfh_fh, 3377 nfhp->nfh_len, p, &np); 3378 if (!error) 3379 vp = NFSTOV(np); 3380 } 3381 if (!error) { 3382 NFSZERO_ATTRBIT(&rattrbits); 3383 NFSLOCKCLSTATE(); 3384 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3385 nfhp->nfh_len); 3386 if (dp != NULL) { 3387 if (NFSISSET_ATTRBIT(&attrbits, 3388 NFSATTRBIT_SIZE)) { 3389 if (vp != NULL) 3390 va.va_size = np->n_size; 3391 else 3392 va.va_size = 3393 dp->nfsdl_size; 3394 NFSSETBIT_ATTRBIT(&rattrbits, 3395 NFSATTRBIT_SIZE); 3396 } 3397 if (NFSISSET_ATTRBIT(&attrbits, 3398 NFSATTRBIT_CHANGE)) { 3399 va.va_filerev = 3400 dp->nfsdl_change; 3401 if (vp == NULL || 3402 (np->n_flag & NDELEGMOD)) 3403 va.va_filerev++; 3404 NFSSETBIT_ATTRBIT(&rattrbits, 3405 NFSATTRBIT_CHANGE); 3406 } 3407 } else 3408 error = NFSERR_SERVERFAULT; 3409 NFSUNLOCKCLSTATE(); 3410 } 3411 if (vp != NULL) 3412 vrele(vp); 3413 if (mp != NULL) 3414 vfs_unbusy(mp); 3415 if (nfhp != NULL) 3416 free(nfhp, M_NFSFH); 3417 if (!error) 3418 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, 3419 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0, 3420 (uint64_t)0, NULL); 3421 break; 3422 case NFSV4OP_CBRECALL: 3423 NFSCL_DEBUG(4, "cbrecall\n"); 3424 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3425 NFSX_UNSIGNED); 3426 stateid.seqid = *tl++; 3427 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, 3428 NFSX_STATEIDOTHER); 3429 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 3430 trunc = fxdr_unsigned(int, *tl); 3431 error = nfsm_getfh(nd, &nfhp); 3432 if (error == 0 && i == 0 && 3433 minorvers != NFSV4_MINORVERSION) 3434 error = NFSERR_OPNOTINSESS; 3435 if (!error) { 3436 NFSLOCKCLSTATE(); 3437 if (minorvers == NFSV4_MINORVERSION) 3438 clp = nfscl_getclnt(cbident); 3439 else 3440 clp = nfscl_getclntsess(sessionid); 3441 if (clp != NULL) { 3442 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3443 nfhp->nfh_len); 3444 if (dp != NULL && (dp->nfsdl_flags & 3445 NFSCLDL_DELEGRET) == 0) { 3446 dp->nfsdl_flags |= 3447 NFSCLDL_RECALL; 3448 wakeup((caddr_t)clp); 3449 } 3450 } else { 3451 error = NFSERR_SERVERFAULT; 3452 } 3453 NFSUNLOCKCLSTATE(); 3454 } 3455 if (nfhp != NULL) 3456 free(nfhp, M_NFSFH); 3457 break; 3458 case NFSV4OP_CBLAYOUTRECALL: 3459 NFSCL_DEBUG(4, "cblayrec\n"); 3460 nfhp = NULL; 3461 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED); 3462 laytype = fxdr_unsigned(int, *tl++); 3463 iomode = fxdr_unsigned(uint32_t, *tl++); 
3464 if (newnfs_true == *tl++) 3465 changed = 1; 3466 else 3467 changed = 0; 3468 recalltype = fxdr_unsigned(int, *tl); 3469 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n", 3470 laytype, iomode, changed, recalltype); 3471 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, 3472 M_WAITOK); 3473 if (laytype != NFSLAYOUT_NFSV4_1_FILES && 3474 laytype != NFSLAYOUT_FLEXFILE) 3475 error = NFSERR_NOMATCHLAYOUT; 3476 else if (recalltype == NFSLAYOUTRETURN_FILE) { 3477 error = nfsm_getfh(nd, &nfhp); 3478 NFSCL_DEBUG(4, "retfile getfh=%d\n", error); 3479 if (error != 0) 3480 goto nfsmout; 3481 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER + 3482 NFSX_STATEID); 3483 off = fxdr_hyper(tl); tl += 2; 3484 len = fxdr_hyper(tl); tl += 2; 3485 stateid.seqid = fxdr_unsigned(uint32_t, *tl++); 3486 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER); 3487 if (minorvers == NFSV4_MINORVERSION) 3488 error = NFSERR_NOTSUPP; 3489 else if (i == 0) 3490 error = NFSERR_OPNOTINSESS; 3491 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n", 3492 (uintmax_t)off, (uintmax_t)len, 3493 stateid.seqid, error); 3494 if (error == 0) { 3495 NFSLOCKCLSTATE(); 3496 clp = nfscl_getclntsess(sessionid); 3497 NFSCL_DEBUG(4, "cbly clp=%p\n", clp); 3498 if (clp != NULL) { 3499 lyp = nfscl_findlayout(clp, 3500 nfhp->nfh_fh, 3501 nfhp->nfh_len); 3502 NFSCL_DEBUG(4, "cblyp=%p\n", 3503 lyp); 3504 if (lyp != NULL && 3505 (lyp->nfsly_flags & 3506 (NFSLY_FILES | 3507 NFSLY_FLEXFILE)) != 0 && 3508 !NFSBCMP(stateid.other, 3509 lyp->nfsly_stateid.other, 3510 NFSX_STATEIDOTHER)) { 3511 error = 3512 nfscl_layoutrecall( 3513 recalltype, 3514 lyp, iomode, off, 3515 len, stateid.seqid, 3516 0, 0, NULL, 3517 recallp); 3518 if (error == 0 && 3519 stateid.seqid > 3520 lyp->nfsly_stateid.seqid) 3521 lyp->nfsly_stateid.seqid = 3522 stateid.seqid; 3523 recallp = NULL; 3524 wakeup(clp); 3525 NFSCL_DEBUG(4, 3526 "aft layrcal=%d " 3527 "layseqid=%d\n", 3528 error, 3529 lyp->nfsly_stateid.seqid); 3530 } else 3531 error = 3532 NFSERR_NOMATCHLAYOUT; 3533 } else 3534 error = NFSERR_NOMATCHLAYOUT; 3535 NFSUNLOCKCLSTATE(); 3536 } 3537 free(nfhp, M_NFSFH); 3538 } else if (recalltype == NFSLAYOUTRETURN_FSID) { 3539 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER); 3540 filesid[0] = fxdr_hyper(tl); tl += 2; 3541 filesid[1] = fxdr_hyper(tl); tl += 2; 3542 gotone = 0; 3543 NFSLOCKCLSTATE(); 3544 clp = nfscl_getclntsess(sessionid); 3545 if (clp != NULL) { 3546 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3547 nfsly_list) { 3548 if (lyp->nfsly_filesid[0] == 3549 filesid[0] && 3550 lyp->nfsly_filesid[1] == 3551 filesid[1]) { 3552 error = 3553 nfscl_layoutrecall( 3554 recalltype, 3555 lyp, iomode, 0, 3556 UINT64_MAX, 3557 lyp->nfsly_stateid.seqid, 3558 0, 0, NULL, 3559 recallp); 3560 recallp = NULL; 3561 gotone = 1; 3562 } 3563 } 3564 if (gotone != 0) 3565 wakeup(clp); 3566 else 3567 error = NFSERR_NOMATCHLAYOUT; 3568 } else 3569 error = NFSERR_NOMATCHLAYOUT; 3570 NFSUNLOCKCLSTATE(); 3571 } else if (recalltype == NFSLAYOUTRETURN_ALL) { 3572 gotone = 0; 3573 NFSLOCKCLSTATE(); 3574 clp = nfscl_getclntsess(sessionid); 3575 if (clp != NULL) { 3576 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3577 nfsly_list) { 3578 error = nfscl_layoutrecall( 3579 recalltype, lyp, iomode, 0, 3580 UINT64_MAX, 3581 lyp->nfsly_stateid.seqid, 3582 0, 0, NULL, recallp); 3583 recallp = NULL; 3584 gotone = 1; 3585 } 3586 if (gotone != 0) 3587 wakeup(clp); 3588 else 3589 error = NFSERR_NOMATCHLAYOUT; 3590 } else 3591 error = NFSERR_NOMATCHLAYOUT; 3592 NFSUNLOCKCLSTATE(); 3593 } else 3594 error = NFSERR_NOMATCHLAYOUT; 3595 if 
(recallp != NULL) { 3596 free(recallp, M_NFSLAYRECALL); 3597 recallp = NULL; 3598 } 3599 break; 3600 case NFSV4OP_CBSEQUENCE: 3601 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3602 5 * NFSX_UNSIGNED); 3603 bcopy(tl, sessionid, NFSX_V4SESSIONID); 3604 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3605 seqid = fxdr_unsigned(uint32_t, *tl++); 3606 slotid = fxdr_unsigned(uint32_t, *tl++); 3607 highslot = fxdr_unsigned(uint32_t, *tl++); 3608 cachethis = *tl++; 3609 /* Throw away the referring call stuff. */ 3610 clist = fxdr_unsigned(int, *tl); 3611 for (j = 0; j < clist; j++) { 3612 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3613 NFSX_UNSIGNED); 3614 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3615 rcalls = fxdr_unsigned(int, *tl); 3616 for (k = 0; k < rcalls; k++) { 3617 NFSM_DISSECT(tl, uint32_t *, 3618 2 * NFSX_UNSIGNED); 3619 } 3620 } 3621 NFSLOCKCLSTATE(); 3622 if (i == 0) { 3623 clp = nfscl_getclntsess(sessionid); 3624 if (clp == NULL) 3625 error = NFSERR_SERVERFAULT; 3626 } else 3627 error = NFSERR_SEQUENCEPOS; 3628 if (error == 0) { 3629 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3630 error = nfsv4_seqsession(seqid, slotid, 3631 highslot, tsep->nfsess_cbslots, &rep, 3632 tsep->nfsess_backslots); 3633 } 3634 NFSUNLOCKCLSTATE(); 3635 if (error == 0 || error == NFSERR_REPLYFROMCACHE) { 3636 gotseq_ok = 1; 3637 if (rep != NULL) { 3638 /* 3639 * Handle a reply for a retried 3640 * callback. The reply will be 3641 * re-inserted in the session cache 3642 * by the nfsv4_seqsess_cacherep() call 3643 * after out: 3644 */ 3645 KASSERT(error == NFSERR_REPLYFROMCACHE, 3646 ("cbsequence: non-NULL rep")); 3647 NFSCL_DEBUG(4, "Got cbretry\n"); 3648 m_freem(nd->nd_mreq); 3649 nd->nd_mreq = rep; 3650 rep = NULL; 3651 goto out; 3652 } 3653 NFSM_BUILD(tl, uint32_t *, 3654 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED); 3655 bcopy(sessionid, tl, NFSX_V4SESSIONID); 3656 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3657 *tl++ = txdr_unsigned(seqid); 3658 *tl++ = txdr_unsigned(slotid); 3659 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1); 3660 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1); 3661 } 3662 break; 3663 default: 3664 if (i == 0 && minorvers != NFSV4_MINORVERSION) 3665 error = NFSERR_OPNOTINSESS; 3666 else { 3667 NFSCL_DEBUG(1, "unsupp callback %d\n", op); 3668 error = NFSERR_NOTSUPP; 3669 } 3670 break; 3671 } 3672 if (error) { 3673 if (error == EBADRPC || error == NFSERR_BADXDR) { 3674 nd->nd_repstat = NFSERR_BADXDR; 3675 } else { 3676 nd->nd_repstat = error; 3677 } 3678 error = 0; 3679 } 3680 retops++; 3681 if (nd->nd_repstat) { 3682 *repp = nfscl_errmap(nd, minorvers); 3683 break; 3684 } else 3685 *repp = 0; /* NFS4_OK */ 3686 } 3687 nfsmout: 3688 if (recallp != NULL) 3689 free(recallp, M_NFSLAYRECALL); 3690 if (error) { 3691 if (error == EBADRPC || error == NFSERR_BADXDR) 3692 nd->nd_repstat = NFSERR_BADXDR; 3693 else 3694 printf("nfsv4 comperr1=%d\n", error); 3695 } 3696 if (taglen == -1) { 3697 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 3698 *tl++ = 0; 3699 *tl = 0; 3700 } else { 3701 *retopsp = txdr_unsigned(retops); 3702 } 3703 *nd->nd_errp = nfscl_errmap(nd, minorvers); 3704 out: 3705 if (gotseq_ok != 0) { 3706 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK); 3707 NFSLOCKCLSTATE(); 3708 clp = nfscl_getclntsess(sessionid); 3709 if (clp != NULL) { 3710 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3711 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots, 3712 NFSERR_OK, &rep); 3713 NFSUNLOCKCLSTATE(); 3714 } else { 3715 NFSUNLOCKCLSTATE(); 3716 m_freem(rep); 3717 } 3718 } 3719 } 3720 3721 /* 3722 * Generate the next 
cbident value. Basically just increment a static value 3723 * and then check that it isn't already in the list, if it has wrapped around. 3724 */ 3725 static u_int32_t 3726 nfscl_nextcbident(void) 3727 { 3728 struct nfsclclient *clp; 3729 int matched; 3730 static u_int32_t nextcbident = 0; 3731 static int haswrapped = 0; 3732 3733 nextcbident++; 3734 if (nextcbident == 0) 3735 haswrapped = 1; 3736 if (haswrapped) { 3737 /* 3738 * Search the clientid list for one already using this cbident. 3739 */ 3740 do { 3741 matched = 0; 3742 NFSLOCKCLSTATE(); 3743 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3744 if (clp->nfsc_cbident == nextcbident) { 3745 matched = 1; 3746 break; 3747 } 3748 } 3749 NFSUNLOCKCLSTATE(); 3750 if (matched == 1) 3751 nextcbident++; 3752 } while (matched); 3753 } 3754 return (nextcbident); 3755 } 3756 3757 /* 3758 * Get the mount point related to a given cbident or session and busy it. 3759 */ 3760 static mount_t 3761 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident, 3762 struct nfsclclient **clpp) 3763 { 3764 struct nfsclclient *clp; 3765 mount_t mp; 3766 int error; 3767 struct nfsclsession *tsep; 3768 3769 *clpp = NULL; 3770 NFSLOCKCLSTATE(); 3771 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3772 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3773 if (minorvers == NFSV4_MINORVERSION) { 3774 if (clp->nfsc_cbident == cbident) 3775 break; 3776 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, 3777 NFSX_V4SESSIONID)) 3778 break; 3779 } 3780 if (clp == NULL) { 3781 NFSUNLOCKCLSTATE(); 3782 return (NULL); 3783 } 3784 mp = clp->nfsc_nmp->nm_mountp; 3785 vfs_ref(mp); 3786 NFSUNLOCKCLSTATE(); 3787 error = vfs_busy(mp, 0); 3788 vfs_rel(mp); 3789 if (error != 0) 3790 return (NULL); 3791 *clpp = clp; 3792 return (mp); 3793 } 3794 3795 /* 3796 * Get the clientid pointer related to a given cbident. 3797 */ 3798 static struct nfsclclient * 3799 nfscl_getclnt(u_int32_t cbident) 3800 { 3801 struct nfsclclient *clp; 3802 3803 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 3804 if (clp->nfsc_cbident == cbident) 3805 break; 3806 return (clp); 3807 } 3808 3809 /* 3810 * Get the clientid pointer related to a given sessionid. 3811 */ 3812 static struct nfsclclient * 3813 nfscl_getclntsess(uint8_t *sessionid) 3814 { 3815 struct nfsclclient *clp; 3816 struct nfsclsession *tsep; 3817 3818 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3819 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3820 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, 3821 NFSX_V4SESSIONID)) 3822 break; 3823 } 3824 return (clp); 3825 } 3826 3827 /* 3828 * Search for a lock conflict locally on the client. A conflict occurs if 3829 * - not same owner and overlapping byte range and at least one of them is 3830 * a write lock or this is an unlock. 
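 * For example (illustrative ranges only): a write lock held by owner A
 * over bytes [0, 100) conflicts with a read or write request for
 * [50, 60) from owner B, while two read locks over the same range do
 * not conflict and an owner never conflicts with itself.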
3831 */ 3832 static int 3833 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen, 3834 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp, 3835 struct nfscllock **lopp) 3836 { 3837 struct nfsclowner *owp; 3838 struct nfsclopen *op; 3839 int ret; 3840 3841 if (dp != NULL) { 3842 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); 3843 if (ret) 3844 return (ret); 3845 } 3846 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3847 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3848 if (op->nfso_fhlen == fhlen && 3849 !NFSBCMP(op->nfso_fh, fhp, fhlen)) { 3850 ret = nfscl_checkconflict(&op->nfso_lock, nlop, 3851 own, lopp); 3852 if (ret) 3853 return (ret); 3854 } 3855 } 3856 } 3857 return (0); 3858 } 3859 3860 static int 3861 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, 3862 u_int8_t *own, struct nfscllock **lopp) 3863 { 3864 struct nfscllockowner *lp; 3865 struct nfscllock *lop; 3866 3867 LIST_FOREACH(lp, lhp, nfsl_list) { 3868 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { 3869 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3870 if (lop->nfslo_first >= nlop->nfslo_end) 3871 break; 3872 if (lop->nfslo_end <= nlop->nfslo_first) 3873 continue; 3874 if (lop->nfslo_type == F_WRLCK || 3875 nlop->nfslo_type == F_WRLCK || 3876 nlop->nfslo_type == F_UNLCK) { 3877 if (lopp != NULL) 3878 *lopp = lop; 3879 return (NFSERR_DENIED); 3880 } 3881 } 3882 } 3883 } 3884 return (0); 3885 } 3886 3887 /* 3888 * Check for a local conflicting lock. 3889 */ 3890 int 3891 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, 3892 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags) 3893 { 3894 struct nfscllock *lop, nlck; 3895 struct nfscldeleg *dp; 3896 struct nfsnode *np; 3897 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 3898 int error; 3899 3900 nlck.nfslo_type = fl->l_type; 3901 nlck.nfslo_first = off; 3902 if (len == NFS64BITSSET) { 3903 nlck.nfslo_end = NFS64BITSSET; 3904 } else { 3905 nlck.nfslo_end = off + len; 3906 if (nlck.nfslo_end <= nlck.nfslo_first) 3907 return (NFSERR_INVAL); 3908 } 3909 np = VTONFS(vp); 3910 nfscl_filllockowner(id, own, flags); 3911 NFSLOCKCLSTATE(); 3912 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3913 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 3914 &nlck, own, dp, &lop); 3915 if (error != 0) { 3916 fl->l_whence = SEEK_SET; 3917 fl->l_start = lop->nfslo_first; 3918 if (lop->nfslo_end == NFS64BITSSET) 3919 fl->l_len = 0; 3920 else 3921 fl->l_len = lop->nfslo_end - lop->nfslo_first; 3922 fl->l_pid = (pid_t)0; 3923 fl->l_type = lop->nfslo_type; 3924 error = -1; /* no RPC required */ 3925 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || 3926 fl->l_type == F_RDLCK)) { 3927 /* 3928 * The delegation ensures that there isn't a conflicting 3929 * lock on the server, so return -1 to indicate an RPC 3930 * isn't required. 3931 */ 3932 fl->l_type = F_UNLCK; 3933 error = -1; 3934 } 3935 NFSUNLOCKCLSTATE(); 3936 return (error); 3937 } 3938 3939 /* 3940 * Handle Recall of a delegation. 3941 * The clp must be exclusive locked when this is called. 
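* The recall is handled in three steps: flush dirty data to the server if * it is a write delegation, move any locally issued opens over to the * server and, finally, re-establish any locally issued byte range locks * on the server.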
3942 */ 3943 static int 3944 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, 3945 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p, 3946 int called_from_renewthread) 3947 { 3948 struct nfsclowner *owp, *lowp, *nowp; 3949 struct nfsclopen *op, *lop; 3950 struct nfscllockowner *lp; 3951 struct nfscllock *lckp; 3952 struct nfsnode *np; 3953 int error = 0, ret, gotvp = 0; 3954 3955 if (vp == NULL) { 3956 /* 3957 * First, get a vnode for the file. This is needed to do RPCs. 3958 */ 3959 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, 3960 dp->nfsdl_fhlen, p, &np); 3961 if (ret) { 3962 /* 3963 * File isn't open, so nothing to move over to the 3964 * server. 3965 */ 3966 return (0); 3967 } 3968 vp = NFSTOV(np); 3969 gotvp = 1; 3970 } else { 3971 np = VTONFS(vp); 3972 } 3973 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; 3974 3975 /* 3976 * Ok, if it's a write delegation, flush data to the server, so 3977 * that close/open consistency is retained. 3978 */ 3979 ret = 0; 3980 NFSLOCKNODE(np); 3981 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { 3982 np->n_flag |= NDELEGRECALL; 3983 NFSUNLOCKNODE(np); 3984 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread); 3985 NFSLOCKNODE(np); 3986 np->n_flag &= ~NDELEGRECALL; 3987 } 3988 NFSINVALATTRCACHE(np); 3989 NFSUNLOCKNODE(np); 3990 if (ret == EIO && called_from_renewthread != 0) { 3991 /* 3992 * If the flush failed with EIO for the renew thread, 3993 * return now, so that the dirty buffer will be flushed 3994 * later. 3995 */ 3996 if (gotvp != 0) 3997 vrele(vp); 3998 return (ret); 3999 } 4000 4001 /* 4002 * Now, for each openowner with opens issued locally, move them 4003 * over to state against the server. 4004 */ 4005 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) { 4006 lop = LIST_FIRST(&lowp->nfsow_open); 4007 if (lop != NULL) { 4008 if (LIST_NEXT(lop, nfso_list) != NULL) 4009 panic("nfsdlg mult opens"); 4010 /* 4011 * Look for the same openowner against the server. 4012 */ 4013 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 4014 if (!NFSBCMP(lowp->nfsow_owner, 4015 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 4016 newnfs_copycred(&dp->nfsdl_cred, cred); 4017 ret = nfscl_moveopen(vp, clp, nmp, lop, 4018 owp, dp, cred, p); 4019 if (ret == NFSERR_STALECLIENTID || 4020 ret == NFSERR_STALEDONTRECOVER || 4021 ret == NFSERR_BADSESSION) { 4022 if (gotvp) 4023 vrele(vp); 4024 return (ret); 4025 } 4026 if (ret) { 4027 nfscl_freeopen(lop, 1); 4028 if (!error) 4029 error = ret; 4030 } 4031 break; 4032 } 4033 } 4034 4035 /* 4036 * If no openowner found, create one and get an open 4037 * for it. 4038 */ 4039 if (owp == NULL) { 4040 nowp = malloc( 4041 sizeof (struct nfsclowner), M_NFSCLOWNER, 4042 M_WAITOK); 4043 nfscl_newopen(clp, NULL, &owp, &nowp, &op, 4044 NULL, lowp->nfsow_owner, dp->nfsdl_fh, 4045 dp->nfsdl_fhlen, NULL, NULL); 4046 newnfs_copycred(&dp->nfsdl_cred, cred); 4047 ret = nfscl_moveopen(vp, clp, nmp, lop, 4048 owp, dp, cred, p); 4049 if (ret) { 4050 nfscl_freeopenowner(owp, 0); 4051 if (ret == NFSERR_STALECLIENTID || 4052 ret == NFSERR_STALEDONTRECOVER || 4053 ret == NFSERR_BADSESSION) { 4054 if (gotvp) 4055 vrele(vp); 4056 return (ret); 4057 } 4058 if (ret) { 4059 nfscl_freeopen(lop, 1); 4060 if (!error) 4061 error = ret; 4062 } 4063 } 4064 } 4065 } 4066 } 4067 4068 /* 4069 * Now, get byte range locks for any locks done locally. 
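* These locks were granted locally under the delegation, so each one must * now be re-established against the server via nfscl_relock().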
4070 */ 4071 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4072 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) { 4073 newnfs_copycred(&dp->nfsdl_cred, cred); 4074 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p); 4075 if (ret == NFSERR_STALESTATEID || 4076 ret == NFSERR_STALEDONTRECOVER || 4077 ret == NFSERR_STALECLIENTID || 4078 ret == NFSERR_BADSESSION) { 4079 if (gotvp) 4080 vrele(vp); 4081 return (ret); 4082 } 4083 if (ret && !error) 4084 error = ret; 4085 } 4086 } 4087 if (gotvp) 4088 vrele(vp); 4089 return (error); 4090 } 4091 4092 /* 4093 * Move a locally issued open over to an owner on the state list. 4094 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and 4095 * returns with it unlocked. 4096 */ 4097 static int 4098 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 4099 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp, 4100 struct ucred *cred, NFSPROC_T *p) 4101 { 4102 struct nfsclopen *op, *nop; 4103 struct nfscldeleg *ndp; 4104 struct nfsnode *np; 4105 int error = 0, newone; 4106 4107 /* 4108 * First, look for an appropriate open. If found, just increment the 4109 * opencnt in it. 4110 */ 4111 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 4112 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode && 4113 op->nfso_fhlen == lop->nfso_fhlen && 4114 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) { 4115 op->nfso_opencnt += lop->nfso_opencnt; 4116 nfscl_freeopen(lop, 1); 4117 return (0); 4118 } 4119 } 4120 4121 /* No appropriate open, so we have to do one against the server. */ 4122 np = VTONFS(vp); 4123 nop = malloc(sizeof (struct nfsclopen) + 4124 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK); 4125 newone = 0; 4126 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner, 4127 lop->nfso_fh, lop->nfso_fhlen, cred, &newone); 4128 ndp = dp; 4129 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen, 4130 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op, 4131 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p); 4132 if (error) { 4133 if (newone) 4134 nfscl_freeopen(op, 0); 4135 } else { 4136 op->nfso_mode |= lop->nfso_mode; 4137 op->nfso_opencnt += lop->nfso_opencnt; 4138 nfscl_freeopen(lop, 1); 4139 } 4140 if (nop != NULL) 4141 free(nop, M_NFSCLOPEN); 4142 if (ndp != NULL) { 4143 /* 4144 * What should I do with the returned delegation, since the 4145 * delegation is being recalled? For now, just printf and 4146 * throw it away. 4147 */ 4148 printf("Moveopen returned deleg\n"); 4149 free(ndp, M_NFSCLDELEG); 4150 } 4151 return (error); 4152 } 4153 4154 /* 4155 * Recall all delegations on this client. 4156 */ 4157 static void 4158 nfscl_totalrecall(struct nfsclclient *clp) 4159 { 4160 struct nfscldeleg *dp; 4161 4162 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 4163 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) 4164 dp->nfsdl_flags |= NFSCLDL_RECALL; 4165 } 4166 } 4167 4168 /* 4169 * Relock byte ranges. Called for delegation recall and state expiry.
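* The lock is first re-registered locally via nfscl_getbytelock() and * then, when not handled entirely locally, acquired from the server via * nfscl_trylock().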
4170 */ 4171 static int 4172 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, 4173 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred, 4174 NFSPROC_T *p) 4175 { 4176 struct nfscllockowner *nlp; 4177 struct nfsfh *nfhp; 4178 u_int64_t off, len; 4179 int error, newone, donelocally; 4180 4181 off = lop->nfslo_first; 4182 len = lop->nfslo_end - lop->nfslo_first; 4183 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p, 4184 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner, 4185 lp->nfsl_openowner, &nlp, &newone, &donelocally); 4186 if (error || donelocally) 4187 return (error); 4188 nfhp = VTONFS(vp)->n_fhp; 4189 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh, 4190 nfhp->nfh_len, nlp, newone, 0, off, 4191 len, lop->nfslo_type, cred, p); 4192 if (error) 4193 nfscl_freelockowner(nlp, 0); 4194 return (error); 4195 } 4196 4197 /* 4198 * Called to re-open a file. Basically get a vnode for the file handle 4199 * and then call nfsrpc_openrpc() to do the rest. 4200 */ 4201 static int 4202 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, 4203 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp, 4204 struct ucred *cred, NFSPROC_T *p) 4205 { 4206 struct nfsnode *np; 4207 vnode_t vp; 4208 int error; 4209 4210 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np); 4211 if (error) 4212 return (error); 4213 vp = NFSTOV(np); 4214 if (np->n_v4 != NULL) { 4215 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, 4216 np->n_v4->n4_fhlen, fhp, fhlen, mode, op, 4217 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0, 4218 cred, p); 4219 } else { 4220 error = EINVAL; 4221 } 4222 vrele(vp); 4223 return (error); 4224 } 4225 4226 /* 4227 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while 4228 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials 4229 * fail. 4230 */ 4231 static int 4232 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4233 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, 4234 u_int8_t *name, int namelen, struct nfscldeleg **ndpp, 4235 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) 4236 { 4237 int error; 4238 4239 do { 4240 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, 4241 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 4242 0, 0); 4243 if (error == NFSERR_DELAY) 4244 (void) nfs_catnap(PZERO, error, "nfstryop"); 4245 } while (error == NFSERR_DELAY); 4246 if (error == EAUTH || error == EACCES) { 4247 /* Try again using system credentials */ 4248 newnfs_setroot(cred); 4249 do { 4250 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, 4251 newfhlen, mode, op, name, namelen, ndpp, reclaim, 4252 delegtype, cred, p, 1, 0); 4253 if (error == NFSERR_DELAY) 4254 (void) nfs_catnap(PZERO, error, "nfstryop"); 4255 } while (error == NFSERR_DELAY); 4256 } 4257 return (error); 4258 } 4259 4260 /* 4261 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns 4262 * NFSERR_DELAY. Also, retry with system credentials, if the provided 4263 * cred don't work. 
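* (nfscl_tryopen() above and nfscl_trydelegreturn()/nfscl_tryclose() * below use the same retry-on-NFSERR_DELAY pattern.)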
4264 */ 4265 static int 4266 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, 4267 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim, 4268 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p) 4269 { 4270 struct nfsrv_descript nfsd, *nd = &nfsd; 4271 int error; 4272 4273 do { 4274 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, 4275 reclaim, off, len, type, cred, p, 0); 4276 if (!error && nd->nd_repstat == NFSERR_DELAY) 4277 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 4278 "nfstrylck"); 4279 } while (!error && nd->nd_repstat == NFSERR_DELAY); 4280 if (!error) 4281 error = nd->nd_repstat; 4282 if (error == EAUTH || error == EACCES) { 4283 /* Try again using root credentials */ 4284 newnfs_setroot(cred); 4285 do { 4286 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, 4287 newone, reclaim, off, len, type, cred, p, 1); 4288 if (!error && nd->nd_repstat == NFSERR_DELAY) 4289 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 4290 "nfstrylck"); 4291 } while (!error && nd->nd_repstat == NFSERR_DELAY); 4292 if (!error) 4293 error = nd->nd_repstat; 4294 } 4295 return (error); 4296 } 4297 4298 /* 4299 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(), 4300 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 4301 * credentials fail. 4302 */ 4303 static int 4304 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred, 4305 struct nfsmount *nmp, NFSPROC_T *p) 4306 { 4307 int error; 4308 4309 do { 4310 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0); 4311 if (error == NFSERR_DELAY) 4312 (void) nfs_catnap(PZERO, error, "nfstrydp"); 4313 } while (error == NFSERR_DELAY); 4314 if (error == EAUTH || error == EACCES) { 4315 /* Try again using system credentials */ 4316 newnfs_setroot(cred); 4317 do { 4318 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1); 4319 if (error == NFSERR_DELAY) 4320 (void) nfs_catnap(PZERO, error, "nfstrydp"); 4321 } while (error == NFSERR_DELAY); 4322 } 4323 return (error); 4324 } 4325 4326 /* 4327 * Try a close against the server. Just call nfsrpc_closerpc(), 4328 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 4329 * credentials fail. 4330 */ 4331 int 4332 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred, 4333 struct nfsmount *nmp, NFSPROC_T *p) 4334 { 4335 struct nfsrv_descript nfsd, *nd = &nfsd; 4336 int error; 4337 4338 do { 4339 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0); 4340 if (error == NFSERR_DELAY) 4341 (void) nfs_catnap(PZERO, error, "nfstrycl"); 4342 } while (error == NFSERR_DELAY); 4343 if (error == EAUTH || error == EACCES) { 4344 /* Try again using system credentials */ 4345 newnfs_setroot(cred); 4346 do { 4347 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1); 4348 if (error == NFSERR_DELAY) 4349 (void) nfs_catnap(PZERO, error, "nfstrycl"); 4350 } while (error == NFSERR_DELAY); 4351 } 4352 return (error); 4353 } 4354 4355 /* 4356 * Decide if a delegation on a file permits close without flushing writes 4357 * to the server. This might be a big performance win in some environments. 4358 * (Not useful until the client does caching on local stable storage.) 
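* Returns 1 if dirty data must be flushed before close and 0 when the * write delegation makes the flush unnecessary. A hypothetical caller * sketch (illustrative only, not code from this file): * if (nfscl_mustflush(vp) != 0) * (void) ncl_flush(vp, MNT_WAIT, p, 1, 0);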
4359 */ 4360 int 4361 nfscl_mustflush(vnode_t vp) 4362 { 4363 struct nfsclclient *clp; 4364 struct nfscldeleg *dp; 4365 struct nfsnode *np; 4366 struct nfsmount *nmp; 4367 4368 np = VTONFS(vp); 4369 nmp = VFSTONFS(vp->v_mount); 4370 if (!NFSHASNFSV4(nmp)) 4371 return (1); 4372 NFSLOCKCLSTATE(); 4373 clp = nfscl_findcl(nmp); 4374 if (clp == NULL) { 4375 NFSUNLOCKCLSTATE(); 4376 return (1); 4377 } 4378 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4379 if (dp != NULL && (dp->nfsdl_flags & 4380 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 4381 NFSCLDL_WRITE && 4382 (dp->nfsdl_sizelimit >= np->n_size || 4383 !NFSHASSTRICT3530(nmp))) { 4384 NFSUNLOCKCLSTATE(); 4385 return (0); 4386 } 4387 NFSUNLOCKCLSTATE(); 4388 return (1); 4389 } 4390 4391 /* 4392 * See if a (write) delegation exists for this file. 4393 */ 4394 int 4395 nfscl_nodeleg(vnode_t vp, int writedeleg) 4396 { 4397 struct nfsclclient *clp; 4398 struct nfscldeleg *dp; 4399 struct nfsnode *np; 4400 struct nfsmount *nmp; 4401 4402 np = VTONFS(vp); 4403 nmp = VFSTONFS(vp->v_mount); 4404 if (!NFSHASNFSV4(nmp)) 4405 return (1); 4406 NFSLOCKCLSTATE(); 4407 clp = nfscl_findcl(nmp); 4408 if (clp == NULL) { 4409 NFSUNLOCKCLSTATE(); 4410 return (1); 4411 } 4412 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4413 if (dp != NULL && 4414 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 && 4415 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) == 4416 NFSCLDL_WRITE)) { 4417 NFSUNLOCKCLSTATE(); 4418 return (0); 4419 } 4420 NFSUNLOCKCLSTATE(); 4421 return (1); 4422 } 4423 4424 /* 4425 * Look for an associated delegation that should be DelegReturned. 4426 */ 4427 int 4428 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp) 4429 { 4430 struct nfsclclient *clp; 4431 struct nfscldeleg *dp; 4432 struct nfsclowner *owp; 4433 struct nfscllockowner *lp; 4434 struct nfsmount *nmp; 4435 struct ucred *cred; 4436 struct nfsnode *np; 4437 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4438 4439 nmp = VFSTONFS(vp->v_mount); 4440 np = VTONFS(vp); 4441 NFSLOCKCLSTATE(); 4442 /* 4443 * Loop around waiting for: 4444 * - outstanding I/O operations on delegations to complete 4445 * - for a delegation on vp that has state, lock the client and 4446 * do a recall 4447 * - return delegation with no state 4448 */ 4449 while (1) { 4450 clp = nfscl_findcl(nmp); 4451 if (clp == NULL) { 4452 NFSUNLOCKCLSTATE(); 4453 return (retcnt); 4454 } 4455 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4456 np->n_fhp->nfh_len); 4457 if (dp != NULL) { 4458 /* 4459 * Wait for outstanding I/O ops to be done. 
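* This is done by setting NFSV4LOCK_WANTED on the delegation's rwlock * and sleeping until its use count drops to zero.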
4460 */ 4461 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4462 if (igotlock) { 4463 nfsv4_unlock(&clp->nfsc_lock, 0); 4464 igotlock = 0; 4465 } 4466 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4467 (void) nfsmsleep(&dp->nfsdl_rwlock, 4468 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4469 continue; 4470 } 4471 needsrecall = 0; 4472 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4473 if (!LIST_EMPTY(&owp->nfsow_open)) { 4474 needsrecall = 1; 4475 break; 4476 } 4477 } 4478 if (!needsrecall) { 4479 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4480 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4481 needsrecall = 1; 4482 break; 4483 } 4484 } 4485 } 4486 if (needsrecall && !triedrecall) { 4487 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4488 islept = 0; 4489 while (!igotlock) { 4490 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4491 &islept, NFSCLSTATEMUTEXPTR, NULL); 4492 if (islept) 4493 break; 4494 } 4495 if (islept) 4496 continue; 4497 NFSUNLOCKCLSTATE(); 4498 cred = newnfs_getcred(); 4499 newnfs_copycred(&dp->nfsdl_cred, cred); 4500 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0); 4501 NFSFREECRED(cred); 4502 triedrecall = 1; 4503 NFSLOCKCLSTATE(); 4504 nfsv4_unlock(&clp->nfsc_lock, 0); 4505 igotlock = 0; 4506 continue; 4507 } 4508 *stp = dp->nfsdl_stateid; 4509 retcnt = 1; 4510 nfscl_cleandeleg(dp); 4511 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4512 } 4513 if (igotlock) 4514 nfsv4_unlock(&clp->nfsc_lock, 0); 4515 NFSUNLOCKCLSTATE(); 4516 return (retcnt); 4517 } 4518 } 4519 4520 /* 4521 * Look for associated delegation(s) that should be DelegReturned. 4522 */ 4523 int 4524 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp, 4525 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p) 4526 { 4527 struct nfsclclient *clp; 4528 struct nfscldeleg *dp; 4529 struct nfsclowner *owp; 4530 struct nfscllockowner *lp; 4531 struct nfsmount *nmp; 4532 struct ucred *cred; 4533 struct nfsnode *np; 4534 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; 4535 4536 nmp = VFSTONFS(fvp->v_mount); 4537 *gotfdp = 0; 4538 *gottdp = 0; 4539 NFSLOCKCLSTATE(); 4540 /* 4541 * Loop around waiting for: 4542 * - outstanding I/O operations on delegations to complete 4543 * - for a delegation on fvp that has state, lock the client and 4544 * do a recall 4545 * - return delegation(s) with no state. 4546 */ 4547 while (1) { 4548 clp = nfscl_findcl(nmp); 4549 if (clp == NULL) { 4550 NFSUNLOCKCLSTATE(); 4551 return (retcnt); 4552 } 4553 np = VTONFS(fvp); 4554 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4555 np->n_fhp->nfh_len); 4556 if (dp != NULL && *gotfdp == 0) { 4557 /* 4558 * Wait for outstanding I/O ops to be done. 
4559 */ 4560 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4561 if (igotlock) { 4562 nfsv4_unlock(&clp->nfsc_lock, 0); 4563 igotlock = 0; 4564 } 4565 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4566 (void) nfsmsleep(&dp->nfsdl_rwlock, 4567 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4568 continue; 4569 } 4570 needsrecall = 0; 4571 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4572 if (!LIST_EMPTY(&owp->nfsow_open)) { 4573 needsrecall = 1; 4574 break; 4575 } 4576 } 4577 if (!needsrecall) { 4578 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4579 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4580 needsrecall = 1; 4581 break; 4582 } 4583 } 4584 } 4585 if (needsrecall && !triedrecall) { 4586 dp->nfsdl_flags |= NFSCLDL_DELEGRET; 4587 islept = 0; 4588 while (!igotlock) { 4589 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 4590 &islept, NFSCLSTATEMUTEXPTR, NULL); 4591 if (islept) 4592 break; 4593 } 4594 if (islept) 4595 continue; 4596 NFSUNLOCKCLSTATE(); 4597 cred = newnfs_getcred(); 4598 newnfs_copycred(&dp->nfsdl_cred, cred); 4599 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0); 4600 NFSFREECRED(cred); 4601 triedrecall = 1; 4602 NFSLOCKCLSTATE(); 4603 nfsv4_unlock(&clp->nfsc_lock, 0); 4604 igotlock = 0; 4605 continue; 4606 } 4607 *fstp = dp->nfsdl_stateid; 4608 retcnt++; 4609 *gotfdp = 1; 4610 nfscl_cleandeleg(dp); 4611 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4612 } 4613 if (igotlock) { 4614 nfsv4_unlock(&clp->nfsc_lock, 0); 4615 igotlock = 0; 4616 } 4617 if (tvp != NULL) { 4618 np = VTONFS(tvp); 4619 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, 4620 np->n_fhp->nfh_len); 4621 if (dp != NULL && *gottdp == 0) { 4622 /* 4623 * Wait for outstanding I/O ops to be done. 4624 */ 4625 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 4626 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; 4627 (void) nfsmsleep(&dp->nfsdl_rwlock, 4628 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); 4629 continue; 4630 } 4631 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 4632 if (!LIST_EMPTY(&owp->nfsow_open)) { 4633 NFSUNLOCKCLSTATE(); 4634 return (retcnt); 4635 } 4636 } 4637 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 4638 if (!LIST_EMPTY(&lp->nfsl_lock)) { 4639 NFSUNLOCKCLSTATE(); 4640 return (retcnt); 4641 } 4642 } 4643 *tstp = dp->nfsdl_stateid; 4644 retcnt++; 4645 *gottdp = 1; 4646 nfscl_cleandeleg(dp); 4647 nfscl_freedeleg(&clp->nfsc_deleg, dp); 4648 } 4649 } 4650 NFSUNLOCKCLSTATE(); 4651 return (retcnt); 4652 } 4653 } 4654 4655 /* 4656 * Get a reference on the clientid associated with the mount point. 4657 * Return 1 if success, 0 otherwise. 4658 */ 4659 int 4660 nfscl_getref(struct nfsmount *nmp) 4661 { 4662 struct nfsclclient *clp; 4663 4664 NFSLOCKCLSTATE(); 4665 clp = nfscl_findcl(nmp); 4666 if (clp == NULL) { 4667 NFSUNLOCKCLSTATE(); 4668 return (0); 4669 } 4670 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL); 4671 NFSUNLOCKCLSTATE(); 4672 return (1); 4673 } 4674 4675 /* 4676 * Release a reference on a clientid acquired with the above call. 4677 */ 4678 void 4679 nfscl_relref(struct nfsmount *nmp) 4680 { 4681 struct nfsclclient *clp; 4682 4683 NFSLOCKCLSTATE(); 4684 clp = nfscl_findcl(nmp); 4685 if (clp == NULL) { 4686 NFSUNLOCKCLSTATE(); 4687 return; 4688 } 4689 nfsv4_relref(&clp->nfsc_lock); 4690 NFSUNLOCKCLSTATE(); 4691 } 4692 4693 /* 4694 * Save the size attribute in the delegation, since the nfsnode 4695 * is going away. 
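* The saved size is copied back by nfscl_newnode() below, when a new * nfsnode is allocated for the file.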
4696 */ 4697 void 4698 nfscl_reclaimnode(vnode_t vp) 4699 { 4700 struct nfsclclient *clp; 4701 struct nfscldeleg *dp; 4702 struct nfsnode *np = VTONFS(vp); 4703 struct nfsmount *nmp; 4704 4705 nmp = VFSTONFS(vp->v_mount); 4706 if (!NFSHASNFSV4(nmp)) 4707 return; 4708 NFSLOCKCLSTATE(); 4709 clp = nfscl_findcl(nmp); 4710 if (clp == NULL) { 4711 NFSUNLOCKCLSTATE(); 4712 return; 4713 } 4714 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4715 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 4716 dp->nfsdl_size = np->n_size; 4717 NFSUNLOCKCLSTATE(); 4718 } 4719 4720 /* 4721 * Get the saved size attribute in the delegation, since it is a 4722 * newly allocated nfsnode. 4723 */ 4724 void 4725 nfscl_newnode(vnode_t vp) 4726 { 4727 struct nfsclclient *clp; 4728 struct nfscldeleg *dp; 4729 struct nfsnode *np = VTONFS(vp); 4730 struct nfsmount *nmp; 4731 4732 nmp = VFSTONFS(vp->v_mount); 4733 if (!NFSHASNFSV4(nmp)) 4734 return; 4735 NFSLOCKCLSTATE(); 4736 clp = nfscl_findcl(nmp); 4737 if (clp == NULL) { 4738 NFSUNLOCKCLSTATE(); 4739 return; 4740 } 4741 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4742 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) 4743 np->n_size = dp->nfsdl_size; 4744 NFSUNLOCKCLSTATE(); 4745 } 4746 4747 /* 4748 * If there is a valid write delegation for this file, set the modtime 4749 * to the local clock time. 4750 */ 4751 void 4752 nfscl_delegmodtime(vnode_t vp) 4753 { 4754 struct nfsclclient *clp; 4755 struct nfscldeleg *dp; 4756 struct nfsnode *np = VTONFS(vp); 4757 struct nfsmount *nmp; 4758 4759 nmp = VFSTONFS(vp->v_mount); 4760 if (!NFSHASNFSV4(nmp)) 4761 return; 4762 NFSLOCKCLSTATE(); 4763 clp = nfscl_findcl(nmp); 4764 if (clp == NULL) { 4765 NFSUNLOCKCLSTATE(); 4766 return; 4767 } 4768 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4769 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) { 4770 nanotime(&dp->nfsdl_modtime); 4771 dp->nfsdl_flags |= NFSCLDL_MODTIMESET; 4772 } 4773 NFSUNLOCKCLSTATE(); 4774 } 4775 4776 /* 4777 * If there is a valid write delegation for this file with a modtime set, 4778 * put that modtime in mtime. 4779 */ 4780 void 4781 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime) 4782 { 4783 struct nfsclclient *clp; 4784 struct nfscldeleg *dp; 4785 struct nfsnode *np = VTONFS(vp); 4786 struct nfsmount *nmp; 4787 4788 nmp = VFSTONFS(vp->v_mount); 4789 if (!NFSHASNFSV4(nmp)) 4790 return; 4791 NFSLOCKCLSTATE(); 4792 clp = nfscl_findcl(nmp); 4793 if (clp == NULL) { 4794 NFSUNLOCKCLSTATE(); 4795 return; 4796 } 4797 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 4798 if (dp != NULL && 4799 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) == 4800 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) 4801 *mtime = dp->nfsdl_modtime; 4802 NFSUNLOCKCLSTATE(); 4803 } 4804 4805 static int 4806 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers) 4807 { 4808 short *defaulterrp, *errp; 4809 4810 if (!nd->nd_repstat) 4811 return (0); 4812 if (nd->nd_procnum == NFSPROC_NOOP) 4813 return (txdr_unsigned(nd->nd_repstat & 0xffff)); 4814 if (nd->nd_repstat == EBADRPC) 4815 return (txdr_unsigned(NFSERR_BADXDR)); 4816 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH || 4817 nd->nd_repstat == NFSERR_OPILLEGAL) 4818 return (txdr_unsigned(nd->nd_repstat)); 4819 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 && 4820 minorvers > NFSV4_MINORVERSION) { 4821 /* NFSv4.n error. 
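* Pass the error back to the server unchanged, since errors in this * range are defined for the later minor versions.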
*/ 4822 return (txdr_unsigned(nd->nd_repstat)); 4823 } 4824 if (nd->nd_procnum < NFSV4OP_CBNOPS) 4825 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum]; 4826 else 4827 return (txdr_unsigned(nd->nd_repstat)); 4828 while (*++errp) 4829 if (*errp == (short)nd->nd_repstat) 4830 return (txdr_unsigned(nd->nd_repstat)); 4831 return (txdr_unsigned(*defaulterrp)); 4832 } 4833 4834 /* 4835 * Called to find/add a layout to a client. 4836 * This function returns the layout with a refcnt (shared lock) upon 4837 * success (returns 0) or with no lock/refcnt on the layout when an 4838 * error is returned. 4839 * If a layout is passed in via lypp, it is locked (exclusively locked). 4840 */ 4841 int 4842 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 4843 nfsv4stateid_t *stateidp, int layouttype, int retonclose, 4844 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp, 4845 struct ucred *cred, NFSPROC_T *p) 4846 { 4847 struct nfsclclient *clp; 4848 struct nfscllayout *lyp, *tlyp; 4849 struct nfsclflayout *flp; 4850 struct nfsnode *np = VTONFS(vp); 4851 mount_t mp; 4852 int layout_passed_in; 4853 4854 mp = nmp->nm_mountp; 4855 layout_passed_in = 1; 4856 tlyp = NULL; 4857 lyp = *lypp; 4858 if (lyp == NULL) { 4859 layout_passed_in = 0; 4860 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT, 4861 M_WAITOK | M_ZERO); 4862 } 4863 4864 NFSLOCKCLSTATE(); 4865 clp = nmp->nm_clp; 4866 if (clp == NULL) { 4867 if (layout_passed_in != 0) 4868 nfsv4_unlock(&lyp->nfsly_lock, 0); 4869 NFSUNLOCKCLSTATE(); 4870 if (tlyp != NULL) 4871 free(tlyp, M_NFSLAYOUT); 4872 return (EPERM); 4873 } 4874 if (lyp == NULL) { 4875 /* 4876 * Although no lyp was passed in, another thread might have 4877 * allocated one. If one is found, just increment its ref 4878 * count and return it.
4879 */ 4880 lyp = nfscl_findlayout(clp, fhp, fhlen); 4881 if (lyp == NULL) { 4882 lyp = tlyp; 4883 tlyp = NULL; 4884 lyp->nfsly_stateid.seqid = stateidp->seqid; 4885 lyp->nfsly_stateid.other[0] = stateidp->other[0]; 4886 lyp->nfsly_stateid.other[1] = stateidp->other[1]; 4887 lyp->nfsly_stateid.other[2] = stateidp->other[2]; 4888 lyp->nfsly_lastbyte = 0; 4889 LIST_INIT(&lyp->nfsly_flayread); 4890 LIST_INIT(&lyp->nfsly_flayrw); 4891 LIST_INIT(&lyp->nfsly_recall); 4892 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0]; 4893 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1]; 4894 lyp->nfsly_clp = clp; 4895 if (layouttype == NFSLAYOUT_FLEXFILE) 4896 lyp->nfsly_flags = NFSLY_FLEXFILE; 4897 else 4898 lyp->nfsly_flags = NFSLY_FILES; 4899 if (retonclose != 0) 4900 lyp->nfsly_flags |= NFSLY_RETONCLOSE; 4901 lyp->nfsly_fhlen = fhlen; 4902 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen); 4903 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4904 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp, 4905 nfsly_hash); 4906 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4907 nfscl_layoutcnt++; 4908 } else { 4909 if (retonclose != 0) 4910 lyp->nfsly_flags |= NFSLY_RETONCLOSE; 4911 if (stateidp->seqid > lyp->nfsly_stateid.seqid) 4912 lyp->nfsly_stateid.seqid = stateidp->seqid; 4913 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); 4914 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4915 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4916 } 4917 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 4918 if (NFSCL_FORCEDISM(mp)) { 4919 NFSUNLOCKCLSTATE(); 4920 if (tlyp != NULL) 4921 free(tlyp, M_NFSLAYOUT); 4922 return (EPERM); 4923 } 4924 *lypp = lyp; 4925 } else if (stateidp->seqid > lyp->nfsly_stateid.seqid) 4926 lyp->nfsly_stateid.seqid = stateidp->seqid; 4927 4928 /* Merge the new list of File Layouts into the list. */ 4929 flp = LIST_FIRST(fhlp); 4930 if (flp != NULL) { 4931 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ) 4932 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp); 4933 else 4934 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp); 4935 } 4936 if (layout_passed_in != 0) 4937 nfsv4_unlock(&lyp->nfsly_lock, 1); 4938 NFSUNLOCKCLSTATE(); 4939 if (tlyp != NULL) 4940 free(tlyp, M_NFSLAYOUT); 4941 return (0); 4942 } 4943 4944 /* 4945 * Search for a layout by MDS file handle. 4946 * If one is found, it is returned with a refcnt (shared lock) iff 4947 * *retflpp is returned non-NULL, and exclusively locked iff *retflpp is 4948 * returned NULL.
4949 */ 4950 struct nfscllayout * 4951 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen, 4952 uint64_t off, struct nfsclflayout **retflpp, int *recalledp) 4953 { 4954 struct nfscllayout *lyp; 4955 mount_t mp; 4956 int error, igotlock; 4957 4958 mp = clp->nfsc_nmp->nm_mountp; 4959 *recalledp = 0; 4960 *retflpp = NULL; 4961 NFSLOCKCLSTATE(); 4962 lyp = nfscl_findlayout(clp, fhp, fhlen); 4963 if (lyp != NULL) { 4964 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { 4965 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list); 4966 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list); 4967 lyp->nfsly_timestamp = NFSD_MONOSEC + 120; 4968 error = nfscl_findlayoutforio(lyp, off, 4969 NFSV4OPEN_ACCESSREAD, retflpp); 4970 if (error == 0) 4971 nfsv4_getref(&lyp->nfsly_lock, NULL, 4972 NFSCLSTATEMUTEXPTR, mp); 4973 else { 4974 do { 4975 igotlock = nfsv4_lock(&lyp->nfsly_lock, 4976 1, NULL, NFSCLSTATEMUTEXPTR, mp); 4977 } while (igotlock == 0 && !NFSCL_FORCEDISM(mp)); 4978 *retflpp = NULL; 4979 } 4980 if (NFSCL_FORCEDISM(mp)) { 4981 lyp = NULL; 4982 *recalledp = 1; 4983 } 4984 } else { 4985 lyp = NULL; 4986 *recalledp = 1; 4987 } 4988 } 4989 NFSUNLOCKCLSTATE(); 4990 return (lyp); 4991 } 4992 4993 /* 4994 * Search for a layout by MDS file handle. If one is found, mark it to be 4995 * recalled, if it is already marked "return on close". 4996 */ 4997 static void 4998 nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp, 4999 int fhlen, struct nfsclrecalllayout **recallpp) 5000 { 5001 struct nfscllayout *lyp; 5002 uint32_t iomode; 5003 5004 if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) || 5005 nfscl_enablecallb == 0 || nfs_numnfscbd == 0 || 5006 (VTONFS(vp)->n_flag & NNOLAYOUT) != 0) 5007 return; 5008 lyp = nfscl_findlayout(clp, fhp, fhlen); 5009 if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE | 5010 NFSLY_RECALL)) == NFSLY_RETONCLOSE) { 5011 iomode = 0; 5012 if (!LIST_EMPTY(&lyp->nfsly_flayread)) 5013 iomode |= NFSLAYOUTIOMODE_READ; 5014 if (!LIST_EMPTY(&lyp->nfsly_flayrw)) 5015 iomode |= NFSLAYOUTIOMODE_RW; 5016 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode, 5017 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL, 5018 *recallpp); 5019 NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode); 5020 *recallpp = NULL; 5021 } 5022 } 5023 5024 /* 5025 * Mark the layout to be recalled, recording the error. 5026 * Also, disable the dsp from further use. 5027 */ 5028 void 5029 nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp, 5030 struct nfscllayout *lyp, struct nfsclds *dsp) 5031 { 5032 struct nfsclrecalllayout *recallp; 5033 uint32_t iomode; 5034 5035 printf("DS being disabled, error=%d\n", stat); 5036 /* Set up the return of the layout. */ 5037 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); 5038 iomode = 0; 5039 NFSLOCKCLSTATE(); 5040 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) { 5041 if (!LIST_EMPTY(&lyp->nfsly_flayread)) 5042 iomode |= NFSLAYOUTIOMODE_READ; 5043 if (!LIST_EMPTY(&lyp->nfsly_flayrw)) 5044 iomode |= NFSLAYOUTIOMODE_RW; 5045 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode, 5046 0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op, 5047 dp->nfsdi_deviceid, recallp); 5048 NFSUNLOCKCLSTATE(); 5049 NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode); 5050 } else { 5051 NFSUNLOCKCLSTATE(); 5052 free(recallp, M_NFSLAYRECALL); 5053 } 5054 5055 /* And shut the TCP connection down. */ 5056 nfscl_cancelreqs(dsp); 5057 } 5058 5059 /* 5060 * Cancel all RPCs for this "dsp" by closing the connection.
5061 * Also, mark the session as defunct. 5062 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and 5063 * cannot be shut down. 5064 */ 5065 void 5066 nfscl_cancelreqs(struct nfsclds *dsp) 5067 { 5068 struct __rpc_client *cl; 5069 static int non_event; 5070 5071 NFSLOCKDS(dsp); 5072 if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 && 5073 dsp->nfsclds_sockp != NULL && 5074 dsp->nfsclds_sockp->nr_client != NULL) { 5075 dsp->nfsclds_flags |= NFSCLDS_CLOSED; 5076 cl = dsp->nfsclds_sockp->nr_client; 5077 dsp->nfsclds_sess.nfsess_defunct = 1; 5078 NFSUNLOCKDS(dsp); 5079 CLNT_CLOSE(cl); 5080 /* 5081 * This 1sec sleep is done to reduce the number of reconnect 5082 * attempts made on the DS while it has failed. 5083 */ 5084 tsleep(&non_event, PVFS, "ndscls", hz); 5085 return; 5086 } 5087 NFSUNLOCKDS(dsp); 5088 } 5089 5090 /* 5091 * Dereference a layout. 5092 */ 5093 void 5094 nfscl_rellayout(struct nfscllayout *lyp, int exclocked) 5095 { 5096 5097 NFSLOCKCLSTATE(); 5098 if (exclocked != 0) 5099 nfsv4_unlock(&lyp->nfsly_lock, 0); 5100 else 5101 nfsv4_relref(&lyp->nfsly_lock); 5102 NFSUNLOCKCLSTATE(); 5103 } 5104 5105 /* 5106 * Search for a devinfo by deviceid. If one is found, return it after 5107 * acquiring a reference count on it. 5108 */ 5109 struct nfscldevinfo * 5110 nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid, 5111 struct nfscldevinfo *dip) 5112 { 5113 5114 NFSLOCKCLSTATE(); 5115 if (dip == NULL) 5116 dip = nfscl_finddevinfo(clp, deviceid); 5117 if (dip != NULL) 5118 dip->nfsdi_refcnt++; 5119 NFSUNLOCKCLSTATE(); 5120 return (dip); 5121 } 5122 5123 /* 5124 * Dereference a devinfo structure. 5125 */ 5126 static void 5127 nfscl_reldevinfo_locked(struct nfscldevinfo *dip) 5128 { 5129 5130 dip->nfsdi_refcnt--; 5131 if (dip->nfsdi_refcnt == 0) 5132 wakeup(&dip->nfsdi_refcnt); 5133 } 5134 5135 /* 5136 * Dereference a devinfo structure. 5137 */ 5138 void 5139 nfscl_reldevinfo(struct nfscldevinfo *dip) 5140 { 5141 5142 NFSLOCKCLSTATE(); 5143 nfscl_reldevinfo_locked(dip); 5144 NFSUNLOCKCLSTATE(); 5145 } 5146 5147 /* 5148 * Find a layout for this file handle. Return NULL upon failure. 5149 */ 5150 static struct nfscllayout * 5151 nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) 5152 { 5153 struct nfscllayout *lyp; 5154 5155 LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash) 5156 if (lyp->nfsly_fhlen == fhlen && 5157 !NFSBCMP(lyp->nfsly_fh, fhp, fhlen)) 5158 break; 5159 return (lyp); 5160 } 5161 5162 /* 5163 * Find a devinfo for this deviceid. Return NULL upon failure. 5164 */ 5165 static struct nfscldevinfo * 5166 nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid) 5167 { 5168 struct nfscldevinfo *dip; 5169 5170 LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list) 5171 if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID) 5172 == 0) 5173 break; 5174 return (dip); 5175 } 5176 5177 /* 5178 * Merge the new file layout list into the main one, maintaining it in 5179 * increasing offset order. 
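* For example, merging new entries at offsets { 0, 8192 } into a list * holding { 4096 } yields { 0, 4096, 8192 }.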
5180 */ 5181 static void 5182 nfscl_mergeflayouts(struct nfsclflayouthead *fhlp, 5183 struct nfsclflayouthead *newfhlp) 5184 { 5185 struct nfsclflayout *flp, *nflp, *prevflp, *tflp; 5186 5187 flp = LIST_FIRST(fhlp); 5188 prevflp = NULL; 5189 LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) { 5190 while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) { 5191 prevflp = flp; 5192 flp = LIST_NEXT(flp, nfsfl_list); 5193 } 5194 if (prevflp == NULL) 5195 LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list); 5196 else 5197 LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list); 5198 prevflp = nflp; 5199 } 5200 } 5201 5202 /* 5203 * Add this nfscldevinfo to the client, if it doesn't already exist. 5204 * This function consumes the structure pointed at by dip, if not NULL. 5205 */ 5206 int 5207 nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind, 5208 struct nfsclflayout *flp) 5209 { 5210 struct nfsclclient *clp; 5211 struct nfscldevinfo *tdip; 5212 uint8_t *dev; 5213 5214 NFSLOCKCLSTATE(); 5215 clp = nmp->nm_clp; 5216 if (clp == NULL) { 5217 NFSUNLOCKCLSTATE(); 5218 if (dip != NULL) 5219 free(dip, M_NFSDEVINFO); 5220 return (ENODEV); 5221 } 5222 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) 5223 dev = flp->nfsfl_dev; 5224 else 5225 dev = flp->nfsfl_ffm[ind].dev; 5226 tdip = nfscl_finddevinfo(clp, dev); 5227 if (tdip != NULL) { 5228 tdip->nfsdi_layoutrefs++; 5229 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) 5230 flp->nfsfl_devp = tdip; 5231 else 5232 flp->nfsfl_ffm[ind].devp = tdip; 5233 nfscl_reldevinfo_locked(tdip); 5234 NFSUNLOCKCLSTATE(); 5235 if (dip != NULL) 5236 free(dip, M_NFSDEVINFO); 5237 return (0); 5238 } 5239 if (dip != NULL) { 5240 LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list); 5241 dip->nfsdi_layoutrefs = 1; 5242 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) 5243 flp->nfsfl_devp = dip; 5244 else 5245 flp->nfsfl_ffm[ind].devp = dip; 5246 } 5247 NFSUNLOCKCLSTATE(); 5248 if (dip == NULL) 5249 return (ENODEV); 5250 return (0); 5251 } 5252 5253 /* 5254 * Free up a layout structure and associated file layout structure(s). 5255 */ 5256 void 5257 nfscl_freelayout(struct nfscllayout *layp) 5258 { 5259 struct nfsclflayout *flp, *nflp; 5260 struct nfsclrecalllayout *rp, *nrp; 5261 5262 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) { 5263 LIST_REMOVE(flp, nfsfl_list); 5264 nfscl_freeflayout(flp); 5265 } 5266 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) { 5267 LIST_REMOVE(flp, nfsfl_list); 5268 nfscl_freeflayout(flp); 5269 } 5270 LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) { 5271 LIST_REMOVE(rp, nfsrecly_list); 5272 free(rp, M_NFSLAYRECALL); 5273 } 5274 nfscl_layoutcnt--; 5275 free(layp, M_NFSLAYOUT); 5276 } 5277 5278 /* 5279 * Free up a file layout structure. 5280 */ 5281 void 5282 nfscl_freeflayout(struct nfsclflayout *flp) 5283 { 5284 int i, j; 5285 5286 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) { 5287 for (i = 0; i < flp->nfsfl_fhcnt; i++) 5288 free(flp->nfsfl_fh[i], M_NFSFH); 5289 if (flp->nfsfl_devp != NULL) 5290 flp->nfsfl_devp->nfsdi_layoutrefs--; 5291 } 5292 if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0) 5293 for (i = 0; i < flp->nfsfl_mirrorcnt; i++) { 5294 for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++) 5295 free(flp->nfsfl_ffm[i].fh[j], M_NFSFH); 5296 if (flp->nfsfl_ffm[i].devp != NULL) 5297 flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--; 5298 } 5299 free(flp, M_NFSFLAYOUT); 5300 } 5301 5302 /* 5303 * Free up a file layout devinfo structure. 
5304 */ 5305 void 5306 nfscl_freedevinfo(struct nfscldevinfo *dip) 5307 { 5308 5309 free(dip, M_NFSDEVINFO); 5310 } 5311 5312 /* 5313 * Mark any layouts that match as recalled. 5314 */ 5315 static int 5316 nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode, 5317 uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op, 5318 char *devid, struct nfsclrecalllayout *recallp) 5319 { 5320 struct nfsclrecalllayout *rp, *orp; 5321 5322 recallp->nfsrecly_recalltype = recalltype; 5323 recallp->nfsrecly_iomode = iomode; 5324 recallp->nfsrecly_stateseqid = stateseqid; 5325 recallp->nfsrecly_off = off; 5326 recallp->nfsrecly_len = len; 5327 recallp->nfsrecly_stat = stat; 5328 recallp->nfsrecly_op = op; 5329 if (devid != NULL) 5330 NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID); 5331 /* 5332 * Order the list as file returns first, followed by fsid and any 5333 * returns, both in increasing stateseqid order. 5334 * Note that the seqids wrap around, so 1 is after 0xffffffff. 5335 * (I'm not sure this is correct because I find RFC5661 confusing 5336 * on this, but hopefully it will work ok.) 5337 */ 5338 orp = NULL; 5339 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5340 orp = rp; 5341 if ((recalltype == NFSLAYOUTRETURN_FILE && 5342 (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE || 5343 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) || 5344 (recalltype != NFSLAYOUTRETURN_FILE && 5345 rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE && 5346 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) { 5347 LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list); 5348 break; 5349 } 5350 5351 /* 5352 * Put any error return on all the file returns that will 5353 * preceed this one. 5354 */ 5355 if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE && 5356 stat != 0 && rp->nfsrecly_stat == 0) { 5357 rp->nfsrecly_stat = stat; 5358 rp->nfsrecly_op = op; 5359 if (devid != NULL) 5360 NFSBCOPY(devid, rp->nfsrecly_devid, 5361 NFSX_V4DEVICEID); 5362 } 5363 } 5364 if (rp == NULL) { 5365 if (orp == NULL) 5366 LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp, 5367 nfsrecly_list); 5368 else 5369 LIST_INSERT_AFTER(orp, recallp, nfsrecly_list); 5370 } 5371 lyp->nfsly_flags |= NFSLY_RECALL; 5372 wakeup(lyp->nfsly_clp); 5373 return (0); 5374 } 5375 5376 /* 5377 * Compare the two seqids for ordering. The trick is that the seqids can 5378 * wrap around from 0xffffffff->0, so check for the cases where one 5379 * has wrapped around. 5380 * Return 1 if seqid1 comes before seqid2, 0 otherwise. 5381 */ 5382 static int 5383 nfscl_seq(uint32_t seqid1, uint32_t seqid2) 5384 { 5385 5386 if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff) 5387 /* seqid2 has wrapped around. */ 5388 return (0); 5389 if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff) 5390 /* seqid1 has wrapped around. */ 5391 return (1); 5392 if (seqid1 <= seqid2) 5393 return (1); 5394 return (0); 5395 } 5396 5397 /* 5398 * Do a layout return for each of the recalls. 
5399 */ 5400 static void 5401 nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp, 5402 struct ucred *cred, NFSPROC_T *p) 5403 { 5404 struct nfsclrecalllayout *rp; 5405 nfsv4stateid_t stateid; 5406 int layouttype; 5407 5408 NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER); 5409 stateid.seqid = lyp->nfsly_stateid.seqid; 5410 if ((lyp->nfsly_flags & NFSLY_FILES) != 0) 5411 layouttype = NFSLAYOUT_NFSV4_1_FILES; 5412 else 5413 layouttype = NFSLAYOUT_FLEXFILE; 5414 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) { 5415 (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh, 5416 lyp->nfsly_fhlen, 0, layouttype, 5417 rp->nfsrecly_iomode, rp->nfsrecly_recalltype, 5418 rp->nfsrecly_off, rp->nfsrecly_len, 5419 &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op, 5420 rp->nfsrecly_devid); 5421 } 5422 } 5423 5424 /* 5425 * Do the layout commit for a file layout. 5426 */ 5427 static void 5428 nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp, 5429 struct ucred *cred, NFSPROC_T *p) 5430 { 5431 struct nfsclflayout *flp; 5432 uint64_t len; 5433 int error, layouttype; 5434 5435 if ((lyp->nfsly_flags & NFSLY_FILES) != 0) 5436 layouttype = NFSLAYOUT_NFSV4_1_FILES; 5437 else 5438 layouttype = NFSLAYOUT_FLEXFILE; 5439 LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) { 5440 if (layouttype == NFSLAYOUT_FLEXFILE && 5441 (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) { 5442 NFSCL_DEBUG(4, "Flex file: no layoutcommit\n"); 5443 /* If not supported, don't bother doing it. */ 5444 NFSLOCKMNT(nmp); 5445 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; 5446 NFSUNLOCKMNT(nmp); 5447 break; 5448 } else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) { 5449 len = flp->nfsfl_end - flp->nfsfl_off; 5450 error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh, 5451 lyp->nfsly_fhlen, 0, flp->nfsfl_off, len, 5452 lyp->nfsly_lastbyte, &lyp->nfsly_stateid, 5453 layouttype, cred, p, NULL); 5454 NFSCL_DEBUG(4, "layoutcommit err=%d\n", error); 5455 if (error == NFSERR_NOTSUPP) { 5456 /* If not supported, don't bother doing it. */ 5457 NFSLOCKMNT(nmp); 5458 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT; 5459 NFSUNLOCKMNT(nmp); 5460 break; 5461 } 5462 } 5463 } 5464 } 5465 5466 /* 5467 * Commit all layouts for a file (vnode). 5468 */ 5469 int 5470 nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p) 5471 { 5472 struct nfsclclient *clp; 5473 struct nfscllayout *lyp; 5474 struct nfsnode *np = VTONFS(vp); 5475 mount_t mp; 5476 struct nfsmount *nmp; 5477 5478 mp = vp->v_mount; 5479 nmp = VFSTONFS(mp); 5480 if (NFSHASNOLAYOUTCOMMIT(nmp)) 5481 return (0); 5482 NFSLOCKCLSTATE(); 5483 clp = nmp->nm_clp; 5484 if (clp == NULL) { 5485 NFSUNLOCKCLSTATE(); 5486 return (EPERM); 5487 } 5488 lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 5489 if (lyp == NULL) { 5490 NFSUNLOCKCLSTATE(); 5491 return (EPERM); 5492 } 5493 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp); 5494 if (NFSCL_FORCEDISM(mp)) { 5495 NFSUNLOCKCLSTATE(); 5496 return (EPERM); 5497 } 5498 tryagain: 5499 if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 5500 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 5501 NFSUNLOCKCLSTATE(); 5502 NFSCL_DEBUG(4, "do layoutcommit2\n"); 5503 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p); 5504 NFSLOCKCLSTATE(); 5505 goto tryagain; 5506 } 5507 nfsv4_relref(&lyp->nfsly_lock); 5508 NFSUNLOCKCLSTATE(); 5509 return (0); 5510 } 5511