/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
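/*
 * A minimal sketch of the pid-to-owner mapping described above, under
 * the assumption that packing the Posix pid into a fixed-size opaque
 * byte string is all that is needed; the real mapping is done by
 * nfscl_filllockowner() and may include more than the pid. With such
 * a mapping, comparing two owner names with NFSBCMP() is equivalent to
 * comparing pids. Example only, so it is not compiled in.
 */
#if 0
static void
example_fillowner(pid_t pid, u_int8_t own[NFSV4CL_LOCKNAMELEN])
{

	memset(own, 0, NFSV4CL_LOCKNAMELEN);
	memcpy(own, &pid, sizeof(pid));	/* the pid identifies the owner */
}
#endif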

#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
#endif	/* !APPLEKEXT */

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
APPLESTATIC int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
		nop = malloc(sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	if (ret != 0) {
		free(nowp, M_NFSCLOWNER);
		if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	if (dp != NULL) {
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &dp->nfsdl_owner;
	} else {
		/* For NFSv4.1 and this option, use a single open_owner. */
		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &clp->nfsc_owner;
	}
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    cred, newonep);

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock.  The close operations still use an
	 * exclusive lock for this case.
	 */
	if (lockit != 0) {
		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) {
			/*
			 * Get a shared lock on the OpenOwner, but first
			 * wait for any pending exclusive lock, so that the
			 * exclusive locker gets priority.
			 */
			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
			nfsv4_getref(&owp->nfsow_rwlock, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} else
			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	}
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	return (0);
}
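
/*
 * A small sketch of the mode upgrade test used at the end of
 * nfscl_open() above (hypothetical helper name): if the requested
 * access mode includes a bit the existing open lacks, the cached mode
 * is widened and NFSCLOPEN_DOOPEN tells the caller that an Open RPC
 * against the server is required. Example only, not compiled in.
 */
#if 0
static int
example_needs_doopen(u_int32_t amode, u_int32_t curmode)
{

	/* e.g. amode == READ|WRITE against curmode == READ -> upgrade */
	return ((amode & ~curmode) != 0);
}
#endif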

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
			} else {
				nfsstatsv1.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}
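
/*
 * Both nfscl_open() above and nfscl_getbytelock() below use the same
 * allocate-before-lock pattern: structures that might be needed are
 * malloc()'d M_WAITOK before NFSLOCKCLSTATE() is acquired, handed off
 * under the mutex if no matching entry is found and freed afterwards
 * if unused, so the state mutex is never held across a sleep in
 * malloc(). A generic sketch of the pattern, with hypothetical
 * "struct example" types; example only, not compiled in.
 */
#if 0
struct example {
	LIST_ENTRY(example)	ex_list;
	int			ex_key;
};
LIST_HEAD(example_head, example);

static struct example *
example_find_or_insert(struct example_head *hd, int key)
{
	struct example *ep, *nep;

	nep = malloc(sizeof(*nep), M_TEMP, M_WAITOK);	/* may sleep here */
	NFSLOCKCLSTATE();
	LIST_FOREACH(ep, hd, ex_list) {
		if (ep->ex_key == key)
			break;
	}
	if (ep == NULL) {
		nep->ex_key = key;
		LIST_INSERT_HEAD(hd, nep, ex_list);	/* hand it off */
		ep = nep;
		nep = NULL;
	}
	NFSUNLOCKCLSTATE();
	if (nep != NULL)
		free(nep, M_TEMP);	/* an entry existed, discard */
	return (ep);
}
#endif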

/*
 * Called to find/add a delegation to a client.
 */
APPLESTATIC int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		free(dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * Delegation already exists, what do we do if a new one??
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			free(dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}
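
/*
 * A one-line sketch of the "does this delegation cover the access?"
 * test applied by both nfscl_open() and nfscl_getstateid(): a write
 * delegation covers any access, while a read delegation only covers
 * opens that do not request write access. Hypothetical helper;
 * example only, not compiled in.
 */
#if 0
static int
example_deleg_covers(u_int32_t dflags, u_int32_t amode)
{

	return (!(amode & NFSV4OPEN_ACCESSWRITE) ||
	    (dflags & NFSCLDL_WRITE) != 0);
}
#endif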

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
APPLESTATIC int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL, *top;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error, done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	nmp = VFSTONFS(vnode_mount(vp));
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
		    mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			stateidp->seqid =
			    lp->nfsl_stateid.seqid;
			stateidp->other[0] =
			    lp->nfsl_stateid.other[0];
			stateidp->other[1] =
			    lp->nfsl_stateid.other[1];
			stateidp->other[2] =
			    lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		top = NULL;
		done = 0;
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (!done && owp != NULL) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
					if (top == NULL && (op->nfso_mode &
					    NFSV4OPEN_ACCESSWRITE) != 0 &&
					    (mode & NFSV4OPEN_ACCESSREAD) != 0)
						top = op;
					if ((mode & op->nfso_mode) == mode) {
						done = 1;
						break;
					}
				}
			}
			if (!done)
				owp = LIST_NEXT(owp, nfsow_list);
		}
		if (!done) {
			NFSCL_DEBUG(2, "openmode top=%p\n", top);
			if (top == NULL || NFSHASOPENMODE(nmp)) {
				NFSUNLOCKCLSTATE();
				return (ENOENT);
			} else
				op = top;
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}
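
/*
 * To summarize the precedence implemented by nfscl_getstateid() above,
 * the stateid chosen for an I/O operation is, in order of preference
 * (condensed pseudo-code; example only, not compiled in):
 */
#if 0
	if (covering_delegation_found)
		*stateidp = dp->nfsdl_stateid;		/* 1: delegation */
	else if (lp != NULL && fords == 0)
		*stateidp = lp->nfsl_stateid;		/* 2: lock stateid */
	else if (op != NULL)
		*stateidp = op->nfso_stateid;		/* 3: open stateid */
	else
		return (ENOENT);	/* special all-zeros stateid remains */
#endif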

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
    struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfscllockowner *lp;
	int keep_looping;

	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = 1;
	/* Search the client list */
	owp = LIST_FIRST(ohp);
	while (owp != NULL && keep_looping != 0) {
		/* and look for the correct open */
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL && keep_looping != 0) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode) {
				if (lpp != NULL) {
					/* Now look for a matching lockowner. */
					LIST_FOREACH(lp, &op->nfso_lock,
					    nfsl_list) {
						if (!NFSBCMP(lp->nfsl_owner,
						    lockown,
						    NFSV4CL_LOCKNAMELEN)) {
							*lpp = lp;
							rop = op;
							keep_looping = 0;
							break;
						}
					}
				}
				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
				    openown, NFSV4CL_LOCKNAMELEN)) {
					rop = op;
					if (lpp == NULL)
						keep_looping = 0;
				}
				if (rop2 == NULL)
					rop2 = op;
			}
			op = LIST_NEXT(op, nfso_list);
		}
		owp = LIST_NEXT(owp, nfsow_list);
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
APPLESTATIC void
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (unlocked == 0) {
		if (NFSHASONEOPENOWN(nmp))
			nfsv4_relref(&owp->nfsow_rwlock);
		else
			nfscl_lockunlock(&owp->nfsow_rwlock);
	}
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
APPLESTATIC void
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
    int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
	else
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * The start_renewthread argument tells nfscl_getcl() to start a renew
 * thread if this creates a new clp.
 * It always returns *clpp with a reference count on it, unless an error
 * is returned.
 */
APPLESTATIC int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    int start_renewthread, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		newclp = malloc(
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		if (start_renewthread != 0)
			nfscl_start_renewthread(clp);
	} else {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0) {
		/*
		 * Call nfsv4_lock() with "iwantlock == 0" so that it will
		 * wait for a pending exclusive lock request. This gives the
		 * exclusive lock request priority over this shared lock
		 * request.
		 * An exclusive lock on nfsc_lock is used mainly for server
		 * crash recoveries.
		 */
		nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	}
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}
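
/*
 * The shared acquisition of nfsc_lock above (and of nfsow_rwlock in
 * nfscl_open()) always pairs nfsv4_lock() with iwantlock == 0 and
 * nfsv4_getref(), so a thread waiting for the exclusive lock (mainly
 * server crash recovery) is not starved by a stream of shared holders.
 * A condensed sketch of the two acquisition styles, mirroring the
 * calls used in this file; example only, not compiled in.
 */
#if 0
	/* Shared: wait out any exclusive waiter, then take a reference. */
	nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	/* ... use the state ... */
	nfsv4_relref(&clp->nfsc_lock);

	/* Exclusive: loop until the lock is granted. */
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	} while (!igotlock);
	/* ... modify the state ... */
	nfsv4_unlock(&clp->nfsc_lock, 0);
#endif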

/*
 * Get a reference to a clientid and return it, if valid.
 */
APPLESTATIC struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
APPLESTATIC void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Called when wanting to lock a byte region.
 */
APPLESTATIC int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlp = malloc(
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	otherlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	}
	if (error) {
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
			nfscl_filllockowner(NULL, openown, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(&clp->nfsc_owner,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		free(nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (otherlop)
		free(otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
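
/*
 * Byte ranges are carried in struct nfscllock as the half-open
 * interval [nfslo_first, nfslo_end), with NFS64BITSSET in nfslo_end
 * meaning "through EOF". A sketch of the conversion from the (off,
 * len) arguments done by nfscl_getbytelock() above and
 * nfscl_relbytelock() below; example only, not compiled in.
 */
#if 0
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET)
		nlop->nfslo_end = NFS64BITSSET;	/* lock through EOF */
	else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;	/* zero length or wrap */
	}
#endif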

/*
 * Called to unlock a byte range, for LockU.
 */
APPLESTATIC int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		other_lop = malloc(
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						fnd = 1;
						break;
					}
				}
				if (fnd)
					break;
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (other_lop)
		free(other_lop, M_NFSCLLOCK);
	return (0);
}

/*
 * Release all lockowners marked in progress for this process and file.
 */
APPLESTATIC void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(&lp->nfsl_rwlock);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
APPLESTATIC int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (lp != NULL) {
					LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
						if (lop->nfslo_first >= end)
							break;
						if (lop->nfslo_end <= off)
							continue;
						if (lop->nfslo_type == F_WRLCK) {
							nfscl_clrelease(clp);
							NFSUNLOCKCLSTATE();
							return (1);
						}
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}
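
/*
 * The scans in nfscl_checkwritelocked() above rely on each lockowner's
 * lock list being kept sorted by nfslo_first (see nfscl_insertlock()),
 * so overlap with the half-open query range [off, end) can be decided
 * per entry and the scan stopped early. A sketch of that interval
 * test, under the sorted-list assumption; example only, not compiled
 * in.
 */
#if 0
	LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
		if (lop->nfslo_first >= end)
			break;		/* sorted, so nothing later overlaps */
		if (lop->nfslo_end <= off)
			continue;	/* entirely before the query range */
		/* here [lop->nfslo_first, lop->nfslo_end) overlaps */
	}
#endif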

/*
 * Release a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Free up an open structure and any associated byte range lock structures.
 */
APPLESTATIC void
nfscl_freeopen(struct nfsclopen *op, int local)
{

	LIST_REMOVE(op, nfso_list);
	nfscl_freealllocks(&op->nfso_lock, local);
	free(op, M_NFSCLOPEN);
	if (local)
		nfsstatsv1.cllocalopens--;
	else
		nfsstatsv1.clopens--;
}

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
{
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
			panic("nfscllckw");
		nfscl_freelockowner(lp, local);
	}
}

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {
			mustdelete = 1;
			break;
		}
	}

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		dp = NULL;
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
		if (error) {
			mustdelete = 1;
			if (dp != NULL) {
				free(dp, M_NFSCLDELEG);
				dp = NULL;
			}
		}
		if (dp != NULL)
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);
	}

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
	if (mustdelete)
		nfscl_freeopen(op, 0);
	return (mustdelete);
}

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{

	LIST_REMOVE(owp, nfsow_list);
	free(owp, M_NFSCLOWNER);
	if (local)
		nfsstatsv1.cllocalopenowners--;
	else
		nfsstatsv1.clopenowners--;
}

/*
 * Free up a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
	struct nfscllock *lop, *nlop;

	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	}
	free(lp, M_NFSCLLOCKOWNER);
	if (local)
		nfsstatsv1.cllocallockowners--;
	else
		nfsstatsv1.cllockowners--;
}

/*
 * Free up a byte range lock structure.
 */
APPLESTATIC void
nfscl_freelock(struct nfscllock *lop, int local)
{

	LIST_REMOVE(lop, nfslo_list);
	free(lop, M_NFSCLLOCK);
	if (local)
		nfsstatsv1.cllocallocks--;
	else
		nfsstatsv1.cllocks--;
}

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1);
		}
		nfscl_freeopenowner(owp, 1);
	}
	nfscl_freealllocks(&dp->nfsdl_lock, 1);
}

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
{

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	free(dp, M_NFSCLDELEG);
	nfsstatsv1.cldelegates--;
	nfscl_delegcnt--;
}

/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;

	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);

	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
		nfscl_freedevinfo(dip);

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		owp = LIST_FIRST(&dp->nfsdl_owner);
		while (owp != NULL) {
			nowp = LIST_NEXT(owp, nfsow_list);
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfsclexp");
				LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
					if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (towp != NULL) {
					/* Merge opens in */
					LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
						if (top->nfso_fhlen == op->nfso_fhlen &&
						    !NFSBCMP(top->nfso_fh, op->nfso_fh,
						    op->nfso_fhlen)) {
							top->nfso_mode |= op->nfso_mode;
							top->nfso_opencnt += op->nfso_opencnt;
							break;
						}
					}
					if (top == NULL) {
						/* Just add the open to the owner list */
						LIST_REMOVE(op, nfso_list);
						op->nfso_own = towp;
						LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
						nfsstatsv1.cllocalopens--;
						nfsstatsv1.clopens++;
					}
				} else {
					/* Just add the openowner to the client list */
					LIST_REMOVE(owp, nfsow_list);
					owp->nfsow_clp = clp;
					LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
					nfsstatsv1.cllocalopenowners--;
					nfsstatsv1.clopenowners++;
					nfsstatsv1.cllocalopens--;
					nfsstatsv1.clopens++;
				}
			}
			owp = nowp;
		}
		if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
			printed = 1;
			printf("nfsv4 expired locks lost\n");
		}
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp);
		dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
		panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}

/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
		}
		owp = nowp;
	}
}

/*
 * Find open/lock owners for processes that have exited.
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/*
	 * All the pidhash locks must be acquired, since they are sx locks
	 * and must be acquired before the mutexes. The pid(s) that will
	 * be used aren't known yet, so all the locks need to be acquired.
	 * Fortunately, this function is only performed once/sec.
	 */
	pidhash_slockall();
	NFSLOCKCLSTATE();
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
			}
		}
		if (nfscl_procdoesntexist(owp->nfsow_owner))
			nfscl_cleanup_common(clp, owp->nfsow_owner);
	}

	/*
	 * For the single open_owner case, these lock owners need to be
	 * checked to see if they still exist separately.
	 * This is because nfscl_procdoesntexist() never returns true for
	 * the single open_owner so that the above doesn't ever call
	 * nfscl_cleanup_common().
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (nfscl_procdoesntexist(lp->nfsl_owner))
				nfscl_cleanup_common(clp, lp->nfsl_owner);
		}
	}
	NFSUNLOCKCLSTATE();
	pidhash_sunlockall();
}

/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
static void
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
{
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;
	int fnd_it;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)
		return;

	fnd_it = 0;
	mylfhp = NULL;
	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
		    lfhp->nfslfh_len))
			mylfhp = lfhp;
		LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
			if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
			    NFSV4CL_LOCKNAMELEN))
				fnd_it = 1;
	}
	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
		return;

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		    M_NOWAIT);
		if (mylfhp == NULL)
			return;
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	}
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
}

static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
1945 */
1946 LIST_REMOVE(clp, nfsc_list);
1947 nfscl_delegreturnall(clp, p);
1948 cred = newnfs_getcred();
1949 if (NFSHASNFSV4N(nmp)) {
1950 (void)nfsrpc_destroysession(nmp, clp, cred, p);
1951 (void)nfsrpc_destroyclient(nmp, clp, cred, p);
1952 } else
1953 (void)nfsrpc_setclient(nmp, clp, 0, cred, p);
1954 nfscl_cleanclient(clp);
1955 nmp->nm_clp = NULL;
1956 NFSFREECRED(cred);
1957 free(clp, M_NFSCLCLIENT);
1958 } else
1959 NFSUNLOCKCLSTATE();
1960 }
1961 
1962 /*
1963 * This function is called when a server replies with NFSERR_STALECLIENTID,
1964 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
1965 * doing Opens and Locks with reclaim. If these fail, it deletes the
1966 * corresponding state.
1967 */
1968 static void
1969 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1970 {
1971 struct nfsclowner *owp, *nowp;
1972 struct nfsclopen *op, *nop;
1973 struct nfscllockowner *lp, *nlp;
1974 struct nfscllock *lop, *nlop;
1975 struct nfscldeleg *dp, *ndp, *tdp;
1976 struct nfsmount *nmp;
1977 struct ucred *tcred;
1978 struct nfsclopenhead extra_open;
1979 struct nfscldeleghead extra_deleg;
1980 struct nfsreq *rep;
1981 u_int64_t len;
1982 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1983 int i, igotlock = 0, error, trycnt, firstlock;
1984 struct nfscllayout *lyp, *nlyp;
1985 
1986 /*
1987 * First, lock the client structure, so everyone else will
1988 * block when trying to use state.
1989 */
1990 NFSLOCKCLSTATE();
1991 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
1992 do {
1993 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1994 NFSCLSTATEMUTEXPTR, NULL);
1995 } while (!igotlock);
1996 NFSUNLOCKCLSTATE();
1997 
1998 nmp = clp->nfsc_nmp;
1999 if (nmp == NULL)
2000 panic("nfscl recover");
2001 
2002 /*
2003 * For now, just get rid of all layouts. There may be a need
2004 * to do LayoutCommit Ops with reclaim == true later.
2005 */
2006 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
2007 nfscl_freelayout(lyp);
2008 TAILQ_INIT(&clp->nfsc_layout);
2009 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
2010 LIST_INIT(&clp->nfsc_layouthash[i]);
2011 
2012 trycnt = 5;
2013 do {
2014 error = nfsrpc_setclient(nmp, clp, 1, cred, p);
2015 } while ((error == NFSERR_STALECLIENTID ||
2016 error == NFSERR_BADSESSION ||
2017 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2018 if (error) {
2019 NFSLOCKCLSTATE();
2020 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
2021 NFSCLFLAGS_RECVRINPROG);
2022 wakeup(&clp->nfsc_flags);
2023 nfsv4_unlock(&clp->nfsc_lock, 0);
2024 NFSUNLOCKCLSTATE();
2025 return;
2026 }
2027 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2028 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2029 
2030 /*
2031 * Mark requests already queued on the server, so that they don't
2032 * initiate another recovery cycle. Any requests already in the
2033 * queue that handle state information will have the old stale
2034 * clientid/stateid and will get an NFSERR_STALESTATEID,
2035 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
2036 * This will be translated to NFSERR_STALEDONTRECOVER when
2037 * R_DONTRECOVER is set.
2038 */
2039 NFSLOCKREQ();
2040 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
2041 if (rep->r_nmp == nmp)
2042 rep->r_flags |= R_DONTRECOVER;
2043 }
2044 NFSUNLOCKREQ();
2045 
2046 /*
2047 * Now, mark all delegations "need reclaim".
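 * Each delegation flagged NFSCLDL_NEEDRECLAIM below should be
 * re-acquired by the Open reclaims that follow; any delegation still
 * flagged after that pass has no open to reclaim it with and is
 * handled by the "cobble together an open" loop further down.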
2048 */
2049 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
2050 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
2051 
2052 TAILQ_INIT(&extra_deleg);
2053 LIST_INIT(&extra_open);
2054 /*
2055 * Now traverse the state lists, doing Open and Lock Reclaims.
2056 */
2057 tcred = newnfs_getcred();
2058 owp = LIST_FIRST(&clp->nfsc_owner);
2059 while (owp != NULL) {
2060 nowp = LIST_NEXT(owp, nfsow_list);
2061 owp->nfsow_seqid = 0;
2062 op = LIST_FIRST(&owp->nfsow_open);
2063 while (op != NULL) {
2064 nop = LIST_NEXT(op, nfso_list);
2065 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2066 /* Search for a delegation to reclaim with the open */
2067 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2068 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2069 continue;
2070 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2071 mode = NFSV4OPEN_ACCESSWRITE;
2072 delegtype = NFSV4OPEN_DELEGATEWRITE;
2073 } else {
2074 mode = NFSV4OPEN_ACCESSREAD;
2075 delegtype = NFSV4OPEN_DELEGATEREAD;
2076 }
2077 if ((op->nfso_mode & mode) == mode &&
2078 op->nfso_fhlen == dp->nfsdl_fhlen &&
2079 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
2080 break;
2081 }
2082 ndp = dp;
2083 if (dp == NULL)
2084 delegtype = NFSV4OPEN_DELEGATENONE;
2085 newnfs_copycred(&op->nfso_cred, tcred);
2086 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
2087 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
2088 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
2089 tcred, p);
2090 if (!error) {
2091 /* Handle any replied delegation */
2092 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
2093 || NFSMNT_RDONLY(nmp->nm_mountp))) {
2094 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
2095 mode = NFSV4OPEN_ACCESSWRITE;
2096 else
2097 mode = NFSV4OPEN_ACCESSREAD;
2098 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2099 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2100 continue;
2101 if ((op->nfso_mode & mode) == mode &&
2102 op->nfso_fhlen == dp->nfsdl_fhlen &&
2103 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
2104 op->nfso_fhlen)) {
2105 dp->nfsdl_stateid = ndp->nfsdl_stateid;
2106 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
2107 dp->nfsdl_ace = ndp->nfsdl_ace;
2108 dp->nfsdl_change = ndp->nfsdl_change;
2109 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2110 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
2111 dp->nfsdl_flags |= NFSCLDL_RECALL;
2112 free(ndp, M_NFSCLDELEG);
2113 ndp = NULL;
2114 break;
2115 }
2116 }
2117 }
2118 if (ndp != NULL)
2119 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
2120 
2121 /* and reclaim all byte range locks */
2122 lp = LIST_FIRST(&op->nfso_lock);
2123 while (lp != NULL) {
2124 nlp = LIST_NEXT(lp, nfsl_list);
2125 lp->nfsl_seqid = 0;
2126 firstlock = 1;
2127 lop = LIST_FIRST(&lp->nfsl_lock);
2128 while (lop != NULL) {
2129 nlop = LIST_NEXT(lop, nfslo_list);
2130 if (lop->nfslo_end == NFS64BITSSET)
2131 len = NFS64BITSSET;
2132 else
2133 len = lop->nfslo_end - lop->nfslo_first;
2134 error = nfscl_trylock(nmp, NULL,
2135 op->nfso_fh, op->nfso_fhlen, lp,
2136 firstlock, 1, lop->nfslo_first, len,
2137 lop->nfslo_type, tcred, p);
2138 if (error != 0)
2139 nfscl_freelock(lop, 0);
2140 else
2141 firstlock = 0;
2142 lop = nlop;
2143 }
2144 /* If no locks, but a lockowner, just delete it. */
2145 if (LIST_EMPTY(&lp->nfsl_lock))
2146 nfscl_freelockowner(lp, 0);
2147 lp = nlp;
2148 }
2149 }
2150 }
2151 if (error != 0 && error != NFSERR_BADSESSION)
2152 nfscl_freeopen(op, 0);
2153 op = nop;
2154 }
2155 owp = nowp;
2156 }
2157 
2158 /*
2159 * Now, try and get any delegations not yet reclaimed by cobbling
2160 * together an appropriate open.
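 * The synthesized open uses a single reserved open owner name
 * ("RECLAIMDELEG", padded out to NFSV4CL_LOCKNAMELEN) and an access
 * mode derived from whether the delegation being reclaimed is a
 * write or a read one, as can be seen in the loop below.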
2161 */
2162 nowp = NULL;
2163 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2164 while (dp != NULL) {
2165 ndp = TAILQ_NEXT(dp, nfsdl_list);
2166 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2167 if (nowp == NULL) {
2168 nowp = malloc(
2169 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2170 /*
2171 * Name must be as long as the largest possible
2172 * NFSV4CL_LOCKNAMELEN (12 for now).
2173 */
2174 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2175 NFSV4CL_LOCKNAMELEN);
2176 LIST_INIT(&nowp->nfsow_open);
2177 nowp->nfsow_clp = clp;
2178 nowp->nfsow_seqid = 0;
2179 nowp->nfsow_defunct = 0;
2180 nfscl_lockinit(&nowp->nfsow_rwlock);
2181 }
2182 nop = NULL;
2183 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2184 nop = malloc(sizeof (struct nfsclopen) +
2185 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2186 nop->nfso_own = nowp;
2187 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2188 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2189 delegtype = NFSV4OPEN_DELEGATEWRITE;
2190 } else {
2191 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2192 delegtype = NFSV4OPEN_DELEGATEREAD;
2193 }
2194 nop->nfso_opencnt = 0;
2195 nop->nfso_posixlock = 1;
2196 nop->nfso_fhlen = dp->nfsdl_fhlen;
2197 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2198 LIST_INIT(&nop->nfso_lock);
2199 nop->nfso_stateid.seqid = 0;
2200 nop->nfso_stateid.other[0] = 0;
2201 nop->nfso_stateid.other[1] = 0;
2202 nop->nfso_stateid.other[2] = 0;
2203 newnfs_copycred(&dp->nfsdl_cred, tcred);
2204 newnfs_copyincred(tcred, &nop->nfso_cred);
2205 tdp = NULL;
2206 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2207 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2208 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2209 delegtype, tcred, p);
2210 if (tdp != NULL) {
2211 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2212 mode = NFSV4OPEN_ACCESSWRITE;
2213 else
2214 mode = NFSV4OPEN_ACCESSREAD;
2215 if ((nop->nfso_mode & mode) == mode &&
2216 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2217 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2218 nop->nfso_fhlen)) {
2219 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2220 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2221 dp->nfsdl_ace = tdp->nfsdl_ace;
2222 dp->nfsdl_change = tdp->nfsdl_change;
2223 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2224 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2225 dp->nfsdl_flags |= NFSCLDL_RECALL;
2226 free(tdp, M_NFSCLDELEG);
2227 } else {
2228 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2229 }
2230 }
2231 }
2232 if (error) {
2233 if (nop != NULL)
2234 free(nop, M_NFSCLOPEN);
2235 /*
2236 * Couldn't reclaim it, so throw the state
2237 * away. Ouch!!
2238 */
2239 nfscl_cleandeleg(dp);
2240 nfscl_freedeleg(&clp->nfsc_deleg, dp);
2241 } else {
2242 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2243 }
2244 }
2245 dp = ndp;
2246 }
2247 
2248 /*
2249 * Now, get rid of extra Opens and Delegations.
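 * These "extras" were accumulated above, when a reclaim returned a
 * delegation that did not match one still needing reclaim. They are
 * closed/returned here, retrying while the server is still in its
 * grace period (NFSERR_GRACE) with a short nap between attempts.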
2250 */ 2251 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { 2252 do { 2253 newnfs_copycred(&op->nfso_cred, tcred); 2254 error = nfscl_tryclose(op, tcred, nmp, p); 2255 if (error == NFSERR_GRACE) 2256 (void) nfs_catnap(PZERO, error, "nfsexcls"); 2257 } while (error == NFSERR_GRACE); 2258 LIST_REMOVE(op, nfso_list); 2259 free(op, M_NFSCLOPEN); 2260 } 2261 if (nowp != NULL) 2262 free(nowp, M_NFSCLOWNER); 2263 2264 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { 2265 do { 2266 newnfs_copycred(&dp->nfsdl_cred, tcred); 2267 error = nfscl_trydelegreturn(dp, tcred, nmp, p); 2268 if (error == NFSERR_GRACE) 2269 (void) nfs_catnap(PZERO, error, "nfsexdlg"); 2270 } while (error == NFSERR_GRACE); 2271 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); 2272 free(dp, M_NFSCLDELEG); 2273 } 2274 2275 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */ 2276 if (NFSHASNFSV4N(nmp)) 2277 (void)nfsrpc_reclaimcomplete(nmp, cred, p); 2278 2279 NFSLOCKCLSTATE(); 2280 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; 2281 wakeup(&clp->nfsc_flags); 2282 nfsv4_unlock(&clp->nfsc_lock, 0); 2283 NFSUNLOCKCLSTATE(); 2284 NFSFREECRED(tcred); 2285 } 2286 2287 /* 2288 * This function is called when a server replies with NFSERR_EXPIRED. 2289 * It deletes all state for the client and does a fresh SetClientId/confirm. 2290 * XXX Someday it should post a signal to the process(es) that hold the 2291 * state, so they know that lock state has been lost. 2292 */ 2293 APPLESTATIC int 2294 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) 2295 { 2296 struct nfsmount *nmp; 2297 struct ucred *cred; 2298 int igotlock = 0, error, trycnt; 2299 2300 /* 2301 * If the clientid has gone away or a new SetClientid has already 2302 * been done, just return ok. 2303 */ 2304 if (clp == NULL || clidrev != clp->nfsc_clientidrev) 2305 return (0); 2306 2307 /* 2308 * First, lock the client structure, so everyone else will 2309 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so 2310 * that only one thread does the work. 2311 */ 2312 NFSLOCKCLSTATE(); 2313 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; 2314 do { 2315 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2316 NFSCLSTATEMUTEXPTR, NULL); 2317 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); 2318 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { 2319 if (igotlock) 2320 nfsv4_unlock(&clp->nfsc_lock, 0); 2321 NFSUNLOCKCLSTATE(); 2322 return (0); 2323 } 2324 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2325 NFSUNLOCKCLSTATE(); 2326 2327 nmp = clp->nfsc_nmp; 2328 if (nmp == NULL) 2329 panic("nfscl expired"); 2330 cred = newnfs_getcred(); 2331 trycnt = 5; 2332 do { 2333 error = nfsrpc_setclient(nmp, clp, 0, cred, p); 2334 } while ((error == NFSERR_STALECLIENTID || 2335 error == NFSERR_BADSESSION || 2336 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2337 if (error) { 2338 NFSLOCKCLSTATE(); 2339 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2340 } else { 2341 /* 2342 * Expire the state for the client. 2343 */ 2344 nfscl_expireclient(clp, nmp, cred, p); 2345 NFSLOCKCLSTATE(); 2346 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2347 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2348 } 2349 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); 2350 wakeup(&clp->nfsc_flags); 2351 nfsv4_unlock(&clp->nfsc_lock, 0); 2352 NFSUNLOCKCLSTATE(); 2353 NFSFREECRED(cred); 2354 return (error); 2355 } 2356 2357 /* 2358 * This function inserts a lock in the list after insert_lop. 
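 * Note the sentinel convention: a caller may pass the lock owner
 * itself, cast to (struct nfscllock *), as insert_lop to request
 * insertion at the head of the list. A minimal usage sketch, with
 * prev_lop being some lock already in lp's list:
 *
 *	nfscl_insertlock(lp, new_lop, (struct nfscllock *)lp, local);
 *		- inserts new_lop at the head of lp->nfsl_lock
 *	nfscl_insertlock(lp, new_lop, prev_lop, local);
 *		- inserts new_lop just after prev_lop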
2359 */ 2360 static void 2361 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, 2362 struct nfscllock *insert_lop, int local) 2363 { 2364 2365 if ((struct nfscllockowner *)insert_lop == lp) 2366 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); 2367 else 2368 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); 2369 if (local) 2370 nfsstatsv1.cllocallocks++; 2371 else 2372 nfsstatsv1.cllocks++; 2373 } 2374 2375 /* 2376 * This function updates the locking for a lock owner and given file. It 2377 * maintains a list of lock ranges ordered on increasing file offset that 2378 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 2379 * It always adds new_lop to the list and sometimes uses the one pointed 2380 * at by other_lopp. 2381 * Returns 1 if the locks were modified, 0 otherwise. 2382 */ 2383 static int 2384 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, 2385 struct nfscllock **other_lopp, int local) 2386 { 2387 struct nfscllock *new_lop = *new_lopp; 2388 struct nfscllock *lop, *tlop, *ilop; 2389 struct nfscllock *other_lop; 2390 int unlock = 0, modified = 0; 2391 u_int64_t tmp; 2392 2393 /* 2394 * Work down the list until the lock is merged. 2395 */ 2396 if (new_lop->nfslo_type == F_UNLCK) 2397 unlock = 1; 2398 ilop = (struct nfscllock *)lp; 2399 lop = LIST_FIRST(&lp->nfsl_lock); 2400 while (lop != NULL) { 2401 /* 2402 * Only check locks for this file that aren't before the start of 2403 * new lock's range. 2404 */ 2405 if (lop->nfslo_end >= new_lop->nfslo_first) { 2406 if (new_lop->nfslo_end < lop->nfslo_first) { 2407 /* 2408 * If the new lock ends before the start of the 2409 * current lock's range, no merge, just insert 2410 * the new lock. 2411 */ 2412 break; 2413 } 2414 if (new_lop->nfslo_type == lop->nfslo_type || 2415 (new_lop->nfslo_first <= lop->nfslo_first && 2416 new_lop->nfslo_end >= lop->nfslo_end)) { 2417 /* 2418 * This lock can be absorbed by the new lock/unlock. 2419 * This happens when it covers the entire range 2420 * of the old lock or is contiguous 2421 * with the old lock and is of the same type or an 2422 * unlock. 2423 */ 2424 if (new_lop->nfslo_type != lop->nfslo_type || 2425 new_lop->nfslo_first != lop->nfslo_first || 2426 new_lop->nfslo_end != lop->nfslo_end) 2427 modified = 1; 2428 if (lop->nfslo_first < new_lop->nfslo_first) 2429 new_lop->nfslo_first = lop->nfslo_first; 2430 if (lop->nfslo_end > new_lop->nfslo_end) 2431 new_lop->nfslo_end = lop->nfslo_end; 2432 tlop = lop; 2433 lop = LIST_NEXT(lop, nfslo_list); 2434 nfscl_freelock(tlop, local); 2435 continue; 2436 } 2437 2438 /* 2439 * All these cases are for contiguous locks that are not the 2440 * same type, so they can't be merged. 2441 */ 2442 if (new_lop->nfslo_first <= lop->nfslo_first) { 2443 /* 2444 * This case is where the new lock overlaps with the 2445 * first part of the old lock. Move the start of the 2446 * old lock to just past the end of the new lock. The 2447 * new lock will be inserted in front of the old, since 2448 * ilop hasn't been updated. (We are done now.) 2449 */ 2450 if (lop->nfslo_first != new_lop->nfslo_end) { 2451 lop->nfslo_first = new_lop->nfslo_end; 2452 modified = 1; 2453 } 2454 break; 2455 } 2456 if (new_lop->nfslo_end >= lop->nfslo_end) { 2457 /* 2458 * This case is where the new lock overlaps with the 2459 * end of the old lock's range. Move the old lock's 2460 * end to just before the new lock's first and insert 2461 * the new lock after the old lock. 
2462 * Might not be done yet, since the new lock could 2463 * overlap further locks with higher ranges. 2464 */ 2465 if (lop->nfslo_end != new_lop->nfslo_first) { 2466 lop->nfslo_end = new_lop->nfslo_first; 2467 modified = 1; 2468 } 2469 ilop = lop; 2470 lop = LIST_NEXT(lop, nfslo_list); 2471 continue; 2472 } 2473 /* 2474 * The final case is where the new lock's range is in the 2475 * middle of the current lock's and splits the current lock 2476 * up. Use *other_lopp to handle the second part of the 2477 * split old lock range. (We are done now.) 2478 * For unlock, we use new_lop as other_lop and tmp, since 2479 * other_lop and new_lop are the same for this case. 2480 * We noted the unlock case above, so we don't need 2481 * new_lop->nfslo_type any longer. 2482 */ 2483 tmp = new_lop->nfslo_first; 2484 if (unlock) { 2485 other_lop = new_lop; 2486 *new_lopp = NULL; 2487 } else { 2488 other_lop = *other_lopp; 2489 *other_lopp = NULL; 2490 } 2491 other_lop->nfslo_first = new_lop->nfslo_end; 2492 other_lop->nfslo_end = lop->nfslo_end; 2493 other_lop->nfslo_type = lop->nfslo_type; 2494 lop->nfslo_end = tmp; 2495 nfscl_insertlock(lp, other_lop, lop, local); 2496 ilop = lop; 2497 modified = 1; 2498 break; 2499 } 2500 ilop = lop; 2501 lop = LIST_NEXT(lop, nfslo_list); 2502 if (lop == NULL) 2503 break; 2504 } 2505 2506 /* 2507 * Insert the new lock in the list at the appropriate place. 2508 */ 2509 if (!unlock) { 2510 nfscl_insertlock(lp, new_lop, ilop, local); 2511 *new_lopp = NULL; 2512 modified = 1; 2513 } 2514 return (modified); 2515 } 2516 2517 /* 2518 * This function must be run as a kernel thread. 2519 * It does Renew Ops and recovery, when required. 2520 */ 2521 APPLESTATIC void 2522 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) 2523 { 2524 struct nfsclowner *owp, *nowp; 2525 struct nfsclopen *op; 2526 struct nfscllockowner *lp, *nlp; 2527 struct nfscldeleghead dh; 2528 struct nfscldeleg *dp, *ndp; 2529 struct ucred *cred; 2530 u_int32_t clidrev; 2531 int error, cbpathdown, islept, igotlock, ret, clearok; 2532 uint32_t recover_done_time = 0; 2533 time_t mytime; 2534 static time_t prevsec = 0; 2535 struct nfscllockownerfh *lfhp, *nlfhp; 2536 struct nfscllockownerfhhead lfh; 2537 struct nfscllayout *lyp, *nlyp; 2538 struct nfscldevinfo *dip, *ndip; 2539 struct nfscllayouthead rlh; 2540 struct nfsclrecalllayout *recallp; 2541 struct nfsclds *dsp; 2542 2543 cred = newnfs_getcred(); 2544 NFSLOCKCLSTATE(); 2545 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; 2546 NFSUNLOCKCLSTATE(); 2547 for(;;) { 2548 newnfs_setroot(cred); 2549 cbpathdown = 0; 2550 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { 2551 /* 2552 * Only allow one recover within 1/2 of the lease 2553 * duration (nfsc_renew). 
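 * Without this throttle, a server that keeps replying
 * NFSERR_BADSESSION could drive back to back recovery cycles.
 * While the window is still open, NFSCLFLAGS_RECOVER is simply
 * cleared below and recovery is deferred until the next time the
 * flag gets set after the window has expired.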
2554 */ 2555 if (recover_done_time < NFSD_MONOSEC) { 2556 recover_done_time = NFSD_MONOSEC + 2557 clp->nfsc_renew; 2558 NFSCL_DEBUG(1, "Doing recovery..\n"); 2559 nfscl_recover(clp, cred, p); 2560 } else { 2561 NFSCL_DEBUG(1, "Clear Recovery dt=%u ms=%jd\n", 2562 recover_done_time, (intmax_t)NFSD_MONOSEC); 2563 NFSLOCKCLSTATE(); 2564 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2565 NFSUNLOCKCLSTATE(); 2566 } 2567 } 2568 if (clp->nfsc_expire <= NFSD_MONOSEC && 2569 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { 2570 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; 2571 clidrev = clp->nfsc_clientidrev; 2572 error = nfsrpc_renew(clp, NULL, cred, p); 2573 if (error == NFSERR_CBPATHDOWN) 2574 cbpathdown = 1; 2575 else if (error == NFSERR_STALECLIENTID || 2576 error == NFSERR_BADSESSION) { 2577 NFSLOCKCLSTATE(); 2578 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2579 NFSUNLOCKCLSTATE(); 2580 } else if (error == NFSERR_EXPIRED) 2581 (void) nfscl_hasexpired(clp, clidrev, p); 2582 } 2583 2584 checkdsrenew: 2585 if (NFSHASNFSV4N(clp->nfsc_nmp)) { 2586 /* Do renews for any DS sessions. */ 2587 NFSLOCKMNT(clp->nfsc_nmp); 2588 /* Skip first entry, since the MDS is handled above. */ 2589 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess); 2590 if (dsp != NULL) 2591 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2592 while (dsp != NULL) { 2593 if (dsp->nfsclds_expire <= NFSD_MONOSEC && 2594 dsp->nfsclds_sess.nfsess_defunct == 0) { 2595 dsp->nfsclds_expire = NFSD_MONOSEC + 2596 clp->nfsc_renew; 2597 NFSUNLOCKMNT(clp->nfsc_nmp); 2598 (void)nfsrpc_renew(clp, dsp, cred, p); 2599 goto checkdsrenew; 2600 } 2601 dsp = TAILQ_NEXT(dsp, nfsclds_list); 2602 } 2603 NFSUNLOCKMNT(clp->nfsc_nmp); 2604 } 2605 2606 TAILQ_INIT(&dh); 2607 NFSLOCKCLSTATE(); 2608 if (cbpathdown) 2609 /* It's a Total Recall! */ 2610 nfscl_totalrecall(clp); 2611 2612 /* 2613 * Now, handle defunct owners. 2614 */ 2615 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { 2616 if (LIST_EMPTY(&owp->nfsow_open)) { 2617 if (owp->nfsow_defunct != 0) 2618 nfscl_freeopenowner(owp, 0); 2619 } 2620 } 2621 2622 /* 2623 * Do the recall on any delegations. To avoid trouble, always 2624 * come back up here after having slept. 2625 */ 2626 igotlock = 0; 2627 tryagain: 2628 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2629 while (dp != NULL) { 2630 ndp = TAILQ_NEXT(dp, nfsdl_list); 2631 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { 2632 /* 2633 * Wait for outstanding I/O ops to be done. 2634 */ 2635 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 2636 if (igotlock) { 2637 nfsv4_unlock(&clp->nfsc_lock, 0); 2638 igotlock = 0; 2639 } 2640 dp->nfsdl_rwlock.nfslock_lock |= 2641 NFSV4LOCK_WANTED; 2642 (void) nfsmsleep(&dp->nfsdl_rwlock, 2643 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", 2644 NULL); 2645 goto tryagain; 2646 } 2647 while (!igotlock) { 2648 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 2649 &islept, NFSCLSTATEMUTEXPTR, NULL); 2650 if (islept) 2651 goto tryagain; 2652 } 2653 NFSUNLOCKCLSTATE(); 2654 newnfs_copycred(&dp->nfsdl_cred, cred); 2655 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, 2656 NULL, cred, p, 1); 2657 if (!ret) { 2658 nfscl_cleandeleg(dp); 2659 TAILQ_REMOVE(&clp->nfsc_deleg, dp, 2660 nfsdl_list); 2661 LIST_REMOVE(dp, nfsdl_hash); 2662 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2663 nfscl_delegcnt--; 2664 nfsstatsv1.cldelegates--; 2665 } 2666 NFSLOCKCLSTATE(); 2667 } 2668 dp = ndp; 2669 } 2670 2671 /* 2672 * Clear out old delegations, if we are above the high water 2673 * mark. Only clear out ones with no state related to them. 2674 * The tailq list is in LRU order. 
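 * "No state related to them" means no open under any of the
 * delegation's openowners and no byte range lock under any of its
 * lock owners; both conditions are checked via the clearok tests
 * below before the delegation is moved to the local "dh" list for
 * the DelegReturn done later.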
2675 */ 2676 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); 2677 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { 2678 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); 2679 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && 2680 dp->nfsdl_rwlock.nfslock_lock == 0 && 2681 dp->nfsdl_timestamp < NFSD_MONOSEC && 2682 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | 2683 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { 2684 clearok = 1; 2685 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2686 op = LIST_FIRST(&owp->nfsow_open); 2687 if (op != NULL) { 2688 clearok = 0; 2689 break; 2690 } 2691 } 2692 if (clearok) { 2693 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 2694 if (!LIST_EMPTY(&lp->nfsl_lock)) { 2695 clearok = 0; 2696 break; 2697 } 2698 } 2699 } 2700 if (clearok) { 2701 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); 2702 LIST_REMOVE(dp, nfsdl_hash); 2703 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2704 nfscl_delegcnt--; 2705 nfsstatsv1.cldelegates--; 2706 } 2707 } 2708 dp = ndp; 2709 } 2710 if (igotlock) 2711 nfsv4_unlock(&clp->nfsc_lock, 0); 2712 2713 /* 2714 * Do the recall on any layouts. To avoid trouble, always 2715 * come back up here after having slept. 2716 */ 2717 TAILQ_INIT(&rlh); 2718 tryagain2: 2719 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) { 2720 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) { 2721 /* 2722 * Wait for outstanding I/O ops to be done. 2723 */ 2724 if (lyp->nfsly_lock.nfslock_usecnt > 0 || 2725 (lyp->nfsly_lock.nfslock_lock & 2726 NFSV4LOCK_LOCK) != 0) { 2727 lyp->nfsly_lock.nfslock_lock |= 2728 NFSV4LOCK_WANTED; 2729 nfsmsleep(&lyp->nfsly_lock.nfslock_lock, 2730 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp", 2731 NULL); 2732 goto tryagain2; 2733 } 2734 /* Move the layout to the recall list. */ 2735 TAILQ_REMOVE(&clp->nfsc_layout, lyp, 2736 nfsly_list); 2737 LIST_REMOVE(lyp, nfsly_hash); 2738 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list); 2739 2740 /* Handle any layout commits. */ 2741 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) && 2742 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) { 2743 lyp->nfsly_flags &= ~NFSLY_WRITTEN; 2744 NFSUNLOCKCLSTATE(); 2745 NFSCL_DEBUG(3, "do layoutcommit\n"); 2746 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, 2747 cred, p); 2748 NFSLOCKCLSTATE(); 2749 goto tryagain2; 2750 } 2751 } 2752 } 2753 2754 /* Now, look for stale layouts. */ 2755 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead); 2756 while (lyp != NULL) { 2757 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list); 2758 if (lyp->nfsly_timestamp < NFSD_MONOSEC && 2759 (lyp->nfsly_flags & NFSLY_RECALL) == 0 && 2760 lyp->nfsly_lock.nfslock_usecnt == 0 && 2761 lyp->nfsly_lock.nfslock_lock == 0) { 2762 NFSCL_DEBUG(4, "ret stale lay=%d\n", 2763 nfscl_layoutcnt); 2764 recallp = malloc(sizeof(*recallp), 2765 M_NFSLAYRECALL, M_NOWAIT); 2766 if (recallp == NULL) 2767 break; 2768 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, 2769 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX, 2770 lyp->nfsly_stateid.seqid, 0, 0, NULL, 2771 recallp); 2772 } 2773 lyp = nlyp; 2774 } 2775 2776 /* 2777 * Free up any unreferenced device info structures. 2778 */ 2779 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) { 2780 if (dip->nfsdi_layoutrefs == 0 && 2781 dip->nfsdi_refcnt == 0) { 2782 NFSCL_DEBUG(4, "freeing devinfo\n"); 2783 LIST_REMOVE(dip, nfsdi_list); 2784 nfscl_freedevinfo(dip); 2785 } 2786 } 2787 NFSUNLOCKCLSTATE(); 2788 2789 /* Do layout return(s), as required. 
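 * The LayoutReturn RPCs themselves are done below, after
 * NFSUNLOCKCLSTATE(), using the "rlh" list built while the mutex
 * was held.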
*/ 2790 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) { 2791 TAILQ_REMOVE(&rlh, lyp, nfsly_list); 2792 NFSCL_DEBUG(4, "ret layout\n"); 2793 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p); 2794 nfscl_freelayout(lyp); 2795 } 2796 2797 /* 2798 * Delegreturn any delegations cleaned out or recalled. 2799 */ 2800 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { 2801 newnfs_copycred(&dp->nfsdl_cred, cred); 2802 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 2803 TAILQ_REMOVE(&dh, dp, nfsdl_list); 2804 free(dp, M_NFSCLDELEG); 2805 } 2806 2807 SLIST_INIT(&lfh); 2808 /* 2809 * Call nfscl_cleanupkext() once per second to check for 2810 * open/lock owners where the process has exited. 2811 */ 2812 mytime = NFSD_MONOSEC; 2813 if (prevsec != mytime) { 2814 prevsec = mytime; 2815 nfscl_cleanupkext(clp, &lfh); 2816 } 2817 2818 /* 2819 * Do a ReleaseLockOwner for all lock owners where the 2820 * associated process no longer exists, as found by 2821 * nfscl_cleanupkext(). 2822 */ 2823 newnfs_setroot(cred); 2824 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) { 2825 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list, 2826 nlp) { 2827 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp, 2828 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred, 2829 p); 2830 nfscl_freelockowner(lp, 0); 2831 } 2832 free(lfhp, M_TEMP); 2833 } 2834 SLIST_INIT(&lfh); 2835 2836 NFSLOCKCLSTATE(); 2837 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) 2838 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", 2839 hz); 2840 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { 2841 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; 2842 NFSUNLOCKCLSTATE(); 2843 NFSFREECRED(cred); 2844 wakeup((caddr_t)clp); 2845 return; 2846 } 2847 NFSUNLOCKCLSTATE(); 2848 } 2849 } 2850 2851 /* 2852 * Initiate state recovery. Called when NFSERR_STALECLIENTID, 2853 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received. 2854 */ 2855 APPLESTATIC void 2856 nfscl_initiate_recovery(struct nfsclclient *clp) 2857 { 2858 2859 if (clp == NULL) 2860 return; 2861 NFSLOCKCLSTATE(); 2862 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2863 NFSUNLOCKCLSTATE(); 2864 wakeup((caddr_t)clp); 2865 } 2866 2867 /* 2868 * Dump out the state stuff for debugging. 
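 * The int arguments act as booleans selecting which record types get
 * printed: openowners, opens, lockowners and locks, respectively.
 * For example, the duplicate open checking below uses
 *
 *	nfscl_dumpstate(nmp, 1, 1, 0, 0);
 *
 * to print only the openowners and opens for the mount.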
2869 */ 2870 APPLESTATIC void 2871 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, 2872 int lockowner, int locks) 2873 { 2874 struct nfsclclient *clp; 2875 struct nfsclowner *owp; 2876 struct nfsclopen *op; 2877 struct nfscllockowner *lp; 2878 struct nfscllock *lop; 2879 struct nfscldeleg *dp; 2880 2881 clp = nmp->nm_clp; 2882 if (clp == NULL) { 2883 printf("nfscl dumpstate NULL clp\n"); 2884 return; 2885 } 2886 NFSLOCKCLSTATE(); 2887 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2888 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2889 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2890 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2891 owp->nfsow_owner[0], owp->nfsow_owner[1], 2892 owp->nfsow_owner[2], owp->nfsow_owner[3], 2893 owp->nfsow_seqid); 2894 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2895 if (opens) 2896 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2897 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2898 op->nfso_stateid.other[2], op->nfso_opencnt, 2899 op->nfso_fh[12]); 2900 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2901 if (lockowner) 2902 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2903 lp->nfsl_owner[0], lp->nfsl_owner[1], 2904 lp->nfsl_owner[2], lp->nfsl_owner[3], 2905 lp->nfsl_seqid, 2906 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2907 lp->nfsl_stateid.other[2]); 2908 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2909 if (locks) 2910 #ifdef __FreeBSD__ 2911 printf("lck typ=%d fst=%ju end=%ju\n", 2912 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2913 (intmax_t)lop->nfslo_end); 2914 #else 2915 printf("lck typ=%d fst=%qd end=%qd\n", 2916 lop->nfslo_type, lop->nfslo_first, 2917 lop->nfslo_end); 2918 #endif 2919 } 2920 } 2921 } 2922 } 2923 } 2924 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2925 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2926 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2927 owp->nfsow_owner[0], owp->nfsow_owner[1], 2928 owp->nfsow_owner[2], owp->nfsow_owner[3], 2929 owp->nfsow_seqid); 2930 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2931 if (opens) 2932 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2933 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2934 op->nfso_stateid.other[2], op->nfso_opencnt, 2935 op->nfso_fh[12]); 2936 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2937 if (lockowner) 2938 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2939 lp->nfsl_owner[0], lp->nfsl_owner[1], 2940 lp->nfsl_owner[2], lp->nfsl_owner[3], 2941 lp->nfsl_seqid, 2942 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2943 lp->nfsl_stateid.other[2]); 2944 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2945 if (locks) 2946 #ifdef __FreeBSD__ 2947 printf("lck typ=%d fst=%ju end=%ju\n", 2948 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2949 (intmax_t)lop->nfslo_end); 2950 #else 2951 printf("lck typ=%d fst=%qd end=%qd\n", 2952 lop->nfslo_type, lop->nfslo_first, 2953 lop->nfslo_end); 2954 #endif 2955 } 2956 } 2957 } 2958 } 2959 NFSUNLOCKCLSTATE(); 2960 } 2961 2962 /* 2963 * Check for duplicate open owners and opens. 2964 * (Only used as a diagnostic aid.) 
2965 */ 2966 APPLESTATIC void 2967 nfscl_dupopen(vnode_t vp, int dupopens) 2968 { 2969 struct nfsclclient *clp; 2970 struct nfsclowner *owp, *owp2; 2971 struct nfsclopen *op, *op2; 2972 struct nfsfh *nfhp; 2973 2974 clp = VFSTONFS(vnode_mount(vp))->nm_clp; 2975 if (clp == NULL) { 2976 printf("nfscl dupopen NULL clp\n"); 2977 return; 2978 } 2979 nfhp = VTONFS(vp)->n_fhp; 2980 NFSLOCKCLSTATE(); 2981 2982 /* 2983 * First, search for duplicate owners. 2984 * These should never happen! 2985 */ 2986 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2987 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2988 if (owp != owp2 && 2989 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, 2990 NFSV4CL_LOCKNAMELEN)) { 2991 NFSUNLOCKCLSTATE(); 2992 printf("DUP OWNER\n"); 2993 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); 2994 return; 2995 } 2996 } 2997 } 2998 2999 /* 3000 * Now, search for duplicate stateids. 3001 * These shouldn't happen, either. 3002 */ 3003 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3004 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 3005 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3006 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3007 if (op != op2 && 3008 (op->nfso_stateid.other[0] != 0 || 3009 op->nfso_stateid.other[1] != 0 || 3010 op->nfso_stateid.other[2] != 0) && 3011 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && 3012 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && 3013 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { 3014 NFSUNLOCKCLSTATE(); 3015 printf("DUP STATEID\n"); 3016 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 3017 0); 3018 return; 3019 } 3020 } 3021 } 3022 } 3023 } 3024 3025 /* 3026 * Now search for duplicate opens. 3027 * Duplicate opens for the same owner 3028 * should never occur. Other duplicates are 3029 * possible and are checked for if "dupopens" 3030 * is true. 3031 */ 3032 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 3033 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 3034 if (nfhp->nfh_len == op2->nfso_fhlen && 3035 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { 3036 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3037 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3038 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && 3039 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && 3040 (!NFSBCMP(op->nfso_own->nfsow_owner, 3041 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || 3042 dupopens)) { 3043 if (!NFSBCMP(op->nfso_own->nfsow_owner, 3044 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 3045 NFSUNLOCKCLSTATE(); 3046 printf("BADDUP OPEN\n"); 3047 } else { 3048 NFSUNLOCKCLSTATE(); 3049 printf("DUP OPEN\n"); 3050 } 3051 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 3052 0, 0); 3053 return; 3054 } 3055 } 3056 } 3057 } 3058 } 3059 } 3060 NFSUNLOCKCLSTATE(); 3061 } 3062 3063 /* 3064 * During close, find an open that needs to be dereferenced and 3065 * dereference it. If there are no more opens for this file, 3066 * log a message to that effect. 3067 * Opens aren't actually Close'd until VOP_INACTIVE() is performed 3068 * on the file's vnode. 3069 * This is the safe way, since it is difficult to identify 3070 * which open the close is for and I/O can be performed after the 3071 * close(2) system call when a file is mmap'd. 3072 * If it returns 0 for success, there will be a referenced 3073 * clp returned via clpp. 
3074 */ 3075 APPLESTATIC int 3076 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) 3077 { 3078 struct nfsclclient *clp; 3079 struct nfsclowner *owp; 3080 struct nfsclopen *op; 3081 struct nfscldeleg *dp; 3082 struct nfsfh *nfhp; 3083 int error, notdecr; 3084 3085 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); 3086 if (error) 3087 return (error); 3088 *clpp = clp; 3089 3090 nfhp = VTONFS(vp)->n_fhp; 3091 notdecr = 1; 3092 NFSLOCKCLSTATE(); 3093 /* 3094 * First, look for one under a delegation that was locally issued 3095 * and just decrement the opencnt for it. Since all my Opens against 3096 * the server are DENY_NONE, I don't see a problem with hanging 3097 * onto them. (It is much easier to use one of the extant Opens 3098 * that I already have on the server when a Delegation is recalled 3099 * than to do fresh Opens.) Someday, I might need to rethink this, but. 3100 */ 3101 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3102 if (dp != NULL) { 3103 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 3104 op = LIST_FIRST(&owp->nfsow_open); 3105 if (op != NULL) { 3106 /* 3107 * Since a delegation is for a file, there 3108 * should never be more than one open for 3109 * each openowner. 3110 */ 3111 if (LIST_NEXT(op, nfso_list) != NULL) 3112 panic("nfscdeleg opens"); 3113 if (notdecr && op->nfso_opencnt > 0) { 3114 notdecr = 0; 3115 op->nfso_opencnt--; 3116 break; 3117 } 3118 } 3119 } 3120 } 3121 3122 /* Now process the opens against the server. */ 3123 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3124 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3125 if (op->nfso_fhlen == nfhp->nfh_len && 3126 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3127 nfhp->nfh_len)) { 3128 /* Found an open, decrement cnt if possible */ 3129 if (notdecr && op->nfso_opencnt > 0) { 3130 notdecr = 0; 3131 op->nfso_opencnt--; 3132 } 3133 /* 3134 * There are more opens, so just return. 3135 */ 3136 if (op->nfso_opencnt > 0) { 3137 NFSUNLOCKCLSTATE(); 3138 return (0); 3139 } 3140 } 3141 } 3142 } 3143 NFSUNLOCKCLSTATE(); 3144 if (notdecr) 3145 printf("nfscl: never fnd open\n"); 3146 return (0); 3147 } 3148 3149 APPLESTATIC int 3150 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) 3151 { 3152 struct nfsclclient *clp; 3153 struct nfsclowner *owp, *nowp; 3154 struct nfsclopen *op; 3155 struct nfscldeleg *dp; 3156 struct nfsfh *nfhp; 3157 struct nfsclrecalllayout *recallp; 3158 int error; 3159 3160 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp); 3161 if (error) 3162 return (error); 3163 *clpp = clp; 3164 3165 nfhp = VTONFS(vp)->n_fhp; 3166 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK); 3167 NFSLOCKCLSTATE(); 3168 /* 3169 * First get rid of the local Open structures, which should be no 3170 * longer in use. 3171 */ 3172 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 3173 if (dp != NULL) { 3174 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 3175 op = LIST_FIRST(&owp->nfsow_open); 3176 if (op != NULL) { 3177 KASSERT((op->nfso_opencnt == 0), 3178 ("nfscl: bad open cnt on deleg")); 3179 nfscl_freeopen(op, 1); 3180 } 3181 nfscl_freeopenowner(owp, 1); 3182 } 3183 } 3184 3185 /* Return any layouts marked return on close. */ 3186 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp); 3187 3188 /* Now process the opens against the server. 
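 * Since the client state mutex must be released across the
 * nfsrpc_doclose() RPC, the open lists can change underneath this
 * scan, so the loop below jumps back to "lookformore" and restarts
 * from the beginning after each Close that gets done.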
*/ 3189 lookformore: 3190 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3191 op = LIST_FIRST(&owp->nfsow_open); 3192 while (op != NULL) { 3193 if (op->nfso_fhlen == nfhp->nfh_len && 3194 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 3195 nfhp->nfh_len)) { 3196 /* Found an open, close it. */ 3197 #ifdef DIAGNOSTIC 3198 KASSERT((op->nfso_opencnt == 0), 3199 ("nfscl: bad open cnt on server (%d)", 3200 op->nfso_opencnt)); 3201 #endif 3202 NFSUNLOCKCLSTATE(); 3203 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op, 3204 p); 3205 NFSLOCKCLSTATE(); 3206 goto lookformore; 3207 } 3208 op = LIST_NEXT(op, nfso_list); 3209 } 3210 } 3211 NFSUNLOCKCLSTATE(); 3212 /* 3213 * recallp has been set NULL by nfscl_retoncloselayout() if it was 3214 * used by the function, but calling free() with a NULL pointer is ok. 3215 */ 3216 free(recallp, M_NFSLAYRECALL); 3217 return (0); 3218 } 3219 3220 /* 3221 * Return all delegations on this client. 3222 * (Must be called with client sleep lock.) 3223 */ 3224 static void 3225 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) 3226 { 3227 struct nfscldeleg *dp, *ndp; 3228 struct ucred *cred; 3229 3230 cred = newnfs_getcred(); 3231 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { 3232 nfscl_cleandeleg(dp); 3233 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3234 nfscl_freedeleg(&clp->nfsc_deleg, dp); 3235 } 3236 NFSFREECRED(cred); 3237 } 3238 3239 /* 3240 * Do a callback RPC. 3241 */ 3242 APPLESTATIC void 3243 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) 3244 { 3245 int clist, gotseq_ok, i, j, k, op, rcalls; 3246 u_int32_t *tl; 3247 struct nfsclclient *clp; 3248 struct nfscldeleg *dp = NULL; 3249 int numops, taglen = -1, error = 0, trunc __unused; 3250 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident; 3251 u_char tag[NFSV4_SMALLSTR + 1], *tagstr; 3252 vnode_t vp = NULL; 3253 struct nfsnode *np; 3254 struct vattr va; 3255 struct nfsfh *nfhp; 3256 mount_t mp; 3257 nfsattrbit_t attrbits, rattrbits; 3258 nfsv4stateid_t stateid; 3259 uint32_t seqid, slotid = 0, highslot, cachethis __unused; 3260 uint8_t sessionid[NFSX_V4SESSIONID]; 3261 struct mbuf *rep; 3262 struct nfscllayout *lyp; 3263 uint64_t filesid[2], len, off; 3264 int changed, gotone, laytype, recalltype; 3265 uint32_t iomode; 3266 struct nfsclrecalllayout *recallp = NULL; 3267 struct nfsclsession *tsep; 3268 3269 gotseq_ok = 0; 3270 nfsrvd_rephead(nd); 3271 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3272 taglen = fxdr_unsigned(int, *tl); 3273 if (taglen < 0) { 3274 error = EBADRPC; 3275 goto nfsmout; 3276 } 3277 if (taglen <= NFSV4_SMALLSTR) 3278 tagstr = tag; 3279 else 3280 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); 3281 error = nfsrv_mtostr(nd, tagstr, taglen); 3282 if (error) { 3283 if (taglen > NFSV4_SMALLSTR) 3284 free(tagstr, M_TEMP); 3285 taglen = -1; 3286 goto nfsmout; 3287 } 3288 (void) nfsm_strtom(nd, tag, taglen); 3289 if (taglen > NFSV4_SMALLSTR) { 3290 free(tagstr, M_TEMP); 3291 } 3292 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); 3293 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3294 minorvers = fxdr_unsigned(u_int32_t, *tl++); 3295 if (minorvers != NFSV4_MINORVERSION && 3296 minorvers != NFSV41_MINORVERSION && 3297 minorvers != NFSV42_MINORVERSION) 3298 nd->nd_repstat = NFSERR_MINORVERMISMATCH; 3299 cbident = fxdr_unsigned(u_int32_t, *tl++); 3300 if (nd->nd_repstat) 3301 numops = 0; 3302 else 3303 numops = fxdr_unsigned(int, *tl); 3304 /* 3305 * Loop around doing the sub ops. 
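 * Each sub op's status is appended to the reply as it is processed
 * and the loop exits at the first op that fails, the same way an
 * NFSv4 compound stops at the first error. For NFSv4.1 and later,
 * the first op must be CB_SEQUENCE: any other op in slot 0 draws
 * NFSERR_OPNOTINSESS and a CB_SEQUENCE later in the compound draws
 * NFSERR_SEQUENCEPOS, as can be seen in the cases below.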
3306 */ 3307 for (i = 0; i < numops; i++) { 3308 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3309 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); 3310 *repp++ = *tl; 3311 op = fxdr_unsigned(int, *tl); 3312 if (op < NFSV4OP_CBGETATTR || 3313 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) || 3314 (op > NFSV4OP_CBNOTIFYDEVID && 3315 minorvers == NFSV41_MINORVERSION) || 3316 (op > NFSV4OP_CBOFFLOAD && 3317 minorvers == NFSV42_MINORVERSION)) { 3318 nd->nd_repstat = NFSERR_OPILLEGAL; 3319 *repp = nfscl_errmap(nd, minorvers); 3320 retops++; 3321 break; 3322 } 3323 nd->nd_procnum = op; 3324 if (op < NFSV42_CBNOPS) 3325 nfsstatsv1.cbrpccnt[nd->nd_procnum]++; 3326 switch (op) { 3327 case NFSV4OP_CBGETATTR: 3328 NFSCL_DEBUG(4, "cbgetattr\n"); 3329 mp = NULL; 3330 vp = NULL; 3331 error = nfsm_getfh(nd, &nfhp); 3332 if (!error) 3333 error = nfsrv_getattrbits(nd, &attrbits, 3334 NULL, NULL); 3335 if (error == 0 && i == 0 && 3336 minorvers != NFSV4_MINORVERSION) 3337 error = NFSERR_OPNOTINSESS; 3338 if (!error) { 3339 mp = nfscl_getmnt(minorvers, sessionid, cbident, 3340 &clp); 3341 if (mp == NULL) 3342 error = NFSERR_SERVERFAULT; 3343 } 3344 if (!error) { 3345 error = nfscl_ngetreopen(mp, nfhp->nfh_fh, 3346 nfhp->nfh_len, p, &np); 3347 if (!error) 3348 vp = NFSTOV(np); 3349 } 3350 if (!error) { 3351 NFSZERO_ATTRBIT(&rattrbits); 3352 NFSLOCKCLSTATE(); 3353 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3354 nfhp->nfh_len); 3355 if (dp != NULL) { 3356 if (NFSISSET_ATTRBIT(&attrbits, 3357 NFSATTRBIT_SIZE)) { 3358 if (vp != NULL) 3359 va.va_size = np->n_size; 3360 else 3361 va.va_size = 3362 dp->nfsdl_size; 3363 NFSSETBIT_ATTRBIT(&rattrbits, 3364 NFSATTRBIT_SIZE); 3365 } 3366 if (NFSISSET_ATTRBIT(&attrbits, 3367 NFSATTRBIT_CHANGE)) { 3368 va.va_filerev = 3369 dp->nfsdl_change; 3370 if (vp == NULL || 3371 (np->n_flag & NDELEGMOD)) 3372 va.va_filerev++; 3373 NFSSETBIT_ATTRBIT(&rattrbits, 3374 NFSATTRBIT_CHANGE); 3375 } 3376 } else 3377 error = NFSERR_SERVERFAULT; 3378 NFSUNLOCKCLSTATE(); 3379 } 3380 if (vp != NULL) 3381 vrele(vp); 3382 if (mp != NULL) 3383 vfs_unbusy(mp); 3384 if (nfhp != NULL) 3385 free(nfhp, M_NFSFH); 3386 if (!error) 3387 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, 3388 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0, 3389 (uint64_t)0, NULL); 3390 break; 3391 case NFSV4OP_CBRECALL: 3392 NFSCL_DEBUG(4, "cbrecall\n"); 3393 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3394 NFSX_UNSIGNED); 3395 stateid.seqid = *tl++; 3396 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, 3397 NFSX_STATEIDOTHER); 3398 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 3399 trunc = fxdr_unsigned(int, *tl); 3400 error = nfsm_getfh(nd, &nfhp); 3401 if (error == 0 && i == 0 && 3402 minorvers != NFSV4_MINORVERSION) 3403 error = NFSERR_OPNOTINSESS; 3404 if (!error) { 3405 NFSLOCKCLSTATE(); 3406 if (minorvers == NFSV4_MINORVERSION) 3407 clp = nfscl_getclnt(cbident); 3408 else 3409 clp = nfscl_getclntsess(sessionid); 3410 if (clp != NULL) { 3411 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3412 nfhp->nfh_len); 3413 if (dp != NULL && (dp->nfsdl_flags & 3414 NFSCLDL_DELEGRET) == 0) { 3415 dp->nfsdl_flags |= 3416 NFSCLDL_RECALL; 3417 wakeup((caddr_t)clp); 3418 } 3419 } else { 3420 error = NFSERR_SERVERFAULT; 3421 } 3422 NFSUNLOCKCLSTATE(); 3423 } 3424 if (nfhp != NULL) 3425 free(nfhp, M_NFSFH); 3426 break; 3427 case NFSV4OP_CBLAYOUTRECALL: 3428 NFSCL_DEBUG(4, "cblayrec\n"); 3429 nfhp = NULL; 3430 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED); 3431 laytype = fxdr_unsigned(int, *tl++); 3432 iomode = fxdr_unsigned(uint32_t, *tl++); 
3433 if (newnfs_true == *tl++) 3434 changed = 1; 3435 else 3436 changed = 0; 3437 recalltype = fxdr_unsigned(int, *tl); 3438 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n", 3439 laytype, iomode, changed, recalltype); 3440 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, 3441 M_WAITOK); 3442 if (laytype != NFSLAYOUT_NFSV4_1_FILES && 3443 laytype != NFSLAYOUT_FLEXFILE) 3444 error = NFSERR_NOMATCHLAYOUT; 3445 else if (recalltype == NFSLAYOUTRETURN_FILE) { 3446 error = nfsm_getfh(nd, &nfhp); 3447 NFSCL_DEBUG(4, "retfile getfh=%d\n", error); 3448 if (error != 0) 3449 goto nfsmout; 3450 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER + 3451 NFSX_STATEID); 3452 off = fxdr_hyper(tl); tl += 2; 3453 len = fxdr_hyper(tl); tl += 2; 3454 stateid.seqid = fxdr_unsigned(uint32_t, *tl++); 3455 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER); 3456 if (minorvers == NFSV4_MINORVERSION) 3457 error = NFSERR_NOTSUPP; 3458 else if (i == 0) 3459 error = NFSERR_OPNOTINSESS; 3460 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n", 3461 (uintmax_t)off, (uintmax_t)len, 3462 stateid.seqid, error); 3463 if (error == 0) { 3464 NFSLOCKCLSTATE(); 3465 clp = nfscl_getclntsess(sessionid); 3466 NFSCL_DEBUG(4, "cbly clp=%p\n", clp); 3467 if (clp != NULL) { 3468 lyp = nfscl_findlayout(clp, 3469 nfhp->nfh_fh, 3470 nfhp->nfh_len); 3471 NFSCL_DEBUG(4, "cblyp=%p\n", 3472 lyp); 3473 if (lyp != NULL && 3474 (lyp->nfsly_flags & 3475 (NFSLY_FILES | 3476 NFSLY_FLEXFILE)) != 0 && 3477 !NFSBCMP(stateid.other, 3478 lyp->nfsly_stateid.other, 3479 NFSX_STATEIDOTHER)) { 3480 error = 3481 nfscl_layoutrecall( 3482 recalltype, 3483 lyp, iomode, off, 3484 len, stateid.seqid, 3485 0, 0, NULL, 3486 recallp); 3487 recallp = NULL; 3488 wakeup(clp); 3489 NFSCL_DEBUG(4, 3490 "aft layrcal=%d\n", 3491 error); 3492 } else 3493 error = 3494 NFSERR_NOMATCHLAYOUT; 3495 } else 3496 error = NFSERR_NOMATCHLAYOUT; 3497 NFSUNLOCKCLSTATE(); 3498 } 3499 free(nfhp, M_NFSFH); 3500 } else if (recalltype == NFSLAYOUTRETURN_FSID) { 3501 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER); 3502 filesid[0] = fxdr_hyper(tl); tl += 2; 3503 filesid[1] = fxdr_hyper(tl); tl += 2; 3504 gotone = 0; 3505 NFSLOCKCLSTATE(); 3506 clp = nfscl_getclntsess(sessionid); 3507 if (clp != NULL) { 3508 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3509 nfsly_list) { 3510 if (lyp->nfsly_filesid[0] == 3511 filesid[0] && 3512 lyp->nfsly_filesid[1] == 3513 filesid[1]) { 3514 error = 3515 nfscl_layoutrecall( 3516 recalltype, 3517 lyp, iomode, 0, 3518 UINT64_MAX, 3519 lyp->nfsly_stateid.seqid, 3520 0, 0, NULL, 3521 recallp); 3522 recallp = NULL; 3523 gotone = 1; 3524 } 3525 } 3526 if (gotone != 0) 3527 wakeup(clp); 3528 else 3529 error = NFSERR_NOMATCHLAYOUT; 3530 } else 3531 error = NFSERR_NOMATCHLAYOUT; 3532 NFSUNLOCKCLSTATE(); 3533 } else if (recalltype == NFSLAYOUTRETURN_ALL) { 3534 gotone = 0; 3535 NFSLOCKCLSTATE(); 3536 clp = nfscl_getclntsess(sessionid); 3537 if (clp != NULL) { 3538 TAILQ_FOREACH(lyp, &clp->nfsc_layout, 3539 nfsly_list) { 3540 error = nfscl_layoutrecall( 3541 recalltype, lyp, iomode, 0, 3542 UINT64_MAX, 3543 lyp->nfsly_stateid.seqid, 3544 0, 0, NULL, recallp); 3545 recallp = NULL; 3546 gotone = 1; 3547 } 3548 if (gotone != 0) 3549 wakeup(clp); 3550 else 3551 error = NFSERR_NOMATCHLAYOUT; 3552 } else 3553 error = NFSERR_NOMATCHLAYOUT; 3554 NFSUNLOCKCLSTATE(); 3555 } else 3556 error = NFSERR_NOMATCHLAYOUT; 3557 if (recallp != NULL) { 3558 free(recallp, M_NFSLAYRECALL); 3559 recallp = NULL; 3560 } 3561 break; 3562 case NFSV4OP_CBSEQUENCE: 3563 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 
3564 5 * NFSX_UNSIGNED); 3565 bcopy(tl, sessionid, NFSX_V4SESSIONID); 3566 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3567 seqid = fxdr_unsigned(uint32_t, *tl++); 3568 slotid = fxdr_unsigned(uint32_t, *tl++); 3569 highslot = fxdr_unsigned(uint32_t, *tl++); 3570 cachethis = *tl++; 3571 /* Throw away the referring call stuff. */ 3572 clist = fxdr_unsigned(int, *tl); 3573 for (j = 0; j < clist; j++) { 3574 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID + 3575 NFSX_UNSIGNED); 3576 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3577 rcalls = fxdr_unsigned(int, *tl); 3578 for (k = 0; k < rcalls; k++) { 3579 NFSM_DISSECT(tl, uint32_t *, 3580 2 * NFSX_UNSIGNED); 3581 } 3582 } 3583 NFSLOCKCLSTATE(); 3584 if (i == 0) { 3585 clp = nfscl_getclntsess(sessionid); 3586 if (clp == NULL) 3587 error = NFSERR_SERVERFAULT; 3588 } else 3589 error = NFSERR_SEQUENCEPOS; 3590 if (error == 0) { 3591 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3592 error = nfsv4_seqsession(seqid, slotid, 3593 highslot, tsep->nfsess_cbslots, &rep, 3594 tsep->nfsess_backslots); 3595 } 3596 NFSUNLOCKCLSTATE(); 3597 if (error == 0 || error == NFSERR_REPLYFROMCACHE) { 3598 gotseq_ok = 1; 3599 if (rep != NULL) { 3600 /* 3601 * Handle a reply for a retried 3602 * callback. The reply will be 3603 * re-inserted in the session cache 3604 * by the nfsv4_seqsess_cacherep() call 3605 * after out: 3606 */ 3607 KASSERT(error == NFSERR_REPLYFROMCACHE, 3608 ("cbsequence: non-NULL rep")); 3609 NFSCL_DEBUG(4, "Got cbretry\n"); 3610 m_freem(nd->nd_mreq); 3611 nd->nd_mreq = rep; 3612 rep = NULL; 3613 goto out; 3614 } 3615 NFSM_BUILD(tl, uint32_t *, 3616 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED); 3617 bcopy(sessionid, tl, NFSX_V4SESSIONID); 3618 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED; 3619 *tl++ = txdr_unsigned(seqid); 3620 *tl++ = txdr_unsigned(slotid); 3621 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1); 3622 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1); 3623 } 3624 break; 3625 default: 3626 if (i == 0 && minorvers != NFSV4_MINORVERSION) 3627 error = NFSERR_OPNOTINSESS; 3628 else { 3629 NFSCL_DEBUG(1, "unsupp callback %d\n", op); 3630 error = NFSERR_NOTSUPP; 3631 } 3632 break; 3633 } 3634 if (error) { 3635 if (error == EBADRPC || error == NFSERR_BADXDR) { 3636 nd->nd_repstat = NFSERR_BADXDR; 3637 } else { 3638 nd->nd_repstat = error; 3639 } 3640 error = 0; 3641 } 3642 retops++; 3643 if (nd->nd_repstat) { 3644 *repp = nfscl_errmap(nd, minorvers); 3645 break; 3646 } else 3647 *repp = 0; /* NFS4_OK */ 3648 } 3649 nfsmout: 3650 if (recallp != NULL) 3651 free(recallp, M_NFSLAYRECALL); 3652 if (error) { 3653 if (error == EBADRPC || error == NFSERR_BADXDR) 3654 nd->nd_repstat = NFSERR_BADXDR; 3655 else 3656 printf("nfsv4 comperr1=%d\n", error); 3657 } 3658 if (taglen == -1) { 3659 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 3660 *tl++ = 0; 3661 *tl = 0; 3662 } else { 3663 *retopsp = txdr_unsigned(retops); 3664 } 3665 *nd->nd_errp = nfscl_errmap(nd, minorvers); 3666 out: 3667 if (gotseq_ok != 0) { 3668 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK); 3669 NFSLOCKCLSTATE(); 3670 clp = nfscl_getclntsess(sessionid); 3671 if (clp != NULL) { 3672 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3673 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots, 3674 NFSERR_OK, &rep); 3675 NFSUNLOCKCLSTATE(); 3676 } else { 3677 NFSUNLOCKCLSTATE(); 3678 m_freem(rep); 3679 } 3680 } 3681 } 3682 3683 /* 3684 * Generate the next cbident value. Basically just increment a static value 3685 * and then check that it isn't already in the list, if it has wrapped around. 
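 * A collision scan is only needed once the counter has wrapped,
 * since 2^32 values will have been handed out by then. The cbident
 * is how NFSv4.0 callbacks identify the mount point (see
 * nfscl_getmnt() below); NFSv4.1 and later use the session id
 * instead.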
3686 */ 3687 static u_int32_t 3688 nfscl_nextcbident(void) 3689 { 3690 struct nfsclclient *clp; 3691 int matched; 3692 static u_int32_t nextcbident = 0; 3693 static int haswrapped = 0; 3694 3695 nextcbident++; 3696 if (nextcbident == 0) 3697 haswrapped = 1; 3698 if (haswrapped) { 3699 /* 3700 * Search the clientid list for one already using this cbident. 3701 */ 3702 do { 3703 matched = 0; 3704 NFSLOCKCLSTATE(); 3705 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3706 if (clp->nfsc_cbident == nextcbident) { 3707 matched = 1; 3708 break; 3709 } 3710 } 3711 NFSUNLOCKCLSTATE(); 3712 if (matched == 1) 3713 nextcbident++; 3714 } while (matched); 3715 } 3716 return (nextcbident); 3717 } 3718 3719 /* 3720 * Get the mount point related to a given cbident or session and busy it. 3721 */ 3722 static mount_t 3723 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident, 3724 struct nfsclclient **clpp) 3725 { 3726 struct nfsclclient *clp; 3727 mount_t mp; 3728 int error; 3729 struct nfsclsession *tsep; 3730 3731 *clpp = NULL; 3732 NFSLOCKCLSTATE(); 3733 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3734 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3735 if (minorvers == NFSV4_MINORVERSION) { 3736 if (clp->nfsc_cbident == cbident) 3737 break; 3738 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, 3739 NFSX_V4SESSIONID)) 3740 break; 3741 } 3742 if (clp == NULL) { 3743 NFSUNLOCKCLSTATE(); 3744 return (NULL); 3745 } 3746 mp = clp->nfsc_nmp->nm_mountp; 3747 vfs_ref(mp); 3748 NFSUNLOCKCLSTATE(); 3749 error = vfs_busy(mp, 0); 3750 vfs_rel(mp); 3751 if (error != 0) 3752 return (NULL); 3753 *clpp = clp; 3754 return (mp); 3755 } 3756 3757 /* 3758 * Get the clientid pointer related to a given cbident. 3759 */ 3760 static struct nfsclclient * 3761 nfscl_getclnt(u_int32_t cbident) 3762 { 3763 struct nfsclclient *clp; 3764 3765 LIST_FOREACH(clp, &nfsclhead, nfsc_list) 3766 if (clp->nfsc_cbident == cbident) 3767 break; 3768 return (clp); 3769 } 3770 3771 /* 3772 * Get the clientid pointer related to a given sessionid. 3773 */ 3774 static struct nfsclclient * 3775 nfscl_getclntsess(uint8_t *sessionid) 3776 { 3777 struct nfsclclient *clp; 3778 struct nfsclsession *tsep; 3779 3780 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3781 tsep = nfsmnt_mdssession(clp->nfsc_nmp); 3782 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid, 3783 NFSX_V4SESSIONID)) 3784 break; 3785 } 3786 return (clp); 3787 } 3788 3789 /* 3790 * Search for a lock conflict locally on the client. A conflict occurs if 3791 * - not same owner and overlapping byte range and at least one of them is 3792 * a write lock or this is an unlock. 
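 * For example (illustrative ranges): if owner A holds a READ lock on
 * [0, 100) and owner B tests a WRITE lock for [50, 60), the ranges
 * overlap, the owners differ and one of the locks is a write, so
 * NFSERR_DENIED is returned. Two overlapping READ locks from
 * different owners never conflict. An F_UNLCK request is treated as
 * conflicting with any overlapping lock held by another owner.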
3793 */ 3794 static int 3795 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen, 3796 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp, 3797 struct nfscllock **lopp) 3798 { 3799 struct nfsclowner *owp; 3800 struct nfsclopen *op; 3801 int ret; 3802 3803 if (dp != NULL) { 3804 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); 3805 if (ret) 3806 return (ret); 3807 } 3808 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3809 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3810 if (op->nfso_fhlen == fhlen && 3811 !NFSBCMP(op->nfso_fh, fhp, fhlen)) { 3812 ret = nfscl_checkconflict(&op->nfso_lock, nlop, 3813 own, lopp); 3814 if (ret) 3815 return (ret); 3816 } 3817 } 3818 } 3819 return (0); 3820 } 3821 3822 static int 3823 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, 3824 u_int8_t *own, struct nfscllock **lopp) 3825 { 3826 struct nfscllockowner *lp; 3827 struct nfscllock *lop; 3828 3829 LIST_FOREACH(lp, lhp, nfsl_list) { 3830 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { 3831 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3832 if (lop->nfslo_first >= nlop->nfslo_end) 3833 break; 3834 if (lop->nfslo_end <= nlop->nfslo_first) 3835 continue; 3836 if (lop->nfslo_type == F_WRLCK || 3837 nlop->nfslo_type == F_WRLCK || 3838 nlop->nfslo_type == F_UNLCK) { 3839 if (lopp != NULL) 3840 *lopp = lop; 3841 return (NFSERR_DENIED); 3842 } 3843 } 3844 } 3845 } 3846 return (0); 3847 } 3848 3849 /* 3850 * Check for a local conflicting lock. 3851 */ 3852 APPLESTATIC int 3853 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, 3854 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags) 3855 { 3856 struct nfscllock *lop, nlck; 3857 struct nfscldeleg *dp; 3858 struct nfsnode *np; 3859 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 3860 int error; 3861 3862 nlck.nfslo_type = fl->l_type; 3863 nlck.nfslo_first = off; 3864 if (len == NFS64BITSSET) { 3865 nlck.nfslo_end = NFS64BITSSET; 3866 } else { 3867 nlck.nfslo_end = off + len; 3868 if (nlck.nfslo_end <= nlck.nfslo_first) 3869 return (NFSERR_INVAL); 3870 } 3871 np = VTONFS(vp); 3872 nfscl_filllockowner(id, own, flags); 3873 NFSLOCKCLSTATE(); 3874 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3875 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 3876 &nlck, own, dp, &lop); 3877 if (error != 0) { 3878 fl->l_whence = SEEK_SET; 3879 fl->l_start = lop->nfslo_first; 3880 if (lop->nfslo_end == NFS64BITSSET) 3881 fl->l_len = 0; 3882 else 3883 fl->l_len = lop->nfslo_end - lop->nfslo_first; 3884 fl->l_pid = (pid_t)0; 3885 fl->l_type = lop->nfslo_type; 3886 error = -1; /* no RPC required */ 3887 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || 3888 fl->l_type == F_RDLCK)) { 3889 /* 3890 * The delegation ensures that there isn't a conflicting 3891 * lock on the server, so return -1 to indicate an RPC 3892 * isn't required. 3893 */ 3894 fl->l_type = F_UNLCK; 3895 error = -1; 3896 } 3897 NFSUNLOCKCLSTATE(); 3898 return (error); 3899 } 3900 3901 /* 3902 * Handle Recall of a delegation. 3903 * The clp must be exclusive locked when this is called. 
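 * In outline, the recall is handled by:
 * 1. flushing dirty data back to the server for a write delegation,
 *    so that close/open consistency is retained,
 * 2. moving any opens issued locally under the delegation over to
 *    opens against the server (nfscl_moveopen()) and
 * 3. re-establishing any byte range locks done locally against the
 *    server (nfscl_relock()).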

/*
 * Handle Recall of a delegation.
 * The clp must be exclusively locked when this is called.
 */
static int
nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
    int called_from_renewthread)
{
	struct nfsclowner *owp, *lowp, *nowp;
	struct nfsclopen *op, *lop;
	struct nfscllockowner *lp;
	struct nfscllock *lckp;
	struct nfsnode *np;
	int error = 0, ret, gotvp = 0;

	if (vp == NULL) {
		/*
		 * First, get a vnode for the file. This is needed to do RPCs.
		 */
		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
		    dp->nfsdl_fhlen, p, &np);
		if (ret) {
			/*
			 * File isn't open, so nothing to move over to the
			 * server.
			 */
			return (0);
		}
		vp = NFSTOV(np);
		gotvp = 1;
	} else {
		np = VTONFS(vp);
	}
	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;

	/*
	 * Ok, if it's a write delegation, flush data to the server, so
	 * that close/open consistency is retained.
	 */
	ret = 0;
	NFSLOCKNODE(np);
	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
		np->n_flag |= NDELEGRECALL;
		NFSUNLOCKNODE(np);
		ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
		NFSLOCKNODE(np);
		np->n_flag &= ~NDELEGRECALL;
	}
	NFSINVALATTRCACHE(np);
	NFSUNLOCKNODE(np);
	if (ret == EIO && called_from_renewthread != 0) {
		/*
		 * If the flush failed with EIO for the renew thread,
		 * return now, so that the dirty buffer will be flushed
		 * later.
		 */
		if (gotvp != 0)
			vrele(vp);
		return (ret);
	}

	/*
	 * Now, for each openowner with opens issued locally, move them
	 * over to state against the server.
	 */
	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
		lop = LIST_FIRST(&lowp->nfsow_open);
		if (lop != NULL) {
			if (LIST_NEXT(lop, nfso_list) != NULL)
				panic("nfsdlg mult opens");
			/*
			 * Look for the same openowner against the server.
			 */
			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
				if (!NFSBCMP(lowp->nfsow_owner,
				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
					newnfs_copycred(&dp->nfsdl_cred, cred);
					ret = nfscl_moveopen(vp, clp, nmp, lop,
					    owp, dp, cred, p);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER ||
					    ret == NFSERR_BADSESSION) {
						if (gotvp)
							vrele(vp);
						return (ret);
					}
					if (ret) {
						nfscl_freeopen(lop, 1);
						if (!error)
							error = ret;
					}
					break;
				}
			}

			/*
			 * If no openowner found, create one and get an open
			 * for it.
			 */
			if (owp == NULL) {
				nowp = malloc(sizeof (struct nfsclowner),
				    M_NFSCLOWNER, M_WAITOK);
				nfscl_newopen(clp, NULL, &owp, &nowp, &op,
				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
				    dp->nfsdl_fhlen, NULL, NULL);
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_moveopen(vp, clp, nmp, lop,
				    owp, dp, cred, p);
				if (ret) {
					nfscl_freeopenowner(owp, 0);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER ||
					    ret == NFSERR_BADSESSION) {
						if (gotvp)
							vrele(vp);
						return (ret);
					}
					if (ret) {
						nfscl_freeopen(lop, 1);
						if (!error)
							error = ret;
					}
				}
			}
		}
	}

	/*
	 * Now, get byte range locks for any locks done locally.
	 */
	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
			if (ret == NFSERR_STALESTATEID ||
			    ret == NFSERR_STALEDONTRECOVER ||
			    ret == NFSERR_STALECLIENTID ||
			    ret == NFSERR_BADSESSION) {
				if (gotvp)
					vrele(vp);
				return (ret);
			}
			if (ret && !error)
				error = ret;
		}
	}
	if (gotvp)
		vrele(vp);
	return (error);
}

/*
 * Move a locally issued open over to an owner on the state list.
 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
 * returns with it unlocked.
 */
static int
nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclopen *op, *nop;
	struct nfscldeleg *ndp;
	struct nfsnode *np;
	int error = 0, newone;

	/*
	 * First, look for an appropriate open. If found, just increment the
	 * opencnt in it.
	 */
	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
		    op->nfso_fhlen == lop->nfso_fhlen &&
		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
			op->nfso_opencnt += lop->nfso_opencnt;
			nfscl_freeopen(lop, 1);
			return (0);
		}
	}

	/* No appropriate open, so we have to do one against the server. */
	np = VTONFS(vp);
	nop = malloc(sizeof (struct nfsclopen) +
	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	newone = 0;
	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
	    lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
	ndp = dp;
	error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
	    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
	    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
	if (error) {
		if (newone)
			nfscl_freeopen(op, 0);
	} else {
		op->nfso_mode |= lop->nfso_mode;
		op->nfso_opencnt += lop->nfso_opencnt;
		nfscl_freeopen(lop, 1);
	}
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (ndp != NULL) {
		/*
		 * What should I do with the returned delegation, since the
		 * delegation is being recalled? For now, just printf and
		 * throw it away.
		 */
		printf("Moveopen returned deleg\n");
		free(ndp, M_NFSCLDELEG);
	}
	return (error);
}

/*
 * Recall all delegations on this client.
 */
static void
nfscl_totalrecall(struct nfsclclient *clp)
{
	struct nfscldeleg *dp;

	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
			dp->nfsdl_flags |= NFSCLDL_RECALL;
	}
}
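
/*
 * Worked example (illustrative): the mode test in nfscl_moveopen() above,
 * (op->nfso_mode & lop->nfso_mode) == lop->nfso_mode, checks that the
 * server-side open is a superset of the locally issued one. With the
 * protocol access bits NFSV4OPEN_ACCESSREAD == 0x1 and
 * NFSV4OPEN_ACCESSWRITE == 0x2, a local read-only open (0x1) folds into
 * an existing read/write open (0x3) because (0x3 & 0x1) == 0x1, whereas a
 * local read/write open (0x3) cannot fold into a read-only open (0x1)
 * since (0x1 & 0x3) == 0x1 != 0x3, so a new Open must be done.
 */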

/*
 * Relock byte ranges. Called for delegation recall and state expiry.
 */
static int
nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
    NFSPROC_T *p)
{
	struct nfscllockowner *nlp;
	struct nfsfh *nfhp;
	u_int64_t off, len;
	int error, newone, donelocally;

	off = lop->nfslo_first;
	len = lop->nfslo_end - lop->nfslo_first;
	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
	    clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
	    lp->nfsl_openowner, &nlp, &newone, &donelocally);
	if (error || donelocally)
		return (error);
	nfhp = VTONFS(vp)->n_fhp;
	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
	    nfhp->nfh_len, nlp, newone, 0, off,
	    len, lop->nfslo_type, cred, p);
	if (error)
		nfscl_freelockowner(nlp, 0);
	return (error);
}

/*
 * Called to re-open a file. Basically get a vnode for the file handle
 * and then call nfsrpc_openrpc() to do the rest.
 */
static int
nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
    u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsnode *np;
	vnode_t vp;
	int error;

	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
	if (error)
		return (error);
	vp = NFSTOV(np);
	if (np->n_v4 != NULL) {
		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
		    cred, p);
	} else {
		error = EINVAL;
	}
	vrele(vp);
	return (error);
}

/*
 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
 * fail.
 */
static int
nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
    u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
    int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
{
	int error;

	do {
		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
		    0, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstryop");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
			    newfhlen, mode, op, name, namelen, ndpp, reclaim,
			    delegtype, cred, p, 1, 0);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstryop");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}
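
/*
 * Illustrative sketch (not compiled): the retry idiom used by
 * nfscl_tryopen() above and by nfscl_trylock(), nfscl_trydelegreturn() and
 * nfscl_tryclose() below. NFSERR_DELAY is the server asking the client to
 * retry later, so the RPC is simply repeated after a short catnap; any
 * other status ends the loop. some_state_rpc() is a hypothetical stand-in.
 */
#if 0
	int error;

	do {
		error = some_state_rpc(...);	/* hypothetical RPC call */
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstryop");
	} while (error == NFSERR_DELAY);
#endif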

/*
 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
 * NFSERR_DELAY. Also, retry with system credentials, if the provided
 * credentials don't work.
 */
static int
nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
    int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
    u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;
	int error;

	do {
		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
		    reclaim, off, len, type, cred, p, 0);
		if (!error && nd->nd_repstat == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
			    "nfstrylck");
	} while (!error && nd->nd_repstat == NFSERR_DELAY);
	if (!error)
		error = nd->nd_repstat;
	if (error == EAUTH || error == EACCES) {
		/* Try again using root credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
			    newone, reclaim, off, len, type, cred, p, 1);
			if (!error && nd->nd_repstat == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
				    "nfstrylck");
		} while (!error && nd->nd_repstat == NFSERR_DELAY);
		if (!error)
			error = nd->nd_repstat;
	}
	return (error);
}

/*
 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
 * credentials fail.
 */
static int
nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
    struct nfsmount *nmp, NFSPROC_T *p)
{
	int error;

	do {
		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstrydp");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstrydp");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}

/*
 * Try a close against the server. Just call nfsrpc_closerpc(),
 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
 * credentials fail.
 */
APPLESTATIC int
nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
    struct nfsmount *nmp, NFSPROC_T *p)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;
	int error;

	do {
		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstrycl");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstrycl");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}
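
/*
 * Note on the credential fallback shared by the four try* helpers above:
 * EAUTH or EACCES from the first attempt indicates that the passed in
 * credentials were not accepted by the server, so newnfs_setroot()
 * rewrites them and the RPC is attempted once more as the system (root)
 * principal. A minimal sketch of the shape (not compiled), with do_rpc()
 * as a hypothetical stand-in:
 */
#if 0
	error = do_rpc(cred, 0);
	if (error == EAUTH || error == EACCES) {
		newnfs_setroot(cred);	/* fall back to system credentials */
		error = do_rpc(cred, 1);
	}
#endif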

/*
 * Decide if a delegation on a file permits close without flushing writes
 * to the server. This might be a big performance win in some environments.
 * (Not useful until the client does caching on local stable storage.)
 */
APPLESTATIC int
nfscl_mustflush(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;

	np = VTONFS(vp);
	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return (1);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (1);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags &
	    (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
	    NFSCLDL_WRITE &&
	    (dp->nfsdl_sizelimit >= np->n_size ||
	    !NFSHASSTRICT3530(nmp))) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();
	return (1);
}

/*
 * See if a (write) delegation exists for this file.
 */
APPLESTATIC int
nfscl_nodeleg(vnode_t vp, int writedeleg)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;

	np = VTONFS(vp);
	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return (1);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (1);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
	    NFSCLDL_WRITE)) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();
	return (1);
}

/*
 * Look for an associated delegation that should be DelegReturned.
 */
APPLESTATIC int
nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vnode_mount(vp));
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on vp that has state, lock the client and
	 *   do a recall
	 * - return delegation with no state
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				(void) nfsmsleep(&dp->nfsdl_rwlock,
				    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, NULL);
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				(void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*stp = dp->nfsdl_stateid;
			retcnt = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp);
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Look for associated delegation(s) that should be DelegReturned.
 */
APPLESTATIC int
nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
    nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vnode_mount(fvp));
	*gotfdp = 0;
	*gottdp = 0;
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on fvp that has state, lock the client and
	 *   do a recall
	 * - return delegation(s) with no state.
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		np = VTONFS(fvp);
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL && *gotfdp == 0) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				(void) nfsmsleep(&dp->nfsdl_rwlock,
				    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, NULL);
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				(void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*fstp = dp->nfsdl_stateid;
			retcnt++;
			*gotfdp = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp);
		}
		if (igotlock) {
			nfsv4_unlock(&clp->nfsc_lock, 0);
			igotlock = 0;
		}
		if (tvp != NULL) {
			np = VTONFS(tvp);
			dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
			    np->n_fhp->nfh_len);
			if (dp != NULL && *gottdp == 0) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
					dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
					(void) nfsmsleep(&dp->nfsdl_rwlock,
					    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
					continue;
				}
				LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
					if (!LIST_EMPTY(&owp->nfsow_open)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				*tstp = dp->nfsdl_stateid;
				retcnt++;
				*gottdp = 1;
				nfscl_cleandeleg(dp);
				nfscl_freedeleg(&clp->nfsc_deleg, dp);
			}
		}
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Get a reference on the clientid associated with the mount point.
 * Return 1 if success, 0 otherwise.
 */
APPLESTATIC int
nfscl_getref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
	NFSUNLOCKCLSTATE();
	return (1);
}

/*
 * Release a reference on a clientid acquired with the above call.
 */
APPLESTATIC void
nfscl_relref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}
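
/*
 * Illustrative sketch (not compiled): nfscl_getref()/nfscl_relref() are
 * meant to bracket code that must keep the clientid from being recovered
 * or freed while it runs. The function below is hypothetical.
 */
#if 0
static int
do_something_with_clientid(struct nfsmount *nmp)
{

	if (nfscl_getref(nmp) == 0)
		return (EPERM);		/* no clientid for this mount */
	/* ... the clientid is held via a shared reference here ... */
	nfscl_relref(nmp);
	return (0);
}
#endif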

/*
 * Save the size attribute in the delegation, since the nfsnode
 * is going away.
 */
APPLESTATIC void
nfscl_reclaimnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		dp->nfsdl_size = np->n_size;
	NFSUNLOCKCLSTATE();
}

/*
 * Get the saved size attribute from the delegation, since the nfsnode
 * is newly allocated.
 */
APPLESTATIC void
nfscl_newnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		np->n_size = dp->nfsdl_size;
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file, set the modtime
 * to the local clock time.
 */
APPLESTATIC void
nfscl_delegmodtime(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
		nanotime(&dp->nfsdl_modtime);
		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
	}
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file with a modtime set,
 * put that modtime in mtime.
 */
APPLESTATIC void
nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
		*mtime = dp->nfsdl_modtime;
	NFSUNLOCKCLSTATE();
}

/*
 * Map a reply status onto the status actually sent in a callback reply,
 * checking it against the list of errors valid for the callback operation.
 */
static int
nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
{
	short *defaulterrp, *errp;

	if (!nd->nd_repstat)
		return (0);
	if (nd->nd_procnum == NFSPROC_NOOP)
		return (txdr_unsigned(nd->nd_repstat & 0xffff));
	if (nd->nd_repstat == EBADRPC)
		return (txdr_unsigned(NFSERR_BADXDR));
	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
	    nd->nd_repstat == NFSERR_OPILLEGAL)
		return (txdr_unsigned(nd->nd_repstat));
	if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
	    minorvers > NFSV4_MINORVERSION) {
		/* NFSv4.n error. */
		return (txdr_unsigned(nd->nd_repstat));
	}
	if (nd->nd_procnum < NFSV4OP_CBNOPS)
		errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
	else
		return (txdr_unsigned(nd->nd_repstat));
	while (*++errp)
		if (*errp == (short)nd->nd_repstat)
			return (txdr_unsigned(nd->nd_repstat));
	return (txdr_unsigned(*defaulterrp));
}

/*
 * Called to find/add a layout to a client.
 * This function returns the layout with a refcnt (shared lock) upon
 * success (returns 0) or with no lock/refcnt on the layout when an
 * error is returned.
 * If a layout is passed in via lypp, it is locked (exclusively locked).
 */
APPLESTATIC int
nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    nfsv4stateid_t *stateidp, int layouttype, int retonclose,
    struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp, *tlyp;
	struct nfsclflayout *flp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	int layout_passed_in;

	mp = nmp->nm_mountp;
	layout_passed_in = 1;
	tlyp = NULL;
	lyp = *lypp;
	if (lyp == NULL) {
		layout_passed_in = 0;
		tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
		    M_WAITOK | M_ZERO);
	}

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (layout_passed_in != 0)
			nfsv4_unlock(&lyp->nfsly_lock, 0);
		NFSUNLOCKCLSTATE();
		if (tlyp != NULL)
			free(tlyp, M_NFSLAYOUT);
		return (EPERM);
	}
	if (lyp == NULL) {
		/*
		 * Although no lyp was passed in, another thread might have
		 * allocated one. If one is found, just increment its ref
		 * count and return it.
		 */
		lyp = nfscl_findlayout(clp, fhp, fhlen);
		if (lyp == NULL) {
			lyp = tlyp;
			tlyp = NULL;
			lyp->nfsly_stateid.seqid = stateidp->seqid;
			lyp->nfsly_stateid.other[0] = stateidp->other[0];
			lyp->nfsly_stateid.other[1] = stateidp->other[1];
			lyp->nfsly_stateid.other[2] = stateidp->other[2];
			lyp->nfsly_lastbyte = 0;
			LIST_INIT(&lyp->nfsly_flayread);
			LIST_INIT(&lyp->nfsly_flayrw);
			LIST_INIT(&lyp->nfsly_recall);
			lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
			lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
			lyp->nfsly_clp = clp;
			if (layouttype == NFSLAYOUT_FLEXFILE)
				lyp->nfsly_flags = NFSLY_FLEXFILE;
			else
				lyp->nfsly_flags = NFSLY_FILES;
			if (retonclose != 0)
				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
			lyp->nfsly_fhlen = fhlen;
			NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
			    nfsly_hash);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			nfscl_layoutcnt++;
		} else {
			if (retonclose != 0)
				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
		}
		nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
		if (NFSCL_FORCEDISM(mp)) {
			NFSUNLOCKCLSTATE();
			if (tlyp != NULL)
				free(tlyp, M_NFSLAYOUT);
			return (EPERM);
		}
		*lypp = lyp;
	} else
		lyp->nfsly_stateid.seqid = stateidp->seqid;

	/* Merge the new list of File Layouts into the list. */
	flp = LIST_FIRST(fhlp);
	if (flp != NULL) {
		if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
			nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
		else
			nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
	}
	if (layout_passed_in != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 1);
	NFSUNLOCKCLSTATE();
	if (tlyp != NULL)
		free(tlyp, M_NFSLAYOUT);
	return (0);
}

/*
 * Search for a layout by MDS file handle.
 * If one is found, it is returned with a refcnt (shared lock) iff
 * retflpp returned non-NULL and locked (exclusively locked) iff retflpp is
 * returned NULL.
 */
struct nfscllayout *
nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
    uint64_t off, struct nfsclflayout **retflpp, int *recalledp)
{
	struct nfscllayout *lyp;
	mount_t mp;
	int error, igotlock;

	mp = clp->nfsc_nmp->nm_mountp;
	*recalledp = 0;
	*retflpp = NULL;
	NFSLOCKCLSTATE();
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			error = nfscl_findlayoutforio(lyp, off,
			    NFSV4OPEN_ACCESSREAD, retflpp);
			if (error == 0)
				nfsv4_getref(&lyp->nfsly_lock, NULL,
				    NFSCLSTATEMUTEXPTR, mp);
			else {
				do {
					igotlock = nfsv4_lock(&lyp->nfsly_lock,
					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
				*retflpp = NULL;
			}
			if (NFSCL_FORCEDISM(mp)) {
				lyp = NULL;
				*recalledp = 1;
			}
		} else {
			lyp = NULL;
			*recalledp = 1;
		}
	}
	NFSUNLOCKCLSTATE();
	return (lyp);
}

/*
 * Search for a layout by MDS file handle. If one is found, mark it to be
 * recalled, if it is already marked "return on close".
 */
static void
nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
    int fhlen, struct nfsclrecalllayout **recallpp)
{
	struct nfscllayout *lyp;
	uint32_t iomode;

	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vnode_mount(vp))) ||
	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
		return;
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE |
	    NFSLY_RECALL)) == NFSLY_RETONCLOSE) {
		iomode = 0;
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
		    *recallpp);
		NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
		*recallpp = NULL;
	}
}

/*
 * Mark the layout to be recalled and with an error.
 * Also, disable the dsp from further use.
 */
void
nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
    struct nfscllayout *lyp, struct nfsclds *dsp)
{
	struct nfsclrecalllayout *recallp;
	uint32_t iomode;

	printf("DS being disabled, error=%d\n", stat);
	/* Set up the return of the layout. */
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	iomode = 0;
	NFSLOCKCLSTATE();
	if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
		    dp->nfsdi_deviceid, recallp);
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
	} else {
		NFSUNLOCKCLSTATE();
		free(recallp, M_NFSLAYRECALL);
	}

	/* And shut the TCP connection down. */
	nfscl_cancelreqs(dsp);
}

/*
 * Cancel all RPCs for this "dsp" by closing the connection.
 * Also, mark the session as defunct.
 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
 * cannot be shut down.
 */
APPLESTATIC void
nfscl_cancelreqs(struct nfsclds *dsp)
{
	struct __rpc_client *cl;
	static int non_event;

	NFSLOCKDS(dsp);
	if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
	    dsp->nfsclds_sockp != NULL &&
	    dsp->nfsclds_sockp->nr_client != NULL) {
		dsp->nfsclds_flags |= NFSCLDS_CLOSED;
		cl = dsp->nfsclds_sockp->nr_client;
		dsp->nfsclds_sess.nfsess_defunct = 1;
		NFSUNLOCKDS(dsp);
		CLNT_CLOSE(cl);
		/*
		 * This 1sec sleep is done to reduce the number of reconnect
		 * attempts made on the DS while it has failed.
		 */
		tsleep(&non_event, PVFS, "ndscls", hz);
		return;
	}
	NFSUNLOCKDS(dsp);
}

/*
 * Dereference a layout.
 */
void
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
{

	NFSLOCKCLSTATE();
	if (exclocked != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 0);
	else
		nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	if (dip == NULL)
		dip = nfscl_finddevinfo(clp, deviceid);
	if (dip != NULL)
		dip->nfsdi_refcnt++;
	NFSUNLOCKCLSTATE();
	return (dip);
}

/*
 * Dereference a devinfo structure.
 */
static void
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
{

	dip->nfsdi_refcnt--;
	if (dip->nfsdi_refcnt == 0)
		wakeup(&dip->nfsdi_refcnt);
}

/*
 * Dereference a devinfo structure.
 */
void
nfscl_reldevinfo(struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	nfscl_reldevinfo_locked(dip);
	NFSUNLOCKCLSTATE();
}

/*
 * Find a layout for this file handle. Return NULL upon failure.
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscllayout *lyp;

	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
		if (lyp->nfsly_fhlen == fhlen &&
		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
			break;
	return (lyp);
}
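
/*
 * Illustrative sketch (not compiled): the devinfo reference counting
 * convention used above. nfscl_getdevinfo() returns the devinfo with
 * nfsdi_refcnt incremented and nfscl_reldevinfo() drops that reference,
 * waking up anyone waiting for the count to reach zero. The I/O done in
 * the middle is hypothetical.
 */
#if 0
	struct nfscldevinfo *dip;

	dip = nfscl_getdevinfo(clp, deviceid, NULL);
	if (dip != NULL) {
		/* ... do I/O to the data server described by dip ... */
		nfscl_reldevinfo(dip);
	}
#endif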

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
{
	struct nfscldevinfo *dip;

	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
		    == 0)
			break;
	return (dip);
}

/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
static void
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
{
	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

	flp = LIST_FIRST(fhlp);
	prevflp = NULL;
	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
			prevflp = flp;
			flp = LIST_NEXT(flp, nfsfl_list);
		}
		if (prevflp == NULL)
			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
		else
			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
		prevflp = nflp;
	}
}

/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
 */
APPLESTATIC int
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
    struct nfsclflayout *flp)
{
	struct nfsclclient *clp;
	struct nfscldevinfo *tdip;
	uint8_t *dev;

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (ENODEV);
	}
	if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
		dev = flp->nfsfl_dev;
	else
		dev = flp->nfsfl_ffm[ind].dev;
	tdip = nfscl_finddevinfo(clp, dev);
	if (tdip != NULL) {
		tdip->nfsdi_layoutrefs++;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = tdip;
		else
			flp->nfsfl_ffm[ind].devp = tdip;
		nfscl_reldevinfo_locked(tdip);
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (0);
	}
	if (dip != NULL) {
		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
		dip->nfsdi_layoutrefs = 1;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = dip;
		else
			flp->nfsfl_ffm[ind].devp = dip;
	}
	NFSUNLOCKCLSTATE();
	if (dip == NULL)
		return (ENODEV);
	return (0);
}

/*
 * Free up a layout structure and associated file layout structure(s).
 */
APPLESTATIC void
nfscl_freelayout(struct nfscllayout *layp)
{
	struct nfsclflayout *flp, *nflp;
	struct nfsclrecalllayout *rp, *nrp;

	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
		LIST_REMOVE(rp, nfsrecly_list);
		free(rp, M_NFSLAYRECALL);
	}
	nfscl_layoutcnt--;
	free(layp, M_NFSLAYOUT);
}
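
/*
 * Worked example (illustrative): given a main list with file layout
 * segments at offsets { 0, 4096 } and a new list with segments at
 * { 2048, 8192 }, nfscl_mergeflayouts() above walks the main list once
 * and splices each new entry in front of the first entry with a larger
 * offset, yielding { 0, 2048, 4096, 8192 }. The new entries are spliced
 * into the main list, so the new list head must not be reused afterwards.
 */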

/*
 * Free up a file layout structure.
 */
APPLESTATIC void
nfscl_freeflayout(struct nfsclflayout *flp)
{
	int i, j;

	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
		for (i = 0; i < flp->nfsfl_fhcnt; i++)
			free(flp->nfsfl_fh[i], M_NFSFH);
		if (flp->nfsfl_devp != NULL)
			flp->nfsfl_devp->nfsdi_layoutrefs--;
	}
	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
			if (flp->nfsfl_ffm[i].devp != NULL)
				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
		}
	free(flp, M_NFSFLAYOUT);
}

/*
 * Free up a file layout devinfo structure.
 */
APPLESTATIC void
nfscl_freedevinfo(struct nfscldevinfo *dip)
{

	free(dip, M_NFSDEVINFO);
}

/*
 * Mark any layouts that match as recalled.
 */
static int
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
    char *devid, struct nfsclrecalllayout *recallp)
{
	struct nfsclrecalllayout *rp, *orp;

	recallp->nfsrecly_recalltype = recalltype;
	recallp->nfsrecly_iomode = iomode;
	recallp->nfsrecly_stateseqid = stateseqid;
	recallp->nfsrecly_off = off;
	recallp->nfsrecly_len = len;
	recallp->nfsrecly_stat = stat;
	recallp->nfsrecly_op = op;
	if (devid != NULL)
		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
	/*
	 * Order the list as file returns first, followed by fsid and any
	 * returns, both in increasing stateseqid order.
	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
	 * (I'm not sure this is correct because I find RFC5661 confusing
	 * on this, but hopefully it will work ok.)
	 */
	orp = NULL;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		orp = rp;
		if ((recalltype == NFSLAYOUTRETURN_FILE &&
		    (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
		    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
		    (recalltype != NFSLAYOUTRETURN_FILE &&
		    rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
		    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
			break;
		}

		/*
		 * Put any error return on all the file returns that will
		 * precede this one.
		 */
		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
		    stat != 0 && rp->nfsrecly_stat == 0) {
			rp->nfsrecly_stat = stat;
			rp->nfsrecly_op = op;
			if (devid != NULL)
				NFSBCOPY(devid, rp->nfsrecly_devid,
				    NFSX_V4DEVICEID);
		}
	}
	if (rp == NULL) {
		if (orp == NULL)
			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
			    nfsrecly_list);
		else
			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
	}
	lyp->nfsly_flags |= NFSLY_RECALL;
	wakeup(lyp->nfsly_clp);
	return (0);
}

/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
static int
nfscl_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		/* seqid2 has wrapped around. */
		return (0);
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		/* seqid1 has wrapped around. */
		return (1);
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}

/*
 * Do a layout return for each of the recalls.
 */
static void
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclrecalllayout *rp;
	nfsv4stateid_t stateid;
	int layouttype;

	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
	stateid.seqid = lyp->nfsly_stateid.seqid;
	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
		    lyp->nfsly_fhlen, 0, layouttype,
		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
		    rp->nfsrecly_off, rp->nfsrecly_len,
		    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
		    rp->nfsrecly_devid);
	}
}

/*
 * Do the layout commit for a file layout.
 */
static void
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclflayout *flp;
	uint64_t len;
	int error, layouttype;

	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
		if (layouttype == NFSLAYOUT_FLEXFILE &&
		    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
			NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
			/* If not supported, don't bother doing it. */
			NFSLOCKMNT(nmp);
			nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
			NFSUNLOCKMNT(nmp);
			break;
		} else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
			len = flp->nfsfl_end - flp->nfsfl_off;
			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
			    layouttype, cred, p, NULL);
			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
			if (error == NFSERR_NOTSUPP) {
				/* If not supported, don't bother doing it. */
				NFSLOCKMNT(nmp);
				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
				NFSUNLOCKMNT(nmp);
				break;
			}
		}
	}
}
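
/*
 * Worked examples (illustrative) for nfscl_seq() above, which treats the
 * 32 bit seqid space as circular, using a distance of 0x7fffffff to spot
 * a wraparound:
 *	nfscl_seq(5, 10)            == 1 (5 comes before 10, no wrap)
 *	nfscl_seq(10, 5)            == 0 (10 comes after 5, no wrap)
 *	nfscl_seq(0xfffffff0, 0x10) == 1 (0x10 has wrapped around, so it
 *	                                 comes after 0xfffffff0)
 *	nfscl_seq(0x10, 0xfffffff0) == 0 (seqid2 is a pre-wrap value, so
 *	                                 0x10 does not come before it)
 */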

/*
 * Commit all layouts for a file (vnode).
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vnode_mount(vp);
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
tryagain:
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}
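
/*
 * Illustrative sketch (not compiled): nfscl_layoutcommit() is the sort of
 * call a vnode operation would make once writes done through a layout
 * should be made visible on the MDS, e.g. from a close or fsync path.
 * The surrounding code is hypothetical.
 */
#if 0
	int error;

	/* Writes via the pNFS data servers are done; tell the MDS. */
	error = nfscl_layoutcommit(vp, p);
	if (error != 0 && error != EPERM)
		printf("layoutcommit failed %d\n", error);
#endif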