/*-
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - When a process exits, it calls nfscl_cleanup(), which goes
 *     through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
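
/*
 * Illustrative sketch (not compiled in) of the pid-to-owner mapping
 * described above. An owner name is an opaque byte array of
 * NFSV4CL_LOCKNAMELEN bytes that nfscl_filllockowner() derives from the
 * process; the exact layout shown here (the pid in the leading bytes)
 * is an assumption for illustration only.
 */
#if 0
static void
example_fillowner(pid_t pid, u_int8_t *own)
{

	NFSBZERO(own, NFSV4CL_LOCKNAMELEN);
	/* Hypothetical layout: the Posix pid occupies the leading bytes. */
	NFSBCOPY(&pid, own, sizeof(pid));
}
#endif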

#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstats newnfsstats;
extern struct nfsreqhead nfsd_reqq;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
#endif	/* !APPLEKEXT */

static int nfscl_delegcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    NFSPROC_T *, u_int32_t, struct nfsclowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(u_int32_t);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freelockowner(struct nfscllockowner *, int);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
static int nfscl_moveopen(vnode_t, struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t, struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t, u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t, u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
	(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
APPLESTATIC int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
		MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vp, cred, p, &clp);
	if (ret != 0) {
		FREE((caddr_t)nowp, M_NFSCLOWNER);
		if (nop != NULL)
			FREE((caddr_t)nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	nfscl_filllockowner(p, own);
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    newonep);

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 */
	if (lockit)
		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		FREE((caddr_t)nowp, M_NFSCLOWNER);
	if (nop != NULL)
		FREE((caddr_t)nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}
	return (0);
}

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			newnfsstats.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			newnfsstats.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				newnfsstats.cllocalopens++;
			} else {
				newnfsstats.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}
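
/*
 * Illustrative caller pattern (not compiled in): nfscl_open() hands back
 * an openowner/open pair and, when "lockit" is non-zero, the openowner
 * exclusively locked; the caller is expected to do the Open RPC when
 * *retp == NFSCLOPEN_DOOPEN and then drop the lock via
 * nfscl_openrelease(). A sketch only; "nfhp" and "fhlen" stand in for a
 * file handle already in hand and error handling is elided.
 */
#if 0
static int
example_do_open(vnode_t vp, u_int8_t *nfhp, int fhlen, struct ucred *cred,
    NFSPROC_T *p)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	int error, newone, ret;

	error = nfscl_open(vp, nfhp, fhlen, NFSV4OPEN_ACCESSREAD, 1, cred,
	    p, &owp, &op, &newone, &ret, 1);
	if (error == 0) {
		if (ret == NFSCLOPEN_DOOPEN) {
			/* ... do the Open RPC against the server ... */
		}
		nfscl_openrelease(op, error, newone);
	}
	return (error);
}
#endif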

/*
 * Called to find/add a delegation to a client.
 */
APPLESTATIC int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		FREE((caddr_t)dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		newnfsstats.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * A delegation already exists. If we were also handed a
		 * new one, just throw the new one away.
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			FREE((caddr_t)dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
APPLESTATIC int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error, done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 */
	stateidp->seqid = 0;
	stateidp->other[0] = 0;
	stateidp->other[1] = 0;
	stateidp->other[2] = 0;
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		nfscl_filllockowner(p, own);
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, NULL, p,
		    mode, NULL, &op);
		if (error == 0) {
			/* now look for a lockowner */
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (!NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					stateidp->seqid =
					    lp->nfsl_stateid.seqid;
					stateidp->other[0] =
					    lp->nfsl_stateid.other[0];
					stateidp->other[1] =
					    lp->nfsl_stateid.other[1];
					stateidp->other[2] =
					    lp->nfsl_stateid.other[2];
					NFSUNLOCKCLSTATE();
					return (0);
				}
			}
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		done = 0;
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (!done && owp != NULL) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
				    (mode & op->nfso_mode) == mode) {
					done = 1;
					break;
				}
			}
			if (!done)
				owp = LIST_NEXT(owp, nfsow_list);
		}
		if (!done) {
			NFSUNLOCKCLSTATE();
			return (ENOENT);
		}
		/* for read aheads or write behinds, use the open cred */
		newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}
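
/*
 * Illustrative use of nfscl_getstateid() (not compiled in): an I/O path
 * asks for the most specific stateid available, in the order delegation
 * stateid, then lock stateid, then open stateid, falling back to the
 * all-zeros special stateid. When a delegation stateid is returned, its
 * use count is bumped and *lckpp points at the delegation's rwlock,
 * which the caller must release once the RPC is done. A sketch only;
 * the release step shown is schematic.
 */
#if 0
static int
example_start_read(vnode_t vp, u_int8_t *nfhp, int fhlen, struct ucred *cred,
    NFSPROC_T *p)
{
	nfsv4stateid_t stateid;
	void *lckp = NULL;
	int error;

	error = nfscl_getstateid(vp, nfhp, fhlen, NFSV4OPEN_ACCESSREAD,
	    cred, p, &stateid, &lckp);
	if (error == 0) {
		/* ... issue the Read RPC using "stateid" ... */
	}
	if (lckp != NULL) {
		/* Schematic: drop the delegation use count taken above. */
		NFSLOCKCLSTATE();
		((struct nfsv4lock *)lckp)->nfslock_usecnt--;
		NFSUNLOCKCLSTATE();
	}
	return (error);
}
#endif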

/*
 * Get an existing open. Search up the parentage tree for a match and
 * return with the first one found.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *rown, NFSPROC_T *p, u_int32_t mode, struct nfsclowner **owpp,
    struct nfsclopen **opp)
{
	struct nfsclowner *owp = NULL;
	struct nfsclopen *op;
	NFSPROC_T *nproc;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;

	nproc = p;
	op = NULL;
	while (op == NULL && (nproc != NULL || rown != NULL)) {
		if (nproc != NULL) {
			nfscl_filllockowner(nproc, own);
			ownp = own;
		} else {
			ownp = rown;
		}
		/* Search the client list */
		LIST_FOREACH(owp, ohp, nfsow_list) {
			if (!NFSBCMP(owp->nfsow_owner, ownp,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (owp != NULL) {
			/* and look for the correct open */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
				    && (op->nfso_mode & mode) == mode) {
					break;
				}
			}
		}
		if (rown != NULL)
			break;
		if (op == NULL)
			nproc = nfscl_getparent(nproc);
	}
	if (op == NULL) {
		return (EBADF);
	}
	if (owpp)
		*owpp = owp;
	*opp = op;
	return (0);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
APPLESTATIC void
nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
    __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (!unlocked)
		nfscl_lockunlock(&owp->nfsow_rwlock);
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
APPLESTATIC void
nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * It always returns clpp with a reference count on it, unless returning
 * an error.
 */
APPLESTATIC int
nfscl_getcl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
    struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfscllockowner *lp, *nlp;
	struct mount *mp;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	mp = vnode_mount(vp);
	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		MALLOC(newclp, struct nfsclclient *,
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		NFSBZERO((caddr_t)clp, sizeof(struct nfsclclient) + idlen - 1);
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		LIST_INIT(&clp->nfsc_defunctlockowner);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		nfscl_start_renewthread(clp);
	} else {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			FREE((caddr_t)newclp, M_NFSCLCLIENT);
	}
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock)
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (!igotlock)
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0 && (mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for MNTK_UNMOUNTF and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/* get rid of defunct lockowners */
		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
		    nlp) {
			nfscl_freelockowner(lp, 0);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(VFSTONFS(vnode_mount(vp)),
			    clp, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}

/*
 * Get a reference to a clientid and return it, if valid.
 */
APPLESTATIC struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
APPLESTATIC void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}
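
/*
 * Illustrative locking discipline (not compiled in): users of client
 * state obtain the clientid with a reference via nfscl_getcl() (or the
 * exclusive lock, while a SetClientID is in progress) and drop that
 * reference through nfscl_clrelease()/nfscl_clientrelease() when done.
 * A sketch only, with error handling elided.
 */
#if 0
static int
example_use_clientid(vnode_t vp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	int error;

	error = nfscl_getcl(vp, cred, p, &clp);
	if (error == 0) {
		/* ... examine or modify state under NFSLOCKCLSTATE() ... */
		nfscl_clientrelease(clp);
	}
	return (error);
}
#endif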

/*
 * Called when wanting to lock a byte region.
 */
APPLESTATIC int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	if (type == F_WRLCK)
		mode = NFSV4OPEN_ACCESSWRITE;
	else
		mode = NFSV4OPEN_ACCESSREAD;
	np = VTONFS(vp);
	*lpp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nlp, struct nfscllockowner *,
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	MALLOC(otherlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	MALLOC(nlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vp, cred, p, &clp);
	}
	if (error) {
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
		FREE((caddr_t)otherlop, M_NFSCLLOCK);
		FREE((caddr_t)nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
	} else {
		nfscl_filllockowner(p, own);
		ownp = own;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find the associated open to get the correct openowner */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, NULL, p, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p,
			    mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open.
		 */
		if (recovery)
			error = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, ropenownp,
			    NULL, mode, NULL, &op);
		else
			error = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p,
			    mode, NULL, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
		FREE((caddr_t)otherlop, M_NFSCLLOCK);
		FREE((caddr_t)nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	LIST_FOREACH(lp, lhp, nfsl_list) {
		if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
			break;
	}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_defunct = 0;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			newnfsstats.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			newnfsstats.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serial modifications on the lock owner for multiple threads
		 * for the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		FREE((caddr_t)nlop, M_NFSCLLOCK);
	if (otherlop)
		FREE((caddr_t)otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}

/*
 * Called to unlock a byte range, for LockU.
 */
APPLESTATIC int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	MALLOC(nlop, struct nfscllock *,
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			FREE((caddr_t)nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		MALLOC(other_lop, struct nfscllock *,
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(p, own);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						fnd = 1;
						break;
					}
				}
				if (fnd)
					break;
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serial modifications on the lock owner for multiple
		 * threads for the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		FREE((caddr_t)nlop, M_NFSCLLOCK);
	if (other_lop)
		FREE((caddr_t)other_lop, M_NFSCLLOCK);
	return (0);
}

/*
 * Release all lockowners marked in progress for this process and file.
 */
APPLESTATIC void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(p, own);
	NFSLOCKCLSTATE();
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(&lp->nfsl_rwlock);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}
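
/*
 * Worked example (not compiled in) of the flock-to-byte-range conversion
 * done by nfscl_checkwritelocked() below: a POSIX flock with l_len == 0
 * means "to end of file", which maps to an end offset of NFS64BITSSET,
 * while a bounded request of l_len bytes starting at l_start covers
 * [l_start, l_start + l_len). The values here are only an illustration.
 */
#if 0
static void
example_flock_range(void)
{
	struct flock fl;
	u_int64_t off, end;

	fl.l_whence = SEEK_SET;
	fl.l_start = 100;
	fl.l_len = 0;		/* POSIX: lock from offset 100 to EOF */
	off = fl.l_start;	/* off = 100 */
	end = NFS64BITSSET;	/* l_len == 0 maps to an unbounded end */
}
#endif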

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
APPLESTATIC int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vp, cred, p, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(p, own);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (lp != NULL) {
					LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
						if (lop->nfslo_first >= end)
							break;
						if (lop->nfslo_end <= off)
							continue;
						if (lop->nfslo_type == F_WRLCK) {
							nfscl_clrelease(clp);
							NFSUNLOCKCLSTATE();
							return (1);
						}
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Release a byte range lock owner structure.
 */
APPLESTATIC void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Free up an open structure and any associated byte range lock structures.
 */
APPLESTATIC void
nfscl_freeopen(struct nfsclopen *op, int local)
{

	LIST_REMOVE(op, nfso_list);
	nfscl_freealllocks(&op->nfso_lock, local);
	FREE((caddr_t)op, M_NFSCLOPEN);
	if (local)
		newnfsstats.cllocalopens--;
	else
		newnfsstats.clopens--;
}

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
{
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
			panic("nfscllckw");
		nfscl_freelockowner(lp, local);
	}
}

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {
			mustdelete = 1;
			break;
		}
	}

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		dp = NULL;
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
		if (error) {
			mustdelete = 1;
			if (dp != NULL) {
				FREE((caddr_t)dp, M_NFSCLDELEG);
				dp = NULL;
			}
		}
		if (dp != NULL)
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);
	}

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
	if (mustdelete)
		nfscl_freeopen(op, 0);
	return (mustdelete);
}

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{

	LIST_REMOVE(owp, nfsow_list);
	FREE((caddr_t)owp, M_NFSCLOWNER);
	if (local)
		newnfsstats.cllocalopenowners--;
	else
		newnfsstats.clopenowners--;
}

/*
 * Free up a byte range lock owner structure.
 */
static void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
	struct nfscllock *lop, *nlop;

	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	}
	FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
	if (local)
		newnfsstats.cllocallockowners--;
	else
		newnfsstats.cllockowners--;
}

/*
 * Free up a byte range lock structure.
 */
APPLESTATIC void
nfscl_freelock(struct nfscllock *lop, int local)
{

	LIST_REMOVE(lop, nfslo_list);
	FREE((caddr_t)lop, M_NFSCLLOCK);
	if (local)
		newnfsstats.cllocallocks--;
	else
		newnfsstats.cllocks--;
}

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1);
		}
		nfscl_freeopenowner(owp, 1);
	}
	nfscl_freealllocks(&dp->nfsdl_lock, 1);
}

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
{

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	FREE((caddr_t)dp, M_NFSCLDELEG);
	newnfsstats.cldelegates--;
	nfscl_delegcnt--;
}

/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;

	/* get rid of defunct lockowners */
	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
		nfscl_freelockowner(lp, 0);
	}

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		owp = LIST_FIRST(&dp->nfsdl_owner);
		while (owp != NULL) {
			nowp = LIST_NEXT(owp, nfsow_list);
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfsclexp");
				LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
					if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (towp != NULL) {
					/* Merge opens in */
					LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
						if (top->nfso_fhlen == op->nfso_fhlen &&
						    !NFSBCMP(top->nfso_fh, op->nfso_fh,
						    op->nfso_fhlen)) {
							top->nfso_mode |= op->nfso_mode;
							top->nfso_opencnt += op->nfso_opencnt;
							break;
						}
					}
					if (top == NULL) {
						/* Just add the open to the owner list */
						LIST_REMOVE(op, nfso_list);
						op->nfso_own = towp;
						LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
						newnfsstats.cllocalopens--;
						newnfsstats.clopens++;
					}
				} else {
					/* Just add the openowner to the client list */
					LIST_REMOVE(owp, nfsow_list);
					owp->nfsow_clp = clp;
					LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
					newnfsstats.cllocalopenowners--;
					newnfsstats.clopenowners++;
					newnfsstats.cllocalopens--;
					newnfsstats.clopens++;
				}
			}
			owp = nowp;
		}
		if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
			printed = 1;
			printf("nfsv4 expired locks lost\n");
		}
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp);
		dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
		panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}

#ifndef	__FreeBSD__
/*
 * Called from exit() upon process termination.
 */
APPLESTATIC void
nfscl_cleanup(NFSPROC_T *p)
{
	struct nfsclclient *clp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	if (!nfscl_inited)
		return;
	nfscl_filllockowner(p, own);

	NFSLOCKCLSTATE();
	/*
	 * Loop through all the clientids, looking for the OpenOwners.
	 */
	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
		nfscl_cleanup_common(clp, own);
	NFSUNLOCKCLSTATE();
}
#endif	/* !__FreeBSD__ */

/*
 * Common code used by nfscl_cleanup() and nfscl_cleanupkext().
 * Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
		} else {
			/* look for lockowners on other opens */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
						lp->nfsl_defunct = 1;
				}
			}
		}
		owp = nowp;
	}

	/* and check the defunct list */
	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
		if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN))
			lp->nfsl_defunct = 1;
	}
}

#if defined(APPLEKEXT) || defined(__FreeBSD__)
/*
 * Simulate the call nfscl_cleanup() by looking for open owners associated
 * with processes that no longer exist, since a call to nfscl_cleanup()
 * can't be patched into exit().
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp;

	NFSPROCLISTLOCK();
	NFSLOCKCLSTATE();
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		if (nfscl_procdoesntexist(owp->nfsow_owner))
			nfscl_cleanup_common(clp, owp->nfsow_owner);
	}

	/* and check the defunct list */
	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
		if (nfscl_procdoesntexist(lp->nfsl_owner))
			lp->nfsl_defunct = 1;
	}
	NFSUNLOCKCLSTATE();
	NFSPROCLISTUNLOCK();
}
#endif	/* APPLEKEXT || __FreeBSD__ */

static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
APPLESTATIC void
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct ucred *cred;
	int igotlock;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * http://docs.FreeBSD.org/cgi/
	 *     mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
	NFSLOCKCLSTATE();
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		fake_global++;
		NFSUNLOCKCLSTATE();
		NFSLOCKCLSTATE();
	}

	clp = nmp->nm_clp;
	if (clp != NULL) {
		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
			panic("nfscl umount");

		/*
		 * First, handshake with the nfscl renew thread, to terminate
		 * it.
		 */
		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
			    "nfsclumnt", hz);

		/*
		 * Now, get the exclusive lock on the client state, so
		 * that no uses of the state are still in progress.
		 */
		do {
			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} while (!igotlock);
		NFSUNLOCKCLSTATE();

		/*
		 * Free up all the state. It will expire on the server, but
		 * maybe we should do a SetClientId/SetClientIdConfirm so
		 * the server throws it away?
		 */
		LIST_REMOVE(clp, nfsc_list);
		nfscl_delegreturnall(clp, p);
		cred = newnfs_getcred();
		(void) nfsrpc_setclient(nmp, clp, cred, p);
		nfscl_cleanclient(clp);
		nmp->nm_clp = NULL;
		NFSFREECRED(cred);
		FREE((caddr_t)clp, M_NFSCLCLIENT);
	} else
		NFSUNLOCKCLSTATE();
}

/*
 * This function is called when a server replies with NFSERR_STALECLIENTID
 * or NFSERR_STALESTATEID. It traverses the clientid lists, doing Opens
 * and Locks with reclaim. If these fail, it deletes the corresponding state.
 */
static void
nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;
	struct nfscllock *lop, *nlop;
	struct nfscldeleg *dp, *ndp, *tdp;
	struct nfsmount *nmp;
	struct ucred *tcred;
	struct nfsclopenhead extra_open;
	struct nfscldeleghead extra_deleg;
	struct nfsreq *rep;
	u_int64_t len;
	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
	int igotlock = 0, error, trycnt, firstlock, s;

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl recover");
	trycnt = 5;
	do {
		error = nfsrpc_setclient(nmp, clp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		nfscl_cleanclient(clp);
		NFSLOCKCLSTATE();
		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
		    NFSCLFLAGS_RECOVER | NFSCLFLAGS_RECVRINPROG);
		wakeup(&clp->nfsc_flags);
		nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return;
	}
	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;

	/*
	 * Mark requests already queued on the server, so that they don't
	 * initiate another recovery cycle. Any requests already in the
	 * queue that handle state information will have the old stale
	 * clientid/stateid and will get a NFSERR_STALESTATEID or
	 * NFSERR_STALECLIENTID reply from the server. This will be
	 * translated to NFSERR_STALEDONTRECOVER when R_DONTRECOVER is set.
	 */
	s = splsoftclock();
	NFSLOCKREQ();
	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
		if (rep->r_nmp == nmp)
			rep->r_flags |= R_DONTRECOVER;
	}
	NFSUNLOCKREQ();
	splx(s);

	/* get rid of defunct lockowners */
	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
		nfscl_freelockowner(lp, 0);
	}

	/*
	 * Now, mark all delegations "need reclaim".
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;

	TAILQ_INIT(&extra_deleg);
	LIST_INIT(&extra_open);
	/*
	 * Now traverse the state lists, doing Open and Lock Reclaims.
1908 */
1909 tcred = newnfs_getcred();
1910 owp = LIST_FIRST(&clp->nfsc_owner);
1911 while (owp != NULL) {
1912 nowp = LIST_NEXT(owp, nfsow_list);
1913 owp->nfsow_seqid = 0;
1914 op = LIST_FIRST(&owp->nfsow_open);
1915 while (op != NULL) {
1916 nop = LIST_NEXT(op, nfso_list);
1917 if (error != NFSERR_NOGRACE) {
1918 /* Search for a delegation to reclaim with the open */
1919 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1920 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1921 continue;
1922 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1923 mode = NFSV4OPEN_ACCESSWRITE;
1924 delegtype = NFSV4OPEN_DELEGATEWRITE;
1925 } else {
1926 mode = NFSV4OPEN_ACCESSREAD;
1927 delegtype = NFSV4OPEN_DELEGATEREAD;
1928 }
1929 if ((op->nfso_mode & mode) == mode &&
1930 op->nfso_fhlen == dp->nfsdl_fhlen &&
1931 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
1932 break;
1933 }
1934 ndp = dp;
1935 if (dp == NULL)
1936 delegtype = NFSV4OPEN_DELEGATENONE;
1937 newnfs_copycred(&op->nfso_cred, tcred);
1938 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
1939 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
1940 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
1941 tcred, p);
1942 if (!error) {
1943 /* Handle any replied delegation */
1944 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
1945 || NFSMNT_RDONLY(nmp->nm_mountp))) {
1946 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
1947 mode = NFSV4OPEN_ACCESSWRITE;
1948 else
1949 mode = NFSV4OPEN_ACCESSREAD;
1950 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1951 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1952 continue;
1953 if ((op->nfso_mode & mode) == mode &&
1954 op->nfso_fhlen == dp->nfsdl_fhlen &&
1955 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
1956 op->nfso_fhlen)) {
1957 dp->nfsdl_stateid = ndp->nfsdl_stateid;
1958 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
1959 dp->nfsdl_ace = ndp->nfsdl_ace;
1960 dp->nfsdl_change = ndp->nfsdl_change;
1961 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
1962 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
1963 dp->nfsdl_flags |= NFSCLDL_RECALL;
1964 FREE((caddr_t)ndp, M_NFSCLDELEG);
1965 ndp = NULL;
1966 break;
1967 }
1968 }
1969 }
1970 if (ndp != NULL)
1971 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
1972
1973 /* and reclaim all byte range locks */
1974 lp = LIST_FIRST(&op->nfso_lock);
1975 while (lp != NULL) {
1976 nlp = LIST_NEXT(lp, nfsl_list);
1977 lp->nfsl_seqid = 0;
1978 firstlock = 1;
1979 lop = LIST_FIRST(&lp->nfsl_lock);
1980 while (lop != NULL) {
1981 nlop = LIST_NEXT(lop, nfslo_list);
1982 if (lop->nfslo_end == NFS64BITSSET)
1983 len = NFS64BITSSET;
1984 else
1985 len = lop->nfslo_end - lop->nfslo_first;
1986 if (error != NFSERR_NOGRACE)
1987 error = nfscl_trylock(nmp, NULL,
1988 op->nfso_fh, op->nfso_fhlen, lp,
1989 firstlock, 1, lop->nfslo_first, len,
1990 lop->nfslo_type, tcred, p);
1991 if (error != 0)
1992 nfscl_freelock(lop, 0);
1993 else
1994 firstlock = 0;
1995 lop = nlop;
1996 }
1997 /* If no locks, but a lockowner, just delete it. */
1998 if (LIST_EMPTY(&lp->nfsl_lock))
1999 nfscl_freelockowner(lp, 0);
2000 lp = nlp;
2001 }
2002 } else {
2003 nfscl_freeopen(op, 0);
2004 }
2005 }
2006 op = nop;
2007 }
2008 owp = nowp;
2009 }
2010
2011 /*
2012 * Now, try and get any delegations not yet reclaimed by cobbling
2013 * together an appropriate open.
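* A single scratch openowner (named "RECLAIMDELEG") is allocated
* lazily below and used for all of these opens. Each open asks for
* a delegation of the same type (read or write) as the one being
* reclaimed, so that the delegation's state can be refreshed in
* place.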
2014 */
2015 nowp = NULL;
2016 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2017 while (dp != NULL) {
2018 ndp = TAILQ_NEXT(dp, nfsdl_list);
2019 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2020 if (nowp == NULL) {
2021 MALLOC(nowp, struct nfsclowner *,
2022 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2023 /*
2024 * Name must be as long as the largest possible
2025 * NFSV4CL_LOCKNAMELEN. 12 for now.
2026 */
2027 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2028 NFSV4CL_LOCKNAMELEN);
2029 LIST_INIT(&nowp->nfsow_open);
2030 nowp->nfsow_clp = clp;
2031 nowp->nfsow_seqid = 0;
2032 nowp->nfsow_defunct = 0;
2033 nfscl_lockinit(&nowp->nfsow_rwlock);
2034 }
2035 nop = NULL;
2036 if (error != NFSERR_NOGRACE) {
2037 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
2038 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2039 nop->nfso_own = nowp;
2040 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2041 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2042 delegtype = NFSV4OPEN_DELEGATEWRITE;
2043 } else {
2044 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2045 delegtype = NFSV4OPEN_DELEGATEREAD;
2046 }
2047 nop->nfso_opencnt = 0;
2048 nop->nfso_posixlock = 1;
2049 nop->nfso_fhlen = dp->nfsdl_fhlen;
2050 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2051 LIST_INIT(&nop->nfso_lock);
2052 nop->nfso_stateid.seqid = 0;
2053 nop->nfso_stateid.other[0] = 0;
2054 nop->nfso_stateid.other[1] = 0;
2055 nop->nfso_stateid.other[2] = 0;
2056 newnfs_copycred(&dp->nfsdl_cred, tcred);
2057 newnfs_copyincred(tcred, &nop->nfso_cred);
2058 tdp = NULL;
2059 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2060 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2061 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2062 delegtype, tcred, p);
2063 if (tdp != NULL) {
2064 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2065 mode = NFSV4OPEN_ACCESSWRITE;
2066 else
2067 mode = NFSV4OPEN_ACCESSREAD;
2068 if ((nop->nfso_mode & mode) == mode &&
2069 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2070 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2071 nop->nfso_fhlen)) {
2072 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2073 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2074 dp->nfsdl_ace = tdp->nfsdl_ace;
2075 dp->nfsdl_change = tdp->nfsdl_change;
2076 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2077 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2078 dp->nfsdl_flags |= NFSCLDL_RECALL;
2079 FREE((caddr_t)tdp, M_NFSCLDELEG);
2080 } else {
2081 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2082 }
2083 }
2084 }
2085 if (error) {
2086 if (nop != NULL)
2087 FREE((caddr_t)nop, M_NFSCLOPEN);
2088 /*
2089 * Couldn't reclaim it, so throw the state
2090 * away. Ouch!!
2091 */
2092 nfscl_cleandeleg(dp);
2093 nfscl_freedeleg(&clp->nfsc_deleg, dp);
2094 } else {
2095 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2096 }
2097 }
2098 dp = ndp;
2099 }
2100
2101 /*
2102 * Now, get rid of extra Opens and Delegations.
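* The opens on extra_open are the temporary ones created above just
* to reclaim delegations, and the delegations on extra_deleg are ones
* the server granted that do not match any held delegation. Both are
* released here, napping on NFSERR_GRACE until the server's grace
* period has ended.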
2103 */ 2104 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { 2105 do { 2106 newnfs_copycred(&op->nfso_cred, tcred); 2107 error = nfscl_tryclose(op, tcred, nmp, p); 2108 if (error == NFSERR_GRACE) 2109 (void) nfs_catnap(PZERO, error, "nfsexcls"); 2110 } while (error == NFSERR_GRACE); 2111 LIST_REMOVE(op, nfso_list); 2112 FREE((caddr_t)op, M_NFSCLOPEN); 2113 } 2114 if (nowp != NULL) 2115 FREE((caddr_t)nowp, M_NFSCLOWNER); 2116 2117 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { 2118 do { 2119 newnfs_copycred(&dp->nfsdl_cred, tcred); 2120 error = nfscl_trydelegreturn(dp, tcred, nmp, p); 2121 if (error == NFSERR_GRACE) 2122 (void) nfs_catnap(PZERO, error, "nfsexdlg"); 2123 } while (error == NFSERR_GRACE); 2124 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); 2125 FREE((caddr_t)dp, M_NFSCLDELEG); 2126 } 2127 2128 NFSLOCKCLSTATE(); 2129 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG; 2130 wakeup(&clp->nfsc_flags); 2131 nfsv4_unlock(&clp->nfsc_lock, 0); 2132 NFSUNLOCKCLSTATE(); 2133 NFSFREECRED(tcred); 2134 } 2135 2136 /* 2137 * This function is called when a server replies with NFSERR_EXPIRED. 2138 * It deletes all state for the client and does a fresh SetClientId/confirm. 2139 * XXX Someday it should post a signal to the process(es) that hold the 2140 * state, so they know that lock state has been lost. 2141 */ 2142 APPLESTATIC int 2143 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) 2144 { 2145 struct nfscllockowner *lp, *nlp; 2146 struct nfsmount *nmp; 2147 struct ucred *cred; 2148 int igotlock = 0, error, trycnt; 2149 2150 /* 2151 * If the clientid has gone away or a new SetClientid has already 2152 * been done, just return ok. 2153 */ 2154 if (clp == NULL || clidrev != clp->nfsc_clientidrev) 2155 return (0); 2156 2157 /* 2158 * First, lock the client structure, so everyone else will 2159 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so 2160 * that only one thread does the work. 2161 */ 2162 NFSLOCKCLSTATE(); 2163 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; 2164 do { 2165 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, 2166 NFSCLSTATEMUTEXPTR, NULL); 2167 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); 2168 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { 2169 if (igotlock) 2170 nfsv4_unlock(&clp->nfsc_lock, 0); 2171 NFSUNLOCKCLSTATE(); 2172 return (0); 2173 } 2174 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG; 2175 NFSUNLOCKCLSTATE(); 2176 2177 nmp = clp->nfsc_nmp; 2178 if (nmp == NULL) 2179 panic("nfscl expired"); 2180 cred = newnfs_getcred(); 2181 trycnt = 5; 2182 do { 2183 error = nfsrpc_setclient(nmp, clp, cred, p); 2184 } while ((error == NFSERR_STALECLIENTID || 2185 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); 2186 if (error) { 2187 /* 2188 * Clear out any state. 2189 */ 2190 nfscl_cleanclient(clp); 2191 NFSLOCKCLSTATE(); 2192 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | 2193 NFSCLFLAGS_RECOVER); 2194 } else { 2195 /* get rid of defunct lockowners */ 2196 LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, 2197 nlp) { 2198 nfscl_freelockowner(lp, 0); 2199 } 2200 2201 /* 2202 * Expire the state for the client. 
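* Unlike the NFSERR_STALECLIENTID case handled by nfscl_recover(),
* an expired lease means the server has already discarded the state
* and no grace period reclaim is possible, so nfscl_expireclient()
* has to re-establish whatever it can with ordinary (non-reclaim)
* Opens and throw the rest away.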
2203 */ 2204 nfscl_expireclient(clp, nmp, cred, p); 2205 NFSLOCKCLSTATE(); 2206 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; 2207 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2208 } 2209 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG); 2210 wakeup(&clp->nfsc_flags); 2211 nfsv4_unlock(&clp->nfsc_lock, 0); 2212 NFSUNLOCKCLSTATE(); 2213 NFSFREECRED(cred); 2214 return (error); 2215 } 2216 2217 /* 2218 * This function inserts a lock in the list after insert_lop. 2219 */ 2220 static void 2221 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, 2222 struct nfscllock *insert_lop, int local) 2223 { 2224 2225 if ((struct nfscllockowner *)insert_lop == lp) 2226 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); 2227 else 2228 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); 2229 if (local) 2230 newnfsstats.cllocallocks++; 2231 else 2232 newnfsstats.cllocks++; 2233 } 2234 2235 /* 2236 * This function updates the locking for a lock owner and given file. It 2237 * maintains a list of lock ranges ordered on increasing file offset that 2238 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). 2239 * It always adds new_lop to the list and sometimes uses the one pointed 2240 * at by other_lopp. 2241 * Returns 1 if the locks were modified, 0 otherwise. 2242 */ 2243 static int 2244 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, 2245 struct nfscllock **other_lopp, int local) 2246 { 2247 struct nfscllock *new_lop = *new_lopp; 2248 struct nfscllock *lop, *tlop, *ilop; 2249 struct nfscllock *other_lop; 2250 int unlock = 0, modified = 0; 2251 u_int64_t tmp; 2252 2253 /* 2254 * Work down the list until the lock is merged. 2255 */ 2256 if (new_lop->nfslo_type == F_UNLCK) 2257 unlock = 1; 2258 ilop = (struct nfscllock *)lp; 2259 lop = LIST_FIRST(&lp->nfsl_lock); 2260 while (lop != NULL) { 2261 /* 2262 * Only check locks for this file that aren't before the start of 2263 * new lock's range. 2264 */ 2265 if (lop->nfslo_end >= new_lop->nfslo_first) { 2266 if (new_lop->nfslo_end < lop->nfslo_first) { 2267 /* 2268 * If the new lock ends before the start of the 2269 * current lock's range, no merge, just insert 2270 * the new lock. 2271 */ 2272 break; 2273 } 2274 if (new_lop->nfslo_type == lop->nfslo_type || 2275 (new_lop->nfslo_first <= lop->nfslo_first && 2276 new_lop->nfslo_end >= lop->nfslo_end)) { 2277 /* 2278 * This lock can be absorbed by the new lock/unlock. 2279 * This happens when it covers the entire range 2280 * of the old lock or is contiguous 2281 * with the old lock and is of the same type or an 2282 * unlock. 2283 */ 2284 if (new_lop->nfslo_type != lop->nfslo_type || 2285 new_lop->nfslo_first != lop->nfslo_first || 2286 new_lop->nfslo_end != lop->nfslo_end) 2287 modified = 1; 2288 if (lop->nfslo_first < new_lop->nfslo_first) 2289 new_lop->nfslo_first = lop->nfslo_first; 2290 if (lop->nfslo_end > new_lop->nfslo_end) 2291 new_lop->nfslo_end = lop->nfslo_end; 2292 tlop = lop; 2293 lop = LIST_NEXT(lop, nfslo_list); 2294 nfscl_freelock(tlop, local); 2295 continue; 2296 } 2297 2298 /* 2299 * All these cases are for contiguous locks that are not the 2300 * same type, so they can't be merged. 2301 */ 2302 if (new_lop->nfslo_first <= lop->nfslo_first) { 2303 /* 2304 * This case is where the new lock overlaps with the 2305 * first part of the old lock. Move the start of the 2306 * old lock to just past the end of the new lock. The 2307 * new lock will be inserted in front of the old, since 2308 * ilop hasn't been updated. 
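* For example, with half-open [first, end) ranges: if the old lock
* is a READ lock over [10, 20) and the new lock is a WRITE lock
* over [5, 15), the old lock becomes [15, 20) and the new lock is
* linked in ahead of it, giving [5, 15) WRITE, [15, 20) READ.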
(We are done now.) 2309 */ 2310 if (lop->nfslo_first != new_lop->nfslo_end) { 2311 lop->nfslo_first = new_lop->nfslo_end; 2312 modified = 1; 2313 } 2314 break; 2315 } 2316 if (new_lop->nfslo_end >= lop->nfslo_end) { 2317 /* 2318 * This case is where the new lock overlaps with the 2319 * end of the old lock's range. Move the old lock's 2320 * end to just before the new lock's first and insert 2321 * the new lock after the old lock. 2322 * Might not be done yet, since the new lock could 2323 * overlap further locks with higher ranges. 2324 */ 2325 if (lop->nfslo_end != new_lop->nfslo_first) { 2326 lop->nfslo_end = new_lop->nfslo_first; 2327 modified = 1; 2328 } 2329 ilop = lop; 2330 lop = LIST_NEXT(lop, nfslo_list); 2331 continue; 2332 } 2333 /* 2334 * The final case is where the new lock's range is in the 2335 * middle of the current lock's and splits the current lock 2336 * up. Use *other_lopp to handle the second part of the 2337 * split old lock range. (We are done now.) 2338 * For unlock, we use new_lop as other_lop and tmp, since 2339 * other_lop and new_lop are the same for this case. 2340 * We noted the unlock case above, so we don't need 2341 * new_lop->nfslo_type any longer. 2342 */ 2343 tmp = new_lop->nfslo_first; 2344 if (unlock) { 2345 other_lop = new_lop; 2346 *new_lopp = NULL; 2347 } else { 2348 other_lop = *other_lopp; 2349 *other_lopp = NULL; 2350 } 2351 other_lop->nfslo_first = new_lop->nfslo_end; 2352 other_lop->nfslo_end = lop->nfslo_end; 2353 other_lop->nfslo_type = lop->nfslo_type; 2354 lop->nfslo_end = tmp; 2355 nfscl_insertlock(lp, other_lop, lop, local); 2356 ilop = lop; 2357 modified = 1; 2358 break; 2359 } 2360 ilop = lop; 2361 lop = LIST_NEXT(lop, nfslo_list); 2362 if (lop == NULL) 2363 break; 2364 } 2365 2366 /* 2367 * Insert the new lock in the list at the appropriate place. 2368 */ 2369 if (!unlock) { 2370 nfscl_insertlock(lp, new_lop, ilop, local); 2371 *new_lopp = NULL; 2372 modified = 1; 2373 } 2374 return (modified); 2375 } 2376 2377 /* 2378 * This function must be run as a kernel thread. 2379 * It does Renew Ops and recovery, when required. 2380 */ 2381 APPLESTATIC void 2382 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) 2383 { 2384 struct nfsclowner *owp, *nowp; 2385 struct nfsclopen *op; 2386 struct nfscllockowner *lp, *nlp, *olp; 2387 struct nfscldeleghead dh; 2388 struct nfscllockownerhead lh; 2389 struct nfscldeleg *dp, *ndp; 2390 struct ucred *cred; 2391 u_int32_t clidrev; 2392 int error, cbpathdown, islept, igotlock, ret, clearok; 2393 uint32_t recover_done_time = 0; 2394 2395 cred = newnfs_getcred(); 2396 NFSLOCKCLSTATE(); 2397 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; 2398 NFSUNLOCKCLSTATE(); 2399 for(;;) { 2400 newnfs_setroot(cred); 2401 cbpathdown = 0; 2402 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) { 2403 /* 2404 * Only allow one recover within 1/2 of the lease 2405 * duration (nfsc_renew). 
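* (nfsc_renew is half the lease duration.) If a recovery was already
* done within that interval, NFSCLFLAGS_RECOVER is just cleared,
* since the stale replies that set it were almost certainly
* generated before that recovery completed.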
2406 */ 2407 if (recover_done_time < NFSD_MONOSEC) { 2408 recover_done_time = NFSD_MONOSEC + 2409 clp->nfsc_renew; 2410 nfscl_recover(clp, cred, p); 2411 } else { 2412 NFSLOCKCLSTATE(); 2413 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; 2414 NFSUNLOCKCLSTATE(); 2415 } 2416 } 2417 if (clp->nfsc_expire <= NFSD_MONOSEC && 2418 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { 2419 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; 2420 clidrev = clp->nfsc_clientidrev; 2421 error = nfsrpc_renew(clp, cred, p); 2422 if (error == NFSERR_CBPATHDOWN) 2423 cbpathdown = 1; 2424 else if (error == NFSERR_STALECLIENTID) { 2425 NFSLOCKCLSTATE(); 2426 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2427 NFSUNLOCKCLSTATE(); 2428 } else if (error == NFSERR_EXPIRED) 2429 (void) nfscl_hasexpired(clp, clidrev, p); 2430 } 2431 2432 LIST_INIT(&lh); 2433 TAILQ_INIT(&dh); 2434 NFSLOCKCLSTATE(); 2435 if (cbpathdown) 2436 /* It's a Total Recall! */ 2437 nfscl_totalrecall(clp); 2438 2439 /* 2440 * Now, handle defunct owners. 2441 */ 2442 owp = LIST_FIRST(&clp->nfsc_owner); 2443 while (owp != NULL) { 2444 nowp = LIST_NEXT(owp, nfsow_list); 2445 if (LIST_EMPTY(&owp->nfsow_open)) { 2446 if (owp->nfsow_defunct) 2447 nfscl_freeopenowner(owp, 0); 2448 } else { 2449 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2450 lp = LIST_FIRST(&op->nfso_lock); 2451 while (lp != NULL) { 2452 nlp = LIST_NEXT(lp, nfsl_list); 2453 if (lp->nfsl_defunct && 2454 LIST_EMPTY(&lp->nfsl_lock)) { 2455 LIST_FOREACH(olp, &lh, nfsl_list) { 2456 if (!NFSBCMP(olp->nfsl_owner, 2457 lp->nfsl_owner,NFSV4CL_LOCKNAMELEN)) 2458 break; 2459 } 2460 if (olp == NULL) { 2461 LIST_REMOVE(lp, nfsl_list); 2462 LIST_INSERT_HEAD(&lh, lp, nfsl_list); 2463 } else { 2464 nfscl_freelockowner(lp, 0); 2465 } 2466 } 2467 lp = nlp; 2468 } 2469 } 2470 } 2471 owp = nowp; 2472 } 2473 2474 /* also search the defunct list */ 2475 lp = LIST_FIRST(&clp->nfsc_defunctlockowner); 2476 while (lp != NULL) { 2477 nlp = LIST_NEXT(lp, nfsl_list); 2478 if (lp->nfsl_defunct) { 2479 LIST_FOREACH(olp, &lh, nfsl_list) { 2480 if (!NFSBCMP(olp->nfsl_owner, lp->nfsl_owner, 2481 NFSV4CL_LOCKNAMELEN)) 2482 break; 2483 } 2484 if (olp == NULL) { 2485 LIST_REMOVE(lp, nfsl_list); 2486 LIST_INSERT_HEAD(&lh, lp, nfsl_list); 2487 } else { 2488 nfscl_freelockowner(lp, 0); 2489 } 2490 } 2491 lp = nlp; 2492 } 2493 /* and release defunct lock owners */ 2494 LIST_FOREACH_SAFE(lp, &lh, nfsl_list, nlp) { 2495 nfscl_freelockowner(lp, 0); 2496 } 2497 2498 /* 2499 * Do the recall on any delegations. To avoid trouble, always 2500 * come back up here after having slept. 2501 */ 2502 igotlock = 0; 2503 tryagain: 2504 dp = TAILQ_FIRST(&clp->nfsc_deleg); 2505 while (dp != NULL) { 2506 ndp = TAILQ_NEXT(dp, nfsdl_list); 2507 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { 2508 /* 2509 * Wait for outstanding I/O ops to be done. 
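* Threads doing I/O under the delegation hold a use count on
* nfsdl_rwlock for the duration of the I/O. Setting NFSV4LOCK_WANTED
* asks the last such user to wakeup() this thread; since the state
* mutex was slept on, the delegation list must then be rescanned
* from the top (the "tryagain" label above).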
2510 */ 2511 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { 2512 if (igotlock) { 2513 nfsv4_unlock(&clp->nfsc_lock, 0); 2514 igotlock = 0; 2515 } 2516 dp->nfsdl_rwlock.nfslock_lock |= 2517 NFSV4LOCK_WANTED; 2518 (void) nfsmsleep(&dp->nfsdl_rwlock, 2519 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", 2520 NULL); 2521 goto tryagain; 2522 } 2523 while (!igotlock) { 2524 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, 2525 &islept, NFSCLSTATEMUTEXPTR, NULL); 2526 if (islept) 2527 goto tryagain; 2528 } 2529 NFSUNLOCKCLSTATE(); 2530 newnfs_copycred(&dp->nfsdl_cred, cred); 2531 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, 2532 NULL, cred, p, 1); 2533 if (!ret) { 2534 nfscl_cleandeleg(dp); 2535 TAILQ_REMOVE(&clp->nfsc_deleg, dp, 2536 nfsdl_list); 2537 LIST_REMOVE(dp, nfsdl_hash); 2538 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2539 nfscl_delegcnt--; 2540 newnfsstats.cldelegates--; 2541 } 2542 NFSLOCKCLSTATE(); 2543 } 2544 dp = ndp; 2545 } 2546 2547 /* 2548 * Clear out old delegations, if we are above the high water 2549 * mark. Only clear out ones with no state related to them. 2550 * The tailq list is in LRU order. 2551 */ 2552 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); 2553 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { 2554 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); 2555 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && 2556 dp->nfsdl_rwlock.nfslock_lock == 0 && 2557 dp->nfsdl_timestamp < NFSD_MONOSEC && 2558 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | 2559 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) { 2560 clearok = 1; 2561 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2562 op = LIST_FIRST(&owp->nfsow_open); 2563 if (op != NULL) { 2564 clearok = 0; 2565 break; 2566 } 2567 } 2568 if (clearok) { 2569 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { 2570 if (!LIST_EMPTY(&lp->nfsl_lock)) { 2571 clearok = 0; 2572 break; 2573 } 2574 } 2575 } 2576 if (clearok) { 2577 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); 2578 LIST_REMOVE(dp, nfsdl_hash); 2579 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); 2580 nfscl_delegcnt--; 2581 newnfsstats.cldelegates--; 2582 } 2583 } 2584 dp = ndp; 2585 } 2586 if (igotlock) 2587 nfsv4_unlock(&clp->nfsc_lock, 0); 2588 NFSUNLOCKCLSTATE(); 2589 2590 /* 2591 * Delegreturn any delegations cleaned out or recalled. 2592 */ 2593 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { 2594 newnfs_copycred(&dp->nfsdl_cred, cred); 2595 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 2596 TAILQ_REMOVE(&dh, dp, nfsdl_list); 2597 FREE((caddr_t)dp, M_NFSCLDELEG); 2598 } 2599 2600 #if defined(APPLEKEXT) || defined(__FreeBSD__) 2601 /* 2602 * Simulate the calls to nfscl_cleanup() when a process 2603 * exits, since the call can't be patched into exit(). 2604 */ 2605 { 2606 struct timespec mytime; 2607 static time_t prevsec = 0; 2608 2609 NFSGETNANOTIME(&mytime); 2610 if (prevsec != mytime.tv_sec) { 2611 prevsec = mytime.tv_sec; 2612 nfscl_cleanupkext(clp); 2613 } 2614 } 2615 #endif /* APPLEKEXT || __FreeBSD__ */ 2616 2617 NFSLOCKCLSTATE(); 2618 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) 2619 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl", 2620 hz); 2621 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { 2622 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; 2623 NFSUNLOCKCLSTATE(); 2624 NFSFREECRED(cred); 2625 wakeup((caddr_t)clp); 2626 return; 2627 } 2628 NFSUNLOCKCLSTATE(); 2629 } 2630 } 2631 2632 /* 2633 * Initiate state recovery. Called when NFSERR_STALECLIENTID or 2634 * NFSERR_STALESTATEID is received. 
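* All this does is set NFSCLFLAGS_RECOVER and wake up the renew
* thread; the actual recovery is performed by nfscl_recover() from
* nfscl_renewthread() above.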
2635 */ 2636 APPLESTATIC void 2637 nfscl_initiate_recovery(struct nfsclclient *clp) 2638 { 2639 2640 if (clp == NULL) 2641 return; 2642 NFSLOCKCLSTATE(); 2643 clp->nfsc_flags |= NFSCLFLAGS_RECOVER; 2644 NFSUNLOCKCLSTATE(); 2645 wakeup((caddr_t)clp); 2646 } 2647 2648 /* 2649 * Dump out the state stuff for debugging. 2650 */ 2651 APPLESTATIC void 2652 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, 2653 int lockowner, int locks) 2654 { 2655 struct nfsclclient *clp; 2656 struct nfsclowner *owp; 2657 struct nfsclopen *op; 2658 struct nfscllockowner *lp; 2659 struct nfscllock *lop; 2660 struct nfscldeleg *dp; 2661 2662 clp = nmp->nm_clp; 2663 if (clp == NULL) { 2664 printf("nfscl dumpstate NULL clp\n"); 2665 return; 2666 } 2667 NFSLOCKCLSTATE(); 2668 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { 2669 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2670 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2671 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2672 owp->nfsow_owner[0], owp->nfsow_owner[1], 2673 owp->nfsow_owner[2], owp->nfsow_owner[3], 2674 owp->nfsow_seqid); 2675 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2676 if (opens) 2677 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2678 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2679 op->nfso_stateid.other[2], op->nfso_opencnt, 2680 op->nfso_fh[12]); 2681 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2682 if (lockowner) 2683 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2684 lp->nfsl_owner[0], lp->nfsl_owner[1], 2685 lp->nfsl_owner[2], lp->nfsl_owner[3], 2686 lp->nfsl_seqid, 2687 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2688 lp->nfsl_stateid.other[2]); 2689 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2690 if (locks) 2691 #ifdef __FreeBSD__ 2692 printf("lck typ=%d fst=%ju end=%ju\n", 2693 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2694 (intmax_t)lop->nfslo_end); 2695 #else 2696 printf("lck typ=%d fst=%qd end=%qd\n", 2697 lop->nfslo_type, lop->nfslo_first, 2698 lop->nfslo_end); 2699 #endif 2700 } 2701 } 2702 } 2703 } 2704 } 2705 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2706 if (openowner && !LIST_EMPTY(&owp->nfsow_open)) 2707 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", 2708 owp->nfsow_owner[0], owp->nfsow_owner[1], 2709 owp->nfsow_owner[2], owp->nfsow_owner[3], 2710 owp->nfsow_seqid); 2711 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2712 if (opens) 2713 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", 2714 op->nfso_stateid.other[0], op->nfso_stateid.other[1], 2715 op->nfso_stateid.other[2], op->nfso_opencnt, 2716 op->nfso_fh[12]); 2717 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { 2718 if (lockowner) 2719 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", 2720 lp->nfsl_owner[0], lp->nfsl_owner[1], 2721 lp->nfsl_owner[2], lp->nfsl_owner[3], 2722 lp->nfsl_seqid, 2723 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], 2724 lp->nfsl_stateid.other[2]); 2725 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 2726 if (locks) 2727 #ifdef __FreeBSD__ 2728 printf("lck typ=%d fst=%ju end=%ju\n", 2729 lop->nfslo_type, (intmax_t)lop->nfslo_first, 2730 (intmax_t)lop->nfslo_end); 2731 #else 2732 printf("lck typ=%d fst=%qd end=%qd\n", 2733 lop->nfslo_type, lop->nfslo_first, 2734 lop->nfslo_end); 2735 #endif 2736 } 2737 } 2738 } 2739 } 2740 NFSUNLOCKCLSTATE(); 2741 } 2742 2743 /* 2744 * Check for duplicate open owners and opens. 2745 * (Only used as a diagnostic aid.) 
2746 */ 2747 APPLESTATIC void 2748 nfscl_dupopen(vnode_t vp, int dupopens) 2749 { 2750 struct nfsclclient *clp; 2751 struct nfsclowner *owp, *owp2; 2752 struct nfsclopen *op, *op2; 2753 struct nfsfh *nfhp; 2754 2755 clp = VFSTONFS(vnode_mount(vp))->nm_clp; 2756 if (clp == NULL) { 2757 printf("nfscl dupopen NULL clp\n"); 2758 return; 2759 } 2760 nfhp = VTONFS(vp)->n_fhp; 2761 NFSLOCKCLSTATE(); 2762 2763 /* 2764 * First, search for duplicate owners. 2765 * These should never happen! 2766 */ 2767 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2768 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2769 if (owp != owp2 && 2770 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, 2771 NFSV4CL_LOCKNAMELEN)) { 2772 NFSUNLOCKCLSTATE(); 2773 printf("DUP OWNER\n"); 2774 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); 2775 return; 2776 } 2777 } 2778 } 2779 2780 /* 2781 * Now, search for duplicate stateids. 2782 * These shouldn't happen, either. 2783 */ 2784 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2785 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 2786 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2787 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2788 if (op != op2 && 2789 (op->nfso_stateid.other[0] != 0 || 2790 op->nfso_stateid.other[1] != 0 || 2791 op->nfso_stateid.other[2] != 0) && 2792 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && 2793 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && 2794 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { 2795 NFSUNLOCKCLSTATE(); 2796 printf("DUP STATEID\n"); 2797 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 2798 0); 2799 return; 2800 } 2801 } 2802 } 2803 } 2804 } 2805 2806 /* 2807 * Now search for duplicate opens. 2808 * Duplicate opens for the same owner 2809 * should never occur. Other duplicates are 2810 * possible and are checked for if "dupopens" 2811 * is true. 2812 */ 2813 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { 2814 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { 2815 if (nfhp->nfh_len == op2->nfso_fhlen && 2816 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { 2817 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2818 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2819 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && 2820 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && 2821 (!NFSBCMP(op->nfso_own->nfsow_owner, 2822 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || 2823 dupopens)) { 2824 if (!NFSBCMP(op->nfso_own->nfsow_owner, 2825 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { 2826 NFSUNLOCKCLSTATE(); 2827 printf("BADDUP OPEN\n"); 2828 } else { 2829 NFSUNLOCKCLSTATE(); 2830 printf("DUP OPEN\n"); 2831 } 2832 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 2833 0, 0); 2834 return; 2835 } 2836 } 2837 } 2838 } 2839 } 2840 } 2841 NFSUNLOCKCLSTATE(); 2842 } 2843 2844 /* 2845 * During close, find an open that needs to be dereferenced and 2846 * dereference it. If there are no more opens for this file, 2847 * log a message to that effect. 2848 * Opens aren't actually Close'd until VOP_INACTIVE() is performed 2849 * on the file's vnode. 2850 * This is the safe way, since it is difficult to identify 2851 * which open the close is for and I/O can be performed after the 2852 * close(2) system call when a file is mmap'd. 2853 * If it returns 0 for success, there will be a referenced 2854 * clp returned via clpp. 
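* The reference is the shared (use count) side of clp->nfsc_lock,
* acquired by nfscl_getcl(), so recovery cannot take the exclusive
* lock while the caller is still using the returned clp. The caller
* must release the reference when done with it.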
2855 */ 2856 APPLESTATIC int 2857 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp) 2858 { 2859 struct nfsclclient *clp; 2860 struct nfsclowner *owp; 2861 struct nfsclopen *op; 2862 struct nfscldeleg *dp; 2863 struct nfsfh *nfhp; 2864 int error, notdecr; 2865 2866 error = nfscl_getcl(vp, NULL, NULL, &clp); 2867 if (error) 2868 return (error); 2869 *clpp = clp; 2870 2871 nfhp = VTONFS(vp)->n_fhp; 2872 notdecr = 1; 2873 NFSLOCKCLSTATE(); 2874 /* 2875 * First, look for one under a delegation that was locally issued 2876 * and just decrement the opencnt for it. Since all my Opens against 2877 * the server are DENY_NONE, I don't see a problem with hanging 2878 * onto them. (It is much easier to use one of the extant Opens 2879 * that I already have on the server when a Delegation is recalled 2880 * than to do fresh Opens.) Someday, I might need to rethink this, but. 2881 */ 2882 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 2883 if (dp != NULL) { 2884 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { 2885 op = LIST_FIRST(&owp->nfsow_open); 2886 if (op != NULL) { 2887 /* 2888 * Since a delegation is for a file, there 2889 * should never be more than one open for 2890 * each openowner. 2891 */ 2892 if (LIST_NEXT(op, nfso_list) != NULL) 2893 panic("nfscdeleg opens"); 2894 if (notdecr && op->nfso_opencnt > 0) { 2895 notdecr = 0; 2896 op->nfso_opencnt--; 2897 break; 2898 } 2899 } 2900 } 2901 } 2902 2903 /* Now process the opens against the server. */ 2904 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2905 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 2906 if (op->nfso_fhlen == nfhp->nfh_len && 2907 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 2908 nfhp->nfh_len)) { 2909 /* Found an open, decrement cnt if possible */ 2910 if (notdecr && op->nfso_opencnt > 0) { 2911 notdecr = 0; 2912 op->nfso_opencnt--; 2913 } 2914 /* 2915 * There are more opens, so just return. 2916 */ 2917 if (op->nfso_opencnt > 0) { 2918 NFSUNLOCKCLSTATE(); 2919 return (0); 2920 } 2921 } 2922 } 2923 } 2924 NFSUNLOCKCLSTATE(); 2925 if (notdecr) 2926 printf("nfscl: never fnd open\n"); 2927 return (0); 2928 } 2929 2930 APPLESTATIC int 2931 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p) 2932 { 2933 struct nfsclclient *clp; 2934 struct nfsclowner *owp, *nowp; 2935 struct nfsclopen *op; 2936 struct nfscldeleg *dp; 2937 struct nfsfh *nfhp; 2938 int error; 2939 2940 error = nfscl_getcl(vp, NULL, NULL, &clp); 2941 if (error) 2942 return (error); 2943 *clpp = clp; 2944 2945 nfhp = VTONFS(vp)->n_fhp; 2946 NFSLOCKCLSTATE(); 2947 /* 2948 * First get rid of the local Open structures, which should be no 2949 * longer in use. 2950 */ 2951 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); 2952 if (dp != NULL) { 2953 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { 2954 op = LIST_FIRST(&owp->nfsow_open); 2955 if (op != NULL) { 2956 KASSERT((op->nfso_opencnt == 0), 2957 ("nfscl: bad open cnt on deleg")); 2958 nfscl_freeopen(op, 1); 2959 } 2960 nfscl_freeopenowner(owp, 1); 2961 } 2962 } 2963 2964 /* Now process the opens against the server. */ 2965 lookformore: 2966 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 2967 op = LIST_FIRST(&owp->nfsow_open); 2968 while (op != NULL) { 2969 if (op->nfso_fhlen == nfhp->nfh_len && 2970 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, 2971 nfhp->nfh_len)) { 2972 /* Found an open, close it. 
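* The client state mutex is dropped around nfsrpc_doclose(), which
* does the Close RPC(s) and may sleep, so the scan restarts from
* "lookformore" because the open lists can change while unlocked.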
*/ 2973 KASSERT((op->nfso_opencnt == 0), 2974 ("nfscl: bad open cnt on server")); 2975 NFSUNLOCKCLSTATE(); 2976 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op, 2977 p); 2978 NFSLOCKCLSTATE(); 2979 goto lookformore; 2980 } 2981 op = LIST_NEXT(op, nfso_list); 2982 } 2983 } 2984 NFSUNLOCKCLSTATE(); 2985 return (0); 2986 } 2987 2988 /* 2989 * Return all delegations on this client. 2990 * (Must be called with client sleep lock.) 2991 */ 2992 static void 2993 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) 2994 { 2995 struct nfscldeleg *dp, *ndp; 2996 struct ucred *cred; 2997 2998 cred = newnfs_getcred(); 2999 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { 3000 nfscl_cleandeleg(dp); 3001 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); 3002 nfscl_freedeleg(&clp->nfsc_deleg, dp); 3003 } 3004 NFSFREECRED(cred); 3005 } 3006 3007 /* 3008 * Do a callback RPC. 3009 */ 3010 APPLESTATIC void 3011 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) 3012 { 3013 int i, op; 3014 u_int32_t *tl; 3015 struct nfsclclient *clp; 3016 struct nfscldeleg *dp = NULL; 3017 int numops, taglen = -1, error = 0, trunc, ret = 0; 3018 u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident; 3019 u_char tag[NFSV4_SMALLSTR + 1], *tagstr; 3020 vnode_t vp = NULL; 3021 struct nfsnode *np; 3022 struct vattr va; 3023 struct nfsfh *nfhp; 3024 mount_t mp; 3025 nfsattrbit_t attrbits, rattrbits; 3026 nfsv4stateid_t stateid; 3027 3028 nfsrvd_rephead(nd); 3029 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3030 taglen = fxdr_unsigned(int, *tl); 3031 if (taglen < 0) { 3032 error = EBADRPC; 3033 goto nfsmout; 3034 } 3035 if (taglen <= NFSV4_SMALLSTR) 3036 tagstr = tag; 3037 else 3038 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); 3039 error = nfsrv_mtostr(nd, tagstr, taglen); 3040 if (error) { 3041 if (taglen > NFSV4_SMALLSTR) 3042 free(tagstr, M_TEMP); 3043 taglen = -1; 3044 goto nfsmout; 3045 } 3046 (void) nfsm_strtom(nd, tag, taglen); 3047 if (taglen > NFSV4_SMALLSTR) { 3048 free(tagstr, M_TEMP); 3049 } 3050 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); 3051 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3052 minorvers = fxdr_unsigned(u_int32_t, *tl++); 3053 if (minorvers != NFSV4_MINORVERSION) 3054 nd->nd_repstat = NFSERR_MINORVERMISMATCH; 3055 cbident = fxdr_unsigned(u_int32_t, *tl++); 3056 if (nd->nd_repstat) 3057 numops = 0; 3058 else 3059 numops = fxdr_unsigned(int, *tl); 3060 /* 3061 * Loop around doing the sub ops. 
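* Only NFSV4OP_CBGETATTR and NFSV4OP_CBRECALL are valid for the
* NFSv4.0 callback program; any other op ends the compound with
* NFSERR_OPILLEGAL. CB_Getattr is answered from the local
* delegation and nfsnode attributes, while CB_Recall just flags the
* delegation NFSCLDL_RECALL and wakes the renew thread to do the
* real work.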
3062 */ 3063 for (i = 0; i < numops; i++) { 3064 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); 3065 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); 3066 *repp++ = *tl; 3067 op = fxdr_unsigned(int, *tl); 3068 if (op < NFSV4OP_CBGETATTR || op > NFSV4OP_CBRECALL) { 3069 nd->nd_repstat = NFSERR_OPILLEGAL; 3070 *repp = nfscl_errmap(nd); 3071 retops++; 3072 break; 3073 } 3074 nd->nd_procnum = op; 3075 newnfsstats.cbrpccnt[nd->nd_procnum]++; 3076 switch (op) { 3077 case NFSV4OP_CBGETATTR: 3078 clp = NULL; 3079 error = nfsm_getfh(nd, &nfhp); 3080 if (!error) 3081 error = nfsrv_getattrbits(nd, &attrbits, 3082 NULL, NULL); 3083 if (!error) { 3084 mp = nfscl_getmnt(cbident); 3085 if (mp == NULL) 3086 error = NFSERR_SERVERFAULT; 3087 } 3088 if (!error) { 3089 dp = NULL; 3090 NFSLOCKCLSTATE(); 3091 clp = nfscl_findcl(VFSTONFS(mp)); 3092 if (clp != NULL) 3093 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3094 nfhp->nfh_len); 3095 NFSUNLOCKCLSTATE(); 3096 if (dp == NULL) 3097 error = NFSERR_SERVERFAULT; 3098 } 3099 if (!error) { 3100 ret = nfscl_ngetreopen(mp, nfhp->nfh_fh, 3101 nfhp->nfh_len, p, &np); 3102 if (!ret) 3103 vp = NFSTOV(np); 3104 } 3105 if (nfhp != NULL) 3106 FREE((caddr_t)nfhp, M_NFSFH); 3107 if (!error) { 3108 NFSZERO_ATTRBIT(&rattrbits); 3109 if (NFSISSET_ATTRBIT(&attrbits, 3110 NFSATTRBIT_SIZE)) { 3111 if (!ret) 3112 va.va_size = np->n_size; 3113 else 3114 va.va_size = dp->nfsdl_size; 3115 NFSSETBIT_ATTRBIT(&rattrbits, 3116 NFSATTRBIT_SIZE); 3117 } 3118 if (NFSISSET_ATTRBIT(&attrbits, 3119 NFSATTRBIT_CHANGE)) { 3120 va.va_filerev = dp->nfsdl_change; 3121 if (ret || (np->n_flag & NDELEGMOD)) 3122 va.va_filerev++; 3123 NFSSETBIT_ATTRBIT(&rattrbits, 3124 NFSATTRBIT_CHANGE); 3125 } 3126 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va, 3127 NULL, 0, &rattrbits, NULL, NULL, 0, 0, 0, 0, 3128 (uint64_t)0); 3129 if (!ret) 3130 vrele(vp); 3131 } 3132 break; 3133 case NFSV4OP_CBRECALL: 3134 clp = NULL; 3135 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3136 NFSX_UNSIGNED); 3137 stateid.seqid = *tl++; 3138 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, 3139 NFSX_STATEIDOTHER); 3140 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); 3141 trunc = fxdr_unsigned(int, *tl); 3142 error = nfsm_getfh(nd, &nfhp); 3143 if (!error) { 3144 mp = nfscl_getmnt(cbident); 3145 if (mp == NULL) 3146 error = NFSERR_SERVERFAULT; 3147 } 3148 if (!error) { 3149 NFSLOCKCLSTATE(); 3150 clp = nfscl_findcl(VFSTONFS(mp)); 3151 if (clp != NULL) { 3152 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, 3153 nfhp->nfh_len); 3154 if (dp != NULL && (dp->nfsdl_flags & 3155 NFSCLDL_DELEGRET) == 0) { 3156 dp->nfsdl_flags |= 3157 NFSCLDL_RECALL; 3158 wakeup((caddr_t)clp); 3159 } 3160 } else { 3161 error = NFSERR_SERVERFAULT; 3162 } 3163 NFSUNLOCKCLSTATE(); 3164 } 3165 if (nfhp != NULL) 3166 FREE((caddr_t)nfhp, M_NFSFH); 3167 break; 3168 }; 3169 if (error) { 3170 if (error == EBADRPC || error == NFSERR_BADXDR) { 3171 nd->nd_repstat = NFSERR_BADXDR; 3172 } else { 3173 nd->nd_repstat = error; 3174 } 3175 error = 0; 3176 } 3177 retops++; 3178 if (nd->nd_repstat) { 3179 *repp = nfscl_errmap(nd); 3180 break; 3181 } else 3182 *repp = 0; /* NFS4_OK */ 3183 } 3184 nfsmout: 3185 if (error) { 3186 if (error == EBADRPC || error == NFSERR_BADXDR) 3187 nd->nd_repstat = NFSERR_BADXDR; 3188 else 3189 printf("nfsv4 comperr1=%d\n", error); 3190 } 3191 if (taglen == -1) { 3192 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 3193 *tl++ = 0; 3194 *tl = 0; 3195 } else { 3196 *retopsp = txdr_unsigned(retops); 3197 } 3198 *nd->nd_errp = nfscl_errmap(nd); 3199 } 3200 3201 /* 3202 * 
Generate the next cbident value. Basically just increment a static value 3203 * and then check that it isn't already in the list, if it has wrapped around. 3204 */ 3205 static u_int32_t 3206 nfscl_nextcbident(void) 3207 { 3208 struct nfsclclient *clp; 3209 int matched; 3210 static u_int32_t nextcbident = 0; 3211 static int haswrapped = 0; 3212 3213 nextcbident++; 3214 if (nextcbident == 0) 3215 haswrapped = 1; 3216 if (haswrapped) { 3217 /* 3218 * Search the clientid list for one already using this cbident. 3219 */ 3220 do { 3221 matched = 0; 3222 NFSLOCKCLSTATE(); 3223 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3224 if (clp->nfsc_cbident == nextcbident) { 3225 matched = 1; 3226 break; 3227 } 3228 } 3229 NFSUNLOCKCLSTATE(); 3230 if (matched == 1) 3231 nextcbident++; 3232 } while (matched); 3233 } 3234 return (nextcbident); 3235 } 3236 3237 /* 3238 * Get the mount point related to a given cbident. 3239 */ 3240 static mount_t 3241 nfscl_getmnt(u_int32_t cbident) 3242 { 3243 struct nfsclclient *clp; 3244 struct nfsmount *nmp; 3245 3246 NFSLOCKCLSTATE(); 3247 LIST_FOREACH(clp, &nfsclhead, nfsc_list) { 3248 if (clp->nfsc_cbident == cbident) 3249 break; 3250 } 3251 if (clp == NULL) { 3252 NFSUNLOCKCLSTATE(); 3253 return (NULL); 3254 } 3255 nmp = clp->nfsc_nmp; 3256 NFSUNLOCKCLSTATE(); 3257 return (nmp->nm_mountp); 3258 } 3259 3260 /* 3261 * Search for a lock conflict locally on the client. A conflict occurs if 3262 * - not same owner and overlapping byte range and at least one of them is 3263 * a write lock or this is an unlock. 3264 */ 3265 static int 3266 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen, 3267 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp, 3268 struct nfscllock **lopp) 3269 { 3270 struct nfsclowner *owp; 3271 struct nfsclopen *op; 3272 int ret; 3273 3274 if (dp != NULL) { 3275 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); 3276 if (ret) 3277 return (ret); 3278 } 3279 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { 3280 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { 3281 if (op->nfso_fhlen == fhlen && 3282 !NFSBCMP(op->nfso_fh, fhp, fhlen)) { 3283 ret = nfscl_checkconflict(&op->nfso_lock, nlop, 3284 own, lopp); 3285 if (ret) 3286 return (ret); 3287 } 3288 } 3289 } 3290 return (0); 3291 } 3292 3293 static int 3294 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, 3295 u_int8_t *own, struct nfscllock **lopp) 3296 { 3297 struct nfscllockowner *lp; 3298 struct nfscllock *lop; 3299 3300 LIST_FOREACH(lp, lhp, nfsl_list) { 3301 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { 3302 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { 3303 if (lop->nfslo_first >= nlop->nfslo_end) 3304 break; 3305 if (lop->nfslo_end <= nlop->nfslo_first) 3306 continue; 3307 if (lop->nfslo_type == F_WRLCK || 3308 nlop->nfslo_type == F_WRLCK || 3309 nlop->nfslo_type == F_UNLCK) { 3310 if (lopp != NULL) 3311 *lopp = lop; 3312 return (NFSERR_DENIED); 3313 } 3314 } 3315 } 3316 } 3317 return (0); 3318 } 3319 3320 /* 3321 * Check for a local conflicting lock. 
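* For example, if another lockowner holds a WRITE lock over
* [0, 100) and this F_RDLCK test asks about [50, 60), the ranges
* overlap, the owners differ and one of the locks is a write, so
* the conflicting range is copied into *fl and -1 is returned to
* tell the caller that no LockT RPC is needed.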
3322 */ 3323 APPLESTATIC int 3324 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, 3325 u_int64_t len, struct flock *fl, NFSPROC_T *p) 3326 { 3327 struct nfscllock *lop, nlck; 3328 struct nfscldeleg *dp; 3329 struct nfsnode *np; 3330 u_int8_t own[NFSV4CL_LOCKNAMELEN]; 3331 int error; 3332 3333 nlck.nfslo_type = fl->l_type; 3334 nlck.nfslo_first = off; 3335 if (len == NFS64BITSSET) { 3336 nlck.nfslo_end = NFS64BITSSET; 3337 } else { 3338 nlck.nfslo_end = off + len; 3339 if (nlck.nfslo_end <= nlck.nfslo_first) 3340 return (NFSERR_INVAL); 3341 } 3342 np = VTONFS(vp); 3343 nfscl_filllockowner(p, own); 3344 NFSLOCKCLSTATE(); 3345 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3346 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 3347 &nlck, own, dp, &lop); 3348 if (error != 0) { 3349 fl->l_whence = SEEK_SET; 3350 fl->l_start = lop->nfslo_first; 3351 if (lop->nfslo_end == NFS64BITSSET) 3352 fl->l_len = 0; 3353 else 3354 fl->l_len = lop->nfslo_end - lop->nfslo_first; 3355 fl->l_pid = (pid_t)0; 3356 fl->l_type = lop->nfslo_type; 3357 error = -1; /* no RPC required */ 3358 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || 3359 fl->l_type == F_RDLCK)) { 3360 /* 3361 * The delegation ensures that there isn't a conflicting 3362 * lock on the server, so return -1 to indicate an RPC 3363 * isn't required. 3364 */ 3365 fl->l_type = F_UNLCK; 3366 error = -1; 3367 } 3368 NFSUNLOCKCLSTATE(); 3369 return (error); 3370 } 3371 3372 /* 3373 * Handle Recall of a delegation. 3374 * The clp must be exclusive locked when this is called. 3375 */ 3376 static int 3377 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, 3378 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p, 3379 int called_from_renewthread) 3380 { 3381 struct nfsclowner *owp, *lowp, *nowp; 3382 struct nfsclopen *op, *lop; 3383 struct nfscllockowner *lp; 3384 struct nfscllock *lckp; 3385 struct nfsnode *np; 3386 int error = 0, ret, gotvp = 0; 3387 3388 if (vp == NULL) { 3389 /* 3390 * First, get a vnode for the file. This is needed to do RPCs. 3391 */ 3392 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, 3393 dp->nfsdl_fhlen, p, &np); 3394 if (ret) { 3395 /* 3396 * File isn't open, so nothing to move over to the 3397 * server. 3398 */ 3399 return (0); 3400 } 3401 vp = NFSTOV(np); 3402 gotvp = 1; 3403 } else { 3404 np = VTONFS(vp); 3405 } 3406 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; 3407 3408 /* 3409 * Ok, if it's a write delegation, flush data to the server, so 3410 * that close/open consistency is retained. 3411 */ 3412 ret = 0; 3413 NFSLOCKNODE(np); 3414 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { 3415 np->n_flag |= NDELEGRECALL; 3416 NFSUNLOCKNODE(np); 3417 ret = ncl_flush(vp, MNT_WAIT, cred, p, 1, 3418 called_from_renewthread); 3419 NFSLOCKNODE(np); 3420 np->n_flag &= ~NDELEGRECALL; 3421 } 3422 NFSINVALATTRCACHE(np); 3423 NFSUNLOCKNODE(np); 3424 if (ret == EIO && called_from_renewthread != 0) { 3425 /* 3426 * If the flush failed with EIO for the renew thread, 3427 * return now, so that the dirty buffer will be flushed 3428 * later. 3429 */ 3430 if (gotvp != 0) 3431 vrele(vp); 3432 return (ret); 3433 } 3434 3435 /* 3436 * Now, for each openowner with opens issued locally, move them 3437 * over to state against the server. 
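* A delegation carries at most one open per openowner (the
* "nfsdlg mult opens" panic below enforces this), and
* nfscl_moveopen() either folds each one into a matching open
* already held against the server or does a fresh Open RPC for it.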
3438 */
3439 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3440 lop = LIST_FIRST(&lowp->nfsow_open);
3441 if (lop != NULL) {
3442 if (LIST_NEXT(lop, nfso_list) != NULL)
3443 panic("nfsdlg mult opens");
3444 /*
3445 * Look for the same openowner against the server.
3446 */
3447 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3448 if (!NFSBCMP(lowp->nfsow_owner,
3449 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3450 newnfs_copycred(&dp->nfsdl_cred, cred);
3451 ret = nfscl_moveopen(vp, clp, nmp, lop,
3452 owp, dp, cred, p);
3453 if (ret == NFSERR_STALECLIENTID ||
3454 ret == NFSERR_STALEDONTRECOVER) {
3455 if (gotvp)
3456 vrele(vp);
3457 return (ret);
3458 }
3459 if (ret) {
3460 nfscl_freeopen(lop, 1);
3461 if (!error)
3462 error = ret;
3463 }
3464 break;
3465 }
3466 }
3467
3468 /*
3469 * If no openowner found, create one and get an open
3470 * for it.
3471 */
3472 if (owp == NULL) {
3473 MALLOC(nowp, struct nfsclowner *,
3474 sizeof (struct nfsclowner), M_NFSCLOWNER,
3475 M_WAITOK);
3476 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3477 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3478 dp->nfsdl_fhlen, NULL);
3479 newnfs_copycred(&dp->nfsdl_cred, cred);
3480 ret = nfscl_moveopen(vp, clp, nmp, lop,
3481 owp, dp, cred, p);
3482 if (ret) {
3483 nfscl_freeopenowner(owp, 0);
3484 if (ret == NFSERR_STALECLIENTID ||
3485 ret == NFSERR_STALEDONTRECOVER) {
3486 if (gotvp)
3487 vrele(vp);
3488 return (ret);
3489 }
3490 if (ret) {
3491 nfscl_freeopen(lop, 1);
3492 if (!error)
3493 error = ret;
3494 }
3495 }
3496 }
3497 }
3498 }
3499
3500 /*
3501 * Now, get byte range locks for any locks done locally.
3502 */
3503 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3504 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
3505 newnfs_copycred(&dp->nfsdl_cred, cred);
3506 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
3507 if (ret == NFSERR_STALESTATEID ||
3508 ret == NFSERR_STALEDONTRECOVER ||
3509 ret == NFSERR_STALECLIENTID) {
3510 if (gotvp)
3511 vrele(vp);
3512 return (ret);
3513 }
3514 if (ret && !error)
3515 error = ret;
3516 }
3517 }
3518 if (gotvp)
3519 vrele(vp);
3520 return (error);
3521 }
3522
3523 /*
3524 * Move a locally issued open over to an owner on the state list.
3525 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
3526 * returns with it unlocked.
3527 */
3528 static int
3529 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3530 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
3531 struct ucred *cred, NFSPROC_T *p)
3532 {
3533 struct nfsclopen *op, *nop;
3534 struct nfscldeleg *ndp;
3535 struct nfsnode *np;
3536 int error = 0, newone;
3537
3538 /*
3539 * First, look for an appropriate open. If found, just increment the
3540 * opencnt in it.
3541 */
3542 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3543 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
3544 op->nfso_fhlen == lop->nfso_fhlen &&
3545 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
3546 op->nfso_opencnt += lop->nfso_opencnt;
3547 nfscl_freeopen(lop, 1);
3548 return (0);
3549 }
3550 }
3551
3552 /* No appropriate open, so we have to do one against the server.
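* Since this is a fresh (non-reclaim) Open, it must be done by
* name, using the directory FH and component name remembered in
* np->n_v4.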
*/
3553 np = VTONFS(vp);
3554 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
3555 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
3556 newone = 0;
3557 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
3558 lop->nfso_fh, lop->nfso_fhlen, &newone);
3559 ndp = dp;
3560 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
3561 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
3562 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
3563 if (error) {
3564 if (newone)
3565 nfscl_freeopen(op, 0);
3566 } else {
3567 if (newone)
3568 newnfs_copyincred(cred, &op->nfso_cred);
3569 op->nfso_mode |= lop->nfso_mode;
3570 op->nfso_opencnt += lop->nfso_opencnt;
3571 nfscl_freeopen(lop, 1);
3572 }
3573 if (nop != NULL)
3574 FREE((caddr_t)nop, M_NFSCLOPEN);
3575 if (ndp != NULL) {
3576 /*
3577 * What should I do with the returned delegation, since the
3578 * delegation is being recalled? For now, just printf and
3579 * throw it away.
3580 */
3581 printf("Moveopen returned deleg\n");
3582 FREE((caddr_t)ndp, M_NFSCLDELEG);
3583 }
3584 return (error);
3585 }
3586
3587 /*
3588 * Recall all delegations on this client.
3589 */
3590 static void
3591 nfscl_totalrecall(struct nfsclclient *clp)
3592 {
3593 struct nfscldeleg *dp;
3594
3595 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
3596 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
3597 dp->nfsdl_flags |= NFSCLDL_RECALL;
3598 }
3599 }
3600
3601 /*
3602 * Relock byte ranges. Called for delegation recall and state expiry.
3603 */
3604 static int
3605 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3606 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
3607 NFSPROC_T *p)
3608 {
3609 struct nfscllockowner *nlp;
3610 struct nfsfh *nfhp;
3611 u_int64_t off, len;
3612 u_int32_t clidrev = 0;
3613 int error, newone, donelocally;
3614
3615 off = lop->nfslo_first;
3616 len = lop->nfslo_end - lop->nfslo_first;
3617 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
3618 clp, 1, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
3619 &donelocally);
3620 if (error || donelocally)
3621 return (error);
3622 if (nmp->nm_clp != NULL)
3623 clidrev = nmp->nm_clp->nfsc_clientidrev;
3624 else
3625 clidrev = 0;
3626 nfhp = VTONFS(vp)->n_fhp;
3627 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
3628 nfhp->nfh_len, nlp, newone, 0, off,
3629 len, lop->nfslo_type, cred, p);
3630 if (error)
3631 nfscl_freelockowner(nlp, 0);
3632 return (error);
3633 }
3634
3635 /*
3636 * Called to re-open a file. Basically get a vnode for the file handle
3637 * and then call nfsrpc_openrpc() to do the rest.
3638 */
3639 static int
3640 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
3641 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
3642 struct ucred *cred, NFSPROC_T *p)
3643 {
3644 struct nfsnode *np;
3645 vnode_t vp;
3646 int error;
3647
3648 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
3649 if (error)
3650 return (error);
3651 vp = NFSTOV(np);
3652 if (np->n_v4 != NULL) {
3653 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
3654 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
3655 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
3656 cred, p);
3657 } else {
3658 error = EINVAL;
3659 }
3660 vrele(vp);
3661 return (error);
3662 }
3663
3664 /*
3665 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
3666 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
3667 * fail.
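* The same retry shape is shared by nfscl_trylock(),
* nfscl_trydelegreturn() and nfscl_tryclose() below; in outline:
*	do {
*		error = ...the RPC...;
*		if (error == NFSERR_DELAY)
*			(void) nfs_catnap(PZERO, error, "...");
*	} while (error == NFSERR_DELAY);
*	if (error == EAUTH || error == EACCES) {
*		newnfs_setroot(cred);
*		... repeat the loop with the syscred flag set ...
*	}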
3668 */ 3669 static int 3670 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, 3671 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, 3672 u_int8_t *name, int namelen, struct nfscldeleg **ndpp, 3673 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) 3674 { 3675 int error; 3676 3677 do { 3678 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, 3679 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, 3680 0, 0); 3681 if (error == NFSERR_DELAY) 3682 (void) nfs_catnap(PZERO, error, "nfstryop"); 3683 } while (error == NFSERR_DELAY); 3684 if (error == EAUTH || error == EACCES) { 3685 /* Try again using system credentials */ 3686 newnfs_setroot(cred); 3687 do { 3688 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, 3689 newfhlen, mode, op, name, namelen, ndpp, reclaim, 3690 delegtype, cred, p, 1, 0); 3691 if (error == NFSERR_DELAY) 3692 (void) nfs_catnap(PZERO, error, "nfstryop"); 3693 } while (error == NFSERR_DELAY); 3694 } 3695 return (error); 3696 } 3697 3698 /* 3699 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns 3700 * NFSERR_DELAY. Also, retry with system credentials, if the provided 3701 * cred don't work. 3702 */ 3703 static int 3704 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, 3705 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim, 3706 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p) 3707 { 3708 struct nfsrv_descript nfsd, *nd = &nfsd; 3709 int error; 3710 3711 do { 3712 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, 3713 reclaim, off, len, type, cred, p, 0); 3714 if (!error && nd->nd_repstat == NFSERR_DELAY) 3715 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 3716 "nfstrylck"); 3717 } while (!error && nd->nd_repstat == NFSERR_DELAY); 3718 if (!error) 3719 error = nd->nd_repstat; 3720 if (error == EAUTH || error == EACCES) { 3721 /* Try again using root credentials */ 3722 newnfs_setroot(cred); 3723 do { 3724 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, 3725 newone, reclaim, off, len, type, cred, p, 1); 3726 if (!error && nd->nd_repstat == NFSERR_DELAY) 3727 (void) nfs_catnap(PZERO, (int)nd->nd_repstat, 3728 "nfstrylck"); 3729 } while (!error && nd->nd_repstat == NFSERR_DELAY); 3730 if (!error) 3731 error = nd->nd_repstat; 3732 } 3733 return (error); 3734 } 3735 3736 /* 3737 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(), 3738 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 3739 * credentials fail. 3740 */ 3741 static int 3742 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred, 3743 struct nfsmount *nmp, NFSPROC_T *p) 3744 { 3745 int error; 3746 3747 do { 3748 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0); 3749 if (error == NFSERR_DELAY) 3750 (void) nfs_catnap(PZERO, error, "nfstrydp"); 3751 } while (error == NFSERR_DELAY); 3752 if (error == EAUTH || error == EACCES) { 3753 /* Try again using system credentials */ 3754 newnfs_setroot(cred); 3755 do { 3756 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1); 3757 if (error == NFSERR_DELAY) 3758 (void) nfs_catnap(PZERO, error, "nfstrydp"); 3759 } while (error == NFSERR_DELAY); 3760 } 3761 return (error); 3762 } 3763 3764 /* 3765 * Try a close against the server. Just call nfsrpc_closerpc(), 3766 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in 3767 * credentials fail. 
3768 */ 3769 APPLESTATIC int 3770 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred, 3771 struct nfsmount *nmp, NFSPROC_T *p) 3772 { 3773 struct nfsrv_descript nfsd, *nd = &nfsd; 3774 int error; 3775 3776 do { 3777 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0); 3778 if (error == NFSERR_DELAY) 3779 (void) nfs_catnap(PZERO, error, "nfstrycl"); 3780 } while (error == NFSERR_DELAY); 3781 if (error == EAUTH || error == EACCES) { 3782 /* Try again using system credentials */ 3783 newnfs_setroot(cred); 3784 do { 3785 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1); 3786 if (error == NFSERR_DELAY) 3787 (void) nfs_catnap(PZERO, error, "nfstrycl"); 3788 } while (error == NFSERR_DELAY); 3789 } 3790 return (error); 3791 } 3792 3793 /* 3794 * Decide if a delegation on a file permits close without flushing writes 3795 * to the server. This might be a big performance win in some environments. 3796 * (Not useful until the client does caching on local stable storage.) 3797 */ 3798 APPLESTATIC int 3799 nfscl_mustflush(vnode_t vp) 3800 { 3801 struct nfsclclient *clp; 3802 struct nfscldeleg *dp; 3803 struct nfsnode *np; 3804 struct nfsmount *nmp; 3805 3806 np = VTONFS(vp); 3807 nmp = VFSTONFS(vnode_mount(vp)); 3808 if (!NFSHASNFSV4(nmp)) 3809 return (1); 3810 NFSLOCKCLSTATE(); 3811 clp = nfscl_findcl(nmp); 3812 if (clp == NULL) { 3813 NFSUNLOCKCLSTATE(); 3814 return (1); 3815 } 3816 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3817 if (dp != NULL && (dp->nfsdl_flags & 3818 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 3819 NFSCLDL_WRITE && 3820 (dp->nfsdl_sizelimit >= np->n_size || 3821 !NFSHASSTRICT3530(nmp))) { 3822 NFSUNLOCKCLSTATE(); 3823 return (0); 3824 } 3825 NFSUNLOCKCLSTATE(); 3826 return (1); 3827 } 3828 3829 /* 3830 * See if a (write) delegation exists for this file. 3831 */ 3832 APPLESTATIC int 3833 nfscl_nodeleg(vnode_t vp, int writedeleg) 3834 { 3835 struct nfsclclient *clp; 3836 struct nfscldeleg *dp; 3837 struct nfsnode *np; 3838 struct nfsmount *nmp; 3839 3840 np = VTONFS(vp); 3841 nmp = VFSTONFS(vnode_mount(vp)); 3842 if (!NFSHASNFSV4(nmp)) 3843 return (1); 3844 NFSLOCKCLSTATE(); 3845 clp = nfscl_findcl(nmp); 3846 if (clp == NULL) { 3847 NFSUNLOCKCLSTATE(); 3848 return (1); 3849 } 3850 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); 3851 if (dp != NULL && 3852 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 && 3853 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) == 3854 NFSCLDL_WRITE)) { 3855 NFSUNLOCKCLSTATE(); 3856 return (0); 3857 } 3858 NFSUNLOCKCLSTATE(); 3859 return (1); 3860 } 3861 3862 /* 3863 * Look for an associated delegation that should be DelegReturned. 
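* Used when removing a file: the returned count (0 or 1) tells the
* caller whether *stp was filled in with a delegation stateid that
* should be handed back to the server (cf. nfscl_renamedeleg()
* below).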
/*
 * Look for an associated delegation that should be DelegReturned.
 */
APPLESTATIC int
nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vnode_mount(vp));
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on vp that has state, lock the client and
	 *   do a recall
	 * - return delegation with no state
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				(void) nfsmsleep(&dp->nfsdl_rwlock,
				    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, NULL);
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				(void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*stp = dp->nfsdl_stateid;
			retcnt = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp);
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}
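/*
 * Hypothetical caller, for exposition only: a Remove (or similar)
 * operation can collect the delegation's stateid up front so a
 * DelegReturn can be sent before the server would otherwise have to
 * recall the delegation. The compound construction itself is omitted;
 * nfscl_renamedeleg() below serves the analogous role for Rename.
 */
#if 0
	nfsv4stateid_t dstateid;

	if (nfscl_removedeleg(vp, p, &dstateid) != 0) {
		/* dstateid now names the delegation that was torn down
		 * locally; pass it to a DelegReturn operation. */
	}
#endif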
/*
 * Look for associated delegation(s) that should be DelegReturned.
 */
APPLESTATIC int
nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
    nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vnode_mount(fvp));
	*gotfdp = 0;
	*gottdp = 0;
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on fvp that has state, lock the client and
	 *   do a recall
	 * - return delegation(s) with no state.
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		np = VTONFS(fvp);
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL && *gotfdp == 0) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				(void) nfsmsleep(&dp->nfsdl_rwlock,
				    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, NULL);
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				(void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*fstp = dp->nfsdl_stateid;
			retcnt++;
			*gotfdp = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp);
		}
		if (igotlock) {
			nfsv4_unlock(&clp->nfsc_lock, 0);
			igotlock = 0;
		}
		if (tvp != NULL) {
			np = VTONFS(tvp);
			dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
			    np->n_fhp->nfh_len);
			if (dp != NULL && *gottdp == 0) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
					dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
					(void) nfsmsleep(&dp->nfsdl_rwlock,
					    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
					continue;
				}
				LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
					if (!LIST_EMPTY(&owp->nfsow_open)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				*tstp = dp->nfsdl_stateid;
				retcnt++;
				*gottdp = 1;
				nfscl_cleandeleg(dp);
				nfscl_freedeleg(&clp->nfsc_deleg, dp);
			}
		}
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Get a reference on the clientid associated with the mount point.
 * Return 1 on success, 0 otherwise.
 */
APPLESTATIC int
nfscl_getref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
	NFSUNLOCKCLSTATE();
	return (1);
}
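/*
 * Hypothetical caller, for exposition only: nfscl_getref() and
 * nfscl_relref() (below) bracket work that must not run concurrently
 * with clientid recovery, since recovery takes nfsc_lock exclusively
 * while references are held shared.
 */
#if 0
	if (nfscl_getref(nmp) != 0) {
		/* ... operate on state while recovery is held off ... */
		nfscl_relref(nmp);
	}
#endif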
/*
 * Release a reference on a clientid acquired with the above call.
 */
APPLESTATIC void
nfscl_relref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Save the size attribute in the delegation, since the nfsnode
 * is going away.
 */
APPLESTATIC void
nfscl_reclaimnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		dp->nfsdl_size = np->n_size;
	NFSUNLOCKCLSTATE();
}

/*
 * Restore the size attribute saved in the delegation, since this is a
 * newly allocated nfsnode.
 */
APPLESTATIC void
nfscl_newnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		np->n_size = dp->nfsdl_size;
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file, set the modtime
 * to the local clock time.
 */
APPLESTATIC void
nfscl_delegmodtime(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
		NFSGETNANOTIME(&dp->nfsdl_modtime);
		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
	}
	NFSUNLOCKCLSTATE();
}
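/*
 * Hypothetical write/getattr paths, for exposition only ("vap" is an
 * assumed attribute structure, not defined here): a write satisfied
 * under a write delegation timestamps the delegation locally, and a
 * later getattr substitutes that timestamp for the server's stale one
 * via nfscl_deleggetmodtime() below.
 */
#if 0
	/* After a write that was satisfied locally: */
	nfscl_delegmodtime(vp);
	/* Later, in the getattr path: */
	nfscl_deleggetmodtime(vp, &vap->va_mtime);
#endif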
/*
 * If there is a valid write delegation for this file with a modtime set,
 * put that modtime in mtime.
 */
APPLESTATIC void
nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
		*mtime = dp->nfsdl_modtime;
	NFSUNLOCKCLSTATE();
}

/*
 * Map a reply status into the error to be XDR encoded in a callback
 * reply, using the table of errors permitted for the callback
 * procedure.
 */
static int
nfscl_errmap(struct nfsrv_descript *nd)
{
	short *defaulterrp, *errp;

	if (!nd->nd_repstat)
		return (0);
	if (nd->nd_procnum == NFSPROC_NOOP)
		return (txdr_unsigned(nd->nd_repstat & 0xffff));
	if (nd->nd_repstat == EBADRPC)
		return (txdr_unsigned(NFSERR_BADXDR));
	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
	    nd->nd_repstat == NFSERR_OPILLEGAL)
		return (txdr_unsigned(nd->nd_repstat));
	errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
	while (*++errp)
		if (*errp == (short)nd->nd_repstat)
			return (txdr_unsigned(nd->nd_repstat));
	return (txdr_unsigned(*defaulterrp));
}
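/*
 * Exposition only, values hypothetical: each row of nfscl_cberrmap[]
 * walked by nfscl_errmap() above has the shape sketched below --
 * entry 0 is the default reply status, the remaining non-zero entries
 * are the errors that may be passed through verbatim for that
 * procedure, and a zero terminates the row.
 */
#if 0
static short cberr_example[] = {
	NFSERR_SERVERFAULT,	/* entry 0: default reply status */
	NFSERR_BADHANDLE,	/* allowed pass-through errors... */
	NFSERR_DELAY,
	0			/* terminator */
};
#endif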