xref: /freebsd/sys/fs/nfsclient/nfs_clstate.c (revision 830940567b49bb0c08dfaed40418999e76616909)
1 /*-
2  * Copyright (c) 2009 Rick Macklem, University of Guelph
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * These functions implement the client side state handling for NFSv4.
33  * NFSv4 state handling:
34  * - A lockowner is used to determine lock contention, so it
35  *   corresponds directly to a Posix pid. (1 to 1 mapping)
36  * - The correct granularity of an OpenOwner is not nearly so
37  *   obvious. An OpenOwner does the following:
38  *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
40  *     this client, since all Opens are Deny_None)
41  *   As such, I considered both extrema.
42  *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
43  *   all Open, Close and Lock (with a new lockowner) Ops.
44  *   1 OpenOwner for each Open - This one results in an OpenConfirm for
45  *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
47  *   The main concern here is that you can end up with multiple Opens
48  *   for the same File Handle, but on different OpenOwners (opens
49  *   inherited from parents, grandparents...) and you do not know
50  *   which of these the vnodeop close applies to. This is handled by
51  *   delaying the Close Op(s) until all of the Opens have been closed.
52  *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
55  *     SetClientId and during recovery. Otherwise, it uses a shared
56  *     lock via a reference count.
57  *   - For the rest of the data structures, it uses an SMP mutex
58  *     (once the nfs client is SMP safe) and doesn't sleep while
59  *     manipulating the linked lists.
60  *   - The serialization of Open/Close/Lock/LockU falls out in the
61  *     "wash", since OpenOwners and LockOwners are both mapped from
62  *     Posix pid. In other words, there is only one Posix pid using
63  *     any given owner, so that owner is serialized. (If you change
64  *     the granularity of the OpenOwner, then code must be added to
65  *     serialize Ops on the OpenOwner.)
66  * - When to get rid of OpenOwners and LockOwners.
67  *   - When a process exits, it calls nfscl_cleanup(), which goes
68  *     through the client list looking for all Open and Lock Owners.
69  *     When one is found, it is marked "defunct" or in the case of
70  *     an OpenOwner without any Opens, freed.
71  *     The renew thread scans for defunct Owners and gets rid of them,
72  *     if it can. The LockOwners will also be deleted when the
73  *     associated Open is closed.
74  *   - If the LockU or Close Op(s) fail during close in a way
75  *     that could be recovered upon retry, they are relinked to the
76  *     ClientId's defunct open list and retried by the renew thread
77  *     until they succeed or an unmount/recovery occurs.
78  *     (Since we are done with them, they do not need to be recovered.)
79  */
80 
81 #ifndef APPLEKEXT
82 #include <fs/nfs/nfsport.h>
83 
84 /*
85  * Global variables
86  */
87 extern struct nfsstats newnfsstats;
88 extern struct nfsreqhead nfsd_reqq;
89 NFSREQSPINLOCK;
90 NFSCLSTATEMUTEX;
91 int nfscl_inited = 0;
92 struct nfsclhead nfsclhead;	/* Head of clientid list */
93 int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
94 #endif	/* !APPLEKEXT */
95 
96 static int nfscl_delegcnt = 0;
97 static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
98     NFSPROC_T *, u_int32_t, struct nfsclowner **, struct nfsclopen **);
99 static void nfscl_clrelease(struct nfsclclient *);
100 static void nfscl_cleanclient(struct nfsclclient *);
101 static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
102     struct ucred *, NFSPROC_T *);
103 static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
104     struct nfsmount *, struct ucred *, NFSPROC_T *);
105 static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
106 static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
107     struct nfscllock *, int);
108 static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
109     struct nfscllock **, int);
110 static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
111 static u_int32_t nfscl_nextcbident(void);
112 static mount_t nfscl_getmnt(u_int32_t);
113 static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
114     int);
115 static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
116     u_int8_t *, struct nfscllock **);
117 static void nfscl_freelockowner(struct nfscllockowner *, int);
118 static void nfscl_freealllocks(struct nfscllockownerhead *, int);
119 static int nfscl_localconflict(struct nfsclclient *, struct nfscllock *,
120     u_int8_t *, struct nfscldeleg *, struct nfscllock **);
121 static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
122     struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
123     struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
124 static int nfscl_moveopen(vnode_t , struct nfsclclient *,
125     struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
126     struct nfscldeleg *, struct ucred *, NFSPROC_T *);
127 static void nfscl_totalrecall(struct nfsclclient *);
128 static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
129     struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
130 static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
131     u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
132     struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
133 static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
134     int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
135     struct ucred *, NFSPROC_T *);
136 static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
137     struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
138 static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
139 static int nfscl_errmap(struct nfsrv_descript *);
140 static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
141 static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
142     struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *);
143 static void nfscl_freeopenowner(struct nfsclowner *, int);
144 static void nfscl_cleandeleg(struct nfscldeleg *);
145 static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
146     struct nfsmount *, NFSPROC_T *);
147 
148 static short nfscberr_null[] = {
149 	0,
150 	0,
151 };
152 
153 static short nfscberr_getattr[] = {
154 	NFSERR_RESOURCE,
155 	NFSERR_BADHANDLE,
156 	NFSERR_BADXDR,
157 	NFSERR_RESOURCE,
158 	NFSERR_SERVERFAULT,
159 	0,
160 };
161 
162 static short nfscberr_recall[] = {
163 	NFSERR_RESOURCE,
164 	NFSERR_BADHANDLE,
165 	NFSERR_BADSTATEID,
166 	NFSERR_BADXDR,
167 	NFSERR_RESOURCE,
168 	NFSERR_SERVERFAULT,
169 	0,
170 };
171 
172 static short *nfscl_cberrmap[] = {
173 	nfscberr_null,
174 	nfscberr_null,
175 	nfscberr_null,
176 	nfscberr_getattr,
177 	nfscberr_recall
178 };
179 
180 #define	NETFAMILY(clp) \
181 		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
182 
183 /*
184  * Called for an open operation.
185  * If the nfhp argument is NULL, just get an openowner.
186  */
APPLESTATIC int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	/* Initialize the optional return arguments to "nothing new found". */
	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 * (The open structure carries the file handle inline, hence the
	 *  "+ fhlen - 1" sizing.)
	 */
	MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
	    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
		fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	/* Get the clientid; it is returned with a reference held on it. */
	ret = nfscl_getcl(vp, cred, p, &clp);
	if (ret != 0) {
		FREE((caddr_t)nowp, M_NFSCLOWNER);
		if (nop != NULL)
			FREE((caddr_t)nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	nfscl_filllockowner(p, own);
	NFSLOCKCLSTATE();
	dp = NULL;
	/*
	 * First check the delegation list. A delegation can only be used
	 * for a write open if it is a write delegation.
	 */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	/* Openowners hang off the delegation, if one was found above. */
	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required. nfscl_newopen() consumes nowp
	 * and/or nop (setting them NULL) if it links them into the lists.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    newonep);

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 */
	if (lockit)
		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	NFSUNLOCKCLSTATE();
	/* Free whichever preallocated structures were not consumed. */
	if (nowp != NULL)
		FREE((caddr_t)nowp, M_NFSCLOWNER);
	if (nop != NULL)
		FREE((caddr_t)nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	if (retp != NULL)
		*retp = NFSCLOPEN_OK;

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value. If the requested access mode is not covered by the open,
	 * widen the mode and tell the caller to do an Open RPC
	 * (NFSCLOPEN_DOOPEN), unless this open is local to a delegation.
	 */
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}
	return (0);
}
291 
292 /*
293  * Create a new open, as required.
294  */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	/*
	 * nowpp/nopp point at caller-preallocated structures. When one is
	 * linked into the lists below, *nowpp/*nopp is set NULL so the
	 * caller knows not to free it.
	 */
	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	/* No matching openowner found; initialize and insert the new one. */
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			/* Openowner is local to the delegation. */
			newnfsstats.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			newnfsstats.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;		/* consumed; caller must not free it */
		if (newonep != NULL)
			*newonep = 1;
	}

	 /* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			/* Initialize the new open with the all-zeros stateid. */
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			if (dp != NULL) {
				/*
				 * Move the delegation to the front of the
				 * LRU list and refresh its timestamp.
				 */
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				newnfsstats.cllocalopens++;
			} else {
				newnfsstats.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;	/* consumed; caller must not free it */
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}
371 
372 /*
373  * Called to find/add a delegation to a client.
374  */
APPLESTATIC int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		FREE((caddr_t)dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		/*
		 * Not found: link the caller's delegation (dp) into the
		 * LRU list and the hash chain. Ownership of dp moves to
		 * the client structure, so *dpp is cleared.
		 */
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		newnfsstats.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * Delegation already exists, what do we do if a new one??
		 * (Discard the new one and return the existing entry.)
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			FREE((caddr_t)dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}
424 
425 /*
426  * Find a delegation for this file handle. Return NULL upon failure.
427  */
428 static struct nfscldeleg *
429 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
430 {
431 	struct nfscldeleg *dp;
432 
433 	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
434 	    if (dp->nfsdl_fhlen == fhlen &&
435 		!NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
436 		break;
437 	}
438 	return (dp);
439 }
440 
441 /*
442  * Get a stateid for an I/O operation. First, look for an open and iff
443  * found, return either a lockowner stateid or the open stateid.
444  * If no Open is found, just return error and the special stateid of all zeros.
445  */
APPLESTATIC int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error, done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 */
	stateidp->seqid = 0;
	stateidp->other[0] = 0;
	stateidp->other[1] = 0;
	stateidp->other[2] = 0;
	/* Stateids only apply to regular files. */
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * First, look for a delegation. A write open needs a write
	 * delegation; a read open can use either kind.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					/*
					 * Move the delegation to the front
					 * of the LRU list, refresh its
					 * timestamp and return its rwlock
					 * (use count bumped) via *lckpp so
					 * the caller can release it when
					 * the I/O is done.
					 */
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		nfscl_filllockowner(p, own);
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, NULL, p,
		    mode, NULL, &op);
		if (error == 0) {
			/* now look for a lockowner */
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (!NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					/* Use the lock stateid, if found. */
					stateidp->seqid =
					    lp->nfsl_stateid.seqid;
					stateidp->other[0] =
					    lp->nfsl_stateid.other[0];
					stateidp->other[1] =
					    lp->nfsl_stateid.other[1];
					stateidp->other[2] =
					    lp->nfsl_stateid.other[2];
					NFSUNLOCKCLSTATE();
					return (0);
				}
			}
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		done = 0;
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (!done && owp != NULL) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
				    (mode & op->nfso_mode) == mode) {
					done = 1;
					break;
				}
			}
			if (!done)
				owp = LIST_NEXT(owp, nfsow_list);
		}
		if (!done) {
			NFSUNLOCKCLSTATE();
			return (ENOENT);
		}
		/* for read aheads or write behinds, use the open cred */
		newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}
568 
569 /*
570  * Get an existing open. Search up the parentage tree for a match and
571  * return with the first one found.
572  */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *rown, NFSPROC_T *p, u_int32_t mode, struct nfsclowner **owpp,
    struct nfsclopen **opp)
{
	struct nfsclowner *owp = NULL;
	struct nfsclopen *op;
	NFSPROC_T *nproc;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;

	/*
	 * Start with the given process (or the explicit owner "rown") and,
	 * if no open is found, walk up the process parentage tree via
	 * nfscl_getparent() until a matching open is found or the tree is
	 * exhausted. When "rown" is given, only that one owner is tried.
	 */
	nproc = p;
	op = NULL;
	while (op == NULL && (nproc != NULL || rown != NULL)) {
		if (nproc != NULL) {
			nfscl_filllockowner(nproc, own);
			ownp = own;
		} else {
			ownp = rown;
		}
		/* Search the client list */
		LIST_FOREACH(owp, ohp, nfsow_list) {
			if (!NFSBCMP(owp->nfsow_owner, ownp,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (owp != NULL) {
			/* and look for the correct open */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
				    && (op->nfso_mode & mode) == mode) {
					break;
				}
			}
		}
		/* An explicit owner was given, so do not walk the tree. */
		if (rown != NULL)
			break;
		if (op == NULL)
			nproc = nfscl_getparent(nproc);
	}
	if (op == NULL) {
		return (EBADF);
	}
	if (owpp)
		*owpp = owp;
	*opp = op;
	return (0);
}
621 
622 /*
623  * Release use of an open owner. Called when open operations are done
624  * with the open owner.
625  */
626 APPLESTATIC void
627 nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
628     __unused int candelete, int unlocked)
629 {
630 
631 	if (owp == NULL)
632 		return;
633 	NFSLOCKCLSTATE();
634 	if (!unlocked)
635 		nfscl_lockunlock(&owp->nfsow_rwlock);
636 	nfscl_clrelease(owp->nfsow_clp);
637 	NFSUNLOCKCLSTATE();
638 }
639 
640 /*
641  * Release use of an open structure under an open owner.
642  */
643 APPLESTATIC void
644 nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
645 {
646 	struct nfsclclient *clp;
647 	struct nfsclowner *owp;
648 
649 	if (op == NULL)
650 		return;
651 	NFSLOCKCLSTATE();
652 	owp = op->nfso_own;
653 	nfscl_lockunlock(&owp->nfsow_rwlock);
654 	clp = owp->nfsow_clp;
655 	if (error && candelete && op->nfso_opencnt == 0)
656 		nfscl_freeopen(op, 0);
657 	nfscl_clrelease(clp);
658 	NFSUNLOCKCLSTATE();
659 }
660 
661 /*
662  * Called to get a clientid structure. It will optionally lock the
 * but will release that lock and return the clientid with a reference
664  * but will release that lock and return the clientid with a refernce
665  * count on it.
666  * If the "cred" argument is NULL, a new clientid should not be created.
667  * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
668  * be done.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
670  */
APPLESTATIC int
nfscl_getcl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
    struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfscllockowner *lp, *nlp;
	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	/*
	 * Speculatively allocate a new clientid structure (sized for the
	 * clientid string) before taking the state mutex, so no sleeping
	 * allocation is needed with the mutex held. cred == NULL means a
	 * new clientid must not be created.
	 */
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		MALLOC(newclp, struct nfsclclient *,
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK);
	}
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/* Initialize the new clientid and link it to the mount. */
		clp = newclp;
		NFSBZERO((caddr_t)clp, sizeof(struct nfsclclient) + idlen - 1);
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		LIST_INIT(&clp->nfsc_defunctlockowner);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		nfscl_start_renewthread(clp);
	} else {
		/* Already have one; discard the speculative allocation. */
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			FREE((caddr_t)newclp, M_NFSCLCLIENT);
	}
	/*
	 * Take the exclusive lock if a SetClientID still needs to be done;
	 * otherwise just get a shared reference on the clientid.
	 */
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock)
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR);
	if (!igotlock)
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR);
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		/* Cannot do the RPC without a process and credentials. */
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/* get rid of defunct lockowners */
		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
		    nlp) {
			nfscl_freelockowner(lp, 0);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		/* Retry loop: nap between attempts for the retriable errors. */
		do {
			error = nfsrpc_setclient(VFSTONFS(vnode_mount(vp)),
			    clp, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		     error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	/*
	 * Downgrade the exclusive lock to a shared reference (second arg
	 * of 1) before returning the clientid to the caller.
	 */
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}
797 
798 /*
799  * Get a reference to a clientid and return it, if valid.
800  */
801 APPLESTATIC struct nfsclclient *
802 nfscl_findcl(struct nfsmount *nmp)
803 {
804 	struct nfsclclient *clp;
805 
806 	clp = nmp->nm_clp;
807 	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
808 		return (NULL);
809 	return (clp);
810 }
811 
812 /*
813  * Release the clientid structure. It may be locked or reference counted.
814  */
815 static void
816 nfscl_clrelease(struct nfsclclient *clp)
817 {
818 
819 	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
820 		nfsv4_unlock(&clp->nfsc_lock, 0);
821 	else
822 		nfsv4_relref(&clp->nfsc_lock);
823 }
824 
825 /*
826  * External call for nfscl_clrelease.
827  */
828 APPLESTATIC void
829 nfscl_clientrelease(struct nfsclclient *clp)
830 {
831 
832 	NFSLOCKCLSTATE();
833 	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
834 		nfsv4_unlock(&clp->nfsc_lock, 0);
835 	else
836 		nfsv4_relref(&clp->nfsc_lock);
837 	NFSUNLOCKCLSTATE();
838 }
839 
840 /*
841  * Called when wanting to lock a byte region.
842  */
843 APPLESTATIC int
844 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
845     short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
846     int recovery, u_int8_t *rownp, u_int8_t *ropenownp,
847     struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
848 {
849 	struct nfscllockowner *lp;
850 	struct nfsclopen *op;
851 	struct nfsclclient *clp;
852 	struct nfscllockowner *nlp;
853 	struct nfscllock *nlop, *otherlop;
854 	struct nfscldeleg *dp = NULL, *ldp = NULL;
855 	struct nfscllockownerhead *lhp = NULL;
856 	struct nfsnode *np;
857 	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;
858 	int error = 0, ret, donelocally = 0;
859 	u_int32_t mode;
860 
861 	if (type == F_WRLCK)
862 		mode = NFSV4OPEN_ACCESSWRITE;
863 	else
864 		mode = NFSV4OPEN_ACCESSREAD;
865 	np = VTONFS(vp);
866 	*lpp = NULL;
867 	*newonep = 0;
868 	*donelocallyp = 0;
869 
870 	/*
871 	 * Might need these, so MALLOC them now, to
872 	 * avoid a tsleep() in MALLOC later.
873 	 */
874 	MALLOC(nlp, struct nfscllockowner *,
875 	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
876 	MALLOC(otherlop, struct nfscllock *,
877 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
878 	MALLOC(nlop, struct nfscllock *,
879 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
880 	nlop->nfslo_type = type;
881 	nlop->nfslo_first = off;
882 	if (len == NFS64BITSSET) {
883 		nlop->nfslo_end = NFS64BITSSET;
884 	} else {
885 		nlop->nfslo_end = off + len;
886 		if (nlop->nfslo_end <= nlop->nfslo_first)
887 			error = NFSERR_INVAL;
888 	}
889 
890 	if (!error) {
891 		if (recovery)
892 			clp = rclp;
893 		else
894 			error = nfscl_getcl(vp, cred, p, &clp);
895 	}
896 	if (error) {
897 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
898 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
899 		FREE((caddr_t)nlop, M_NFSCLLOCK);
900 		return (error);
901 	}
902 
903 	op = NULL;
904 	if (recovery) {
905 		ownp = rownp;
906 	} else {
907 		nfscl_filllockowner(p, own);
908 		ownp = own;
909 	}
910 	if (!recovery) {
911 		NFSLOCKCLSTATE();
912 		/*
913 		 * First, search for a delegation. If one exists for this file,
914 		 * the lock can be done locally against it, so long as there
915 		 * isn't a local lock conflict.
916 		 */
917 		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
918 		    np->n_fhp->nfh_len);
919 		/* Just sanity check for correct type of delegation */
920 		if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_RECALL) ||
921 		    (type == F_WRLCK && !(dp->nfsdl_flags & NFSCLDL_WRITE))))
922 			dp = NULL;
923 	}
924 	if (dp != NULL) {
925 		/* Now, find the associated open to get the correct openowner */
926 		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
927 		    np->n_fhp->nfh_len, NULL, p, mode, NULL, &op);
928 		if (ret)
929 			ret = nfscl_getopen(&clp->nfsc_owner,
930 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p,
931 			    mode, NULL, &op);
932 		if (!ret) {
933 			lhp = &dp->nfsdl_lock;
934 			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
935 			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
936 			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
937 			donelocally = 1;
938 		} else {
939 			dp = NULL;
940 		}
941 	}
942 	if (!donelocally) {
943 		/*
944 		 * Get the related Open.
945 		 */
946 		if (recovery)
947 			error = nfscl_getopen(&clp->nfsc_owner,
948 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, ropenownp,
949 			    NULL, mode, NULL, &op);
950 		else
951 			error = nfscl_getopen(&clp->nfsc_owner,
952 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p,
953 			    mode, NULL, &op);
954 		if (!error)
955 			lhp = &op->nfso_lock;
956 	}
957 	if (!error && !recovery)
958 		error = nfscl_localconflict(clp, nlop, ownp, ldp, NULL);
959 	if (error) {
960 		if (!recovery) {
961 			nfscl_clrelease(clp);
962 			NFSUNLOCKCLSTATE();
963 		}
964 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
965 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
966 		FREE((caddr_t)nlop, M_NFSCLLOCK);
967 		return (error);
968 	}
969 
970 	/*
971 	 * Ok, see if a lockowner exists and create one, as required.
972 	 */
973 	LIST_FOREACH(lp, lhp, nfsl_list) {
974 		if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
975 			break;
976 	}
977 	if (lp == NULL) {
978 		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
979 		if (recovery)
980 			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
981 			    NFSV4CL_LOCKNAMELEN);
982 		else
983 			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
984 			    NFSV4CL_LOCKNAMELEN);
985 		nlp->nfsl_seqid = 0;
986 		nlp->nfsl_defunct = 0;
987 		nlp->nfsl_inprog = NULL;
988 		nfscl_lockinit(&nlp->nfsl_rwlock);
989 		LIST_INIT(&nlp->nfsl_lock);
990 		if (donelocally) {
991 			nlp->nfsl_open = NULL;
992 			newnfsstats.cllocallockowners++;
993 		} else {
994 			nlp->nfsl_open = op;
995 			newnfsstats.cllockowners++;
996 		}
997 		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
998 		lp = nlp;
999 		nlp = NULL;
1000 		*newonep = 1;
1001 	}
1002 
1003 	/*
1004 	 * Now, update the byte ranges for locks.
1005 	 */
1006 	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1007 	if (!ret)
1008 		donelocally = 1;
1009 	if (donelocally) {
1010 		*donelocallyp = 1;
1011 		if (!recovery)
1012 			nfscl_clrelease(clp);
1013 	} else {
1014 		/*
1015 		 * Serial modifications on the lock owner for multiple threads
1016 		 * for the same process using a read/write lock.
1017 		 */
1018 		if (!recovery)
1019 			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1020 	}
1021 	if (!recovery)
1022 		NFSUNLOCKCLSTATE();
1023 
1024 	if (nlp)
1025 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1026 	if (nlop)
1027 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1028 	if (otherlop)
1029 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
1030 
1031 	*lpp = lp;
1032 	return (0);
1033 }
1034 
1035 /*
1036  * Called to unlock a byte range, for LockU.
1037  */
1038 APPLESTATIC int
1039 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1040     __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1041     struct nfsclclient *clp, struct nfscllockowner **lpp, int *dorpcp)
1042 {
1043 	struct nfscllockowner *lp;
1044 	struct nfsclowner *owp;
1045 	struct nfsclopen *op;
1046 	struct nfscllock *nlop, *other_lop = NULL;
1047 	struct nfscldeleg *dp;
1048 	struct nfsnode *np;
1049 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1050 	int ret = 0, fnd, error;
1051 
1052 	np = VTONFS(vp);
1053 	*lpp = NULL;
1054 	*dorpcp = 0;
1055 
1056 	/*
1057 	 * Might need these, so MALLOC them now, to
1058 	 * avoid a tsleep() in MALLOC later.
1059 	 */
1060 	MALLOC(nlop, struct nfscllock *,
1061 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1062 	nlop->nfslo_type = F_UNLCK;
1063 	nlop->nfslo_first = off;
1064 	if (len == NFS64BITSSET) {
1065 		nlop->nfslo_end = NFS64BITSSET;
1066 	} else {
1067 		nlop->nfslo_end = off + len;
1068 		if (nlop->nfslo_end <= nlop->nfslo_first) {
1069 			FREE((caddr_t)nlop, M_NFSCLLOCK);
1070 			return (NFSERR_INVAL);
1071 		}
1072 	}
1073 	if (callcnt == 0) {
1074 		MALLOC(other_lop, struct nfscllock *,
1075 		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1076 		*other_lop = *nlop;
1077 	}
1078 	nfscl_filllockowner(p, own);
1079 	dp = NULL;
1080 	NFSLOCKCLSTATE();
1081 	if (callcnt == 0)
1082 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1083 		    np->n_fhp->nfh_len);
1084 
1085 	/* Search for a local conflict. */
1086 	error = nfscl_localconflict(clp, nlop, own, dp, NULL);
1087 	if (error) {
1088 		NFSUNLOCKCLSTATE();
1089 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1090 		if (other_lop != NULL)
1091 			FREE((caddr_t)other_lop, M_NFSCLLOCK);
1092 		return (error);
1093 	}
1094 
1095 	/*
1096 	 * First, unlock any local regions on a delegation.
1097 	 */
1098 	if (dp != NULL) {
1099 		/* Look for this lockowner. */
1100 		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1101 			if (!NFSBCMP(lp->nfsl_owner, own,
1102 			    NFSV4CL_LOCKNAMELEN))
1103 				break;
1104 		}
1105 		if (lp != NULL)
1106 			/* Use other_lop, so nlop is still available */
1107 			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1108 	}
1109 
1110 	/*
1111 	 * Now, find a matching open/lockowner that hasn't already been done,
1112 	 * as marked by nfsl_inprog.
1113 	 */
1114 	lp = NULL;
1115 	fnd = 0;
1116 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1117 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1118 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1119 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1120 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1121 			if (lp->nfsl_inprog == NULL &&
1122 			    !NFSBCMP(lp->nfsl_owner, own,
1123 			     NFSV4CL_LOCKNAMELEN)) {
1124 				fnd = 1;
1125 				break;
1126 			}
1127 		    }
1128 		    if (fnd)
1129 			break;
1130 		}
1131 	    }
1132 	    if (fnd)
1133 		break;
1134 	}
1135 
1136 	if (lp != NULL) {
1137 		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1138 		if (ret)
1139 			*dorpcp = 1;
1140 		/*
1141 		 * Serial modifications on the lock owner for multiple
1142 		 * threads for the same process using a read/write lock.
1143 		 */
1144 		lp->nfsl_inprog = p;
1145 		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1146 		*lpp = lp;
1147 	}
1148 	NFSUNLOCKCLSTATE();
1149 	if (nlop)
1150 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1151 	if (other_lop)
1152 		FREE((caddr_t)other_lop, M_NFSCLLOCK);
1153 	return (0);
1154 }
1155 
1156 /*
1157  * Release all lockowners marked in progess for this process and file.
1158  */
1159 APPLESTATIC void
1160 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
1161 {
1162 	struct nfsclowner *owp;
1163 	struct nfsclopen *op;
1164 	struct nfscllockowner *lp;
1165 	struct nfsnode *np;
1166 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1167 
1168 	np = VTONFS(vp);
1169 	nfscl_filllockowner(p, own);
1170 	NFSLOCKCLSTATE();
1171 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1172 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1173 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1174 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1175 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1176 			if (lp->nfsl_inprog == p &&
1177 			    !NFSBCMP(lp->nfsl_owner, own,
1178 			    NFSV4CL_LOCKNAMELEN)) {
1179 			    lp->nfsl_inprog = NULL;
1180 			    nfscl_lockunlock(&lp->nfsl_rwlock);
1181 			}
1182 		    }
1183 		}
1184 	    }
1185 	}
1186 	nfscl_clrelease(clp);
1187 	NFSUNLOCKCLSTATE();
1188 }
1189 
1190 /*
1191  * Called to find out if any bytes within the byte range specified are
1192  * write locked by the calling process. Used to determine if flushing
1193  * is required before a LockU.
1194  * If in doubt, return 1, so the flush will occur.
1195  */
1196 APPLESTATIC int
1197 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1198     struct ucred *cred, NFSPROC_T *p)
1199 {
1200 	struct nfsclowner *owp;
1201 	struct nfscllockowner *lp;
1202 	struct nfsclopen *op;
1203 	struct nfsclclient *clp;
1204 	struct nfscllock *lop;
1205 	struct nfscldeleg *dp;
1206 	struct nfsnode *np;
1207 	u_int64_t off, end;
1208 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1209 	int error = 0;
1210 
1211 	np = VTONFS(vp);
1212 	switch (fl->l_whence) {
1213 	case SEEK_SET:
1214 	case SEEK_CUR:
1215 		/*
1216 		 * Caller is responsible for adding any necessary offset
1217 		 * when SEEK_CUR is used.
1218 		 */
1219 		off = fl->l_start;
1220 		break;
1221 	case SEEK_END:
1222 		off = np->n_size + fl->l_start;
1223 		break;
1224 	default:
1225 		return (1);
1226 	};
1227 	if (fl->l_len != 0) {
1228 		end = off + fl->l_len;
1229 		if (end < off)
1230 			return (1);
1231 	} else {
1232 		end = NFS64BITSSET;
1233 	}
1234 
1235 	error = nfscl_getcl(vp, cred, p, &clp);
1236 	if (error)
1237 		return (1);
1238 	nfscl_filllockowner(p, own);
1239 	NFSLOCKCLSTATE();
1240 
1241 	/*
1242 	 * First check the delegation locks.
1243 	 */
1244 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1245 	if (dp != NULL) {
1246 		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1247 			if (!NFSBCMP(lp->nfsl_owner, own,
1248 			    NFSV4CL_LOCKNAMELEN))
1249 				break;
1250 		}
1251 		if (lp != NULL) {
1252 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1253 				if (lop->nfslo_first >= end)
1254 					break;
1255 				if (lop->nfslo_end <= off)
1256 					continue;
1257 				if (lop->nfslo_type == F_WRLCK) {
1258 					nfscl_clrelease(clp);
1259 					NFSUNLOCKCLSTATE();
1260 					return (1);
1261 				}
1262 			}
1263 		}
1264 	}
1265 
1266 	/*
1267 	 * Now, check state against the server.
1268 	 */
1269 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1270 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1271 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1272 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1273 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1274 			if (!NFSBCMP(lp->nfsl_owner, own,
1275 			    NFSV4CL_LOCKNAMELEN))
1276 			    break;
1277 		    }
1278 		    if (lp != NULL) {
1279 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1280 			    if (lop->nfslo_first >= end)
1281 				break;
1282 			    if (lop->nfslo_end <= off)
1283 				continue;
1284 			    if (lop->nfslo_type == F_WRLCK) {
1285 				nfscl_clrelease(clp);
1286 				NFSUNLOCKCLSTATE();
1287 				return (1);
1288 			    }
1289 			}
1290 		    }
1291 		}
1292 	    }
1293 	}
1294 	nfscl_clrelease(clp);
1295 	NFSUNLOCKCLSTATE();
1296 	return (0);
1297 }
1298 
1299 /*
1300  * Release a byte range lock owner structure.
1301  */
1302 APPLESTATIC void
1303 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1304 {
1305 	struct nfsclclient *clp;
1306 
1307 	if (lp == NULL)
1308 		return;
1309 	NFSLOCKCLSTATE();
1310 	clp = lp->nfsl_open->nfso_own->nfsow_clp;
1311 	if (error != 0 && candelete &&
1312 	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1313 		nfscl_freelockowner(lp, 0);
1314 	else
1315 		nfscl_lockunlock(&lp->nfsl_rwlock);
1316 	nfscl_clrelease(clp);
1317 	NFSUNLOCKCLSTATE();
1318 }
1319 
1320 /*
1321  * Free up an open structure and any associated byte range lock structures.
1322  */
1323 APPLESTATIC void
1324 nfscl_freeopen(struct nfsclopen *op, int local)
1325 {
1326 
1327 	LIST_REMOVE(op, nfso_list);
1328 	nfscl_freealllocks(&op->nfso_lock, local);
1329 	FREE((caddr_t)op, M_NFSCLOPEN);
1330 	if (local)
1331 		newnfsstats.cllocalopens--;
1332 	else
1333 		newnfsstats.clopens--;
1334 }
1335 
1336 /*
1337  * Free up all lock owners and associated locks.
1338  */
1339 static void
1340 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1341 {
1342 	struct nfscllockowner *lp, *nlp;
1343 
1344 	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1345 		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1346 			panic("nfscllckw");
1347 		nfscl_freelockowner(lp, local);
1348 	}
1349 }
1350 
1351 /*
1352  * Called for an Open when NFSERR_EXPIRED is received from the server.
1353  * If there are no byte range locks nor a Share Deny lost, try to do a
1354  * fresh Open. Otherwise, free the open.
1355  */
1356 static int
1357 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1358     struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1359 {
1360 	struct nfscllockowner *lp;
1361 	struct nfscldeleg *dp;
1362 	int mustdelete = 0, error;
1363 
1364 	/*
1365 	 * Look for any byte range lock(s).
1366 	 */
1367 	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1368 		if (!LIST_EMPTY(&lp->nfsl_lock)) {
1369 			mustdelete = 1;
1370 			break;
1371 		}
1372 	}
1373 
1374 	/*
1375 	 * If no byte range lock(s) nor a Share deny, try to re-open.
1376 	 */
1377 	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1378 		newnfs_copycred(&op->nfso_cred, cred);
1379 		dp = NULL;
1380 		error = nfsrpc_reopen(nmp, op->nfso_fh,
1381 		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1382 		if (error) {
1383 			mustdelete = 1;
1384 			if (dp != NULL) {
1385 				FREE((caddr_t)dp, M_NFSCLDELEG);
1386 				dp = NULL;
1387 			}
1388 		}
1389 		if (dp != NULL)
1390 			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1391 			    op->nfso_fhlen, cred, p, &dp);
1392 	}
1393 
1394 	/*
1395 	 * If a byte range lock or Share deny or couldn't re-open, free it.
1396 	 */
1397 	if (mustdelete)
1398 		nfscl_freeopen(op, 0);
1399 	return (mustdelete);
1400 }
1401 
1402 /*
1403  * Free up an open owner structure.
1404  */
1405 static void
1406 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1407 {
1408 
1409 	LIST_REMOVE(owp, nfsow_list);
1410 	FREE((caddr_t)owp, M_NFSCLOWNER);
1411 	if (local)
1412 		newnfsstats.cllocalopenowners--;
1413 	else
1414 		newnfsstats.clopenowners--;
1415 }
1416 
1417 /*
1418  * Free up a byte range lock owner structure.
1419  */
1420 static void
1421 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1422 {
1423 	struct nfscllock *lop, *nlop;
1424 
1425 	LIST_REMOVE(lp, nfsl_list);
1426 	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1427 		nfscl_freelock(lop, local);
1428 	}
1429 	FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
1430 	if (local)
1431 		newnfsstats.cllocallockowners--;
1432 	else
1433 		newnfsstats.cllockowners--;
1434 }
1435 
1436 /*
1437  * Free up a byte range lock structure.
1438  */
1439 APPLESTATIC void
1440 nfscl_freelock(struct nfscllock *lop, int local)
1441 {
1442 
1443 	LIST_REMOVE(lop, nfslo_list);
1444 	FREE((caddr_t)lop, M_NFSCLLOCK);
1445 	if (local)
1446 		newnfsstats.cllocallocks--;
1447 	else
1448 		newnfsstats.cllocks--;
1449 }
1450 
1451 /*
1452  * Clean out the state related to a delegation.
1453  */
1454 static void
1455 nfscl_cleandeleg(struct nfscldeleg *dp)
1456 {
1457 	struct nfsclowner *owp, *nowp;
1458 	struct nfsclopen *op;
1459 
1460 	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1461 		op = LIST_FIRST(&owp->nfsow_open);
1462 		if (op != NULL) {
1463 			if (LIST_NEXT(op, nfso_list) != NULL)
1464 				panic("nfscleandel");
1465 			nfscl_freeopen(op, 1);
1466 		}
1467 		nfscl_freeopenowner(owp, 1);
1468 	}
1469 	nfscl_freealllocks(&dp->nfsdl_lock, 1);
1470 }
1471 
1472 /*
1473  * Free a delegation.
1474  */
1475 static void
1476 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
1477 {
1478 
1479 	TAILQ_REMOVE(hdp, dp, nfsdl_list);
1480 	LIST_REMOVE(dp, nfsdl_hash);
1481 	FREE((caddr_t)dp, M_NFSCLDELEG);
1482 	newnfsstats.cldelegates--;
1483 	nfscl_delegcnt--;
1484 }
1485 
1486 /*
1487  * Free up all state related to this client structure.
1488  */
1489 static void
1490 nfscl_cleanclient(struct nfsclclient *clp)
1491 {
1492 	struct nfsclowner *owp, *nowp;
1493 	struct nfsclopen *op, *nop;
1494 	struct nfscllockowner *lp, *nlp;
1495 
1496 
1497 	/* get rid of defunct lockowners */
1498 	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
1499 		nfscl_freelockowner(lp, 0);
1500 	}
1501 
1502 	/* Now, all the OpenOwners, etc. */
1503 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1504 		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1505 			nfscl_freeopen(op, 0);
1506 		}
1507 		nfscl_freeopenowner(owp, 0);
1508 	}
1509 }
1510 
1511 /*
1512  * Called when an NFSERR_EXPIRED is received from the server.
1513  */
1514 static void
1515 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1516     struct ucred *cred, NFSPROC_T *p)
1517 {
1518 	struct nfsclowner *owp, *nowp, *towp;
1519 	struct nfsclopen *op, *nop, *top;
1520 	struct nfscldeleg *dp, *ndp;
1521 	int ret, printed = 0;
1522 
1523 	/*
1524 	 * First, merge locally issued Opens into the list for the server.
1525 	 */
1526 	dp = TAILQ_FIRST(&clp->nfsc_deleg);
1527 	while (dp != NULL) {
1528 	    ndp = TAILQ_NEXT(dp, nfsdl_list);
1529 	    owp = LIST_FIRST(&dp->nfsdl_owner);
1530 	    while (owp != NULL) {
1531 		nowp = LIST_NEXT(owp, nfsow_list);
1532 		op = LIST_FIRST(&owp->nfsow_open);
1533 		if (op != NULL) {
1534 		    if (LIST_NEXT(op, nfso_list) != NULL)
1535 			panic("nfsclexp");
1536 		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1537 			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1538 			    NFSV4CL_LOCKNAMELEN))
1539 			    break;
1540 		    }
1541 		    if (towp != NULL) {
1542 			/* Merge opens in */
1543 			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1544 			    if (top->nfso_fhlen == op->nfso_fhlen &&
1545 				!NFSBCMP(top->nfso_fh, op->nfso_fh,
1546 				 op->nfso_fhlen)) {
1547 				top->nfso_mode |= op->nfso_mode;
1548 				top->nfso_opencnt += op->nfso_opencnt;
1549 				break;
1550 			    }
1551 			}
1552 			if (top == NULL) {
1553 			    /* Just add the open to the owner list */
1554 			    LIST_REMOVE(op, nfso_list);
1555 			    op->nfso_own = towp;
1556 			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1557 			    newnfsstats.cllocalopens--;
1558 			    newnfsstats.clopens++;
1559 			}
1560 		    } else {
1561 			/* Just add the openowner to the client list */
1562 			LIST_REMOVE(owp, nfsow_list);
1563 			owp->nfsow_clp = clp;
1564 			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1565 			newnfsstats.cllocalopenowners--;
1566 			newnfsstats.clopenowners++;
1567 			newnfsstats.cllocalopens--;
1568 			newnfsstats.clopens++;
1569 		    }
1570 		}
1571 		owp = nowp;
1572 	    }
1573 	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1574 		printed = 1;
1575 		printf("nfsv4 expired locks lost\n");
1576 	    }
1577 	    nfscl_cleandeleg(dp);
1578 	    nfscl_freedeleg(&clp->nfsc_deleg, dp);
1579 	    dp = ndp;
1580 	}
1581 	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1582 	    panic("nfsclexp");
1583 
1584 	/*
1585 	 * Now, try and reopen against the server.
1586 	 */
1587 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1588 		owp->nfsow_seqid = 0;
1589 		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1590 			ret = nfscl_expireopen(clp, op, nmp, cred, p);
1591 			if (ret && !printed) {
1592 				printed = 1;
1593 				printf("nfsv4 expired locks lost\n");
1594 			}
1595 		}
1596 		if (LIST_EMPTY(&owp->nfsow_open))
1597 			nfscl_freeopenowner(owp, 0);
1598 	}
1599 }
1600 
#ifndef	__FreeBSD__
/*
 * Called from exit() upon process termination.
 * Marks or frees the exiting process's open/lock state on every
 * clientid via nfscl_cleanup_common(). On FreeBSD this hook cannot be
 * patched into exit(); nfscl_cleanupkext() performs the equivalent.
 */
APPLESTATIC void
nfscl_cleanup(NFSPROC_T *p)
{
	struct nfsclclient *clp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	if (!nfscl_inited)
		return;
	nfscl_filllockowner(p, own);

	NFSLOCKCLSTATE();
	/*
	 * Loop through all the clientids, looking for the OpenOwners.
	 */
	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
		nfscl_cleanup_common(clp, own);
	NFSUNLOCKCLSTATE();
}
#endif	/* !__FreeBSD__ */
1624 
1625 /*
1626  * Common code used by nfscl_cleanup() and nfscl_cleanupkext().
1627  * Must be called with CLSTATE lock held.
1628  */
1629 static void
1630 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1631 {
1632 	struct nfsclowner *owp, *nowp;
1633 	struct nfsclopen *op;
1634 	struct nfscllockowner *lp, *nlp;
1635 	struct nfscldeleg *dp;
1636 
1637 	/* First, get rid of local locks on delegations. */
1638 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1639 		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1640 		    if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1641 			if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1642 			    panic("nfscllckw");
1643 			nfscl_freelockowner(lp, 1);
1644 		    }
1645 		}
1646 	}
1647 	owp = LIST_FIRST(&clp->nfsc_owner);
1648 	while (owp != NULL) {
1649 		nowp = LIST_NEXT(owp, nfsow_list);
1650 		if (!NFSBCMP(owp->nfsow_owner, own,
1651 		    NFSV4CL_LOCKNAMELEN)) {
1652 			/*
1653 			 * If there are children that haven't closed the
1654 			 * file descriptors yet, the opens will still be
1655 			 * here. For that case, let the renew thread clear
1656 			 * out the OpenOwner later.
1657 			 */
1658 			if (LIST_EMPTY(&owp->nfsow_open))
1659 				nfscl_freeopenowner(owp, 0);
1660 			else
1661 				owp->nfsow_defunct = 1;
1662 		} else {
1663 			/* look for lockowners on other opens */
1664 			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1665 				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1666 					if (!NFSBCMP(lp->nfsl_owner, own,
1667 					    NFSV4CL_LOCKNAMELEN))
1668 						lp->nfsl_defunct = 1;
1669 				}
1670 			}
1671 		}
1672 		owp = nowp;
1673 	}
1674 
1675 	/* and check the defunct list */
1676 	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
1677 		if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN))
1678 		    lp->nfsl_defunct = 1;
1679 	}
1680 }
1681 
#if defined(APPLEKEXT) || defined(__FreeBSD__)
/*
 * Simulate the call nfscl_cleanup() by looking for open owners associated
 * with processes that no longer exist, since a call to nfscl_cleanup()
 * can't be patched into exit().
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp;

	/* Lock order: process list lock before the CLSTATE lock. */
	NFSPROCLISTLOCK();
	NFSLOCKCLSTATE();
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		/* The owner name encodes the pid; see nfscl_filllockowner(). */
		if (nfscl_procdoesntexist(owp->nfsow_owner))
			nfscl_cleanup_common(clp, owp->nfsow_owner);
	}

	/* and check the defunct list */
	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
		if (nfscl_procdoesntexist(lp->nfsl_owner))
			lp->nfsl_defunct = 1;
	}
	NFSUNLOCKCLSTATE();
	NFSPROCLISTUNLOCK();
}
#endif	/* APPLEKEXT || __FreeBSD__ */
1710 
1711 /*
1712  * Called from nfs umount to free up the clientid.
1713  */
1714 APPLESTATIC void
1715 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1716 {
1717 	struct nfsclclient *clp;
1718 	struct ucred *cred;
1719 	int igotlock;
1720 
1721 	clp = nmp->nm_clp;
1722 	if (clp != NULL) {
1723 		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1724 			panic("nfscl umount");
1725 
1726 		/*
1727 		 * First, handshake with the nfscl renew thread, to terminate
1728 		 * it.
1729 		 */
1730 		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1731 		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1732 			(void) tsleep((caddr_t)clp, PWAIT, "nfsclumnt", hz);
1733 
1734 		NFSLOCKCLSTATE();
1735 		do {
1736 			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1737 			    NFSCLSTATEMUTEXPTR);
1738 		} while (!igotlock);
1739 		NFSUNLOCKCLSTATE();
1740 
1741 		/*
1742 		 * Free up all the state. It will expire on the server, but
1743 		 * maybe we should do a SetClientId/SetClientIdConfirm so
1744 		 * the server throws it away?
1745 		 */
1746 		LIST_REMOVE(clp, nfsc_list);
1747 		nfscl_delegreturnall(clp, p);
1748 		cred = newnfs_getcred();
1749 		(void) nfsrpc_setclient(nmp, clp, cred, p);
1750 		nfscl_cleanclient(clp);
1751 		nmp->nm_clp = NULL;
1752 		NFSFREECRED(cred);
1753 		FREE((caddr_t)clp, M_NFSCLCLIENT);
1754 	}
1755 
1756 }
1757 
1758 /*
1759  * This function is called when a server replies with NFSERR_STALECLIENTID
1760  * or NFSERR_STALESTATEID. It traverses the clientid lists, doing Opens
1761  * and Locks with reclaim. If these fail, it deletes the corresponding state.
1762  */
1763 static void
1764 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1765 {
1766 	struct nfsclowner *owp, *nowp;
1767 	struct nfsclopen *op, *nop;
1768 	struct nfscllockowner *lp, *nlp;
1769 	struct nfscllock *lop, *nlop;
1770 	struct nfscldeleg *dp, *ndp, *tdp;
1771 	struct nfsmount *nmp;
1772 	struct ucred *tcred;
1773 	struct nfsclopenhead extra_open;
1774 	struct nfscldeleghead extra_deleg;
1775 	struct nfsreq *rep;
1776 	u_int64_t len;
1777 	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1778 	int igotlock = 0, error, trycnt, firstlock, s;
1779 
1780 	/*
1781 	 * First, lock the client structure, so everyone else will
1782 	 * block when trying to use state.
1783 	 */
1784 	NFSLOCKCLSTATE();
1785 	do {
1786 		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1787 		    NFSCLSTATEMUTEXPTR);
1788 	} while (!igotlock);
1789 	NFSUNLOCKCLSTATE();
1790 
1791 	nmp = clp->nfsc_nmp;
1792 	if (nmp == NULL)
1793 		panic("nfscl recover");
1794 	trycnt = 5;
1795 	do {
1796 		error = nfsrpc_setclient(nmp, clp, cred, p);
1797 	} while ((error == NFSERR_STALECLIENTID ||
1798 	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
1799 	if (error) {
1800 		nfscl_cleanclient(clp);
1801 		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
1802 		    NFSCLFLAGS_RECOVER);
1803 		NFSLOCKCLSTATE();
1804 		nfsv4_unlock(&clp->nfsc_lock, 0);
1805 		NFSUNLOCKCLSTATE();
1806 		return;
1807 	}
1808 	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
1809 	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
1810 
1811 	/*
1812 	 * Mark requests already queued on the server, so that they don't
1813 	 * initiate another recovery cycle. Any requests already in the
1814 	 * queue that handle state information will have the old stale
1815 	 * clientid/stateid and will get a NFSERR_STALESTATEID or
1816 	 * NFSERR_STALECLIENTID reply from the server. This will be
1817 	 * translated to NFSERR_STALEDONTRECOVER when R_DONTRECOVER is set.
1818 	 */
1819 	s = splsoftclock();
1820 	NFSLOCKREQ();
1821 	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
1822 		if (rep->r_nmp == nmp)
1823 			rep->r_flags |= R_DONTRECOVER;
1824 	}
1825 	NFSUNLOCKREQ();
1826 	splx(s);
1827 
1828 	/* get rid of defunct lockowners */
1829 	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
1830 		nfscl_freelockowner(lp, 0);
1831 	}
1832 
1833 	/*
1834 	 * Now, mark all delegations "need reclaim".
1835 	 */
1836 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
1837 		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
1838 
1839 	TAILQ_INIT(&extra_deleg);
1840 	LIST_INIT(&extra_open);
1841 	/*
1842 	 * Now traverse the state lists, doing Open and Lock Reclaims.
1843 	 */
1844 	tcred = newnfs_getcred();
1845 	owp = LIST_FIRST(&clp->nfsc_owner);
1846 	while (owp != NULL) {
1847 	    nowp = LIST_NEXT(owp, nfsow_list);
1848 	    owp->nfsow_seqid = 0;
1849 	    op = LIST_FIRST(&owp->nfsow_open);
1850 	    while (op != NULL) {
1851 		nop = LIST_NEXT(op, nfso_list);
1852 		if (error != NFSERR_NOGRACE) {
1853 		    /* Search for a delegation to reclaim with the open */
1854 		    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1855 			if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1856 			    continue;
1857 			if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1858 			    mode = NFSV4OPEN_ACCESSWRITE;
1859 			    delegtype = NFSV4OPEN_DELEGATEWRITE;
1860 			} else {
1861 			    mode = NFSV4OPEN_ACCESSREAD;
1862 			    delegtype = NFSV4OPEN_DELEGATEREAD;
1863 			}
1864 			if ((op->nfso_mode & mode) == mode &&
1865 			    op->nfso_fhlen == dp->nfsdl_fhlen &&
1866 			    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
1867 			    break;
1868 		    }
1869 		    ndp = dp;
1870 		    if (dp == NULL)
1871 			delegtype = NFSV4OPEN_DELEGATENONE;
1872 		    newnfs_copycred(&op->nfso_cred, tcred);
1873 		    error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
1874 			op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
1875 			op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
1876 			tcred, p);
1877 		    if (!error) {
1878 			/* Handle any replied delegation */
1879 			if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
1880 			    || NFSMNT_RDONLY(nmp->nm_mountp))) {
1881 			    if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
1882 				mode = NFSV4OPEN_ACCESSWRITE;
1883 			    else
1884 				mode = NFSV4OPEN_ACCESSREAD;
1885 			    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1886 				if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1887 				    continue;
1888 				if ((op->nfso_mode & mode) == mode &&
1889 				    op->nfso_fhlen == dp->nfsdl_fhlen &&
1890 				    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
1891 				    op->nfso_fhlen)) {
1892 				    dp->nfsdl_stateid = ndp->nfsdl_stateid;
1893 				    dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
1894 				    dp->nfsdl_ace = ndp->nfsdl_ace;
1895 				    dp->nfsdl_change = ndp->nfsdl_change;
1896 				    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
1897 				    if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
1898 					dp->nfsdl_flags |= NFSCLDL_RECALL;
1899 				    FREE((caddr_t)ndp, M_NFSCLDELEG);
1900 				    ndp = NULL;
1901 				    break;
1902 				}
1903 			    }
1904 			}
1905 			if (ndp != NULL)
1906 			    TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
1907 
1908 			/* and reclaim all byte range locks */
1909 			lp = LIST_FIRST(&op->nfso_lock);
1910 			while (lp != NULL) {
1911 			    nlp = LIST_NEXT(lp, nfsl_list);
1912 			    lp->nfsl_seqid = 0;
1913 			    firstlock = 1;
1914 			    lop = LIST_FIRST(&lp->nfsl_lock);
1915 			    while (lop != NULL) {
1916 				nlop = LIST_NEXT(lop, nfslo_list);
1917 				if (lop->nfslo_end == NFS64BITSSET)
1918 				    len = NFS64BITSSET;
1919 				else
1920 				    len = lop->nfslo_end - lop->nfslo_first;
1921 				if (error != NFSERR_NOGRACE)
1922 				    error = nfscl_trylock(nmp, NULL,
1923 					op->nfso_fh, op->nfso_fhlen, lp,
1924 					firstlock, 1, lop->nfslo_first, len,
1925 					lop->nfslo_type, tcred, p);
1926 				if (error != 0)
1927 				    nfscl_freelock(lop, 0);
1928 				else
1929 				    firstlock = 0;
1930 				lop = nlop;
1931 			    }
1932 			    /* If no locks, but a lockowner, just delete it. */
1933 			    if (LIST_EMPTY(&lp->nfsl_lock))
1934 				nfscl_freelockowner(lp, 0);
1935 			    lp = nlp;
1936 			}
1937 		    } else {
1938 			nfscl_freeopen(op, 0);
1939 		    }
1940 		}
1941 		op = nop;
1942 	    }
1943 	    owp = nowp;
1944 	}
1945 
1946 	/*
1947 	 * Now, try and get any delegations not yet reclaimed by cobbling
1948 	 * to-gether an appropriate open.
1949 	 */
1950 	nowp = NULL;
1951 	dp = TAILQ_FIRST(&clp->nfsc_deleg);
1952 	while (dp != NULL) {
1953 	    ndp = TAILQ_NEXT(dp, nfsdl_list);
1954 	    if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
1955 		if (nowp == NULL) {
1956 		    MALLOC(nowp, struct nfsclowner *,
1957 			sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
1958 		    /*
1959 		     * Name must be as long an largest possible
1960 		     * NFSV4CL_LOCKNAMELEN. 12 for now.
1961 		     */
1962 		    NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
1963 			NFSV4CL_LOCKNAMELEN);
1964 		    LIST_INIT(&nowp->nfsow_open);
1965 		    nowp->nfsow_clp = clp;
1966 		    nowp->nfsow_seqid = 0;
1967 		    nowp->nfsow_defunct = 0;
1968 		    nfscl_lockinit(&nowp->nfsow_rwlock);
1969 		}
1970 		nop = NULL;
1971 		if (error != NFSERR_NOGRACE) {
1972 		    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
1973 			dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
1974 		    nop->nfso_own = nowp;
1975 		    if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1976 			nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
1977 			delegtype = NFSV4OPEN_DELEGATEWRITE;
1978 		    } else {
1979 			nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
1980 			delegtype = NFSV4OPEN_DELEGATEREAD;
1981 		    }
1982 		    nop->nfso_opencnt = 0;
1983 		    nop->nfso_posixlock = 1;
1984 		    nop->nfso_fhlen = dp->nfsdl_fhlen;
1985 		    NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
1986 		    LIST_INIT(&nop->nfso_lock);
1987 		    nop->nfso_stateid.seqid = 0;
1988 		    nop->nfso_stateid.other[0] = 0;
1989 		    nop->nfso_stateid.other[1] = 0;
1990 		    nop->nfso_stateid.other[2] = 0;
1991 		    newnfs_copycred(&dp->nfsdl_cred, tcred);
1992 		    newnfs_copyincred(tcred, &nop->nfso_cred);
1993 		    tdp = NULL;
1994 		    error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
1995 			nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
1996 			nop->nfso_mode, nop, NULL, 0, &tdp, 1,
1997 			delegtype, tcred, p);
1998 		    if (tdp != NULL) {
1999 			if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2000 			    mode = NFSV4OPEN_ACCESSWRITE;
2001 			else
2002 			    mode = NFSV4OPEN_ACCESSREAD;
2003 			if ((nop->nfso_mode & mode) == mode &&
2004 			    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2005 			    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2006 			    nop->nfso_fhlen)) {
2007 			    dp->nfsdl_stateid = tdp->nfsdl_stateid;
2008 			    dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2009 			    dp->nfsdl_ace = tdp->nfsdl_ace;
2010 			    dp->nfsdl_change = tdp->nfsdl_change;
2011 			    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2012 			    if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2013 				dp->nfsdl_flags |= NFSCLDL_RECALL;
2014 			    FREE((caddr_t)tdp, M_NFSCLDELEG);
2015 			} else {
2016 			    TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2017 			}
2018 		    }
2019 		}
2020 		if (error) {
2021 		    if (nop != NULL)
2022 			FREE((caddr_t)nop, M_NFSCLOPEN);
2023 		    /*
2024 		     * Couldn't reclaim it, so throw the state
2025 		     * away. Ouch!!
2026 		     */
2027 		    nfscl_cleandeleg(dp);
2028 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
2029 		} else {
2030 		    LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2031 		}
2032 	    }
2033 	    dp = ndp;
2034 	}
2035 
2036 	/*
2037 	 * Now, get rid of extra Opens and Delegations.
2038 	 */
2039 	LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2040 		do {
2041 			newnfs_copycred(&op->nfso_cred, tcred);
2042 			error = nfscl_tryclose(op, tcred, nmp, p);
2043 			if (error == NFSERR_GRACE)
2044 				(void) nfs_catnap(PZERO, "nfsexcls");
2045 		} while (error == NFSERR_GRACE);
2046 		LIST_REMOVE(op, nfso_list);
2047 		FREE((caddr_t)op, M_NFSCLOPEN);
2048 	}
2049 	if (nowp != NULL)
2050 		FREE((caddr_t)nowp, M_NFSCLOWNER);
2051 
2052 	TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2053 		do {
2054 			newnfs_copycred(&dp->nfsdl_cred, tcred);
2055 			error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2056 			if (error == NFSERR_GRACE)
2057 				(void) nfs_catnap(PZERO, "nfsexdlg");
2058 		} while (error == NFSERR_GRACE);
2059 		TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2060 		FREE((caddr_t)dp, M_NFSCLDELEG);
2061 	}
2062 
2063 	NFSLOCKCLSTATE();
2064 	nfsv4_unlock(&clp->nfsc_lock, 0);
2065 	NFSUNLOCKCLSTATE();
2066 	NFSFREECRED(tcred);
2067 }
2068 
2069 /*
2070  * This function is called when a server replies with NFSERR_EXPIRED.
2071  * It deletes all state for the client and does a fresh SetClientId/confirm.
2072  * XXX Someday it should post a signal to the process(es) that hold the
2073  * state, so they know that lock state has been lost.
2074  */
APPLESTATIC int
nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
{
	struct nfscllockowner *lp, *nlp;
	struct nfsmount *nmp;
	struct ucred *cred;
	int igotlock = 0, error, trycnt;

	/*
	 * If the clientid has gone away or a new SetClientid has already
	 * been done, just return ok.
	 */
	if (clp == NULL || clidrev != clp->nfsc_clientidrev)
		return (0);

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
	 * that only one thread does the work.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
	/*
	 * Loop until the exclusive sleep lock is acquired, or some other
	 * thread has cleared NFSCLFLAGS_EXPIREIT (i.e. done the work).
	 */
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR);
	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
		/* Another thread handled the expiry; nothing left to do. */
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl expired");
	cred = newnfs_getcred();
	/*
	 * Do a fresh SetClientId/confirm, retrying a few times when the
	 * server replies with a stale error.
	 */
	trycnt = 5;
	do {
		error = nfsrpc_setclient(nmp, clp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		/*
		 * Clear out any state.
		 */
		nfscl_cleanclient(clp);
		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
		    NFSCLFLAGS_RECOVER);
	} else {
		/* get rid of defunct lockowners */
		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
		    nlp) {
			nfscl_freelockowner(lp, 0);
		}

		/*
		 * Expire the state for the client.
		 */
		nfscl_expireclient(clp, nmp, cred, p);
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	}
	/* Release the credentials, clear the in-progress flag and unlock. */
	NFSFREECRED(cred);
	clp->nfsc_flags &= ~NFSCLFLAGS_EXPIREIT;
	NFSLOCKCLSTATE();
	nfsv4_unlock(&clp->nfsc_lock, 0);
	NFSUNLOCKCLSTATE();
	return (error);
}
2146 
2147 /*
2148  * This function inserts a lock in the list after insert_lop.
2149  */
2150 static void
2151 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2152     struct nfscllock *insert_lop, int local)
2153 {
2154 
2155 	if ((struct nfscllockowner *)insert_lop == lp)
2156 		LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2157 	else
2158 		LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2159 	if (local)
2160 		newnfsstats.cllocallocks++;
2161 	else
2162 		newnfsstats.cllocks++;
2163 }
2164 
2165 /*
2166  * This function updates the locking for a lock owner and given file. It
2167  * maintains a list of lock ranges ordered on increasing file offset that
2168  * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2169  * It always adds new_lop to the list and sometimes uses the one pointed
2170  * at by other_lopp.
2171  * Returns 1 if the locks were modified, 0 otherwise.
2172  */
static int
nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
    struct nfscllock **other_lopp, int local)
{
	struct nfscllock *new_lop = *new_lopp;
	struct nfscllock *lop, *tlop, *ilop;
	struct nfscllock *other_lop;
	int unlock = 0, modified = 0;
	u_int64_t tmp;

	/*
	 * Work down the list until the lock is merged.
	 */
	if (new_lop->nfslo_type == F_UNLCK)
		unlock = 1;
	/*
	 * ilop tracks the element the new lock will be inserted after.
	 * It starts as the list head (cast to a lock pointer), which
	 * nfscl_insertlock() recognizes as "insert at the head".
	 */
	ilop = (struct nfscllock *)lp;
	lop = LIST_FIRST(&lp->nfsl_lock);
	while (lop != NULL) {
	    /*
	     * Only check locks for this file that aren't before the start of
	     * new lock's range.
	     */
	    if (lop->nfslo_end >= new_lop->nfslo_first) {
		if (new_lop->nfslo_end < lop->nfslo_first) {
		    /*
		     * If the new lock ends before the start of the
		     * current lock's range, no merge, just insert
		     * the new lock.
		     */
		    break;
		}
		if (new_lop->nfslo_type == lop->nfslo_type ||
		    (new_lop->nfslo_first <= lop->nfslo_first &&
		     new_lop->nfslo_end >= lop->nfslo_end)) {
		    /*
		     * This lock can be absorbed by the new lock/unlock.
		     * This happens when it covers the entire range
		     * of the old lock or is contiguous
		     * with the old lock and is of the same type or an
		     * unlock.
		     */
		    if (new_lop->nfslo_type != lop->nfslo_type ||
			new_lop->nfslo_first != lop->nfslo_first ||
			new_lop->nfslo_end != lop->nfslo_end)
			modified = 1;
		    /* Grow the new lock to cover the old one's range. */
		    if (lop->nfslo_first < new_lop->nfslo_first)
			new_lop->nfslo_first = lop->nfslo_first;
		    if (lop->nfslo_end > new_lop->nfslo_end)
			new_lop->nfslo_end = lop->nfslo_end;
		    /* Free the absorbed lock and continue with the next. */
		    tlop = lop;
		    lop = LIST_NEXT(lop, nfslo_list);
		    nfscl_freelock(tlop, local);
		    continue;
		}

		/*
		 * All these cases are for contiguous locks that are not the
		 * same type, so they can't be merged.
		 */
		if (new_lop->nfslo_first <= lop->nfslo_first) {
		    /*
		     * This case is where the new lock overlaps with the
		     * first part of the old lock. Move the start of the
		     * old lock to just past the end of the new lock. The
		     * new lock will be inserted in front of the old, since
		     * ilop hasn't been updated. (We are done now.)
		     */
		    if (lop->nfslo_first != new_lop->nfslo_end) {
			lop->nfslo_first = new_lop->nfslo_end;
			modified = 1;
		    }
		    break;
		}
		if (new_lop->nfslo_end >= lop->nfslo_end) {
		    /*
		     * This case is where the new lock overlaps with the
		     * end of the old lock's range. Move the old lock's
		     * end to just before the new lock's first and insert
		     * the new lock after the old lock.
		     * Might not be done yet, since the new lock could
		     * overlap further locks with higher ranges.
		     */
		    if (lop->nfslo_end != new_lop->nfslo_first) {
			lop->nfslo_end = new_lop->nfslo_first;
			modified = 1;
		    }
		    ilop = lop;
		    lop = LIST_NEXT(lop, nfslo_list);
		    continue;
		}
		/*
		 * The final case is where the new lock's range is in the
		 * middle of the current lock's and splits the current lock
		 * up. Use *other_lopp to handle the second part of the
		 * split old lock range. (We are done now.)
		 * For unlock, we use new_lop as other_lop and tmp, since
		 * other_lop and new_lop are the same for this case.
		 * We noted the unlock case above, so we don't need
		 * new_lop->nfslo_type any longer.
		 */
		tmp = new_lop->nfslo_first;
		if (unlock) {
		    other_lop = new_lop;
		    *new_lopp = NULL;
		} else {
		    other_lop = *other_lopp;
		    *other_lopp = NULL;
		}
		other_lop->nfslo_first = new_lop->nfslo_end;
		other_lop->nfslo_end = lop->nfslo_end;
		other_lop->nfslo_type = lop->nfslo_type;
		lop->nfslo_end = tmp;
		nfscl_insertlock(lp, other_lop, lop, local);
		ilop = lop;
		modified = 1;
		break;
	    }
	    ilop = lop;
	    lop = LIST_NEXT(lop, nfslo_list);
	    if (lop == NULL)
		break;
	}

	/*
	 * Insert the new lock in the list at the appropriate place.
	 */
	if (!unlock) {
		nfscl_insertlock(lp, new_lop, ilop, local);
		*new_lopp = NULL;
		modified = 1;
	}
	return (modified);
}
2306 
2307 /*
2308  * This function must be run as a kernel thread.
2309  * It does Renew Ops and recovery, when required.
2310  */
APPLESTATIC void
nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp, *olp;
	struct nfscldeleghead dh;
	struct nfscllockownerhead lh;
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;
	u_int32_t clidrev;
	int error, cbpathdown, islept, igotlock, ret, clearok;

	cred = newnfs_getcred();
	clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
	for(;;) {
		newnfs_setroot(cred);
		cbpathdown = 0;
		if (clp->nfsc_flags & NFSCLFLAGS_RECOVER)
			nfscl_recover(clp, cred, p);
		/*
		 * When the lease is due for renewal and a clientid is
		 * held, do a RenewOp and handle errors that require
		 * recovery or a fresh SetClientID.
		 */
		if (clp->nfsc_expire <= NFSD_MONOSEC &&
		    (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
			clidrev = clp->nfsc_clientidrev;
			error = nfsrpc_renew(clp, cred, p);
			if (error == NFSERR_CBPATHDOWN)
			    cbpathdown = 1;
			else if (error == NFSERR_STALECLIENTID)
			    clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
			else if (error == NFSERR_EXPIRED)
			    (void) nfscl_hasexpired(clp, clidrev, p);
		}

		/*
		 * lh collects defunct lockowners to be freed and dh
		 * collects delegations to be returned, both after the
		 * state lock has been dropped.
		 */
		LIST_INIT(&lh);
		TAILQ_INIT(&dh);
		NFSLOCKCLSTATE();
		if (cbpathdown)
			/* It's a Total Recall! */
			nfscl_totalrecall(clp);

		/*
		 * Now, handle defunct owners.
		 */
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (owp != NULL) {
		    nowp = LIST_NEXT(owp, nfsow_list);
		    if (LIST_EMPTY(&owp->nfsow_open)) {
			if (owp->nfsow_defunct)
			    nfscl_freeopenowner(owp, 0);
		    } else {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    lp = LIST_FIRST(&op->nfso_lock);
			    while (lp != NULL) {
				nlp = LIST_NEXT(lp, nfsl_list);
				if (lp->nfsl_defunct &&
				    LIST_EMPTY(&lp->nfsl_lock)) {
				    /*
				     * Keep a single lockowner per owner
				     * string on lh; free duplicates now.
				     */
				    LIST_FOREACH(olp, &lh, nfsl_list) {
					if (!NFSBCMP(olp->nfsl_owner,
					    lp->nfsl_owner,NFSV4CL_LOCKNAMELEN))
					    break;
				    }
				    if (olp == NULL) {
					LIST_REMOVE(lp, nfsl_list);
					LIST_INSERT_HEAD(&lh, lp, nfsl_list);
				    } else {
					nfscl_freelockowner(lp, 0);
				    }
				}
				lp = nlp;
			    }
			}
		    }
		    owp = nowp;
		}

		/* also search the defunct list */
		lp = LIST_FIRST(&clp->nfsc_defunctlockowner);
		while (lp != NULL) {
		    nlp = LIST_NEXT(lp, nfsl_list);
		    if (lp->nfsl_defunct) {
			LIST_FOREACH(olp, &lh, nfsl_list) {
			    if (!NFSBCMP(olp->nfsl_owner, lp->nfsl_owner,
				NFSV4CL_LOCKNAMELEN))
				break;
			}
			if (olp == NULL) {
			    LIST_REMOVE(lp, nfsl_list);
			    LIST_INSERT_HEAD(&lh, lp, nfsl_list);
			} else {
			    nfscl_freelockowner(lp, 0);
			}
		    }
		    lp = nlp;
		}
		/* and release defunct lock owners */
		LIST_FOREACH_SAFE(lp, &lh, nfsl_list, nlp) {
		    nfscl_freelockowner(lp, 0);
		}

		/*
		 * Do the recall on any delegations. To avoid trouble, always
		 * come back up here after having slept.
		 */
		igotlock = 0;
tryagain:
		dp = TAILQ_FIRST(&clp->nfsc_deleg);
		while (dp != NULL) {
			ndp = TAILQ_NEXT(dp, nfsdl_list);
			if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				    if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				    }
				    dp->nfsdl_rwlock.nfslock_lock |=
					NFSV4LOCK_WANTED;
				    (void) nfsmsleep(&dp->nfsdl_rwlock,
					NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
					NULL);
				    goto tryagain;
				}
				while (!igotlock) {
				    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					&islept, NFSCLSTATEMUTEXPTR);
				    if (islept)
					goto tryagain;
				}
				/* Drop the state lock for the recall RPC. */
				NFSUNLOCKCLSTATE();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
				    NULL, cred, p);
				if (!ret) {
				    /*
				     * Recall succeeded; move the delegation
				     * to dh for the DelegReturn below.
				     */
				    nfscl_cleandeleg(dp);
				    TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					nfsdl_list);
				    LIST_REMOVE(dp, nfsdl_hash);
				    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
				    nfscl_delegcnt--;
				    newnfsstats.cldelegates--;
				}
				NFSLOCKCLSTATE();
			}
			dp = ndp;
		}

		/*
		 * Clear out old delegations, if we are above the high water
		 * mark. Only clear out ones with no state related to them.
		 * The tailq list is in LRU order.
		 */
		dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
		while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
		    ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
		    if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
			dp->nfsdl_rwlock.nfslock_lock == 0 &&
			dp->nfsdl_timestamp < NFSD_MONOSEC &&
			!(dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
			  NFSCLDL_NEEDRECLAIM))) {
			/* Only clear if it has no opens and no locks. */
			clearok = 1;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			    op = LIST_FIRST(&owp->nfsow_open);
			    if (op != NULL) {
				clearok = 0;
				break;
			    }
			}
			if (clearok) {
			    LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
				if (!LIST_EMPTY(&lp->nfsl_lock)) {
				    clearok = 0;
				    break;
				}
			    }
			}
			if (clearok) {
			    TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			    LIST_REMOVE(dp, nfsdl_hash);
			    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
			    nfscl_delegcnt--;
			    newnfsstats.cldelegates--;
			}
		    }
		    dp = ndp;
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();

		/*
		 * Delegreturn any delegations cleaned out or recalled.
		 */
		TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
			TAILQ_REMOVE(&dh, dp, nfsdl_list);
			FREE((caddr_t)dp, M_NFSCLDELEG);
		}

#if defined(APPLEKEXT) || defined(__FreeBSD__)
		/*
		 * Simulate the calls to nfscl_cleanup() when a process
		 * exits, since the call can't be patched into exit().
		 */
		{
			struct timespec mytime;
			static time_t prevsec = 0;

			NFSGETNANOTIME(&mytime);
			if (prevsec != mytime.tv_sec) {
				prevsec = mytime.tv_sec;
				nfscl_cleanupkext(clp);
			}
		}
#endif	/* APPLEKEXT || __FreeBSD__ */

		/* Sleep for up to a second, unless a recovery is pending. */
		if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
		    (void) tsleep((caddr_t)clp, PWAIT, "nfscl", hz);
		if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
			/*
			 * Unmount in progress: release the credentials,
			 * clear the thread flag and wake any thread
			 * sleeping on clp before exiting.
			 */
			NFSFREECRED(cred);
			clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
			wakeup((caddr_t)clp);
			return;
		}
	}
}
2539 
2540 /*
2541  * Initiate state recovery. Called when NFSERR_STALECLIENTID or
2542  * NFSERR_STALESTATEID is received.
2543  */
2544 APPLESTATIC void
2545 nfscl_initiate_recovery(struct nfsclclient *clp)
2546 {
2547 
2548 	if (clp == NULL)
2549 		return;
2550 	NFSLOCKCLSTATE();
2551 	clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2552 	NFSUNLOCKCLSTATE();
2553 	wakeup((caddr_t)clp);
2554 }
2555 
2556 /*
2557  * Dump out the state stuff for debugging.
2558  */
2559 APPLESTATIC void
2560 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2561     int lockowner, int locks)
2562 {
2563 	struct nfsclclient *clp;
2564 	struct nfsclowner *owp;
2565 	struct nfsclopen *op;
2566 	struct nfscllockowner *lp;
2567 	struct nfscllock *lop;
2568 	struct nfscldeleg *dp;
2569 
2570 	clp = nmp->nm_clp;
2571 	if (clp == NULL) {
2572 		printf("nfscl dumpstate NULL clp\n");
2573 		return;
2574 	}
2575 	NFSLOCKCLSTATE();
2576 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2577 	  LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2578 	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2579 		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2580 		    owp->nfsow_owner[0], owp->nfsow_owner[1],
2581 		    owp->nfsow_owner[2], owp->nfsow_owner[3],
2582 		    owp->nfsow_seqid);
2583 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2584 		if (opens)
2585 		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2586 			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2587 			op->nfso_stateid.other[2], op->nfso_opencnt,
2588 			op->nfso_fh[12]);
2589 		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2590 		    if (lockowner)
2591 			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2592 			    lp->nfsl_owner[0], lp->nfsl_owner[1],
2593 			    lp->nfsl_owner[2], lp->nfsl_owner[3],
2594 			    lp->nfsl_seqid,
2595 			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2596 			    lp->nfsl_stateid.other[2]);
2597 		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2598 			if (locks)
2599 #ifdef __FreeBSD__
2600 			    printf("lck typ=%d fst=%ju end=%ju\n",
2601 				lop->nfslo_type, (intmax_t)lop->nfslo_first,
2602 				(intmax_t)lop->nfslo_end);
2603 #else
2604 			    printf("lck typ=%d fst=%qd end=%qd\n",
2605 				lop->nfslo_type, lop->nfslo_first,
2606 				lop->nfslo_end);
2607 #endif
2608 		    }
2609 		}
2610 	    }
2611 	  }
2612 	}
2613 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2614 	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2615 		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2616 		    owp->nfsow_owner[0], owp->nfsow_owner[1],
2617 		    owp->nfsow_owner[2], owp->nfsow_owner[3],
2618 		    owp->nfsow_seqid);
2619 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2620 		if (opens)
2621 		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2622 			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2623 			op->nfso_stateid.other[2], op->nfso_opencnt,
2624 			op->nfso_fh[12]);
2625 		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2626 		    if (lockowner)
2627 			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2628 			    lp->nfsl_owner[0], lp->nfsl_owner[1],
2629 			    lp->nfsl_owner[2], lp->nfsl_owner[3],
2630 			    lp->nfsl_seqid,
2631 			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2632 			    lp->nfsl_stateid.other[2]);
2633 		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2634 			if (locks)
2635 #ifdef __FreeBSD__
2636 			    printf("lck typ=%d fst=%ju end=%ju\n",
2637 				lop->nfslo_type, (intmax_t)lop->nfslo_first,
2638 				(intmax_t)lop->nfslo_end);
2639 #else
2640 			    printf("lck typ=%d fst=%qd end=%qd\n",
2641 				lop->nfslo_type, lop->nfslo_first,
2642 				lop->nfslo_end);
2643 #endif
2644 		    }
2645 		}
2646 	    }
2647 	}
2648 	NFSUNLOCKCLSTATE();
2649 }
2650 
2651 /*
2652  * Check for duplicate open owners and opens.
2653  * (Only used as a diagnostic aid.)
2654  */
APPLESTATIC void
nfscl_dupopen(vnode_t vp, int dupopens)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *owp2;
	struct nfsclopen *op, *op2;
	struct nfsfh *nfhp;

	clp = VFSTONFS(vnode_mount(vp))->nm_clp;
	if (clp == NULL) {
		printf("nfscl dupopen NULL clp\n");
		return;
	}
	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();

	/*
	 * First, search for duplicate owners.
	 * These should never happen!
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		/* Two distinct owners with the same owner string is a bug. */
		if (owp != owp2 &&
		    !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
		    NFSV4CL_LOCKNAMELEN)) {
			NFSUNLOCKCLSTATE();
			printf("DUP OWNER\n");
			nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
			return;
		}
	    }
	}

	/*
	 * Now, search for duplicate stateids.
	 * These shouldn't happen, either.
	 * All-zero stateids are skipped here (presumably not yet filled
	 * in by the server -- NOTE(review): confirm).
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op != op2 &&
			    (op->nfso_stateid.other[0] != 0 ||
			     op->nfso_stateid.other[1] != 0 ||
			     op->nfso_stateid.other[2] != 0) &&
			    op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
			    op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
			    op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
			    NFSUNLOCKCLSTATE();
			    printf("DUP STATEID\n");
			    nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
				0);
			    return;
			}
		    }
		}
	    }
	}

	/*
	 * Now search for duplicate opens.
	 * Duplicate opens for the same owner
	 * should never occur. Other duplicates are
	 * possible and are checked for if "dupopens"
	 * is true.
	 * Only opens for this vnode's file handle are considered.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		if (nfhp->nfh_len == op2->nfso_fhlen &&
		    !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
		    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
				!NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
				(!NFSBCMP(op->nfso_own->nfsow_owner,
				 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
				 dupopens)) {
				/* Same owner means a broken dup; else just report. */
				if (!NFSBCMP(op->nfso_own->nfsow_owner,
				    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
				    NFSUNLOCKCLSTATE();
				    printf("BADDUP OPEN\n");
				} else {
				    NFSUNLOCKCLSTATE();
				    printf("DUP OPEN\n");
				}
				nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
				    0, 0);
				return;
			    }
			}
		    }
		}
	    }
	}
	NFSUNLOCKCLSTATE();
}
2751 
2752 /*
2753  * During close, find an open that needs to be dereferenced and
2754  * dereference it. If there are no more opens for this file,
2755  * log a message to that effect.
2756  * Opens aren't actually Close'd until VOP_INACTIVE() is performed
2757  * on the file's vnode.
2758  * This is the safe way, since it is difficult to identify
2759  * which open the close is for and I/O can be performed after the
2760  * close(2) system call when a file is mmap'd.
2761  * If it returns 0 for success, there will be a referenced
2762  * clp returned via clpp.
2763  */
2764 APPLESTATIC int
2765 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
2766 {
2767 	struct nfsclclient *clp;
2768 	struct nfsclowner *owp;
2769 	struct nfsclopen *op;
2770 	struct nfscldeleg *dp;
2771 	struct nfsfh *nfhp;
2772 	int error, notdecr;
2773 
2774 	error = nfscl_getcl(vp, NULL, NULL, &clp);
2775 	if (error)
2776 		return (error);
2777 	*clpp = clp;
2778 
2779 	nfhp = VTONFS(vp)->n_fhp;
2780 	notdecr = 1;
2781 	NFSLOCKCLSTATE();
2782 	/*
2783 	 * First, look for one under a delegation that was locally issued
2784 	 * and just decrement the opencnt for it. Since all my Opens against
2785 	 * the server are DENY_NONE, I don't see a problem with hanging
2786 	 * onto them. (It is much easier to use one of the extant Opens
2787 	 * that I already have on the server when a Delegation is recalled
2788 	 * than to do fresh Opens.) Someday, I might need to rethink this, but.
2789 	 */
2790 	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
2791 	if (dp != NULL) {
2792 		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2793 			op = LIST_FIRST(&owp->nfsow_open);
2794 			if (op != NULL) {
2795 				/*
2796 				 * Since a delegation is for a file, there
2797 				 * should never be more than one open for
2798 				 * each openowner.
2799 				 */
2800 				if (LIST_NEXT(op, nfso_list) != NULL)
2801 					panic("nfscdeleg opens");
2802 				if (notdecr && op->nfso_opencnt > 0) {
2803 					notdecr = 0;
2804 					op->nfso_opencnt--;
2805 					break;
2806 				}
2807 			}
2808 		}
2809 	}
2810 
2811 	/* Now process the opens against the server. */
2812 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2813 		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2814 			if (op->nfso_fhlen == nfhp->nfh_len &&
2815 			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
2816 			    nfhp->nfh_len)) {
2817 				/* Found an open, decrement cnt if possible */
2818 				if (notdecr && op->nfso_opencnt > 0) {
2819 					notdecr = 0;
2820 					op->nfso_opencnt--;
2821 				}
2822 				/*
2823 				 * There are more opens, so just return.
2824 				 */
2825 				if (op->nfso_opencnt > 0) {
2826 					NFSUNLOCKCLSTATE();
2827 					return (0);
2828 				}
2829 			}
2830 		}
2831 	}
2832 	NFSUNLOCKCLSTATE();
2833 	if (notdecr)
2834 		printf("nfscl: never fnd open\n");
2835 	return (0);
2836 }
2837 
2838 APPLESTATIC int
2839 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
2840 {
2841 	struct nfsclclient *clp;
2842 	struct nfsclowner *owp, *nowp;
2843 	struct nfsclopen *op;
2844 	struct nfscldeleg *dp;
2845 	struct nfsfh *nfhp;
2846 	int error;
2847 
2848 	error = nfscl_getcl(vp, NULL, NULL, &clp);
2849 	if (error)
2850 		return (error);
2851 	*clpp = clp;
2852 
2853 	nfhp = VTONFS(vp)->n_fhp;
2854 	NFSLOCKCLSTATE();
2855 	/*
2856 	 * First get rid of the local Open structures, which should be no
2857 	 * longer in use.
2858 	 */
2859 	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
2860 	if (dp != NULL) {
2861 		LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
2862 			op = LIST_FIRST(&owp->nfsow_open);
2863 			if (op != NULL) {
2864 				KASSERT((op->nfso_opencnt == 0),
2865 				    ("nfscl: bad open cnt on deleg"));
2866 				nfscl_freeopen(op, 1);
2867 			}
2868 			nfscl_freeopenowner(owp, 1);
2869 		}
2870 	}
2871 
2872 	/* Now process the opens against the server. */
2873 lookformore:
2874 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2875 		op = LIST_FIRST(&owp->nfsow_open);
2876 		while (op != NULL) {
2877 			if (op->nfso_fhlen == nfhp->nfh_len &&
2878 			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
2879 			    nfhp->nfh_len)) {
2880 				/* Found an open, close it. */
2881 				KASSERT((op->nfso_opencnt == 0),
2882 				    ("nfscl: bad open cnt on server"));
2883 				NFSUNLOCKCLSTATE();
2884 				nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
2885 				    p);
2886 				NFSLOCKCLSTATE();
2887 				goto lookformore;
2888 			}
2889 			op = LIST_NEXT(op, nfso_list);
2890 		}
2891 	}
2892 	NFSUNLOCKCLSTATE();
2893 	return (0);
2894 }
2895 
2896 /*
2897  * Return all delegations on this client.
2898  * (Must be called with client sleep lock.)
2899  */
2900 static void
2901 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
2902 {
2903 	struct nfscldeleg *dp, *ndp;
2904 	struct ucred *cred;
2905 
2906 	cred = newnfs_getcred();
2907 	TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
2908 		nfscl_cleandeleg(dp);
2909 		(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2910 		nfscl_freedeleg(&clp->nfsc_deleg, dp);
2911 	}
2912 	NFSFREECRED(cred);
2913 }
2914 
2915 /*
2916  * Do a callback RPC.
2917  */
2918 APPLESTATIC void
2919 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
2920 {
2921 	int i, op;
2922 	u_int32_t *tl;
2923 	struct nfsclclient *clp;
2924 	struct nfscldeleg *dp = NULL;
2925 	int numops, taglen = -1, error = 0, trunc, ret = 0;
2926 	u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident;
2927 	u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
2928 	vnode_t vp = NULL;
2929 	struct nfsnode *np;
2930 	struct vattr va;
2931 	struct nfsfh *nfhp;
2932 	mount_t mp;
2933 	nfsattrbit_t attrbits, rattrbits;
2934 	nfsv4stateid_t stateid;
2935 
2936 	nfsrvd_rephead(nd);
2937 	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
2938 	taglen = fxdr_unsigned(int, *tl);
2939 	if (taglen < 0) {
2940 		error = EBADRPC;
2941 		goto nfsmout;
2942 	}
2943 	if (taglen <= NFSV4_SMALLSTR)
2944 		tagstr = tag;
2945 	else
2946 		tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
2947 	error = nfsrv_mtostr(nd, tagstr, taglen);
2948 	if (error) {
2949 		if (taglen > NFSV4_SMALLSTR)
2950 			free(tagstr, M_TEMP);
2951 		taglen = -1;
2952 		goto nfsmout;
2953 	}
2954 	(void) nfsm_strtom(nd, tag, taglen);
2955 	if (taglen > NFSV4_SMALLSTR) {
2956 		free(tagstr, M_TEMP);
2957 	}
2958 	NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
2959 	NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2960 	minorvers = fxdr_unsigned(u_int32_t, *tl++);
2961 	if (minorvers != NFSV4_MINORVERSION)
2962 		nd->nd_repstat = NFSERR_MINORVERMISMATCH;
2963 	cbident = fxdr_unsigned(u_int32_t, *tl++);
2964 	if (nd->nd_repstat)
2965 		numops = 0;
2966 	else
2967 		numops = fxdr_unsigned(int, *tl);
2968 	/*
2969 	 * Loop around doing the sub ops.
2970 	 */
2971 	for (i = 0; i < numops; i++) {
2972 		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
2973 		NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
2974 		*repp++ = *tl;
2975 		op = fxdr_unsigned(int, *tl);
2976 		if (op < NFSV4OP_CBGETATTR || op > NFSV4OP_CBRECALL) {
2977 		    nd->nd_repstat = NFSERR_OPILLEGAL;
2978 		    *repp = nfscl_errmap(nd);
2979 		    retops++;
2980 		    break;
2981 		}
2982 		nd->nd_procnum = op;
2983 		newnfsstats.cbrpccnt[nd->nd_procnum]++;
2984 		switch (op) {
2985 		case NFSV4OP_CBGETATTR:
2986 			clp = NULL;
2987 			error = nfsm_getfh(nd, &nfhp);
2988 			if (!error)
2989 				error = nfsrv_getattrbits(nd, &attrbits,
2990 				    NULL, NULL);
2991 			if (!error) {
2992 				mp = nfscl_getmnt(cbident);
2993 				if (mp == NULL)
2994 					error = NFSERR_SERVERFAULT;
2995 			}
2996 			if (!error) {
2997 				dp = NULL;
2998 				NFSLOCKCLSTATE();
2999 				clp = nfscl_findcl(VFSTONFS(mp));
3000 				if (clp != NULL)
3001 					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3002 					    nfhp->nfh_len);
3003 				NFSUNLOCKCLSTATE();
3004 				if (dp == NULL)
3005 					error = NFSERR_SERVERFAULT;
3006 			}
3007 			if (!error) {
3008 				ret = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3009 				    nfhp->nfh_len, p, &np);
3010 				if (!ret)
3011 					vp = NFSTOV(np);
3012 			}
3013 			if (nfhp != NULL)
3014 				FREE((caddr_t)nfhp, M_NFSFH);
3015 			if (!error) {
3016 				NFSZERO_ATTRBIT(&rattrbits);
3017 				if (NFSISSET_ATTRBIT(&attrbits,
3018 				    NFSATTRBIT_SIZE)) {
3019 					if (!ret)
3020 						va.va_size = np->n_size;
3021 					else
3022 						va.va_size = dp->nfsdl_size;
3023 					NFSSETBIT_ATTRBIT(&rattrbits,
3024 					    NFSATTRBIT_SIZE);
3025 				}
3026 				if (NFSISSET_ATTRBIT(&attrbits,
3027 				    NFSATTRBIT_CHANGE)) {
3028 					va.va_filerev = dp->nfsdl_change;
3029 					if (ret || (np->n_flag & NDELEGMOD))
3030 						va.va_filerev++;
3031 					NFSSETBIT_ATTRBIT(&rattrbits,
3032 					    NFSATTRBIT_CHANGE);
3033 				}
3034 				(void) nfsv4_fillattr(nd, NULL, NULL, &va,
3035 				    NULL, 0, &rattrbits, NULL, NULL, 0, 0);
3036 				if (!ret)
3037 					vrele(vp);
3038 			}
3039 			break;
3040 		case NFSV4OP_CBRECALL:
3041 			clp = NULL;
3042 			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3043 			    NFSX_UNSIGNED);
3044 			stateid.seqid = *tl++;
3045 			NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3046 			    NFSX_STATEIDOTHER);
3047 			tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3048 			trunc = fxdr_unsigned(int, *tl);
3049 			error = nfsm_getfh(nd, &nfhp);
3050 			if (!error) {
3051 				mp = nfscl_getmnt(cbident);
3052 				if (mp == NULL)
3053 					error = NFSERR_SERVERFAULT;
3054 			}
3055 			if (!error) {
3056 				NFSLOCKCLSTATE();
3057 				clp = nfscl_findcl(VFSTONFS(mp));
3058 				if (clp != NULL) {
3059 					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3060 					    nfhp->nfh_len);
3061 					if (dp != NULL) {
3062 						dp->nfsdl_flags |=
3063 						    NFSCLDL_RECALL;
3064 						wakeup((caddr_t)clp);
3065 					}
3066 				} else {
3067 					error = NFSERR_SERVERFAULT;
3068 				}
3069 				NFSUNLOCKCLSTATE();
3070 			}
3071 			if (nfhp != NULL)
3072 				FREE((caddr_t)nfhp, M_NFSFH);
3073 			break;
3074 		};
3075 		if (error) {
3076 			if (error == EBADRPC || error == NFSERR_BADXDR) {
3077 				nd->nd_repstat = NFSERR_BADXDR;
3078 			} else {
3079 				nd->nd_repstat = error;
3080 			}
3081 			error = 0;
3082 		}
3083 		retops++;
3084 		if (nd->nd_repstat) {
3085 			*repp = nfscl_errmap(nd);
3086 			break;
3087 		} else
3088 			*repp = 0;	/* NFS4_OK */
3089 	}
3090 nfsmout:
3091 	if (error) {
3092 		if (error == EBADRPC || error == NFSERR_BADXDR)
3093 			nd->nd_repstat = NFSERR_BADXDR;
3094 		else
3095 			printf("nfsv4 comperr1=%d\n", error);
3096 	}
3097 	if (taglen == -1) {
3098 		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3099 		*tl++ = 0;
3100 		*tl = 0;
3101 	} else {
3102 		*retopsp = txdr_unsigned(retops);
3103 	}
3104 	*nd->nd_errp = nfscl_errmap(nd);
3105 }
3106 
3107 /*
3108  * Generate the next cbident value. Basically just increment a static value
3109  * and then check that it isn't already in the list, if it has wrapped around.
3110  */
3111 static u_int32_t
3112 nfscl_nextcbident(void)
3113 {
3114 	struct nfsclclient *clp;
3115 	int matched;
3116 	static u_int32_t nextcbident = 0;
3117 	static int haswrapped = 0;
3118 
3119 	nextcbident++;
3120 	if (nextcbident == 0)
3121 		haswrapped = 1;
3122 	if (haswrapped) {
3123 		/*
3124 		 * Search the clientid list for one already using this cbident.
3125 		 */
3126 		do {
3127 			matched = 0;
3128 			NFSLOCKCLSTATE();
3129 			LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3130 				if (clp->nfsc_cbident == nextcbident) {
3131 					matched = 1;
3132 					break;
3133 				}
3134 			}
3135 			NFSUNLOCKCLSTATE();
3136 			if (matched == 1)
3137 				nextcbident++;
3138 		} while (matched);
3139 	}
3140 	return (nextcbident);
3141 }
3142 
3143 /*
3144  * Get the mount point related to a given cbident.
3145  */
3146 static mount_t
3147 nfscl_getmnt(u_int32_t cbident)
3148 {
3149 	struct nfsclclient *clp;
3150 	struct nfsmount *nmp;
3151 
3152 	NFSLOCKCLSTATE();
3153 	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3154 		if (clp->nfsc_cbident == cbident)
3155 			break;
3156 	}
3157 	if (clp == NULL) {
3158 		NFSUNLOCKCLSTATE();
3159 		return (NULL);
3160 	}
3161 	nmp = clp->nfsc_nmp;
3162 	NFSUNLOCKCLSTATE();
3163 	return (nmp->nm_mountp);
3164 }
3165 
3166 /*
3167  * Search for a lock conflict locally on the client. A conflict occurs if
3168  * - not same owner and overlapping byte range and at least one of them is
3169  *   a write lock or this is an unlock.
3170  */
3171 static int
3172 nfscl_localconflict(struct nfsclclient *clp, struct nfscllock *nlop,
3173     u_int8_t *own, struct nfscldeleg *dp, struct nfscllock **lopp)
3174 {
3175 	struct nfsclowner *owp;
3176 	struct nfsclopen *op;
3177 	int ret;
3178 
3179 	if (dp != NULL) {
3180 		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3181 		if (ret)
3182 			return (ret);
3183 	}
3184 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3185 		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3186 			ret = nfscl_checkconflict(&op->nfso_lock, nlop, own,
3187 			    lopp);
3188 			if (ret)
3189 				return (ret);
3190 		}
3191 	}
3192 	return (0);
3193 }
3194 
3195 static int
3196 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3197     u_int8_t *own, struct nfscllock **lopp)
3198 {
3199 	struct nfscllockowner *lp;
3200 	struct nfscllock *lop;
3201 
3202 	LIST_FOREACH(lp, lhp, nfsl_list) {
3203 		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3204 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3205 				if (lop->nfslo_first >= nlop->nfslo_end)
3206 					break;
3207 				if (lop->nfslo_end <= nlop->nfslo_first)
3208 					continue;
3209 				if (lop->nfslo_type == F_WRLCK ||
3210 				    nlop->nfslo_type == F_WRLCK ||
3211 				    nlop->nfslo_type == F_UNLCK) {
3212 					if (lopp != NULL)
3213 						*lopp = lop;
3214 					return (NFSERR_DENIED);
3215 				}
3216 			}
3217 		}
3218 	}
3219 	return (0);
3220 }
3221 
3222 /*
3223  * Check for a local conflicting lock.
3224  */
3225 APPLESTATIC int
3226 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3227     u_int64_t len, struct flock *fl, NFSPROC_T *p)
3228 {
3229 	struct nfscllock *lop, nlck;
3230 	struct nfscldeleg *dp;
3231 	struct nfsnode *np;
3232 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
3233 	int error;
3234 
3235 	nlck.nfslo_type = fl->l_type;
3236 	nlck.nfslo_first = off;
3237 	if (len == NFS64BITSSET) {
3238 		nlck.nfslo_end = NFS64BITSSET;
3239 	} else {
3240 		nlck.nfslo_end = off + len;
3241 		if (nlck.nfslo_end <= nlck.nfslo_first)
3242 			return (NFSERR_INVAL);
3243 	}
3244 	np = VTONFS(vp);
3245 	nfscl_filllockowner(p, own);
3246 	NFSLOCKCLSTATE();
3247 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3248 	error = nfscl_localconflict(clp, &nlck, own, dp, &lop);
3249 	if (error == NFSERR_DENIED)
3250 		error = EACCES;
3251 	if (error) {
3252 		fl->l_whence = SEEK_SET;
3253 		fl->l_start = lop->nfslo_first;
3254 		if (lop->nfslo_end == NFS64BITSSET)
3255 			fl->l_len = 0;
3256 		else
3257 			fl->l_len = lop->nfslo_end - lop->nfslo_first;
3258 		fl->l_pid = (pid_t)0;
3259 		fl->l_type = lop->nfslo_type;
3260 	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3261 	    fl->l_type == F_RDLCK)) {
3262 		/*
3263 		 * The delegation ensures that there isn't a conflicting
3264 		 * lock on the server, so return -1 to indicate an RPC
3265 		 * isn't required.
3266 		 */
3267 		fl->l_type = F_UNLCK;
3268 		error = -1;
3269 	}
3270 	NFSUNLOCKCLSTATE();
3271 	return (error);
3272 }
3273 
3274 /*
3275  * Handle Recall of a delegation.
3276  * The clp must be exclusive locked when this is called.
3277  */
3278 static int
3279 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3280     struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p)
3281 {
3282 	struct nfsclowner *owp, *lowp, *nowp;
3283 	struct nfsclopen *op, *lop;
3284 	struct nfscllockowner *lp;
3285 	struct nfscllock *lckp;
3286 	struct nfsnode *np;
3287 	int error = 0, ret, gotvp = 0;
3288 
3289 	if (vp == NULL) {
3290 		/*
3291 		 * First, get a vnode for the file. This is needed to do RPCs.
3292 		 */
3293 		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3294 		    dp->nfsdl_fhlen, p, &np);
3295 		if (ret) {
3296 			/*
3297 			 * File isn't open, so nothing to move over to the
3298 			 * server.
3299 			 */
3300 			return (0);
3301 		}
3302 		vp = NFSTOV(np);
3303 		gotvp = 1;
3304 	} else {
3305 		np = VTONFS(vp);
3306 	}
3307 	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3308 	NFSINVALATTRCACHE(np);
3309 
3310 	/*
3311 	 * Ok, if it's a write delegation, flush data to the server, so
3312 	 * that close/open consistency is retained.
3313 	 */
3314 	NFSLOCKNODE(np);
3315 	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
3316 #ifdef APPLE
3317 		OSBitOrAtomic((u_int32_t)NDELEGRECALL, (UInt32 *)&np->n_flag);
3318 #else
3319 		np->n_flag |= NDELEGRECALL;
3320 #endif
3321 		NFSUNLOCKNODE(np);
3322 		(void) ncl_flush(vp, MNT_WAIT, cred, p, 1);
3323 		NFSLOCKNODE(np);
3324 #ifdef APPLE
3325 		OSBitAndAtomic((int32_t)~(NMODIFIED | NDELEGRECALL), (UInt32 *)&np->n_flag);
3326 #else
3327 		np->n_flag &= ~(NMODIFIED | NDELEGRECALL);
3328 #endif
3329 	}
3330 	NFSUNLOCKNODE(np);
3331 
3332 	/*
3333 	 * Now, for each openowner with opens issued locally, move them
3334 	 * over to state against the server.
3335 	 */
3336 	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3337 		lop = LIST_FIRST(&lowp->nfsow_open);
3338 		if (lop != NULL) {
3339 			if (LIST_NEXT(lop, nfso_list) != NULL)
3340 				panic("nfsdlg mult opens");
3341 			/*
3342 			 * Look for the same openowner against the server.
3343 			 */
3344 			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3345 				if (!NFSBCMP(lowp->nfsow_owner,
3346 				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3347 					newnfs_copycred(&dp->nfsdl_cred, cred);
3348 					ret = nfscl_moveopen(vp, clp, nmp, lop,
3349 					    owp, dp, cred, p);
3350 					if (ret == NFSERR_STALECLIENTID ||
3351 					    ret == NFSERR_STALEDONTRECOVER) {
3352 						if (gotvp)
3353 							vrele(vp);
3354 						return (ret);
3355 					}
3356 					if (ret) {
3357 						nfscl_freeopen(lop, 1);
3358 						if (!error)
3359 							error = ret;
3360 					}
3361 					break;
3362 				}
3363 			}
3364 
3365 			/*
3366 			 * If no openowner found, create one and get an open
3367 			 * for it.
3368 			 */
3369 			if (owp == NULL) {
3370 				MALLOC(nowp, struct nfsclowner *,
3371 				    sizeof (struct nfsclowner), M_NFSCLOWNER,
3372 				    M_WAITOK);
3373 				nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3374 				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3375 				    dp->nfsdl_fhlen, NULL);
3376 				newnfs_copycred(&dp->nfsdl_cred, cred);
3377 				ret = nfscl_moveopen(vp, clp, nmp, lop,
3378 				    owp, dp, cred, p);
3379 				if (ret) {
3380 					nfscl_freeopenowner(owp, 0);
3381 					if (ret == NFSERR_STALECLIENTID ||
3382 					    ret == NFSERR_STALEDONTRECOVER) {
3383 						if (gotvp)
3384 							vrele(vp);
3385 						return (ret);
3386 					}
3387 					if (ret) {
3388 						nfscl_freeopen(lop, 1);
3389 						if (!error)
3390 							error = ret;
3391 					}
3392 				}
3393 			}
3394 		}
3395 	}
3396 
3397 	/*
3398 	 * Now, get byte range locks for any locks done locally.
3399 	 */
3400 	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3401 		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
3402 			newnfs_copycred(&dp->nfsdl_cred, cred);
3403 			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
3404 			if (ret == NFSERR_STALESTATEID ||
3405 			    ret == NFSERR_STALEDONTRECOVER ||
3406 			    ret == NFSERR_STALECLIENTID) {
3407 				if (gotvp)
3408 					vrele(vp);
3409 				return (ret);
3410 			}
3411 			if (ret && !error)
3412 				error = ret;
3413 		}
3414 	}
3415 	if (gotvp)
3416 		vrele(vp);
3417 	return (error);
3418 }
3419 
3420 /*
3421  * Move a locally issued open over to an owner on the state list.
3422  * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
3423  * returns with it unlocked.
3424  */
3425 static int
3426 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3427     struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
3428     struct ucred *cred, NFSPROC_T *p)
3429 {
3430 	struct nfsclopen *op, *nop;
3431 	struct nfscldeleg *ndp;
3432 	struct nfsnode *np;
3433 	int error = 0, newone;
3434 
3435 	/*
3436 	 * First, look for an appropriate open, If found, just increment the
3437 	 * opencnt in it.
3438 	 */
3439 	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3440 		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
3441 		    op->nfso_fhlen == lop->nfso_fhlen &&
3442 		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
3443 			op->nfso_opencnt += lop->nfso_opencnt;
3444 			nfscl_freeopen(lop, 1);
3445 			return (0);
3446 		}
3447 	}
3448 
3449 	/* No appropriate open, so we have to do one against the server. */
3450 	np = VTONFS(vp);
3451 	MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
3452 	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
3453 	newone = 0;
3454 	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
3455 	    lop->nfso_fh, lop->nfso_fhlen, &newone);
3456 	ndp = dp;
3457 	error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
3458 	    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
3459 	    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
3460 	if (error) {
3461 		if (newone)
3462 			nfscl_freeopen(op, 0);
3463 	} else {
3464 		if (newone)
3465 			newnfs_copyincred(cred, &op->nfso_cred);
3466 		op->nfso_mode |= lop->nfso_mode;
3467 		op->nfso_opencnt += lop->nfso_opencnt;
3468 		nfscl_freeopen(lop, 1);
3469 	}
3470 	if (nop != NULL)
3471 		FREE((caddr_t)nop, M_NFSCLOPEN);
3472 	if (ndp != NULL) {
3473 		/*
3474 		 * What should I do with the returned delegation, since the
3475 		 * delegation is being recalled? For now, just printf and
3476 		 * through it away.
3477 		 */
3478 		printf("Moveopen returned deleg\n");
3479 		FREE((caddr_t)ndp, M_NFSCLDELEG);
3480 	}
3481 	return (error);
3482 }
3483 
3484 /*
3485  * Recall all delegations on this client.
3486  */
3487 static void
3488 nfscl_totalrecall(struct nfsclclient *clp)
3489 {
3490 	struct nfscldeleg *dp;
3491 
3492 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
3493 		dp->nfsdl_flags |= NFSCLDL_RECALL;
3494 }
3495 
3496 /*
3497  * Relock byte ranges. Called for delegation recall and state expiry.
3498  */
3499 static int
3500 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3501     struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
3502     NFSPROC_T *p)
3503 {
3504 	struct nfscllockowner *nlp;
3505 	struct nfsfh *nfhp;
3506 	u_int64_t off, len;
3507 	u_int32_t clidrev = 0;
3508 	int error, newone, donelocally;
3509 
3510 	off = lop->nfslo_first;
3511 	len = lop->nfslo_end - lop->nfslo_first;
3512 	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
3513 	    clp, 1, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
3514 	    &donelocally);
3515 	if (error || donelocally)
3516 		return (error);
3517 	if (nmp->nm_clp != NULL)
3518 		clidrev = nmp->nm_clp->nfsc_clientidrev;
3519 	else
3520 		clidrev = 0;
3521 	nfhp = VTONFS(vp)->n_fhp;
3522 	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
3523 	    nfhp->nfh_len, nlp, newone, 0, off,
3524 	    len, lop->nfslo_type, cred, p);
3525 	if (error)
3526 		nfscl_freelockowner(nlp, 0);
3527 	return (error);
3528 }
3529 
3530 /*
3531  * Called to re-open a file. Basically get a vnode for the file handle
3532  * and then call nfsrpc_openrpc() to do the rest.
3533  */
3534 static int
3535 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
3536     u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
3537     struct ucred *cred, NFSPROC_T *p)
3538 {
3539 	struct nfsnode *np;
3540 	vnode_t vp;
3541 	int error;
3542 
3543 	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
3544 	if (error)
3545 		return (error);
3546 	vp = NFSTOV(np);
3547 	if (np->n_v4 != NULL) {
3548 		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
3549 		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
3550 		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
3551 		    cred, p);
3552 	} else {
3553 		error = EINVAL;
3554 	}
3555 	vrele(vp);
3556 	return (error);
3557 }
3558 
3559 /*
3560  * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
3561  * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
3562  * fail.
3563  */
3564 static int
3565 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
3566     u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
3567     u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
3568     int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
3569 {
3570 	int error;
3571 
3572 	do {
3573 		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
3574 		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
3575 		    0, 0);
3576 		if (error == NFSERR_DELAY)
3577 			(void) nfs_catnap(PZERO, "nfstryop");
3578 	} while (error == NFSERR_DELAY);
3579 	if (error == EAUTH || error == EACCES) {
3580 		/* Try again using system credentials */
3581 		newnfs_setroot(cred);
3582 		do {
3583 		    error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
3584 			newfhlen, mode, op, name, namelen, ndpp, reclaim,
3585 			delegtype, cred, p, 1, 0);
3586 		    if (error == NFSERR_DELAY)
3587 			(void) nfs_catnap(PZERO, "nfstryop");
3588 		} while (error == NFSERR_DELAY);
3589 	}
3590 	return (error);
3591 }
3592 
3593 /*
3594  * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
3595  * NFSERR_DELAY. Also, retry with system credentials, if the provided
3596  * cred don't work.
3597  */
3598 static int
3599 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
3600     int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
3601     u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
3602 {
3603 	struct nfsrv_descript nfsd, *nd = &nfsd;
3604 	int error;
3605 
3606 	do {
3607 		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
3608 		    reclaim, off, len, type, cred, p, 0);
3609 		if (!error && nd->nd_repstat == NFSERR_DELAY)
3610 			(void) nfs_catnap(PZERO, "nfstrylck");
3611 	} while (!error && nd->nd_repstat == NFSERR_DELAY);
3612 	if (!error)
3613 		error = nd->nd_repstat;
3614 	if (error == EAUTH || error == EACCES) {
3615 		/* Try again using root credentials */
3616 		newnfs_setroot(cred);
3617 		do {
3618 			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
3619 			    newone, reclaim, off, len, type, cred, p, 1);
3620 			if (!error && nd->nd_repstat == NFSERR_DELAY)
3621 				(void) nfs_catnap(PZERO, "nfstrylck");
3622 		} while (!error && nd->nd_repstat == NFSERR_DELAY);
3623 		if (!error)
3624 			error = nd->nd_repstat;
3625 	}
3626 	return (error);
3627 }
3628 
3629 /*
3630  * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
3631  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3632  * credentials fail.
3633  */
3634 static int
3635 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
3636     struct nfsmount *nmp, NFSPROC_T *p)
3637 {
3638 	int error;
3639 
3640 	do {
3641 		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
3642 		if (error == NFSERR_DELAY)
3643 			(void) nfs_catnap(PZERO, "nfstrydp");
3644 	} while (error == NFSERR_DELAY);
3645 	if (error == EAUTH || error == EACCES) {
3646 		/* Try again using system credentials */
3647 		newnfs_setroot(cred);
3648 		do {
3649 			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
3650 			if (error == NFSERR_DELAY)
3651 				(void) nfs_catnap(PZERO, "nfstrydp");
3652 		} while (error == NFSERR_DELAY);
3653 	}
3654 	return (error);
3655 }
3656 
3657 /*
3658  * Try a close against the server. Just call nfsrpc_closerpc(),
3659  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3660  * credentials fail.
3661  */
3662 APPLESTATIC int
3663 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
3664     struct nfsmount *nmp, NFSPROC_T *p)
3665 {
3666 	struct nfsrv_descript nfsd, *nd = &nfsd;
3667 	int error;
3668 
3669 	do {
3670 		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
3671 		if (error == NFSERR_DELAY)
3672 			(void) nfs_catnap(PZERO, "nfstrycl");
3673 	} while (error == NFSERR_DELAY);
3674 	if (error == EAUTH || error == EACCES) {
3675 		/* Try again using system credentials */
3676 		newnfs_setroot(cred);
3677 		do {
3678 			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
3679 			if (error == NFSERR_DELAY)
3680 				(void) nfs_catnap(PZERO, "nfstrycl");
3681 		} while (error == NFSERR_DELAY);
3682 	}
3683 	return (error);
3684 }
3685 
3686 /*
3687  * Decide if a delegation on a file permits close without flushing writes
3688  * to the server. This might be a big performance win in some environments.
3689  * (Not useful until the client does caching on local stable storage.)
3690  */
3691 APPLESTATIC int
3692 nfscl_mustflush(vnode_t vp)
3693 {
3694 	struct nfsclclient *clp;
3695 	struct nfscldeleg *dp;
3696 	struct nfsnode *np;
3697 	struct nfsmount *nmp;
3698 
3699 	np = VTONFS(vp);
3700 	nmp = VFSTONFS(vnode_mount(vp));
3701 	if (!NFSHASNFSV4(nmp))
3702 		return (1);
3703 	NFSLOCKCLSTATE();
3704 	clp = nfscl_findcl(nmp);
3705 	if (clp == NULL) {
3706 		NFSUNLOCKCLSTATE();
3707 		return (1);
3708 	}
3709 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3710 	if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_RECALL))
3711 	     == NFSCLDL_WRITE &&
3712 	    (dp->nfsdl_sizelimit >= np->n_size ||
3713 	     !NFSHASSTRICT3530(nmp))) {
3714 		NFSUNLOCKCLSTATE();
3715 		return (0);
3716 	}
3717 	NFSUNLOCKCLSTATE();
3718 	return (1);
3719 }
3720 
3721 /*
3722  * See if a (write) delegation exists for this file.
3723  */
3724 APPLESTATIC int
3725 nfscl_nodeleg(vnode_t vp, int writedeleg)
3726 {
3727 	struct nfsclclient *clp;
3728 	struct nfscldeleg *dp;
3729 	struct nfsnode *np;
3730 	struct nfsmount *nmp;
3731 
3732 	np = VTONFS(vp);
3733 	nmp = VFSTONFS(vnode_mount(vp));
3734 	if (!NFSHASNFSV4(nmp))
3735 		return (1);
3736 	NFSLOCKCLSTATE();
3737 	clp = nfscl_findcl(nmp);
3738 	if (clp == NULL) {
3739 		NFSUNLOCKCLSTATE();
3740 		return (1);
3741 	}
3742 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3743 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_RECALL) == 0 &&
3744 	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE)
3745 	     == NFSCLDL_WRITE)) {
3746 		NFSUNLOCKCLSTATE();
3747 		return (0);
3748 	}
3749 	NFSUNLOCKCLSTATE();
3750 	return (1);
3751 }
3752 
3753 /*
3754  * Look for an associated delegation that should be DelegReturned.
3755  */
3756 APPLESTATIC int
3757 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
3758 {
3759 	struct nfsclclient *clp;
3760 	struct nfscldeleg *dp;
3761 	struct nfsclowner *owp;
3762 	struct nfscllockowner *lp;
3763 	struct nfsmount *nmp;
3764 	struct ucred *cred;
3765 	struct nfsnode *np;
3766 	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3767 
3768 	nmp = VFSTONFS(vnode_mount(vp));
3769 	np = VTONFS(vp);
3770 	NFSLOCKCLSTATE();
3771 	/*
3772 	 * Loop around waiting for:
3773 	 * - outstanding I/O operations on delegations to complete
3774 	 * - for a delegation on vp that has state, lock the client and
3775 	 *   do a recall
3776 	 * - return delegation with no state
3777 	 */
3778 	while (1) {
3779 		clp = nfscl_findcl(nmp);
3780 		if (clp == NULL) {
3781 			NFSUNLOCKCLSTATE();
3782 			return (retcnt);
3783 		}
3784 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3785 		    np->n_fhp->nfh_len);
3786 		if (dp != NULL) {
3787 		    /*
3788 		     * Wait for outstanding I/O ops to be done.
3789 		     */
3790 		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3791 			if (igotlock) {
3792 			    nfsv4_unlock(&clp->nfsc_lock, 0);
3793 			    igotlock = 0;
3794 			}
3795 			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3796 			(void) nfsmsleep(&dp->nfsdl_rwlock,
3797 			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3798 			continue;
3799 		    }
3800 		    needsrecall = 0;
3801 		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3802 			if (!LIST_EMPTY(&owp->nfsow_open)) {
3803 			    needsrecall = 1;
3804 			    break;
3805 			}
3806 		    }
3807 		    if (!needsrecall) {
3808 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3809 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
3810 				needsrecall = 1;
3811 				break;
3812 			    }
3813 			}
3814 		    }
3815 		    if (needsrecall && !triedrecall) {
3816 			islept = 0;
3817 			while (!igotlock) {
3818 			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
3819 				&islept, NFSCLSTATEMUTEXPTR);
3820 			    if (islept)
3821 				break;
3822 			}
3823 			if (islept)
3824 			    continue;
3825 			NFSUNLOCKCLSTATE();
3826 			cred = newnfs_getcred();
3827 			newnfs_copycred(&dp->nfsdl_cred, cred);
3828 			(void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p);
3829 			NFSFREECRED(cred);
3830 			triedrecall = 1;
3831 			NFSLOCKCLSTATE();
3832 			nfsv4_unlock(&clp->nfsc_lock, 0);
3833 			igotlock = 0;
3834 			continue;
3835 		    }
3836 		    *stp = dp->nfsdl_stateid;
3837 		    retcnt = 1;
3838 		    nfscl_cleandeleg(dp);
3839 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
3840 		}
3841 		if (igotlock)
3842 		    nfsv4_unlock(&clp->nfsc_lock, 0);
3843 		NFSUNLOCKCLSTATE();
3844 		return (retcnt);
3845 	}
3846 }
3847 
3848 /*
3849  * Look for associated delegation(s) that should be DelegReturned.
3850  */
3851 APPLESTATIC int
3852 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
3853     nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
3854 {
3855 	struct nfsclclient *clp;
3856 	struct nfscldeleg *dp;
3857 	struct nfsclowner *owp;
3858 	struct nfscllockowner *lp;
3859 	struct nfsmount *nmp;
3860 	struct ucred *cred;
3861 	struct nfsnode *np;
3862 	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3863 
3864 	nmp = VFSTONFS(vnode_mount(fvp));
3865 	*gotfdp = 0;
3866 	*gottdp = 0;
3867 	NFSLOCKCLSTATE();
3868 	/*
3869 	 * Loop around waiting for:
3870 	 * - outstanding I/O operations on delegations to complete
3871 	 * - for a delegation on fvp that has state, lock the client and
3872 	 *   do a recall
3873 	 * - return delegation(s) with no state.
3874 	 */
3875 	while (1) {
3876 		clp = nfscl_findcl(nmp);
3877 		if (clp == NULL) {
3878 			NFSUNLOCKCLSTATE();
3879 			return (retcnt);
3880 		}
3881 		np = VTONFS(fvp);
3882 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3883 		    np->n_fhp->nfh_len);
3884 		if (dp != NULL && *gotfdp == 0) {
3885 		    /*
3886 		     * Wait for outstanding I/O ops to be done.
3887 		     */
3888 		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3889 			if (igotlock) {
3890 			    nfsv4_unlock(&clp->nfsc_lock, 0);
3891 			    igotlock = 0;
3892 			}
3893 			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3894 			(void) nfsmsleep(&dp->nfsdl_rwlock,
3895 			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3896 			continue;
3897 		    }
3898 		    needsrecall = 0;
3899 		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3900 			if (!LIST_EMPTY(&owp->nfsow_open)) {
3901 			    needsrecall = 1;
3902 			    break;
3903 			}
3904 		    }
3905 		    if (!needsrecall) {
3906 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3907 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
3908 				needsrecall = 1;
3909 				break;
3910 			    }
3911 			}
3912 		    }
3913 		    if (needsrecall && !triedrecall) {
3914 			islept = 0;
3915 			while (!igotlock) {
3916 			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
3917 				&islept, NFSCLSTATEMUTEXPTR);
3918 			    if (islept)
3919 				break;
3920 			}
3921 			if (islept)
3922 			    continue;
3923 			NFSUNLOCKCLSTATE();
3924 			cred = newnfs_getcred();
3925 			newnfs_copycred(&dp->nfsdl_cred, cred);
3926 			(void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p);
3927 			NFSFREECRED(cred);
3928 			triedrecall = 1;
3929 			NFSLOCKCLSTATE();
3930 			nfsv4_unlock(&clp->nfsc_lock, 0);
3931 			igotlock = 0;
3932 			continue;
3933 		    }
3934 		    *fstp = dp->nfsdl_stateid;
3935 		    retcnt++;
3936 		    *gotfdp = 1;
3937 		    nfscl_cleandeleg(dp);
3938 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
3939 		}
3940 		if (igotlock) {
3941 		    nfsv4_unlock(&clp->nfsc_lock, 0);
3942 		    igotlock = 0;
3943 		}
3944 		if (tvp != NULL) {
3945 		    np = VTONFS(tvp);
3946 		    dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3947 			np->n_fhp->nfh_len);
3948 		    if (dp != NULL && *gottdp == 0) {
3949 			/*
3950 			 * Wait for outstanding I/O ops to be done.
3951 			 */
3952 			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3953 			    dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3954 			    (void) nfsmsleep(&dp->nfsdl_rwlock,
3955 				NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3956 			    continue;
3957 			}
3958 			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3959 			    if (!LIST_EMPTY(&owp->nfsow_open)) {
3960 				NFSUNLOCKCLSTATE();
3961 				return (retcnt);
3962 			    }
3963 			}
3964 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3965 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
3966 				NFSUNLOCKCLSTATE();
3967 				return (retcnt);
3968 			    }
3969 			}
3970 			*tstp = dp->nfsdl_stateid;
3971 			retcnt++;
3972 			*gottdp = 1;
3973 			nfscl_cleandeleg(dp);
3974 			nfscl_freedeleg(&clp->nfsc_deleg, dp);
3975 		    }
3976 		}
3977 		NFSUNLOCKCLSTATE();
3978 		return (retcnt);
3979 	}
3980 }
3981 
3982 /*
3983  * Get a reference on the clientid associated with the mount point.
3984  * Return 1 if success, 0 otherwise.
3985  */
3986 APPLESTATIC int
3987 nfscl_getref(struct nfsmount *nmp)
3988 {
3989 	struct nfsclclient *clp;
3990 
3991 	NFSLOCKCLSTATE();
3992 	clp = nfscl_findcl(nmp);
3993 	if (clp == NULL) {
3994 		NFSUNLOCKCLSTATE();
3995 		return (0);
3996 	}
3997 	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR);
3998 	NFSUNLOCKCLSTATE();
3999 	return (1);
4000 }
4001 
4002 /*
4003  * Release a reference on a clientid acquired with the above call.
4004  */
4005 APPLESTATIC void
4006 nfscl_relref(struct nfsmount *nmp)
4007 {
4008 	struct nfsclclient *clp;
4009 
4010 	NFSLOCKCLSTATE();
4011 	clp = nfscl_findcl(nmp);
4012 	if (clp == NULL) {
4013 		NFSUNLOCKCLSTATE();
4014 		return;
4015 	}
4016 	nfsv4_relref(&clp->nfsc_lock);
4017 	NFSUNLOCKCLSTATE();
4018 }
4019 
4020 /*
4021  * Save the size attribute in the delegation, since the nfsnode
4022  * is going away.
4023  */
4024 APPLESTATIC void
4025 nfscl_reclaimnode(vnode_t vp)
4026 {
4027 	struct nfsclclient *clp;
4028 	struct nfscldeleg *dp;
4029 	struct nfsnode *np = VTONFS(vp);
4030 	struct nfsmount *nmp;
4031 
4032 	nmp = VFSTONFS(vnode_mount(vp));
4033 	if (!NFSHASNFSV4(nmp))
4034 		return;
4035 	NFSLOCKCLSTATE();
4036 	clp = nfscl_findcl(nmp);
4037 	if (clp == NULL) {
4038 		NFSUNLOCKCLSTATE();
4039 		return;
4040 	}
4041 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4042 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4043 		dp->nfsdl_size = np->n_size;
4044 	NFSUNLOCKCLSTATE();
4045 }
4046 
4047 /*
4048  * Get the saved size attribute in the delegation, since it is a
4049  * newly allocated nfsnode.
4050  */
4051 APPLESTATIC void
4052 nfscl_newnode(vnode_t vp)
4053 {
4054 	struct nfsclclient *clp;
4055 	struct nfscldeleg *dp;
4056 	struct nfsnode *np = VTONFS(vp);
4057 	struct nfsmount *nmp;
4058 
4059 	nmp = VFSTONFS(vnode_mount(vp));
4060 	if (!NFSHASNFSV4(nmp))
4061 		return;
4062 	NFSLOCKCLSTATE();
4063 	clp = nfscl_findcl(nmp);
4064 	if (clp == NULL) {
4065 		NFSUNLOCKCLSTATE();
4066 		return;
4067 	}
4068 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4069 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4070 		np->n_size = dp->nfsdl_size;
4071 	NFSUNLOCKCLSTATE();
4072 }
4073 
4074 /*
4075  * If there is a valid write delegation for this file, set the modtime
4076  * to the local clock time.
4077  */
4078 APPLESTATIC void
4079 nfscl_delegmodtime(vnode_t vp)
4080 {
4081 	struct nfsclclient *clp;
4082 	struct nfscldeleg *dp;
4083 	struct nfsnode *np = VTONFS(vp);
4084 	struct nfsmount *nmp;
4085 
4086 	nmp = VFSTONFS(vnode_mount(vp));
4087 	if (!NFSHASNFSV4(nmp))
4088 		return;
4089 	NFSLOCKCLSTATE();
4090 	clp = nfscl_findcl(nmp);
4091 	if (clp == NULL) {
4092 		NFSUNLOCKCLSTATE();
4093 		return;
4094 	}
4095 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4096 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4097 		NFSGETNANOTIME(&dp->nfsdl_modtime);
4098 		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4099 	}
4100 	NFSUNLOCKCLSTATE();
4101 }
4102 
4103 /*
4104  * If there is a valid write delegation for this file with a modtime set,
4105  * put that modtime in mtime.
4106  */
4107 APPLESTATIC void
4108 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4109 {
4110 	struct nfsclclient *clp;
4111 	struct nfscldeleg *dp;
4112 	struct nfsnode *np = VTONFS(vp);
4113 	struct nfsmount *nmp;
4114 
4115 	nmp = VFSTONFS(vnode_mount(vp));
4116 	if (!NFSHASNFSV4(nmp))
4117 		return;
4118 	NFSLOCKCLSTATE();
4119 	clp = nfscl_findcl(nmp);
4120 	if (clp == NULL) {
4121 		NFSUNLOCKCLSTATE();
4122 		return;
4123 	}
4124 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4125 	if (dp != NULL &&
4126 	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4127 	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4128 		*mtime = dp->nfsdl_modtime;
4129 	NFSUNLOCKCLSTATE();
4130 }
4131 
4132 static int
4133 nfscl_errmap(struct nfsrv_descript *nd)
4134 {
4135 	short *defaulterrp, *errp;
4136 
4137 	if (!nd->nd_repstat)
4138 		return (0);
4139 	if (nd->nd_procnum == NFSPROC_NOOP)
4140 		return (txdr_unsigned(nd->nd_repstat & 0xffff));
4141 	if (nd->nd_repstat == EBADRPC)
4142 		return (txdr_unsigned(NFSERR_BADXDR));
4143 	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4144 	    nd->nd_repstat == NFSERR_OPILLEGAL)
4145 		return (txdr_unsigned(nd->nd_repstat));
4146 	errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4147 	while (*++errp)
4148 		if (*errp == (short)nd->nd_repstat)
4149 			return (txdr_unsigned(nd->nd_repstat));
4150 	return (txdr_unsigned(*defaulterrp));
4151 }
4152 
4153