xref: /freebsd/sys/fs/nfsclient/nfs_clstate.c (revision 4ed925457ab06e83238a5db33e89ccc94b99a713)
1 /*-
2  * Copyright (c) 2009 Rick Macklem, University of Guelph
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * These functions implement the client side state handling for NFSv4.
33  * NFSv4 state handling:
34  * - A lockowner is used to determine lock contention, so it
35  *   corresponds directly to a Posix pid. (1 to 1 mapping)
36  * - The correct granularity of an OpenOwner is not nearly so
37  *   obvious. An OpenOwner does the following:
38  *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
39  *   - is used to check for Open/Share contention (not applicable to
40  *     this client, since all Opens are Deny_None)
41  *   As such, I considered both extremes.
42  *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
43  *   all Open, Close and Lock (with a new lockowner) Ops.
44  *   1 OpenOwner for each Open - This one results in an OpenConfirm for
45  *   every Open, for most servers.
46  *   So, I chose to use the same mapping as I did for LockOwners.
47  *   The main concern here is that you can end up with multiple Opens
48  *   for the same File Handle, but on different OpenOwners (opens
49  *   inherited from parents, grandparents...) and you do not know
50  *   which of these the vnodeop close applies to. This is handled by
51  *   delaying the Close Op(s) until all of the Opens have been closed.
52  *   (It is not yet obvious if this is the correct granularity.)
53  * - How the code handles serialization:
54  *   - For the ClientId, it uses an exclusive lock while getting its
55  *     SetClientId and during recovery. Otherwise, it uses a shared
56  *     lock via a reference count.
57  *   - For the rest of the data structures, it uses an SMP mutex
58  *     (once the nfs client is SMP safe) and doesn't sleep while
59  *     manipulating the linked lists.
60  *   - The serialization of Open/Close/Lock/LockU falls out in the
61  *     "wash", since OpenOwners and LockOwners are both mapped from
62  *     Posix pid. In other words, there is only one Posix pid using
63  *     any given owner, so that owner is serialized. (If you change
64  *     the granularity of the OpenOwner, then code must be added to
65  *     serialize Ops on the OpenOwner.)
66  * - When to get rid of OpenOwners and LockOwners.
67  *   - When a process exits, it calls nfscl_cleanup(), which goes
68  *     through the client list looking for all Open and Lock Owners.
69  *     When one is found, it is marked "defunct" or in the case of
70  *     an OpenOwner without any Opens, freed.
71  *     The renew thread scans for defunct Owners and gets rid of them,
72  *     if it can. The LockOwners will also be deleted when the
73  *     associated Open is closed.
74  *   - If the LockU or Close Op(s) fail during close in a way
75  *     that could be recovered upon retry, they are relinked to the
76  *     ClientId's defunct open list and retried by the renew thread
77  *     until they succeed or an unmount/recovery occurs.
78  *     (Since we are done with them, they do not need to be recovered.)
79  */
80 
81 #ifndef APPLEKEXT
82 #include <fs/nfs/nfsport.h>
83 
84 /*
85  * Global variables
86  */
87 extern struct nfsstats newnfsstats;
88 extern struct nfsreqhead nfsd_reqq;
89 NFSREQSPINLOCK;
90 NFSCLSTATEMUTEX;
91 int nfscl_inited = 0;
92 struct nfsclhead nfsclhead;	/* Head of clientid list */
93 int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
94 #endif	/* !APPLEKEXT */
95 
96 static int nfscl_delegcnt = 0;
97 static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
98     NFSPROC_T *, u_int32_t, struct nfsclowner **, struct nfsclopen **);
99 static void nfscl_clrelease(struct nfsclclient *);
100 static void nfscl_cleanclient(struct nfsclclient *);
101 static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
102     struct ucred *, NFSPROC_T *);
103 static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
104     struct nfsmount *, struct ucred *, NFSPROC_T *);
105 static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
106 static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
107     struct nfscllock *, int);
108 static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
109     struct nfscllock **, int);
110 static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
111 static u_int32_t nfscl_nextcbident(void);
112 static mount_t nfscl_getmnt(u_int32_t);
113 static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
114     int);
115 static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
116     u_int8_t *, struct nfscllock **);
117 static void nfscl_freelockowner(struct nfscllockowner *, int);
118 static void nfscl_freealllocks(struct nfscllockownerhead *, int);
119 static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
120     struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
121 static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
122     struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
123     struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
124 static int nfscl_moveopen(vnode_t , struct nfsclclient *,
125     struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
126     struct nfscldeleg *, struct ucred *, NFSPROC_T *);
127 static void nfscl_totalrecall(struct nfsclclient *);
128 static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
129     struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
130 static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
131     u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
132     struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
133 static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
134     int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
135     struct ucred *, NFSPROC_T *);
136 static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
137     struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
138 static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
139 static int nfscl_errmap(struct nfsrv_descript *);
140 static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
141 static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
142     struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *);
143 static void nfscl_freeopenowner(struct nfsclowner *, int);
144 static void nfscl_cleandeleg(struct nfscldeleg *);
145 static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
146     struct nfsmount *, NFSPROC_T *);
147 
/*
 * Callback error tables: each list holds the errors that may be replied
 * for an NFSv4 callback op and is terminated by a 0 entry.  They are
 * selected per-op via nfscl_cberrmap[] (presumably by nfscl_errmap() -
 * confirm there).  CB_Null can only succeed, so no errors are listed.
 */
148 static short nfscberr_null[] = {
149 	0,
150 	0,
151 };
152 
/* Errors that may be replied for a CB_GETATTR callback (0-terminated). */
153 static short nfscberr_getattr[] = {
154 	NFSERR_RESOURCE,
155 	NFSERR_BADHANDLE,
156 	NFSERR_BADXDR,
157 	NFSERR_RESOURCE,
158 	NFSERR_SERVERFAULT,
159 	0,
160 };
161 
/* Errors that may be replied for a CB_RECALL callback (0-terminated). */
162 static short nfscberr_recall[] = {
163 	NFSERR_RESOURCE,
164 	NFSERR_BADHANDLE,
165 	NFSERR_BADSTATEID,
166 	NFSERR_BADXDR,
167 	NFSERR_RESOURCE,
168 	NFSERR_SERVERFAULT,
169 	0,
170 };
171 
/*
 * Per-op table of the permitted-error lists above.
 * NOTE(review): appears to be indexed by callback procedure number, with
 * the leading entries being null ops - confirm against the NFSV4PROC_CB*
 * definitions before relying on the ordering.
 */
172 static short *nfscl_cberrmap[] = {
173 	nfscberr_null,
174 	nfscberr_null,
175 	nfscberr_null,
176 	nfscberr_getattr,
177 	nfscberr_recall
178 };
179 
/*
 * Evaluates to AF_INET6 when the clientid has NFSCLFLAGS_AFINET6 set,
 * otherwise AF_INET.
 */
180 #define	NETFAMILY(clp) \
181 		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
182 
183 /*
184  * Called for an open operation.
185  * If the nfhp argument is NULL, just get an openowner.
 *
 * Finds (or creates) the openowner for the calling process and, when an
 * nfhp is supplied, the open structure for that file handle.  A matching
 * delegation is preferred when "usedeleg" is set, in which case the open
 * hangs off the delegation.  On success the requested out arguments
 * (*owpp, *opp, *newonep) are filled in and *retp is NFSCLOPEN_OK, or
 * NFSCLOPEN_DOOPEN when the caller must do an Open RPC to get/upgrade
 * the access mode on the server.
186  */
187 APPLESTATIC int
188 nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
189     struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
190     struct nfsclopen **opp, int *newonep, int *retp, int lockit)
191 {
192 	struct nfsclclient *clp;
193 	struct nfsclowner *owp, *nowp;
194 	struct nfsclopen *op = NULL, *nop = NULL;
195 	struct nfscldeleg *dp;
196 	struct nfsclownerhead *ohp;
197 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
198 	int ret;
199 
	/* Initialize the out arguments that were asked for. */
200 	if (newonep != NULL)
201 		*newonep = 0;
202 	if (opp != NULL)
203 		*opp = NULL;
204 	if (owpp != NULL)
205 		*owpp = NULL;
206 
207 	/*
208 	 * Might need one or both of these, so MALLOC them now, to
209 	 * avoid a tsleep() in MALLOC later.
210 	 */
211 	MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
212 	    M_NFSCLOWNER, M_WAITOK);
213 	if (nfhp != NULL)
	    /* The open structure carries the file handle inline. */
214 	    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
215 		fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	/* Get the clientid for this mount, with a reference count on it. */
216 	ret = nfscl_getcl(vp, cred, p, &clp);
217 	if (ret != 0) {
218 		FREE((caddr_t)nowp, M_NFSCLOWNER);
219 		if (nop != NULL)
220 			FREE((caddr_t)nop, M_NFSCLOPEN);
221 		return (ret);
222 	}
223 
224 	/*
225 	 * Get the Open iff it already exists.
226 	 * If none found, add the new one or return error, depending upon
227 	 * "create".
228 	 */
	/* NOTE(review): there is no "create" argument; nfscl_newopen() adds one. */
	/* Map the calling process to the openowner name. */
229 	nfscl_filllockowner(p, own);
230 	NFSLOCKCLSTATE();
231 	dp = NULL;
232 	/* First check the delegation list */
233 	if (nfhp != NULL && usedeleg) {
234 		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
235 			if (dp->nfsdl_fhlen == fhlen &&
236 			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				/*
				 * A write delegation serves any access
				 * mode; a read delegation only serves a
				 * read-only open.
				 */
237 				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
238 				    (dp->nfsdl_flags & NFSCLDL_WRITE))
239 					break;
240 				dp = NULL;
241 				break;
242 			}
243 		}
244 	}
245 
	/* The owner hangs off the delegation when one covers this file. */
246 	if (dp != NULL)
247 		ohp = &dp->nfsdl_owner;
248 	else
249 		ohp = &clp->nfsc_owner;
250 	/* Now, search for an openowner */
251 	LIST_FOREACH(owp, ohp, nfsow_list) {
252 		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
253 			break;
254 	}
255 
256 	/*
257 	 * Create a new open, as required.
258 	 */
259 	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
260 	    newonep);
261 
262 	/*
263 	 * Serialize modifications to the open owner for multiple threads
264 	 * within the same process using a read/write sleep lock.
265 	 */
266 	if (lockit)
267 		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
268 	NFSUNLOCKCLSTATE();
	/* Free whichever pre-allocated structures were not consumed. */
269 	if (nowp != NULL)
270 		FREE((caddr_t)nowp, M_NFSCLOWNER);
271 	if (nop != NULL)
272 		FREE((caddr_t)nop, M_NFSCLOPEN);
273 	if (owpp != NULL)
274 		*owpp = owp;
275 	if (opp != NULL)
276 		*opp = op;
277 	if (retp != NULL)
278 		*retp = NFSCLOPEN_OK;
279 
280 	/*
281 	 * Now, check the mode on the open and return the appropriate
282 	 * value.
283 	 */
284 	if (op != NULL && (amode & ~(op->nfso_mode))) {
285 		op->nfso_mode |= amode;
		/*
		 * The open does not yet cover the requested access mode.
		 * Unless a delegation serves it locally, the caller must
		 * do an Open RPC to upgrade the mode on the server.
		 */
286 		if (retp != NULL && dp == NULL)
287 			*retp = NFSCLOPEN_DOOPEN;
288 	}
289 	return (0);
290 }
291 
292 /*
293  * Create a new open, as required.
 *
 * Called with the NFSv4 client state mutex held (see nfscl_open()).
 * If *owpp is NULL, the pre-allocated owner *nowpp is initialized and
 * linked in, and *nowpp is cleared (ownership transfers).  Similarly,
 * when fhp != NULL and no open for the file handle exists under the
 * owner, the pre-allocated *nopp is consumed.  *newonep is set to 1
 * whenever a new structure was linked in.
294  */
295 static void
296 nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
297     struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
298     struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
299     int *newonep)
300 {
301 	struct nfsclowner *owp = *owpp, *nowp;
302 	struct nfsclopen *op, *nop;
303 
304 	if (nowpp != NULL)
305 		nowp = *nowpp;
306 	else
307 		nowp = NULL;
308 	if (nopp != NULL)
309 		nop = *nopp;
310 	else
311 		nop = NULL;
	/* No openowner yet; consume the pre-allocated one. */
312 	if (owp == NULL && nowp != NULL) {
313 		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
314 		LIST_INIT(&nowp->nfsow_open);
315 		nowp->nfsow_clp = clp;
316 		nowp->nfsow_seqid = 0;
317 		nowp->nfsow_defunct = 0;
318 		nfscl_lockinit(&nowp->nfsow_rwlock);
319 		if (dp != NULL) {
			/* Owners hung off a delegation count as "local". */
320 			newnfsstats.cllocalopenowners++;
321 			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
322 		} else {
323 			newnfsstats.clopenowners++;
324 			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
325 		}
326 		owp = *owpp = nowp;
327 		*nowpp = NULL;
328 		if (newonep != NULL)
329 			*newonep = 1;
330 	}
331 
332 	 /* If an fhp has been specified, create an Open as well. */
333 	if (fhp != NULL) {
334 		/* and look for the correct open, based upon FH */
335 		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
336 			if (op->nfso_fhlen == fhlen &&
337 			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
338 				break;
339 		}
		/* No matching open; consume the pre-allocated one. */
340 		if (op == NULL && nop != NULL) {
341 			nop->nfso_own = owp;
342 			nop->nfso_mode = 0;
343 			nop->nfso_opencnt = 0;
344 			nop->nfso_posixlock = 1;
345 			nop->nfso_fhlen = fhlen;
346 			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
347 			LIST_INIT(&nop->nfso_lock);
			/* Start with the special all-zeros stateid. */
348 			nop->nfso_stateid.seqid = 0;
349 			nop->nfso_stateid.other[0] = 0;
350 			nop->nfso_stateid.other[1] = 0;
351 			nop->nfso_stateid.other[2] = 0;
352 			if (dp != NULL) {
				/*
				 * Move the delegation to the front of the
				 * list and push its timestamp 120 seconds
				 * into the future, since it was just used.
				 */
353 				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
354 				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
355 				    nfsdl_list);
356 				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
357 				newnfsstats.cllocalopens++;
358 			} else {
359 				newnfsstats.clopens++;
360 			}
361 			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
362 			*opp = nop;
363 			*nopp = NULL;
364 			if (newonep != NULL)
365 				*newonep = 1;
366 		} else {
367 			*opp = op;
368 		}
369 	}
370 }
371 
372 /*
373  * Called to find/add a delegation to a client.
 *
 * *dpp (when non-NULL) is a delegation just received from the server.
 * A read delegation on a read/write mount is returned to the server and
 * freed immediately, since it is not considered useful.  Otherwise the
 * new delegation is linked into the client's list and hash; if one
 * already exists for the file handle, the new one is discarded.  When no
 * new delegation was supplied, *dpp is set to the existing one, or
 * NFSERR_BADSTATEID is returned if there is none.
374  */
375 APPLESTATIC int
376 nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
377     int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
378 {
379 	struct nfscldeleg *dp = *dpp, *tdp;
380 
381 	/*
382 	 * First, if we have received a Read delegation for a file on a
383 	 * read/write file system, just return it, because they aren't
384 	 * useful, imho.
385 	 */
386 	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
387 	    (dp->nfsdl_flags & NFSCLDL_READ)) {
388 		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
389 		FREE((caddr_t)dp, M_NFSCLDELEG);
390 		*dpp = NULL;
391 		return (0);
392 	}
393 
394 	/* Look for the correct deleg, based upon FH */
395 	NFSLOCKCLSTATE();
396 	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
397 	if (tdp == NULL) {
398 		if (dp == NULL) {
399 			NFSUNLOCKCLSTATE();
400 			return (NFSERR_BADSTATEID);
401 		}
		/* Link the new delegation into the list and the hash. */
402 		*dpp = NULL;
403 		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
404 		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
405 		    nfsdl_hash);
406 		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
407 		newnfsstats.cldelegates++;
408 		nfscl_delegcnt++;
409 	} else {
410 		/*
411 		 * Delegation already exists, what do we do if a new one??
412 		 */
413 		if (dp != NULL) {
414 			printf("Deleg already exists!\n");
415 			FREE((caddr_t)dp, M_NFSCLDELEG);
416 			*dpp = NULL;
417 		} else {
418 			*dpp = tdp;
419 		}
420 	}
421 	NFSUNLOCKCLSTATE();
422 	return (0);
423 }
424 
425 /*
426  * Find a delegation for this file handle. Return NULL upon failure.
427  */
428 static struct nfscldeleg *
429 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
430 {
431 	struct nfscldeleg *dp;
432 
433 	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
434 	    if (dp->nfsdl_fhlen == fhlen &&
435 		!NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
436 		break;
437 	}
438 	return (dp);
439 }
440 
441 /*
442  * Get a stateid for an I/O operation. First, look for an open and iff
443  * found, return either a lockowner stateid or the open stateid.
444  * If no Open is found, just return error and the special stateid of all zeros.
 *
 * Preference order: a delegation stateid, then a lockowner stateid for
 * the calling process, then an open stateid.  When a delegation stateid
 * is returned, a use count is held on the delegation's rwlock and *lckpp
 * is set so the caller can release it when the I/O completes.
445  */
446 APPLESTATIC int
447 nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
448     struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
449     void **lckpp)
450 {
451 	struct nfsclclient *clp;
452 	struct nfsclowner *owp;
453 	struct nfsclopen *op = NULL;
454 	struct nfscllockowner *lp;
455 	struct nfscldeleg *dp;
456 	struct nfsnode *np;
457 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
458 	int error, done;
459 
460 	*lckpp = NULL;
461 	/*
462 	 * Initially, just set the special stateid of all zeros.
463 	 */
464 	stateidp->seqid = 0;
465 	stateidp->other[0] = 0;
466 	stateidp->other[1] = 0;
467 	stateidp->other[2] = 0;
	/* Open/lock state only exists for regular files. */
468 	if (vnode_vtype(vp) != VREG)
469 		return (EISDIR);
470 	np = VTONFS(vp);
471 	NFSLOCKCLSTATE();
472 	clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
473 	if (clp == NULL) {
474 		NFSUNLOCKCLSTATE();
475 		return (EACCES);
476 	}
477 
478 	/*
479 	 * First, look for a delegation.
480 	 */
481 	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
482 		if (dp->nfsdl_fhlen == fhlen &&
483 		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			/* A read delegation cannot serve a write. */
484 			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
485 			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
486 				stateidp->seqid = dp->nfsdl_stateid.seqid;
487 				stateidp->other[0] = dp->nfsdl_stateid.other[0];
488 				stateidp->other[1] = dp->nfsdl_stateid.other[1];
489 				stateidp->other[2] = dp->nfsdl_stateid.other[2];
490 				if (!(np->n_flag & NDELEGRECALL)) {
					/*
					 * Move the delegation to the front
					 * of the list, refresh its
					 * timestamp and hold a use count on
					 * its rwlock; the caller releases
					 * it via *lckpp when the I/O is
					 * done.
					 */
491 					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
492 					    nfsdl_list);
493 					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
494 					    nfsdl_list);
495 					dp->nfsdl_timestamp = NFSD_MONOSEC +
496 					    120;
497 					dp->nfsdl_rwlock.nfslock_usecnt++;
498 					*lckpp = (void *)&dp->nfsdl_rwlock;
499 				}
500 				NFSUNLOCKCLSTATE();
501 				return (0);
502 			}
503 			break;
504 		}
505 	}
506 
507 	if (p != NULL) {
508 		/*
509 		 * If p != NULL, we want to search the parentage tree
510 		 * for a matching OpenOwner and use that.
511 		 */
512 		nfscl_filllockowner(p, own);
513 		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, NULL, p,
514 		    mode, NULL, &op);
515 		if (error == 0) {
516 			/* now look for a lockowner */
517 			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
518 				if (!NFSBCMP(lp->nfsl_owner, own,
519 				    NFSV4CL_LOCKNAMELEN)) {
520 					stateidp->seqid =
521 					    lp->nfsl_stateid.seqid;
522 					stateidp->other[0] =
523 					    lp->nfsl_stateid.other[0];
524 					stateidp->other[1] =
525 					    lp->nfsl_stateid.other[1];
526 					stateidp->other[2] =
527 					    lp->nfsl_stateid.other[2];
528 					NFSUNLOCKCLSTATE();
529 					return (0);
530 				}
531 			}
532 		}
533 	}
534 	if (op == NULL) {
535 		/* If not found, just look for any OpenOwner that will work. */
536 		done = 0;
537 		owp = LIST_FIRST(&clp->nfsc_owner);
538 		while (!done && owp != NULL) {
539 			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
540 				if (op->nfso_fhlen == fhlen &&
541 				    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
542 				    (mode & op->nfso_mode) == mode) {
543 					done = 1;
544 					break;
545 				}
546 			}
547 			if (!done)
548 				owp = LIST_NEXT(owp, nfsow_list);
549 		}
550 		if (!done) {
551 			NFSUNLOCKCLSTATE();
552 			return (ENOENT);
553 		}
554 		/* for read aheads or write behinds, use the open cred */
555 		newnfs_copycred(&op->nfso_cred, cred);
556 	}
557 
558 	/*
559 	 * No lock stateid, so return the open stateid.
560 	 */
561 	stateidp->seqid = op->nfso_stateid.seqid;
562 	stateidp->other[0] = op->nfso_stateid.other[0];
563 	stateidp->other[1] = op->nfso_stateid.other[1];
564 	stateidp->other[2] = op->nfso_stateid.other[2];
565 	NFSUNLOCKCLSTATE();
566 	return (0);
567 }
568 
569 /*
570  * Get an existing open. Search up the parentage tree for a match and
571  * return with the first one found.
 *
 * The owner to search for is given either by "rown" (an explicit owner
 * name, tried exactly once) or derived from process "p" and, failing
 * that, each of its ancestors in turn.  An open matches when its file
 * handle equals nfhp/fhlen and its access mode covers "mode".  Returns
 * 0 with *opp (and *owpp, when requested) set, otherwise EBADF.
572  */
573 static int
574 nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
575     u_int8_t *rown, NFSPROC_T *p, u_int32_t mode, struct nfsclowner **owpp,
576     struct nfsclopen **opp)
577 {
578 	struct nfsclowner *owp = NULL;
579 	struct nfsclopen *op;
580 	NFSPROC_T *nproc;
581 	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;
582 
583 	nproc = p;
584 	op = NULL;
585 	while (op == NULL && (nproc != NULL || rown != NULL)) {
586 		if (nproc != NULL) {
587 			nfscl_filllockowner(nproc, own);
588 			ownp = own;
589 		} else {
590 			ownp = rown;
591 		}
592 		/* Search the client list */
593 		LIST_FOREACH(owp, ohp, nfsow_list) {
594 			if (!NFSBCMP(owp->nfsow_owner, ownp,
595 			    NFSV4CL_LOCKNAMELEN))
596 				break;
597 		}
598 		if (owp != NULL) {
599 			/* and look for the correct open */
600 			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
601 				if (op->nfso_fhlen == fhlen &&
602 				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
603 				    && (op->nfso_mode & mode) == mode) {
604 					break;
605 				}
606 			}
607 		}
		/* An explicit owner name is only tried once. */
608 		if (rown != NULL)
609 			break;
		/* Otherwise, walk up to the parent process and retry. */
610 		if (op == NULL)
611 			nproc = nfscl_getparent(nproc);
612 	}
613 	if (op == NULL) {
614 		return (EBADF);
615 	}
616 	if (owpp)
617 		*owpp = owp;
618 	*opp = op;
619 	return (0);
620 }
621 
622 /*
623  * Release use of an open owner. Called when open operations are done
624  * with the open owner.
 * A non-zero "unlocked" means the owner's rwlock was already released
 * by the caller, so only the clientid reference is dropped here.
625  */
626 APPLESTATIC void
627 nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
628     __unused int candelete, int unlocked)
629 {
630 
631 	if (owp == NULL)
632 		return;
633 	NFSLOCKCLSTATE();
634 	if (!unlocked)
635 		nfscl_lockunlock(&owp->nfsow_rwlock);
	/* Drop the lock or shared reference held on the clientid. */
636 	nfscl_clrelease(owp->nfsow_clp);
637 	NFSUNLOCKCLSTATE();
638 }
639 
640 /*
641  * Release use of an open structure under an open owner.
 * If the operation failed ("error"), deletion is permitted ("candelete")
 * and nothing else holds the open (nfso_opencnt == 0), the open is
 * freed as well.
642  */
643 APPLESTATIC void
644 nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
645 {
646 	struct nfsclclient *clp;
647 	struct nfsclowner *owp;
648 
649 	if (op == NULL)
650 		return;
651 	NFSLOCKCLSTATE();
652 	owp = op->nfso_own;
	/* Release the openowner's rwlock taken in nfscl_open(). */
653 	nfscl_lockunlock(&owp->nfsow_rwlock);
654 	clp = owp->nfsow_clp;
655 	if (error && candelete && op->nfso_opencnt == 0)
656 		nfscl_freeopen(op, 0);
	/* Drop the lock or shared reference held on the clientid. */
657 	nfscl_clrelease(clp);
658 	NFSUNLOCKCLSTATE();
659 }
660 
661 /*
662  * Called to get a clientid structure. It will optionally lock the
663  * client data structures to do the SetClientId/SetClientId_confirm,
664  * but will release that lock and return the clientid with a reference
665  * count on it.
666  * If the "cred" argument is NULL, a new clientid should not be created.
667  * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
668  * be done.
669  * It always returns *clpp with a reference count on it, unless returning
 * an error.
670  */
671 APPLESTATIC int
672 nfscl_getcl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
673     struct nfsclclient **clpp)
674 {
675 	struct nfsclclient *clp;
676 	struct nfsclclient *newclp = NULL;
677 	struct nfscllockowner *lp, *nlp;
678 	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
679 	char uuid[HOSTUUIDLEN];
680 	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
681 	u_int16_t idlen = 0;
682 
683 	if (cred != NULL) {
		/*
		 * Speculatively allocate a new clientid structure before
		 * taking the mutex, sized for the client id string built
		 * from the host uuid (or random bytes when there is none).
		 */
684 		getcredhostuuid(cred, uuid, sizeof uuid);
685 		idlen = strlen(uuid);
686 		if (idlen > 0)
687 			idlen += sizeof (u_int64_t);
688 		else
689 			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
690 		MALLOC(newclp, struct nfsclclient *,
691 		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
692 		    M_WAITOK);
693 	}
694 	NFSLOCKCLSTATE();
695 	clp = nmp->nm_clp;
696 	if (clp == NULL) {
697 		if (newclp == NULL) {
698 			NFSUNLOCKCLSTATE();
699 			return (EACCES);
700 		}
		/* First clientid for this mount: initialize and link it. */
701 		clp = newclp;
702 		NFSBZERO((caddr_t)clp, sizeof(struct nfsclclient) + idlen - 1);
703 		clp->nfsc_idlen = idlen;
704 		LIST_INIT(&clp->nfsc_owner);
705 		TAILQ_INIT(&clp->nfsc_deleg);
706 		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
707 			LIST_INIT(&clp->nfsc_deleghash[i]);
708 		LIST_INIT(&clp->nfsc_defunctlockowner);
709 		clp->nfsc_flags = NFSCLFLAGS_INITED;
710 		clp->nfsc_clientidrev = 1;
711 		clp->nfsc_cbident = nfscl_nextcbident();
712 		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
713 		    clp->nfsc_idlen);
714 		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
715 		nmp->nm_clp = clp;
716 		clp->nfsc_nmp = nmp;
717 		NFSUNLOCKCLSTATE();
718 		nfscl_start_renewthread(clp);
719 	} else {
720 		NFSUNLOCKCLSTATE();
		/* The mount already has a clientid; discard the spare. */
721 		if (newclp != NULL)
722 			FREE((caddr_t)newclp, M_NFSCLCLIENT);
723 	}
	/*
	 * Take the exclusive lock iff a clientid still has to be
	 * acquired from the server; otherwise just a shared reference.
	 */
724 	NFSLOCKCLSTATE();
725 	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock)
726 		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
727 		    NFSCLSTATEMUTEXPTR);
728 	if (!igotlock)
729 		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR);
730 	NFSUNLOCKCLSTATE();
731 
732 	/*
733 	 * If it needs a clientid, do the setclientid now.
734 	 */
735 	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
736 		if (!igotlock)
737 			panic("nfscl_clget");
738 		if (p == NULL || cred == NULL) {
			/* The RPC cannot be done without a thread and cred. */
739 			NFSLOCKCLSTATE();
740 			nfsv4_unlock(&clp->nfsc_lock, 0);
741 			NFSUNLOCKCLSTATE();
742 			return (EACCES);
743 		}
744 		/* get rid of defunct lockowners */
745 		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
746 		    nlp) {
747 			nfscl_freelockowner(lp, 0);
748 		}
749 		/*
750 		 * If RFC3530 Sec. 14.2.33 is taken literally,
751 		 * NFSERR_CLIDINUSE will be returned persistently for the
752 		 * case where a new mount of the same file system is using
753 		 * a different principal. In practice, NFSERR_CLIDINUSE is
754 		 * only returned when there is outstanding unexpired state
755 		 * on the clientid. As such, try for twice the lease
756 		 * interval, if we know what that is. Otherwise, make a
757 		 * wild ass guess.
758 		 * The case of returning NFSERR_STALECLIENTID is far less
759 		 * likely, but might occur if there is a significant delay
760 		 * between doing the SetClientID and SetClientIDConfirm Ops,
761 		 * such that the server throws away the clientid before
762 		 * receiving the SetClientIDConfirm.
763 		 */
764 		if (clp->nfsc_renew > 0)
765 			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
766 		else
767 			clidinusedelay = 120;
768 		trystalecnt = 3;
769 		do {
770 			error = nfsrpc_setclient(VFSTONFS(vnode_mount(vp)),
771 			    clp, cred, p);
772 			if (error == NFSERR_STALECLIENTID ||
773 			    error == NFSERR_STALEDONTRECOVER ||
774 			    error == NFSERR_CLIDINUSE) {
				/* Pause briefly before retrying. */
775 				(void) nfs_catnap(PZERO, "nfs_setcl");
776 			}
777 		} while (((error == NFSERR_STALECLIENTID ||
778 		     error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
779 		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
780 		if (error) {
781 			NFSLOCKCLSTATE();
782 			nfsv4_unlock(&clp->nfsc_lock, 0);
783 			NFSUNLOCKCLSTATE();
784 			return (error);
785 		}
786 		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
787 	}
788 	if (igotlock) {
		/*
		 * NOTE(review): the "1" argument to nfsv4_unlock()
		 * presumably converts the exclusive lock into the
		 * reference returned to the caller - confirm against
		 * nfsv4_unlock()'s implementation.
		 */
789 		NFSLOCKCLSTATE();
790 		nfsv4_unlock(&clp->nfsc_lock, 1);
791 		NFSUNLOCKCLSTATE();
792 	}
793 
794 	*clpp = clp;
795 	return (0);
796 }
797 
798 /*
799  * Get a reference to a clientid and return it, if valid.
800  */
801 APPLESTATIC struct nfsclclient *
802 nfscl_findcl(struct nfsmount *nmp)
803 {
804 	struct nfsclclient *clp;
805 
806 	clp = nmp->nm_clp;
807 	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
808 		return (NULL);
809 	return (clp);
810 }
811 
812 /*
813  * Release the clientid structure. It may be locked or reference counted.
 * Called with the NFSv4 client state mutex held (see the callers in
 * this file).
814  */
815 static void
816 nfscl_clrelease(struct nfsclclient *clp)
817 {
818 
	/* Exclusively locked during SetClientID/recovery; else a shared ref. */
819 	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
820 		nfsv4_unlock(&clp->nfsc_lock, 0);
821 	else
822 		nfsv4_relref(&clp->nfsc_lock);
823 }
824 
825 /*
826  * External call for nfscl_clrelease.
 * Performs the same release, but acquires the NFSv4 client state mutex
 * itself rather than requiring the caller to hold it.
827  */
828 APPLESTATIC void
829 nfscl_clientrelease(struct nfsclclient *clp)
830 {
831 
832 	NFSLOCKCLSTATE();
	/* Exclusively locked during SetClientID/recovery; else a shared ref. */
833 	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
834 		nfsv4_unlock(&clp->nfsc_lock, 0);
835 	else
836 		nfsv4_relref(&clp->nfsc_lock);
837 	NFSUNLOCKCLSTATE();
838 }
839 
840 /*
841  * Called when wanting to lock a byte region.
842  */
843 APPLESTATIC int
844 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
845     short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
846     int recovery, u_int8_t *rownp, u_int8_t *ropenownp,
847     struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
848 {
849 	struct nfscllockowner *lp;
850 	struct nfsclopen *op;
851 	struct nfsclclient *clp;
852 	struct nfscllockowner *nlp;
853 	struct nfscllock *nlop, *otherlop;
854 	struct nfscldeleg *dp = NULL, *ldp = NULL;
855 	struct nfscllockownerhead *lhp = NULL;
856 	struct nfsnode *np;
857 	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;
858 	int error = 0, ret, donelocally = 0;
859 	u_int32_t mode;
860 
861 	if (type == F_WRLCK)
862 		mode = NFSV4OPEN_ACCESSWRITE;
863 	else
864 		mode = NFSV4OPEN_ACCESSREAD;
865 	np = VTONFS(vp);
866 	*lpp = NULL;
867 	*newonep = 0;
868 	*donelocallyp = 0;
869 
870 	/*
871 	 * Might need these, so MALLOC them now, to
872 	 * avoid a tsleep() in MALLOC later.
873 	 */
874 	MALLOC(nlp, struct nfscllockowner *,
875 	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
876 	MALLOC(otherlop, struct nfscllock *,
877 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
878 	MALLOC(nlop, struct nfscllock *,
879 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
880 	nlop->nfslo_type = type;
881 	nlop->nfslo_first = off;
882 	if (len == NFS64BITSSET) {
883 		nlop->nfslo_end = NFS64BITSSET;
884 	} else {
885 		nlop->nfslo_end = off + len;
886 		if (nlop->nfslo_end <= nlop->nfslo_first)
887 			error = NFSERR_INVAL;
888 	}
889 
890 	if (!error) {
891 		if (recovery)
892 			clp = rclp;
893 		else
894 			error = nfscl_getcl(vp, cred, p, &clp);
895 	}
896 	if (error) {
897 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
898 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
899 		FREE((caddr_t)nlop, M_NFSCLLOCK);
900 		return (error);
901 	}
902 
903 	op = NULL;
904 	if (recovery) {
905 		ownp = rownp;
906 	} else {
907 		nfscl_filllockowner(p, own);
908 		ownp = own;
909 	}
910 	if (!recovery) {
911 		NFSLOCKCLSTATE();
912 		/*
913 		 * First, search for a delegation. If one exists for this file,
914 		 * the lock can be done locally against it, so long as there
915 		 * isn't a local lock conflict.
916 		 */
917 		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
918 		    np->n_fhp->nfh_len);
919 		/* Just sanity check for correct type of delegation */
920 		if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_RECALL) ||
921 		    (type == F_WRLCK && !(dp->nfsdl_flags & NFSCLDL_WRITE))))
922 			dp = NULL;
923 	}
924 	if (dp != NULL) {
925 		/* Now, find the associated open to get the correct openowner */
926 		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
927 		    np->n_fhp->nfh_len, NULL, p, mode, NULL, &op);
928 		if (ret)
929 			ret = nfscl_getopen(&clp->nfsc_owner,
930 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p,
931 			    mode, NULL, &op);
932 		if (!ret) {
933 			lhp = &dp->nfsdl_lock;
934 			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
935 			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
936 			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
937 			donelocally = 1;
938 		} else {
939 			dp = NULL;
940 		}
941 	}
942 	if (!donelocally) {
943 		/*
944 		 * Get the related Open.
945 		 */
946 		if (recovery)
947 			error = nfscl_getopen(&clp->nfsc_owner,
948 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, ropenownp,
949 			    NULL, mode, NULL, &op);
950 		else
951 			error = nfscl_getopen(&clp->nfsc_owner,
952 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p,
953 			    mode, NULL, &op);
954 		if (!error)
955 			lhp = &op->nfso_lock;
956 	}
957 	if (!error && !recovery)
958 		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
959 		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
960 	if (error) {
961 		if (!recovery) {
962 			nfscl_clrelease(clp);
963 			NFSUNLOCKCLSTATE();
964 		}
965 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
966 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
967 		FREE((caddr_t)nlop, M_NFSCLLOCK);
968 		return (error);
969 	}
970 
971 	/*
972 	 * Ok, see if a lockowner exists and create one, as required.
973 	 */
974 	LIST_FOREACH(lp, lhp, nfsl_list) {
975 		if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
976 			break;
977 	}
978 	if (lp == NULL) {
979 		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
980 		if (recovery)
981 			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
982 			    NFSV4CL_LOCKNAMELEN);
983 		else
984 			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
985 			    NFSV4CL_LOCKNAMELEN);
986 		nlp->nfsl_seqid = 0;
987 		nlp->nfsl_defunct = 0;
988 		nlp->nfsl_inprog = NULL;
989 		nfscl_lockinit(&nlp->nfsl_rwlock);
990 		LIST_INIT(&nlp->nfsl_lock);
991 		if (donelocally) {
992 			nlp->nfsl_open = NULL;
993 			newnfsstats.cllocallockowners++;
994 		} else {
995 			nlp->nfsl_open = op;
996 			newnfsstats.cllockowners++;
997 		}
998 		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
999 		lp = nlp;
1000 		nlp = NULL;
1001 		*newonep = 1;
1002 	}
1003 
1004 	/*
1005 	 * Now, update the byte ranges for locks.
1006 	 */
1007 	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1008 	if (!ret)
1009 		donelocally = 1;
1010 	if (donelocally) {
1011 		*donelocallyp = 1;
1012 		if (!recovery)
1013 			nfscl_clrelease(clp);
1014 	} else {
1015 		/*
1016 		 * Serial modifications on the lock owner for multiple threads
1017 		 * for the same process using a read/write lock.
1018 		 */
1019 		if (!recovery)
1020 			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1021 	}
1022 	if (!recovery)
1023 		NFSUNLOCKCLSTATE();
1024 
1025 	if (nlp)
1026 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1027 	if (nlop)
1028 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1029 	if (otherlop)
1030 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
1031 
1032 	*lpp = lp;
1033 	return (0);
1034 }
1035 
1036 /*
1037  * Called to unlock a byte range, for LockU.
1038  */
1039 APPLESTATIC int
1040 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1041     __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1042     struct nfsclclient *clp, struct nfscllockowner **lpp, int *dorpcp)
1043 {
1044 	struct nfscllockowner *lp;
1045 	struct nfsclowner *owp;
1046 	struct nfsclopen *op;
1047 	struct nfscllock *nlop, *other_lop = NULL;
1048 	struct nfscldeleg *dp;
1049 	struct nfsnode *np;
1050 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1051 	int ret = 0, fnd;
1052 
1053 	np = VTONFS(vp);
1054 	*lpp = NULL;
1055 	*dorpcp = 0;
1056 
1057 	/*
1058 	 * Might need these, so MALLOC them now, to
1059 	 * avoid a tsleep() in MALLOC later.
1060 	 */
1061 	MALLOC(nlop, struct nfscllock *,
1062 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1063 	nlop->nfslo_type = F_UNLCK;
1064 	nlop->nfslo_first = off;
1065 	if (len == NFS64BITSSET) {
1066 		nlop->nfslo_end = NFS64BITSSET;
1067 	} else {
1068 		nlop->nfslo_end = off + len;
1069 		if (nlop->nfslo_end <= nlop->nfslo_first) {
1070 			FREE((caddr_t)nlop, M_NFSCLLOCK);
1071 			return (NFSERR_INVAL);
1072 		}
1073 	}
1074 	if (callcnt == 0) {
1075 		MALLOC(other_lop, struct nfscllock *,
1076 		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1077 		*other_lop = *nlop;
1078 	}
1079 	nfscl_filllockowner(p, own);
1080 	dp = NULL;
1081 	NFSLOCKCLSTATE();
1082 	if (callcnt == 0)
1083 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1084 		    np->n_fhp->nfh_len);
1085 
1086 	/*
1087 	 * First, unlock any local regions on a delegation.
1088 	 */
1089 	if (dp != NULL) {
1090 		/* Look for this lockowner. */
1091 		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1092 			if (!NFSBCMP(lp->nfsl_owner, own,
1093 			    NFSV4CL_LOCKNAMELEN))
1094 				break;
1095 		}
1096 		if (lp != NULL)
1097 			/* Use other_lop, so nlop is still available */
1098 			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1099 	}
1100 
1101 	/*
1102 	 * Now, find a matching open/lockowner that hasn't already been done,
1103 	 * as marked by nfsl_inprog.
1104 	 */
1105 	lp = NULL;
1106 	fnd = 0;
1107 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1108 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1109 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1110 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1111 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1112 			if (lp->nfsl_inprog == NULL &&
1113 			    !NFSBCMP(lp->nfsl_owner, own,
1114 			     NFSV4CL_LOCKNAMELEN)) {
1115 				fnd = 1;
1116 				break;
1117 			}
1118 		    }
1119 		    if (fnd)
1120 			break;
1121 		}
1122 	    }
1123 	    if (fnd)
1124 		break;
1125 	}
1126 
1127 	if (lp != NULL) {
1128 		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1129 		if (ret)
1130 			*dorpcp = 1;
1131 		/*
1132 		 * Serial modifications on the lock owner for multiple
1133 		 * threads for the same process using a read/write lock.
1134 		 */
1135 		lp->nfsl_inprog = p;
1136 		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1137 		*lpp = lp;
1138 	}
1139 	NFSUNLOCKCLSTATE();
1140 	if (nlop)
1141 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1142 	if (other_lop)
1143 		FREE((caddr_t)other_lop, M_NFSCLLOCK);
1144 	return (0);
1145 }
1146 
1147 /*
1148  * Release all lockowners marked in progess for this process and file.
1149  */
1150 APPLESTATIC void
1151 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
1152 {
1153 	struct nfsclowner *owp;
1154 	struct nfsclopen *op;
1155 	struct nfscllockowner *lp;
1156 	struct nfsnode *np;
1157 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1158 
1159 	np = VTONFS(vp);
1160 	nfscl_filllockowner(p, own);
1161 	NFSLOCKCLSTATE();
1162 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1163 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1164 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1165 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1166 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1167 			if (lp->nfsl_inprog == p &&
1168 			    !NFSBCMP(lp->nfsl_owner, own,
1169 			    NFSV4CL_LOCKNAMELEN)) {
1170 			    lp->nfsl_inprog = NULL;
1171 			    nfscl_lockunlock(&lp->nfsl_rwlock);
1172 			}
1173 		    }
1174 		}
1175 	    }
1176 	}
1177 	nfscl_clrelease(clp);
1178 	NFSUNLOCKCLSTATE();
1179 }
1180 
1181 /*
1182  * Called to find out if any bytes within the byte range specified are
1183  * write locked by the calling process. Used to determine if flushing
1184  * is required before a LockU.
1185  * If in doubt, return 1, so the flush will occur.
1186  */
1187 APPLESTATIC int
1188 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1189     struct ucred *cred, NFSPROC_T *p)
1190 {
1191 	struct nfsclowner *owp;
1192 	struct nfscllockowner *lp;
1193 	struct nfsclopen *op;
1194 	struct nfsclclient *clp;
1195 	struct nfscllock *lop;
1196 	struct nfscldeleg *dp;
1197 	struct nfsnode *np;
1198 	u_int64_t off, end;
1199 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1200 	int error = 0;
1201 
1202 	np = VTONFS(vp);
1203 	switch (fl->l_whence) {
1204 	case SEEK_SET:
1205 	case SEEK_CUR:
1206 		/*
1207 		 * Caller is responsible for adding any necessary offset
1208 		 * when SEEK_CUR is used.
1209 		 */
1210 		off = fl->l_start;
1211 		break;
1212 	case SEEK_END:
1213 		off = np->n_size + fl->l_start;
1214 		break;
1215 	default:
1216 		return (1);
1217 	};
1218 	if (fl->l_len != 0) {
1219 		end = off + fl->l_len;
1220 		if (end < off)
1221 			return (1);
1222 	} else {
1223 		end = NFS64BITSSET;
1224 	}
1225 
1226 	error = nfscl_getcl(vp, cred, p, &clp);
1227 	if (error)
1228 		return (1);
1229 	nfscl_filllockowner(p, own);
1230 	NFSLOCKCLSTATE();
1231 
1232 	/*
1233 	 * First check the delegation locks.
1234 	 */
1235 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1236 	if (dp != NULL) {
1237 		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1238 			if (!NFSBCMP(lp->nfsl_owner, own,
1239 			    NFSV4CL_LOCKNAMELEN))
1240 				break;
1241 		}
1242 		if (lp != NULL) {
1243 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1244 				if (lop->nfslo_first >= end)
1245 					break;
1246 				if (lop->nfslo_end <= off)
1247 					continue;
1248 				if (lop->nfslo_type == F_WRLCK) {
1249 					nfscl_clrelease(clp);
1250 					NFSUNLOCKCLSTATE();
1251 					return (1);
1252 				}
1253 			}
1254 		}
1255 	}
1256 
1257 	/*
1258 	 * Now, check state against the server.
1259 	 */
1260 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1261 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1262 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1263 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1264 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1265 			if (!NFSBCMP(lp->nfsl_owner, own,
1266 			    NFSV4CL_LOCKNAMELEN))
1267 			    break;
1268 		    }
1269 		    if (lp != NULL) {
1270 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1271 			    if (lop->nfslo_first >= end)
1272 				break;
1273 			    if (lop->nfslo_end <= off)
1274 				continue;
1275 			    if (lop->nfslo_type == F_WRLCK) {
1276 				nfscl_clrelease(clp);
1277 				NFSUNLOCKCLSTATE();
1278 				return (1);
1279 			    }
1280 			}
1281 		    }
1282 		}
1283 	    }
1284 	}
1285 	nfscl_clrelease(clp);
1286 	NFSUNLOCKCLSTATE();
1287 	return (0);
1288 }
1289 
1290 /*
1291  * Release a byte range lock owner structure.
1292  */
1293 APPLESTATIC void
1294 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1295 {
1296 	struct nfsclclient *clp;
1297 
1298 	if (lp == NULL)
1299 		return;
1300 	NFSLOCKCLSTATE();
1301 	clp = lp->nfsl_open->nfso_own->nfsow_clp;
1302 	if (error != 0 && candelete &&
1303 	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1304 		nfscl_freelockowner(lp, 0);
1305 	else
1306 		nfscl_lockunlock(&lp->nfsl_rwlock);
1307 	nfscl_clrelease(clp);
1308 	NFSUNLOCKCLSTATE();
1309 }
1310 
1311 /*
1312  * Free up an open structure and any associated byte range lock structures.
1313  */
1314 APPLESTATIC void
1315 nfscl_freeopen(struct nfsclopen *op, int local)
1316 {
1317 
1318 	LIST_REMOVE(op, nfso_list);
1319 	nfscl_freealllocks(&op->nfso_lock, local);
1320 	FREE((caddr_t)op, M_NFSCLOPEN);
1321 	if (local)
1322 		newnfsstats.cllocalopens--;
1323 	else
1324 		newnfsstats.clopens--;
1325 }
1326 
1327 /*
1328  * Free up all lock owners and associated locks.
1329  */
1330 static void
1331 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1332 {
1333 	struct nfscllockowner *lp, *nlp;
1334 
1335 	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1336 		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1337 			panic("nfscllckw");
1338 		nfscl_freelockowner(lp, local);
1339 	}
1340 }
1341 
1342 /*
1343  * Called for an Open when NFSERR_EXPIRED is received from the server.
1344  * If there are no byte range locks nor a Share Deny lost, try to do a
1345  * fresh Open. Otherwise, free the open.
1346  */
1347 static int
1348 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1349     struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1350 {
1351 	struct nfscllockowner *lp;
1352 	struct nfscldeleg *dp;
1353 	int mustdelete = 0, error;
1354 
1355 	/*
1356 	 * Look for any byte range lock(s).
1357 	 */
1358 	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1359 		if (!LIST_EMPTY(&lp->nfsl_lock)) {
1360 			mustdelete = 1;
1361 			break;
1362 		}
1363 	}
1364 
1365 	/*
1366 	 * If no byte range lock(s) nor a Share deny, try to re-open.
1367 	 */
1368 	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1369 		newnfs_copycred(&op->nfso_cred, cred);
1370 		dp = NULL;
1371 		error = nfsrpc_reopen(nmp, op->nfso_fh,
1372 		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1373 		if (error) {
1374 			mustdelete = 1;
1375 			if (dp != NULL) {
1376 				FREE((caddr_t)dp, M_NFSCLDELEG);
1377 				dp = NULL;
1378 			}
1379 		}
1380 		if (dp != NULL)
1381 			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1382 			    op->nfso_fhlen, cred, p, &dp);
1383 	}
1384 
1385 	/*
1386 	 * If a byte range lock or Share deny or couldn't re-open, free it.
1387 	 */
1388 	if (mustdelete)
1389 		nfscl_freeopen(op, 0);
1390 	return (mustdelete);
1391 }
1392 
1393 /*
1394  * Free up an open owner structure.
1395  */
1396 static void
1397 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1398 {
1399 
1400 	LIST_REMOVE(owp, nfsow_list);
1401 	FREE((caddr_t)owp, M_NFSCLOWNER);
1402 	if (local)
1403 		newnfsstats.cllocalopenowners--;
1404 	else
1405 		newnfsstats.clopenowners--;
1406 }
1407 
1408 /*
1409  * Free up a byte range lock owner structure.
1410  */
1411 static void
1412 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1413 {
1414 	struct nfscllock *lop, *nlop;
1415 
1416 	LIST_REMOVE(lp, nfsl_list);
1417 	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1418 		nfscl_freelock(lop, local);
1419 	}
1420 	FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
1421 	if (local)
1422 		newnfsstats.cllocallockowners--;
1423 	else
1424 		newnfsstats.cllockowners--;
1425 }
1426 
1427 /*
1428  * Free up a byte range lock structure.
1429  */
1430 APPLESTATIC void
1431 nfscl_freelock(struct nfscllock *lop, int local)
1432 {
1433 
1434 	LIST_REMOVE(lop, nfslo_list);
1435 	FREE((caddr_t)lop, M_NFSCLLOCK);
1436 	if (local)
1437 		newnfsstats.cllocallocks--;
1438 	else
1439 		newnfsstats.cllocks--;
1440 }
1441 
1442 /*
1443  * Clean out the state related to a delegation.
1444  */
1445 static void
1446 nfscl_cleandeleg(struct nfscldeleg *dp)
1447 {
1448 	struct nfsclowner *owp, *nowp;
1449 	struct nfsclopen *op;
1450 
1451 	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1452 		op = LIST_FIRST(&owp->nfsow_open);
1453 		if (op != NULL) {
1454 			if (LIST_NEXT(op, nfso_list) != NULL)
1455 				panic("nfscleandel");
1456 			nfscl_freeopen(op, 1);
1457 		}
1458 		nfscl_freeopenowner(owp, 1);
1459 	}
1460 	nfscl_freealllocks(&dp->nfsdl_lock, 1);
1461 }
1462 
1463 /*
1464  * Free a delegation.
1465  */
1466 static void
1467 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
1468 {
1469 
1470 	TAILQ_REMOVE(hdp, dp, nfsdl_list);
1471 	LIST_REMOVE(dp, nfsdl_hash);
1472 	FREE((caddr_t)dp, M_NFSCLDELEG);
1473 	newnfsstats.cldelegates--;
1474 	nfscl_delegcnt--;
1475 }
1476 
1477 /*
1478  * Free up all state related to this client structure.
1479  */
1480 static void
1481 nfscl_cleanclient(struct nfsclclient *clp)
1482 {
1483 	struct nfsclowner *owp, *nowp;
1484 	struct nfsclopen *op, *nop;
1485 	struct nfscllockowner *lp, *nlp;
1486 
1487 
1488 	/* get rid of defunct lockowners */
1489 	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
1490 		nfscl_freelockowner(lp, 0);
1491 	}
1492 
1493 	/* Now, all the OpenOwners, etc. */
1494 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1495 		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1496 			nfscl_freeopen(op, 0);
1497 		}
1498 		nfscl_freeopenowner(owp, 0);
1499 	}
1500 }
1501 
1502 /*
1503  * Called when an NFSERR_EXPIRED is received from the server.
1504  */
1505 static void
1506 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1507     struct ucred *cred, NFSPROC_T *p)
1508 {
1509 	struct nfsclowner *owp, *nowp, *towp;
1510 	struct nfsclopen *op, *nop, *top;
1511 	struct nfscldeleg *dp, *ndp;
1512 	int ret, printed = 0;
1513 
1514 	/*
1515 	 * First, merge locally issued Opens into the list for the server.
1516 	 */
1517 	dp = TAILQ_FIRST(&clp->nfsc_deleg);
1518 	while (dp != NULL) {
1519 	    ndp = TAILQ_NEXT(dp, nfsdl_list);
1520 	    owp = LIST_FIRST(&dp->nfsdl_owner);
1521 	    while (owp != NULL) {
1522 		nowp = LIST_NEXT(owp, nfsow_list);
1523 		op = LIST_FIRST(&owp->nfsow_open);
1524 		if (op != NULL) {
1525 		    if (LIST_NEXT(op, nfso_list) != NULL)
1526 			panic("nfsclexp");
1527 		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1528 			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1529 			    NFSV4CL_LOCKNAMELEN))
1530 			    break;
1531 		    }
1532 		    if (towp != NULL) {
1533 			/* Merge opens in */
1534 			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1535 			    if (top->nfso_fhlen == op->nfso_fhlen &&
1536 				!NFSBCMP(top->nfso_fh, op->nfso_fh,
1537 				 op->nfso_fhlen)) {
1538 				top->nfso_mode |= op->nfso_mode;
1539 				top->nfso_opencnt += op->nfso_opencnt;
1540 				break;
1541 			    }
1542 			}
1543 			if (top == NULL) {
1544 			    /* Just add the open to the owner list */
1545 			    LIST_REMOVE(op, nfso_list);
1546 			    op->nfso_own = towp;
1547 			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1548 			    newnfsstats.cllocalopens--;
1549 			    newnfsstats.clopens++;
1550 			}
1551 		    } else {
1552 			/* Just add the openowner to the client list */
1553 			LIST_REMOVE(owp, nfsow_list);
1554 			owp->nfsow_clp = clp;
1555 			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1556 			newnfsstats.cllocalopenowners--;
1557 			newnfsstats.clopenowners++;
1558 			newnfsstats.cllocalopens--;
1559 			newnfsstats.clopens++;
1560 		    }
1561 		}
1562 		owp = nowp;
1563 	    }
1564 	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1565 		printed = 1;
1566 		printf("nfsv4 expired locks lost\n");
1567 	    }
1568 	    nfscl_cleandeleg(dp);
1569 	    nfscl_freedeleg(&clp->nfsc_deleg, dp);
1570 	    dp = ndp;
1571 	}
1572 	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1573 	    panic("nfsclexp");
1574 
1575 	/*
1576 	 * Now, try and reopen against the server.
1577 	 */
1578 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1579 		owp->nfsow_seqid = 0;
1580 		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1581 			ret = nfscl_expireopen(clp, op, nmp, cred, p);
1582 			if (ret && !printed) {
1583 				printed = 1;
1584 				printf("nfsv4 expired locks lost\n");
1585 			}
1586 		}
1587 		if (LIST_EMPTY(&owp->nfsow_open))
1588 			nfscl_freeopenowner(owp, 0);
1589 	}
1590 }
1591 
1592 #ifndef	__FreeBSD__
1593 /*
1594  * Called from exit() upon process termination.
1595  */
1596 APPLESTATIC void
1597 nfscl_cleanup(NFSPROC_T *p)
1598 {
1599 	struct nfsclclient *clp;
1600 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1601 
1602 	if (!nfscl_inited)
1603 		return;
1604 	nfscl_filllockowner(p, own);
1605 
1606 	NFSLOCKCLSTATE();
1607 	/*
1608 	 * Loop through all the clientids, looking for the OpenOwners.
1609 	 */
1610 	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
1611 		nfscl_cleanup_common(clp, own);
1612 	NFSUNLOCKCLSTATE();
1613 }
1614 #endif	/* !__FreeBSD__ */
1615 
1616 /*
1617  * Common code used by nfscl_cleanup() and nfscl_cleanupkext().
1618  * Must be called with CLSTATE lock held.
1619  */
1620 static void
1621 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1622 {
1623 	struct nfsclowner *owp, *nowp;
1624 	struct nfsclopen *op;
1625 	struct nfscllockowner *lp, *nlp;
1626 	struct nfscldeleg *dp;
1627 
1628 	/* First, get rid of local locks on delegations. */
1629 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1630 		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1631 		    if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1632 			if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1633 			    panic("nfscllckw");
1634 			nfscl_freelockowner(lp, 1);
1635 		    }
1636 		}
1637 	}
1638 	owp = LIST_FIRST(&clp->nfsc_owner);
1639 	while (owp != NULL) {
1640 		nowp = LIST_NEXT(owp, nfsow_list);
1641 		if (!NFSBCMP(owp->nfsow_owner, own,
1642 		    NFSV4CL_LOCKNAMELEN)) {
1643 			/*
1644 			 * If there are children that haven't closed the
1645 			 * file descriptors yet, the opens will still be
1646 			 * here. For that case, let the renew thread clear
1647 			 * out the OpenOwner later.
1648 			 */
1649 			if (LIST_EMPTY(&owp->nfsow_open))
1650 				nfscl_freeopenowner(owp, 0);
1651 			else
1652 				owp->nfsow_defunct = 1;
1653 		} else {
1654 			/* look for lockowners on other opens */
1655 			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1656 				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1657 					if (!NFSBCMP(lp->nfsl_owner, own,
1658 					    NFSV4CL_LOCKNAMELEN))
1659 						lp->nfsl_defunct = 1;
1660 				}
1661 			}
1662 		}
1663 		owp = nowp;
1664 	}
1665 
1666 	/* and check the defunct list */
1667 	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
1668 		if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN))
1669 		    lp->nfsl_defunct = 1;
1670 	}
1671 }
1672 
1673 #if defined(APPLEKEXT) || defined(__FreeBSD__)
1674 /*
1675  * Simulate the call nfscl_cleanup() by looking for open owners associated
1676  * with processes that no longer exist, since a call to nfscl_cleanup()
1677  * can't be patched into exit().
1678  */
1679 static void
1680 nfscl_cleanupkext(struct nfsclclient *clp)
1681 {
1682 	struct nfsclowner *owp, *nowp;
1683 	struct nfscllockowner *lp;
1684 
1685 	NFSPROCLISTLOCK();
1686 	NFSLOCKCLSTATE();
1687 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1688 		if (nfscl_procdoesntexist(owp->nfsow_owner))
1689 			nfscl_cleanup_common(clp, owp->nfsow_owner);
1690 	}
1691 
1692 	/* and check the defunct list */
1693 	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
1694 		if (nfscl_procdoesntexist(lp->nfsl_owner))
1695 			lp->nfsl_defunct = 1;
1696 	}
1697 	NFSUNLOCKCLSTATE();
1698 	NFSPROCLISTUNLOCK();
1699 }
1700 #endif	/* APPLEKEXT || __FreeBSD__ */
1701 
1702 /*
1703  * Called from nfs umount to free up the clientid.
1704  */
1705 APPLESTATIC void
1706 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1707 {
1708 	struct nfsclclient *clp;
1709 	struct ucred *cred;
1710 	int igotlock;
1711 
1712 	clp = nmp->nm_clp;
1713 	if (clp != NULL) {
1714 		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1715 			panic("nfscl umount");
1716 
1717 		/*
1718 		 * First, handshake with the nfscl renew thread, to terminate
1719 		 * it.
1720 		 */
1721 		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1722 		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1723 			(void) tsleep((caddr_t)clp, PWAIT, "nfsclumnt", hz);
1724 
1725 		NFSLOCKCLSTATE();
1726 		do {
1727 			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1728 			    NFSCLSTATEMUTEXPTR);
1729 		} while (!igotlock);
1730 		NFSUNLOCKCLSTATE();
1731 
1732 		/*
1733 		 * Free up all the state. It will expire on the server, but
1734 		 * maybe we should do a SetClientId/SetClientIdConfirm so
1735 		 * the server throws it away?
1736 		 */
1737 		LIST_REMOVE(clp, nfsc_list);
1738 		nfscl_delegreturnall(clp, p);
1739 		cred = newnfs_getcred();
1740 		(void) nfsrpc_setclient(nmp, clp, cred, p);
1741 		nfscl_cleanclient(clp);
1742 		nmp->nm_clp = NULL;
1743 		NFSFREECRED(cred);
1744 		FREE((caddr_t)clp, M_NFSCLCLIENT);
1745 	}
1746 
1747 }
1748 
1749 /*
1750  * This function is called when a server replies with NFSERR_STALECLIENTID
1751  * or NFSERR_STALESTATEID. It traverses the clientid lists, doing Opens
1752  * and Locks with reclaim. If these fail, it deletes the corresponding state.
1753  */
1754 static void
1755 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1756 {
1757 	struct nfsclowner *owp, *nowp;
1758 	struct nfsclopen *op, *nop;
1759 	struct nfscllockowner *lp, *nlp;
1760 	struct nfscllock *lop, *nlop;
1761 	struct nfscldeleg *dp, *ndp, *tdp;
1762 	struct nfsmount *nmp;
1763 	struct ucred *tcred;
1764 	struct nfsclopenhead extra_open;
1765 	struct nfscldeleghead extra_deleg;
1766 	struct nfsreq *rep;
1767 	u_int64_t len;
1768 	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1769 	int igotlock = 0, error, trycnt, firstlock, s;
1770 
1771 	/*
1772 	 * First, lock the client structure, so everyone else will
1773 	 * block when trying to use state.
1774 	 */
1775 	NFSLOCKCLSTATE();
1776 	do {
1777 		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1778 		    NFSCLSTATEMUTEXPTR);
1779 	} while (!igotlock);
1780 	NFSUNLOCKCLSTATE();
1781 
1782 	nmp = clp->nfsc_nmp;
1783 	if (nmp == NULL)
1784 		panic("nfscl recover");
1785 	trycnt = 5;
1786 	do {
1787 		error = nfsrpc_setclient(nmp, clp, cred, p);
1788 	} while ((error == NFSERR_STALECLIENTID ||
1789 	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
1790 	if (error) {
1791 		nfscl_cleanclient(clp);
1792 		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
1793 		    NFSCLFLAGS_RECOVER);
1794 		NFSLOCKCLSTATE();
1795 		nfsv4_unlock(&clp->nfsc_lock, 0);
1796 		NFSUNLOCKCLSTATE();
1797 		return;
1798 	}
1799 	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
1800 	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
1801 
1802 	/*
1803 	 * Mark requests already queued on the server, so that they don't
1804 	 * initiate another recovery cycle. Any requests already in the
1805 	 * queue that handle state information will have the old stale
1806 	 * clientid/stateid and will get a NFSERR_STALESTATEID or
1807 	 * NFSERR_STALECLIENTID reply from the server. This will be
1808 	 * translated to NFSERR_STALEDONTRECOVER when R_DONTRECOVER is set.
1809 	 */
1810 	s = splsoftclock();
1811 	NFSLOCKREQ();
1812 	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
1813 		if (rep->r_nmp == nmp)
1814 			rep->r_flags |= R_DONTRECOVER;
1815 	}
1816 	NFSUNLOCKREQ();
1817 	splx(s);
1818 
1819 	/* get rid of defunct lockowners */
1820 	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
1821 		nfscl_freelockowner(lp, 0);
1822 	}
1823 
1824 	/*
1825 	 * Now, mark all delegations "need reclaim".
1826 	 */
1827 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
1828 		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
1829 
1830 	TAILQ_INIT(&extra_deleg);
1831 	LIST_INIT(&extra_open);
1832 	/*
1833 	 * Now traverse the state lists, doing Open and Lock Reclaims.
1834 	 */
1835 	tcred = newnfs_getcred();
1836 	owp = LIST_FIRST(&clp->nfsc_owner);
1837 	while (owp != NULL) {
1838 	    nowp = LIST_NEXT(owp, nfsow_list);
1839 	    owp->nfsow_seqid = 0;
1840 	    op = LIST_FIRST(&owp->nfsow_open);
1841 	    while (op != NULL) {
1842 		nop = LIST_NEXT(op, nfso_list);
1843 		if (error != NFSERR_NOGRACE) {
1844 		    /* Search for a delegation to reclaim with the open */
1845 		    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1846 			if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1847 			    continue;
1848 			if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1849 			    mode = NFSV4OPEN_ACCESSWRITE;
1850 			    delegtype = NFSV4OPEN_DELEGATEWRITE;
1851 			} else {
1852 			    mode = NFSV4OPEN_ACCESSREAD;
1853 			    delegtype = NFSV4OPEN_DELEGATEREAD;
1854 			}
1855 			if ((op->nfso_mode & mode) == mode &&
1856 			    op->nfso_fhlen == dp->nfsdl_fhlen &&
1857 			    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
1858 			    break;
1859 		    }
1860 		    ndp = dp;
1861 		    if (dp == NULL)
1862 			delegtype = NFSV4OPEN_DELEGATENONE;
1863 		    newnfs_copycred(&op->nfso_cred, tcred);
1864 		    error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
1865 			op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
1866 			op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
1867 			tcred, p);
1868 		    if (!error) {
1869 			/* Handle any replied delegation */
1870 			if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
1871 			    || NFSMNT_RDONLY(nmp->nm_mountp))) {
1872 			    if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
1873 				mode = NFSV4OPEN_ACCESSWRITE;
1874 			    else
1875 				mode = NFSV4OPEN_ACCESSREAD;
1876 			    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1877 				if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1878 				    continue;
1879 				if ((op->nfso_mode & mode) == mode &&
1880 				    op->nfso_fhlen == dp->nfsdl_fhlen &&
1881 				    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
1882 				    op->nfso_fhlen)) {
1883 				    dp->nfsdl_stateid = ndp->nfsdl_stateid;
1884 				    dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
1885 				    dp->nfsdl_ace = ndp->nfsdl_ace;
1886 				    dp->nfsdl_change = ndp->nfsdl_change;
1887 				    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
1888 				    if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
1889 					dp->nfsdl_flags |= NFSCLDL_RECALL;
1890 				    FREE((caddr_t)ndp, M_NFSCLDELEG);
1891 				    ndp = NULL;
1892 				    break;
1893 				}
1894 			    }
1895 			}
1896 			if (ndp != NULL)
1897 			    TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
1898 
1899 			/* and reclaim all byte range locks */
1900 			lp = LIST_FIRST(&op->nfso_lock);
1901 			while (lp != NULL) {
1902 			    nlp = LIST_NEXT(lp, nfsl_list);
1903 			    lp->nfsl_seqid = 0;
1904 			    firstlock = 1;
1905 			    lop = LIST_FIRST(&lp->nfsl_lock);
1906 			    while (lop != NULL) {
1907 				nlop = LIST_NEXT(lop, nfslo_list);
1908 				if (lop->nfslo_end == NFS64BITSSET)
1909 				    len = NFS64BITSSET;
1910 				else
1911 				    len = lop->nfslo_end - lop->nfslo_first;
1912 				if (error != NFSERR_NOGRACE)
1913 				    error = nfscl_trylock(nmp, NULL,
1914 					op->nfso_fh, op->nfso_fhlen, lp,
1915 					firstlock, 1, lop->nfslo_first, len,
1916 					lop->nfslo_type, tcred, p);
1917 				if (error != 0)
1918 				    nfscl_freelock(lop, 0);
1919 				else
1920 				    firstlock = 0;
1921 				lop = nlop;
1922 			    }
1923 			    /* If no locks, but a lockowner, just delete it. */
1924 			    if (LIST_EMPTY(&lp->nfsl_lock))
1925 				nfscl_freelockowner(lp, 0);
1926 			    lp = nlp;
1927 			}
1928 		    } else {
1929 			nfscl_freeopen(op, 0);
1930 		    }
1931 		}
1932 		op = nop;
1933 	    }
1934 	    owp = nowp;
1935 	}
1936 
1937 	/*
1938 	 * Now, try and get any delegations not yet reclaimed by cobbling
1939 	 * to-gether an appropriate open.
1940 	 */
1941 	nowp = NULL;
1942 	dp = TAILQ_FIRST(&clp->nfsc_deleg);
1943 	while (dp != NULL) {
1944 	    ndp = TAILQ_NEXT(dp, nfsdl_list);
1945 	    if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
1946 		if (nowp == NULL) {
1947 		    MALLOC(nowp, struct nfsclowner *,
1948 			sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
1949 		    /*
1950 		     * Name must be as long an largest possible
1951 		     * NFSV4CL_LOCKNAMELEN. 12 for now.
1952 		     */
1953 		    NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
1954 			NFSV4CL_LOCKNAMELEN);
1955 		    LIST_INIT(&nowp->nfsow_open);
1956 		    nowp->nfsow_clp = clp;
1957 		    nowp->nfsow_seqid = 0;
1958 		    nowp->nfsow_defunct = 0;
1959 		    nfscl_lockinit(&nowp->nfsow_rwlock);
1960 		}
1961 		nop = NULL;
1962 		if (error != NFSERR_NOGRACE) {
1963 		    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
1964 			dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
1965 		    nop->nfso_own = nowp;
1966 		    if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1967 			nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
1968 			delegtype = NFSV4OPEN_DELEGATEWRITE;
1969 		    } else {
1970 			nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
1971 			delegtype = NFSV4OPEN_DELEGATEREAD;
1972 		    }
1973 		    nop->nfso_opencnt = 0;
1974 		    nop->nfso_posixlock = 1;
1975 		    nop->nfso_fhlen = dp->nfsdl_fhlen;
1976 		    NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
1977 		    LIST_INIT(&nop->nfso_lock);
1978 		    nop->nfso_stateid.seqid = 0;
1979 		    nop->nfso_stateid.other[0] = 0;
1980 		    nop->nfso_stateid.other[1] = 0;
1981 		    nop->nfso_stateid.other[2] = 0;
1982 		    newnfs_copycred(&dp->nfsdl_cred, tcred);
1983 		    newnfs_copyincred(tcred, &nop->nfso_cred);
1984 		    tdp = NULL;
1985 		    error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
1986 			nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
1987 			nop->nfso_mode, nop, NULL, 0, &tdp, 1,
1988 			delegtype, tcred, p);
1989 		    if (tdp != NULL) {
1990 			if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
1991 			    mode = NFSV4OPEN_ACCESSWRITE;
1992 			else
1993 			    mode = NFSV4OPEN_ACCESSREAD;
1994 			if ((nop->nfso_mode & mode) == mode &&
1995 			    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
1996 			    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
1997 			    nop->nfso_fhlen)) {
1998 			    dp->nfsdl_stateid = tdp->nfsdl_stateid;
1999 			    dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2000 			    dp->nfsdl_ace = tdp->nfsdl_ace;
2001 			    dp->nfsdl_change = tdp->nfsdl_change;
2002 			    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2003 			    if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2004 				dp->nfsdl_flags |= NFSCLDL_RECALL;
2005 			    FREE((caddr_t)tdp, M_NFSCLDELEG);
2006 			} else {
2007 			    TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2008 			}
2009 		    }
2010 		}
2011 		if (error) {
2012 		    if (nop != NULL)
2013 			FREE((caddr_t)nop, M_NFSCLOPEN);
2014 		    /*
2015 		     * Couldn't reclaim it, so throw the state
2016 		     * away. Ouch!!
2017 		     */
2018 		    nfscl_cleandeleg(dp);
2019 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
2020 		} else {
2021 		    LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2022 		}
2023 	    }
2024 	    dp = ndp;
2025 	}
2026 
2027 	/*
2028 	 * Now, get rid of extra Opens and Delegations.
2029 	 */
2030 	LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2031 		do {
2032 			newnfs_copycred(&op->nfso_cred, tcred);
2033 			error = nfscl_tryclose(op, tcred, nmp, p);
2034 			if (error == NFSERR_GRACE)
2035 				(void) nfs_catnap(PZERO, "nfsexcls");
2036 		} while (error == NFSERR_GRACE);
2037 		LIST_REMOVE(op, nfso_list);
2038 		FREE((caddr_t)op, M_NFSCLOPEN);
2039 	}
2040 	if (nowp != NULL)
2041 		FREE((caddr_t)nowp, M_NFSCLOWNER);
2042 
2043 	TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2044 		do {
2045 			newnfs_copycred(&dp->nfsdl_cred, tcred);
2046 			error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2047 			if (error == NFSERR_GRACE)
2048 				(void) nfs_catnap(PZERO, "nfsexdlg");
2049 		} while (error == NFSERR_GRACE);
2050 		TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2051 		FREE((caddr_t)dp, M_NFSCLDELEG);
2052 	}
2053 
2054 	NFSLOCKCLSTATE();
2055 	nfsv4_unlock(&clp->nfsc_lock, 0);
2056 	NFSUNLOCKCLSTATE();
2057 	NFSFREECRED(tcred);
2058 }
2059 
2060 /*
2061  * This function is called when a server replies with NFSERR_EXPIRED.
2062  * It deletes all state for the client and does a fresh SetClientId/confirm.
2063  * XXX Someday it should post a signal to the process(es) that hold the
2064  * state, so they know that lock state has been lost.
2065  */
APPLESTATIC int
nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
{
	struct nfscllockowner *lp, *nlp;
	struct nfsmount *nmp;
	struct ucred *cred;
	int igotlock = 0, error, trycnt;

	/*
	 * If the clientid has gone away or a new SetClientid has already
	 * been done, just return ok.
	 */
	if (clp == NULL || clidrev != clp->nfsc_clientidrev)
		return (0);

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
	 * that only one thread does the work.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR);
	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
		/*
		 * Another thread cleared NFSCLFLAGS_EXPIREIT while we
		 * waited, so it already did the recovery work.
		 */
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl expired");
	cred = newnfs_getcred();
	/* Retry the SetClientID/confirm a few times for stale replies. */
	trycnt = 5;
	do {
		error = nfsrpc_setclient(nmp, clp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		/*
		 * Clear out any state.
		 */
		nfscl_cleanclient(clp);
		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
		    NFSCLFLAGS_RECOVER);
	} else {
		/* get rid of defunct lockowners */
		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
		    nlp) {
			nfscl_freelockowner(lp, 0);
		}

		/*
		 * Expire the state for the client.
		 */
		nfscl_expireclient(clp, nmp, cred, p);
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	}
	NFSFREECRED(cred);
	/* Clear the flag and release the exclusive lock, waking waiters. */
	clp->nfsc_flags &= ~NFSCLFLAGS_EXPIREIT;
	NFSLOCKCLSTATE();
	nfsv4_unlock(&clp->nfsc_lock, 0);
	NFSUNLOCKCLSTATE();
	return (error);
}
2137 
2138 /*
2139  * This function inserts a lock in the list after insert_lop.
2140  */
2141 static void
2142 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2143     struct nfscllock *insert_lop, int local)
2144 {
2145 
2146 	if ((struct nfscllockowner *)insert_lop == lp)
2147 		LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2148 	else
2149 		LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2150 	if (local)
2151 		newnfsstats.cllocallocks++;
2152 	else
2153 		newnfsstats.cllocks++;
2154 }
2155 
2156 /*
2157  * This function updates the locking for a lock owner and given file. It
2158  * maintains a list of lock ranges ordered on increasing file offset that
2159  * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2160  * It always adds new_lop to the list and sometimes uses the one pointed
2161  * at by other_lopp.
2162  * Returns 1 if the locks were modified, 0 otherwise.
2163  */
static int
nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
    struct nfscllock **other_lopp, int local)
{
	struct nfscllock *new_lop = *new_lopp;
	struct nfscllock *lop, *tlop, *ilop;
	struct nfscllock *other_lop;
	int unlock = 0, modified = 0;
	u_int64_t tmp;

	/*
	 * Work down the list until the lock is merged.
	 */
	if (new_lop->nfslo_type == F_UNLCK)
		unlock = 1;
	/*
	 * ilop is the lock to insert the new one after. Casting the lock
	 * owner pointer to a lock pointer is the convention
	 * nfscl_insertlock() uses to mean "insert at the head".
	 */
	ilop = (struct nfscllock *)lp;
	lop = LIST_FIRST(&lp->nfsl_lock);
	while (lop != NULL) {
	    /*
	     * Only check locks for this file that aren't before the start of
	     * new lock's range.
	     */
	    if (lop->nfslo_end >= new_lop->nfslo_first) {
		if (new_lop->nfslo_end < lop->nfslo_first) {
		    /*
		     * If the new lock ends before the start of the
		     * current lock's range, no merge, just insert
		     * the new lock.
		     */
		    break;
		}
		if (new_lop->nfslo_type == lop->nfslo_type ||
		    (new_lop->nfslo_first <= lop->nfslo_first &&
		     new_lop->nfslo_end >= lop->nfslo_end)) {
		    /*
		     * This lock can be absorbed by the new lock/unlock.
		     * This happens when it covers the entire range
		     * of the old lock or is contiguous
		     * with the old lock and is of the same type or an
		     * unlock.
		     */
		    if (new_lop->nfslo_type != lop->nfslo_type ||
			new_lop->nfslo_first != lop->nfslo_first ||
			new_lop->nfslo_end != lop->nfslo_end)
			modified = 1;
		    /* Grow the new lock to cover the absorbed one. */
		    if (lop->nfslo_first < new_lop->nfslo_first)
			new_lop->nfslo_first = lop->nfslo_first;
		    if (lop->nfslo_end > new_lop->nfslo_end)
			new_lop->nfslo_end = lop->nfslo_end;
		    /* Remember lop so it can be freed after advancing. */
		    tlop = lop;
		    lop = LIST_NEXT(lop, nfslo_list);
		    nfscl_freelock(tlop, local);
		    continue;
		}

		/*
		 * All these cases are for contiguous locks that are not the
		 * same type, so they can't be merged.
		 */
		if (new_lop->nfslo_first <= lop->nfslo_first) {
		    /*
		     * This case is where the new lock overlaps with the
		     * first part of the old lock. Move the start of the
		     * old lock to just past the end of the new lock. The
		     * new lock will be inserted in front of the old, since
		     * ilop hasn't been updated. (We are done now.)
		     */
		    if (lop->nfslo_first != new_lop->nfslo_end) {
			lop->nfslo_first = new_lop->nfslo_end;
			modified = 1;
		    }
		    break;
		}
		if (new_lop->nfslo_end >= lop->nfslo_end) {
		    /*
		     * This case is where the new lock overlaps with the
		     * end of the old lock's range. Move the old lock's
		     * end to just before the new lock's first and insert
		     * the new lock after the old lock.
		     * Might not be done yet, since the new lock could
		     * overlap further locks with higher ranges.
		     */
		    if (lop->nfslo_end != new_lop->nfslo_first) {
			lop->nfslo_end = new_lop->nfslo_first;
			modified = 1;
		    }
		    ilop = lop;
		    lop = LIST_NEXT(lop, nfslo_list);
		    continue;
		}
		/*
		 * The final case is where the new lock's range is in the
		 * middle of the current lock's and splits the current lock
		 * up. Use *other_lopp to handle the second part of the
		 * split old lock range. (We are done now.)
		 * For unlock, we use new_lop as other_lop and tmp, since
		 * other_lop and new_lop are the same for this case.
		 * We noted the unlock case above, so we don't need
		 * new_lop->nfslo_type any longer.
		 */
		tmp = new_lop->nfslo_first;
		if (unlock) {
		    other_lop = new_lop;
		    *new_lopp = NULL;
		} else {
		    other_lop = *other_lopp;
		    *other_lopp = NULL;
		}
		other_lop->nfslo_first = new_lop->nfslo_end;
		other_lop->nfslo_end = lop->nfslo_end;
		other_lop->nfslo_type = lop->nfslo_type;
		lop->nfslo_end = tmp;
		nfscl_insertlock(lp, other_lop, lop, local);
		ilop = lop;
		modified = 1;
		break;
	    }
	    ilop = lop;
	    lop = LIST_NEXT(lop, nfslo_list);
	    /* (Redundant with the loop condition, but harmless.) */
	    if (lop == NULL)
		break;
	}

	/*
	 * Insert the new lock in the list at the appropriate place.
	 */
	if (!unlock) {
		nfscl_insertlock(lp, new_lop, ilop, local);
		*new_lopp = NULL;
		modified = 1;
	}
	return (modified);
}
2297 
2298 /*
2299  * This function must be run as a kernel thread.
2300  * It does Renew Ops and recovery, when required.
2301  */
APPLESTATIC void
nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp, *olp;
	struct nfscldeleghead dh;
	struct nfscllockownerhead lh;
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;
	u_int32_t clidrev;
	int error, cbpathdown, islept, igotlock, ret, clearok;

	cred = newnfs_getcred();
	clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
	for(;;) {
		newnfs_setroot(cred);
		cbpathdown = 0;
		if (clp->nfsc_flags & NFSCLFLAGS_RECOVER)
			nfscl_recover(clp, cred, p);
		/* Do a Renew RPC when the lease is due for renewal. */
		if (clp->nfsc_expire <= NFSD_MONOSEC &&
		    (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
			clidrev = clp->nfsc_clientidrev;
			error = nfsrpc_renew(clp, cred, p);
			if (error == NFSERR_CBPATHDOWN)
			    cbpathdown = 1;
			else if (error == NFSERR_STALECLIENTID)
			    clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
			else if (error == NFSERR_EXPIRED)
			    (void) nfscl_hasexpired(clp, clidrev, p);
		}

		/*
		 * lh collects defunct lock owners to free and dh collects
		 * delegations to DelegReturn, once the state mutex has
		 * been dropped.
		 */
		LIST_INIT(&lh);
		TAILQ_INIT(&dh);
		NFSLOCKCLSTATE();
		if (cbpathdown)
			/* It's a Total Recall! */
			nfscl_totalrecall(clp);

		/*
		 * Now, handle defunct owners.
		 */
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (owp != NULL) {
		    nowp = LIST_NEXT(owp, nfsow_list);
		    if (LIST_EMPTY(&owp->nfsow_open)) {
			if (owp->nfsow_defunct)
			    nfscl_freeopenowner(owp, 0);
		    } else {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    lp = LIST_FIRST(&op->nfso_lock);
			    while (lp != NULL) {
				nlp = LIST_NEXT(lp, nfsl_list);
				if (lp->nfsl_defunct &&
				    LIST_EMPTY(&lp->nfsl_lock)) {
				    /*
				     * Keep only one entry per owner name
				     * on lh; free the duplicates now.
				     */
				    LIST_FOREACH(olp, &lh, nfsl_list) {
					if (!NFSBCMP(olp->nfsl_owner,
					    lp->nfsl_owner,NFSV4CL_LOCKNAMELEN))
					    break;
				    }
				    if (olp == NULL) {
					LIST_REMOVE(lp, nfsl_list);
					LIST_INSERT_HEAD(&lh, lp, nfsl_list);
				    } else {
					nfscl_freelockowner(lp, 0);
				    }
				}
				lp = nlp;
			    }
			}
		    }
		    owp = nowp;
		}

		/* also search the defunct list */
		lp = LIST_FIRST(&clp->nfsc_defunctlockowner);
		while (lp != NULL) {
		    nlp = LIST_NEXT(lp, nfsl_list);
		    if (lp->nfsl_defunct) {
			LIST_FOREACH(olp, &lh, nfsl_list) {
			    if (!NFSBCMP(olp->nfsl_owner, lp->nfsl_owner,
				NFSV4CL_LOCKNAMELEN))
				break;
			}
			if (olp == NULL) {
			    LIST_REMOVE(lp, nfsl_list);
			    LIST_INSERT_HEAD(&lh, lp, nfsl_list);
			} else {
			    nfscl_freelockowner(lp, 0);
			}
		    }
		    lp = nlp;
		}
		/* and release defunct lock owners */
		LIST_FOREACH_SAFE(lp, &lh, nfsl_list, nlp) {
		    nfscl_freelockowner(lp, 0);
		}

		/*
		 * Do the recall on any delegations. To avoid trouble, always
		 * come back up here after having slept.
		 */
		igotlock = 0;
tryagain:
		dp = TAILQ_FIRST(&clp->nfsc_deleg);
		while (dp != NULL) {
			ndp = TAILQ_NEXT(dp, nfsdl_list);
			if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				    if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				    }
				    dp->nfsdl_rwlock.nfslock_lock |=
					NFSV4LOCK_WANTED;
				    (void) nfsmsleep(&dp->nfsdl_rwlock,
					NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
					NULL);
				    goto tryagain;
				}
				while (!igotlock) {
				    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					&islept, NFSCLSTATEMUTEXPTR);
				    if (islept)
					goto tryagain;
				}
				NFSUNLOCKCLSTATE();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
				    NULL, cred, p);
				if (!ret) {
				    /*
				     * Recall worked. Move the delegation to
				     * dh for the DelegReturn done below.
				     */
				    nfscl_cleandeleg(dp);
				    TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					nfsdl_list);
				    LIST_REMOVE(dp, nfsdl_hash);
				    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
				    nfscl_delegcnt--;
				    newnfsstats.cldelegates--;
				}
				NFSLOCKCLSTATE();
			}
			dp = ndp;
		}

		/*
		 * Clear out old delegations, if we are above the high water
		 * mark. Only clear out ones with no state related to them.
		 * The tailq list is in LRU order.
		 */
		dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
		while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
		    ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
		    if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
			dp->nfsdl_rwlock.nfslock_lock == 0 &&
			dp->nfsdl_timestamp < NFSD_MONOSEC &&
			!(dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
			  NFSCLDL_NEEDRECLAIM))) {
			/* Only discard it if there are no opens or locks. */
			clearok = 1;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			    op = LIST_FIRST(&owp->nfsow_open);
			    if (op != NULL) {
				clearok = 0;
				break;
			    }
			}
			if (clearok) {
			    LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
				if (!LIST_EMPTY(&lp->nfsl_lock)) {
				    clearok = 0;
				    break;
				}
			    }
			}
			if (clearok) {
			    TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			    LIST_REMOVE(dp, nfsdl_hash);
			    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
			    nfscl_delegcnt--;
			    newnfsstats.cldelegates--;
			}
		    }
		    dp = ndp;
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();

		/*
		 * Delegreturn any delegations cleaned out or recalled.
		 */
		TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
			TAILQ_REMOVE(&dh, dp, nfsdl_list);
			FREE((caddr_t)dp, M_NFSCLDELEG);
		}

#if defined(APPLEKEXT) || defined(__FreeBSD__)
		/*
		 * Simulate the calls to nfscl_cleanup() when a process
		 * exits, since the call can't be patched into exit().
		 */
		{
			struct timespec mytime;
			static time_t prevsec = 0;

			/* At most once per second. */
			NFSGETNANOTIME(&mytime);
			if (prevsec != mytime.tv_sec) {
				prevsec = mytime.tv_sec;
				nfscl_cleanupkext(clp);
			}
		}
#endif	/* APPLEKEXT || __FreeBSD__ */

		/*
		 * Sleep for about a second between iterations, unless a
		 * recovery is pending, then check for an unmount request.
		 */
		if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
		    (void) tsleep((caddr_t)clp, PWAIT, "nfscl", hz);
		if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
			NFSFREECRED(cred);
			clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
			wakeup((caddr_t)clp);
			return;
		}
	}
}
2530 
2531 /*
2532  * Initiate state recovery. Called when NFSERR_STALECLIENTID or
2533  * NFSERR_STALESTATEID is received.
2534  */
2535 APPLESTATIC void
2536 nfscl_initiate_recovery(struct nfsclclient *clp)
2537 {
2538 
2539 	if (clp == NULL)
2540 		return;
2541 	NFSLOCKCLSTATE();
2542 	clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2543 	NFSUNLOCKCLSTATE();
2544 	wakeup((caddr_t)clp);
2545 }
2546 
2547 /*
2548  * Dump out the state stuff for debugging.
2549  */
2550 APPLESTATIC void
2551 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2552     int lockowner, int locks)
2553 {
2554 	struct nfsclclient *clp;
2555 	struct nfsclowner *owp;
2556 	struct nfsclopen *op;
2557 	struct nfscllockowner *lp;
2558 	struct nfscllock *lop;
2559 	struct nfscldeleg *dp;
2560 
2561 	clp = nmp->nm_clp;
2562 	if (clp == NULL) {
2563 		printf("nfscl dumpstate NULL clp\n");
2564 		return;
2565 	}
2566 	NFSLOCKCLSTATE();
2567 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2568 	  LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2569 	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2570 		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2571 		    owp->nfsow_owner[0], owp->nfsow_owner[1],
2572 		    owp->nfsow_owner[2], owp->nfsow_owner[3],
2573 		    owp->nfsow_seqid);
2574 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2575 		if (opens)
2576 		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2577 			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2578 			op->nfso_stateid.other[2], op->nfso_opencnt,
2579 			op->nfso_fh[12]);
2580 		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2581 		    if (lockowner)
2582 			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2583 			    lp->nfsl_owner[0], lp->nfsl_owner[1],
2584 			    lp->nfsl_owner[2], lp->nfsl_owner[3],
2585 			    lp->nfsl_seqid,
2586 			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2587 			    lp->nfsl_stateid.other[2]);
2588 		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2589 			if (locks)
2590 #ifdef __FreeBSD__
2591 			    printf("lck typ=%d fst=%ju end=%ju\n",
2592 				lop->nfslo_type, (intmax_t)lop->nfslo_first,
2593 				(intmax_t)lop->nfslo_end);
2594 #else
2595 			    printf("lck typ=%d fst=%qd end=%qd\n",
2596 				lop->nfslo_type, lop->nfslo_first,
2597 				lop->nfslo_end);
2598 #endif
2599 		    }
2600 		}
2601 	    }
2602 	  }
2603 	}
2604 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2605 	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2606 		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2607 		    owp->nfsow_owner[0], owp->nfsow_owner[1],
2608 		    owp->nfsow_owner[2], owp->nfsow_owner[3],
2609 		    owp->nfsow_seqid);
2610 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2611 		if (opens)
2612 		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2613 			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2614 			op->nfso_stateid.other[2], op->nfso_opencnt,
2615 			op->nfso_fh[12]);
2616 		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2617 		    if (lockowner)
2618 			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2619 			    lp->nfsl_owner[0], lp->nfsl_owner[1],
2620 			    lp->nfsl_owner[2], lp->nfsl_owner[3],
2621 			    lp->nfsl_seqid,
2622 			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2623 			    lp->nfsl_stateid.other[2]);
2624 		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2625 			if (locks)
2626 #ifdef __FreeBSD__
2627 			    printf("lck typ=%d fst=%ju end=%ju\n",
2628 				lop->nfslo_type, (intmax_t)lop->nfslo_first,
2629 				(intmax_t)lop->nfslo_end);
2630 #else
2631 			    printf("lck typ=%d fst=%qd end=%qd\n",
2632 				lop->nfslo_type, lop->nfslo_first,
2633 				lop->nfslo_end);
2634 #endif
2635 		    }
2636 		}
2637 	    }
2638 	}
2639 	NFSUNLOCKCLSTATE();
2640 }
2641 
2642 /*
2643  * Check for duplicate open owners and opens.
2644  * (Only used as a diagnostic aid.)
2645  */
APPLESTATIC void
nfscl_dupopen(vnode_t vp, int dupopens)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *owp2;
	struct nfsclopen *op, *op2;
	struct nfsfh *nfhp;

	clp = VFSTONFS(vnode_mount(vp))->nm_clp;
	if (clp == NULL) {
		printf("nfscl dupopen NULL clp\n");
		return;
	}
	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();

	/*
	 * First, search for duplicate owners.
	 * These should never happen!
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		if (owp != owp2 &&
		    !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
		    NFSV4CL_LOCKNAMELEN)) {
			/* Drop the mutex before the prints, then give up. */
			NFSUNLOCKCLSTATE();
			printf("DUP OWNER\n");
			nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
			return;
		}
	    }
	}

	/*
	 * Now, search for duplicate stateids.
	 * These shouldn't happen, either.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			/* All-zero "other" fields are ignored (unset). */
			if (op != op2 &&
			    (op->nfso_stateid.other[0] != 0 ||
			     op->nfso_stateid.other[1] != 0 ||
			     op->nfso_stateid.other[2] != 0) &&
			    op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
			    op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
			    op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
			    NFSUNLOCKCLSTATE();
			    printf("DUP STATEID\n");
			    nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
				0);
			    return;
			}
		    }
		}
	    }
	}

	/*
	 * Now search for duplicate opens.
	 * Duplicate opens for the same owner
	 * should never occur. Other duplicates are
	 * possible and are checked for if "dupopens"
	 * is true.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		if (nfhp->nfh_len == op2->nfso_fhlen &&
		    !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
		    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
				!NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
				(!NFSBCMP(op->nfso_own->nfsow_owner,
				 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
				 dupopens)) {
				if (!NFSBCMP(op->nfso_own->nfsow_owner,
				    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
				    NFSUNLOCKCLSTATE();
				    printf("BADDUP OPEN\n");
				} else {
				    NFSUNLOCKCLSTATE();
				    printf("DUP OPEN\n");
				}
				nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
				    0, 0);
				return;
			    }
			}
		    }
		}
	    }
	}
	NFSUNLOCKCLSTATE();
}
2742 
2743 /*
2744  * During close, find an open that needs to be dereferenced and
2745  * dereference it. If there are no more opens for this file,
2746  * log a message to that effect.
2747  * Opens aren't actually Close'd until VOP_INACTIVE() is performed
2748  * on the file's vnode.
2749  * This is the safe way, since it is difficult to identify
2750  * which open the close is for and I/O can be performed after the
2751  * close(2) system call when a file is mmap'd.
2752  * If it returns 0 for success, there will be a referenced
2753  * clp returned via clpp.
2754  */
APPLESTATIC int
nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	int error, notdecr;

	/* Get the clientid for the mount; it is returned via *clpp. */
	error = nfscl_getcl(vp, NULL, NULL, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nfhp = VTONFS(vp)->n_fhp;
	/* notdecr is cleared once one open count has been decremented. */
	notdecr = 1;
	NFSLOCKCLSTATE();
	/*
	 * First, look for one under a delegation that was locally issued
	 * and just decrement the opencnt for it. Since all my Opens against
	 * the server are DENY_NONE, I don't see a problem with hanging
	 * onto them. (It is much easier to use one of the extant Opens
	 * that I already have on the server when a Delegation is recalled
	 * than to do fresh Opens.) Someday, I might need to rethink this, but.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				/*
				 * Since a delegation is for a file, there
				 * should never be more than one open for
				 * each openowner.
				 */
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfscdeleg opens");
				if (notdecr && op->nfso_opencnt > 0) {
					notdecr = 0;
					op->nfso_opencnt--;
					break;
				}
			}
		}
	}

	/* Now process the opens against the server. */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == nfhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
			    nfhp->nfh_len)) {
				/* Found an open, decrement cnt if possible */
				if (notdecr && op->nfso_opencnt > 0) {
					notdecr = 0;
					op->nfso_opencnt--;
				}
				/*
				 * There are more opens, so just return.
				 */
				if (op->nfso_opencnt > 0) {
					NFSUNLOCKCLSTATE();
					return (0);
				}
			}
		}
	}
	NFSUNLOCKCLSTATE();
	/* Diagnostic only: no matching open was found anywhere. */
	if (notdecr)
		printf("nfscl: never fnd open\n");
	return (0);
}
2828 
/*
 * Get rid of all the Opens for the file: both the local ones issued
 * under a delegation and the ones held against the server (the latter
 * via Close RPCs). The open counts are expected to already be zero
 * (see the KASSERTs), so presumably this runs after the last close
 * has been counted down — NOTE(review): confirm against the callers.
 * If it returns 0 for success, there will be a referenced clp returned
 * via clpp.
 */
APPLESTATIC int
nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	int error;

	error = nfscl_getcl(vp, NULL, NULL, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();
	/*
	 * First get rid of the local Open structures, which should be no
	 * longer in use.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				KASSERT((op->nfso_opencnt == 0),
				    ("nfscl: bad open cnt on deleg"));
				nfscl_freeopen(op, 1);
			}
			nfscl_freeopenowner(owp, 1);
		}
	}

	/* Now process the opens against the server. */
lookformore:
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL) {
			if (op->nfso_fhlen == nfhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
			    nfhp->nfh_len)) {
				/* Found an open, close it. */
				KASSERT((op->nfso_opencnt == 0),
				    ("nfscl: bad open cnt on server"));
				/*
				 * The state mutex is dropped around the
				 * Close RPC, so restart the scan afterward
				 * since the lists may have changed.
				 */
				NFSUNLOCKCLSTATE();
				nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
				    p);
				NFSLOCKCLSTATE();
				goto lookformore;
			}
			op = LIST_NEXT(op, nfso_list);
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}
2886 
2887 /*
2888  * Return all delegations on this client.
2889  * (Must be called with client sleep lock.)
2890  */
2891 static void
2892 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
2893 {
2894 	struct nfscldeleg *dp, *ndp;
2895 	struct ucred *cred;
2896 
2897 	cred = newnfs_getcred();
2898 	TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
2899 		nfscl_cleandeleg(dp);
2900 		(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2901 		nfscl_freedeleg(&clp->nfsc_deleg, dp);
2902 	}
2903 	NFSFREECRED(cred);
2904 }
2905 
2906 /*
2907  * Do a callback RPC.
2908  */
2909 APPLESTATIC void
2910 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
2911 {
2912 	int i, op;
2913 	u_int32_t *tl;
2914 	struct nfsclclient *clp;
2915 	struct nfscldeleg *dp = NULL;
2916 	int numops, taglen = -1, error = 0, trunc, ret = 0;
2917 	u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident;
2918 	u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
2919 	vnode_t vp = NULL;
2920 	struct nfsnode *np;
2921 	struct vattr va;
2922 	struct nfsfh *nfhp;
2923 	mount_t mp;
2924 	nfsattrbit_t attrbits, rattrbits;
2925 	nfsv4stateid_t stateid;
2926 
2927 	nfsrvd_rephead(nd);
2928 	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
2929 	taglen = fxdr_unsigned(int, *tl);
2930 	if (taglen < 0) {
2931 		error = EBADRPC;
2932 		goto nfsmout;
2933 	}
2934 	if (taglen <= NFSV4_SMALLSTR)
2935 		tagstr = tag;
2936 	else
2937 		tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
2938 	error = nfsrv_mtostr(nd, tagstr, taglen);
2939 	if (error) {
2940 		if (taglen > NFSV4_SMALLSTR)
2941 			free(tagstr, M_TEMP);
2942 		taglen = -1;
2943 		goto nfsmout;
2944 	}
2945 	(void) nfsm_strtom(nd, tag, taglen);
2946 	if (taglen > NFSV4_SMALLSTR) {
2947 		free(tagstr, M_TEMP);
2948 	}
2949 	NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
2950 	NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2951 	minorvers = fxdr_unsigned(u_int32_t, *tl++);
2952 	if (minorvers != NFSV4_MINORVERSION)
2953 		nd->nd_repstat = NFSERR_MINORVERMISMATCH;
2954 	cbident = fxdr_unsigned(u_int32_t, *tl++);
2955 	if (nd->nd_repstat)
2956 		numops = 0;
2957 	else
2958 		numops = fxdr_unsigned(int, *tl);
2959 	/*
2960 	 * Loop around doing the sub ops.
2961 	 */
2962 	for (i = 0; i < numops; i++) {
2963 		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
2964 		NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
2965 		*repp++ = *tl;
2966 		op = fxdr_unsigned(int, *tl);
2967 		if (op < NFSV4OP_CBGETATTR || op > NFSV4OP_CBRECALL) {
2968 		    nd->nd_repstat = NFSERR_OPILLEGAL;
2969 		    *repp = nfscl_errmap(nd);
2970 		    retops++;
2971 		    break;
2972 		}
2973 		nd->nd_procnum = op;
2974 		newnfsstats.cbrpccnt[nd->nd_procnum]++;
2975 		switch (op) {
2976 		case NFSV4OP_CBGETATTR:
2977 			clp = NULL;
2978 			error = nfsm_getfh(nd, &nfhp);
2979 			if (!error)
2980 				error = nfsrv_getattrbits(nd, &attrbits,
2981 				    NULL, NULL);
2982 			if (!error) {
2983 				mp = nfscl_getmnt(cbident);
2984 				if (mp == NULL)
2985 					error = NFSERR_SERVERFAULT;
2986 			}
2987 			if (!error) {
2988 				dp = NULL;
2989 				NFSLOCKCLSTATE();
2990 				clp = nfscl_findcl(VFSTONFS(mp));
2991 				if (clp != NULL)
2992 					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
2993 					    nfhp->nfh_len);
2994 				NFSUNLOCKCLSTATE();
2995 				if (dp == NULL)
2996 					error = NFSERR_SERVERFAULT;
2997 			}
2998 			if (!error) {
2999 				ret = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3000 				    nfhp->nfh_len, p, &np);
3001 				if (!ret)
3002 					vp = NFSTOV(np);
3003 			}
3004 			if (nfhp != NULL)
3005 				FREE((caddr_t)nfhp, M_NFSFH);
3006 			if (!error) {
3007 				NFSZERO_ATTRBIT(&rattrbits);
3008 				if (NFSISSET_ATTRBIT(&attrbits,
3009 				    NFSATTRBIT_SIZE)) {
3010 					if (!ret)
3011 						va.va_size = np->n_size;
3012 					else
3013 						va.va_size = dp->nfsdl_size;
3014 					NFSSETBIT_ATTRBIT(&rattrbits,
3015 					    NFSATTRBIT_SIZE);
3016 				}
3017 				if (NFSISSET_ATTRBIT(&attrbits,
3018 				    NFSATTRBIT_CHANGE)) {
3019 					va.va_filerev = dp->nfsdl_change;
3020 					if (ret || (np->n_flag & NDELEGMOD))
3021 						va.va_filerev++;
3022 					NFSSETBIT_ATTRBIT(&rattrbits,
3023 					    NFSATTRBIT_CHANGE);
3024 				}
3025 				(void) nfsv4_fillattr(nd, NULL, NULL, &va,
3026 				    NULL, 0, &rattrbits, NULL, NULL, 0, 0);
3027 				if (!ret)
3028 					vrele(vp);
3029 			}
3030 			break;
3031 		case NFSV4OP_CBRECALL:
3032 			clp = NULL;
3033 			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3034 			    NFSX_UNSIGNED);
3035 			stateid.seqid = *tl++;
3036 			NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3037 			    NFSX_STATEIDOTHER);
3038 			tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3039 			trunc = fxdr_unsigned(int, *tl);
3040 			error = nfsm_getfh(nd, &nfhp);
3041 			if (!error) {
3042 				mp = nfscl_getmnt(cbident);
3043 				if (mp == NULL)
3044 					error = NFSERR_SERVERFAULT;
3045 			}
3046 			if (!error) {
3047 				NFSLOCKCLSTATE();
3048 				clp = nfscl_findcl(VFSTONFS(mp));
3049 				if (clp != NULL) {
3050 					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3051 					    nfhp->nfh_len);
3052 					if (dp != NULL) {
3053 						dp->nfsdl_flags |=
3054 						    NFSCLDL_RECALL;
3055 						wakeup((caddr_t)clp);
3056 					}
3057 				} else {
3058 					error = NFSERR_SERVERFAULT;
3059 				}
3060 				NFSUNLOCKCLSTATE();
3061 			}
3062 			if (nfhp != NULL)
3063 				FREE((caddr_t)nfhp, M_NFSFH);
3064 			break;
3065 		};
3066 		if (error) {
3067 			if (error == EBADRPC || error == NFSERR_BADXDR) {
3068 				nd->nd_repstat = NFSERR_BADXDR;
3069 			} else {
3070 				nd->nd_repstat = error;
3071 			}
3072 			error = 0;
3073 		}
3074 		retops++;
3075 		if (nd->nd_repstat) {
3076 			*repp = nfscl_errmap(nd);
3077 			break;
3078 		} else
3079 			*repp = 0;	/* NFS4_OK */
3080 	}
3081 nfsmout:
3082 	if (error) {
3083 		if (error == EBADRPC || error == NFSERR_BADXDR)
3084 			nd->nd_repstat = NFSERR_BADXDR;
3085 		else
3086 			printf("nfsv4 comperr1=%d\n", error);
3087 	}
3088 	if (taglen == -1) {
3089 		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3090 		*tl++ = 0;
3091 		*tl = 0;
3092 	} else {
3093 		*retopsp = txdr_unsigned(retops);
3094 	}
3095 	*nd->nd_errp = nfscl_errmap(nd);
3096 }
3097 
3098 /*
3099  * Generate the next cbident value. Basically just increment a static value
3100  * and then check that it isn't already in the list, if it has wrapped around.
3101  */
3102 static u_int32_t
3103 nfscl_nextcbident(void)
3104 {
3105 	struct nfsclclient *clp;
3106 	int matched;
3107 	static u_int32_t nextcbident = 0;
3108 	static int haswrapped = 0;
3109 
3110 	nextcbident++;
3111 	if (nextcbident == 0)
3112 		haswrapped = 1;
3113 	if (haswrapped) {
3114 		/*
3115 		 * Search the clientid list for one already using this cbident.
3116 		 */
3117 		do {
3118 			matched = 0;
3119 			NFSLOCKCLSTATE();
3120 			LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3121 				if (clp->nfsc_cbident == nextcbident) {
3122 					matched = 1;
3123 					break;
3124 				}
3125 			}
3126 			NFSUNLOCKCLSTATE();
3127 			if (matched == 1)
3128 				nextcbident++;
3129 		} while (matched);
3130 	}
3131 	return (nextcbident);
3132 }
3133 
3134 /*
3135  * Get the mount point related to a given cbident.
3136  */
3137 static mount_t
3138 nfscl_getmnt(u_int32_t cbident)
3139 {
3140 	struct nfsclclient *clp;
3141 	struct nfsmount *nmp;
3142 
3143 	NFSLOCKCLSTATE();
3144 	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3145 		if (clp->nfsc_cbident == cbident)
3146 			break;
3147 	}
3148 	if (clp == NULL) {
3149 		NFSUNLOCKCLSTATE();
3150 		return (NULL);
3151 	}
3152 	nmp = clp->nfsc_nmp;
3153 	NFSUNLOCKCLSTATE();
3154 	return (nmp->nm_mountp);
3155 }
3156 
3157 /*
3158  * Search for a lock conflict locally on the client. A conflict occurs if
3159  * - not same owner and overlapping byte range and at least one of them is
3160  *   a write lock or this is an unlock.
3161  */
3162 static int
3163 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3164     struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3165     struct nfscllock **lopp)
3166 {
3167 	struct nfsclowner *owp;
3168 	struct nfsclopen *op;
3169 	int ret;
3170 
3171 	if (dp != NULL) {
3172 		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3173 		if (ret)
3174 			return (ret);
3175 	}
3176 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3177 		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3178 			if (op->nfso_fhlen == fhlen &&
3179 			    !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3180 				ret = nfscl_checkconflict(&op->nfso_lock, nlop,
3181 				    own, lopp);
3182 				if (ret)
3183 					return (ret);
3184 			}
3185 		}
3186 	}
3187 	return (0);
3188 }
3189 
3190 static int
3191 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3192     u_int8_t *own, struct nfscllock **lopp)
3193 {
3194 	struct nfscllockowner *lp;
3195 	struct nfscllock *lop;
3196 
3197 	LIST_FOREACH(lp, lhp, nfsl_list) {
3198 		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3199 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3200 				if (lop->nfslo_first >= nlop->nfslo_end)
3201 					break;
3202 				if (lop->nfslo_end <= nlop->nfslo_first)
3203 					continue;
3204 				if (lop->nfslo_type == F_WRLCK ||
3205 				    nlop->nfslo_type == F_WRLCK ||
3206 				    nlop->nfslo_type == F_UNLCK) {
3207 					if (lopp != NULL)
3208 						*lopp = lop;
3209 					return (NFSERR_DENIED);
3210 				}
3211 			}
3212 		}
3213 	}
3214 	return (0);
3215 }
3216 
3217 /*
3218  * Check for a local conflicting lock.
3219  */
3220 APPLESTATIC int
3221 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3222     u_int64_t len, struct flock *fl, NFSPROC_T *p)
3223 {
3224 	struct nfscllock *lop, nlck;
3225 	struct nfscldeleg *dp;
3226 	struct nfsnode *np;
3227 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
3228 	int error;
3229 
3230 	nlck.nfslo_type = fl->l_type;
3231 	nlck.nfslo_first = off;
3232 	if (len == NFS64BITSSET) {
3233 		nlck.nfslo_end = NFS64BITSSET;
3234 	} else {
3235 		nlck.nfslo_end = off + len;
3236 		if (nlck.nfslo_end <= nlck.nfslo_first)
3237 			return (NFSERR_INVAL);
3238 	}
3239 	np = VTONFS(vp);
3240 	nfscl_filllockowner(p, own);
3241 	NFSLOCKCLSTATE();
3242 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3243 	error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3244 	    &nlck, own, dp, &lop);
3245 	if (error != 0) {
3246 		fl->l_whence = SEEK_SET;
3247 		fl->l_start = lop->nfslo_first;
3248 		if (lop->nfslo_end == NFS64BITSSET)
3249 			fl->l_len = 0;
3250 		else
3251 			fl->l_len = lop->nfslo_end - lop->nfslo_first;
3252 		fl->l_pid = (pid_t)0;
3253 		fl->l_type = lop->nfslo_type;
3254 		error = -1;			/* no RPC required */
3255 	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3256 	    fl->l_type == F_RDLCK)) {
3257 		/*
3258 		 * The delegation ensures that there isn't a conflicting
3259 		 * lock on the server, so return -1 to indicate an RPC
3260 		 * isn't required.
3261 		 */
3262 		fl->l_type = F_UNLCK;
3263 		error = -1;
3264 	}
3265 	NFSUNLOCKCLSTATE();
3266 	return (error);
3267 }
3268 
3269 /*
3270  * Handle Recall of a delegation.
3271  * The clp must be exclusive locked when this is called.
3272  */
3273 static int
3274 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3275     struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p)
3276 {
3277 	struct nfsclowner *owp, *lowp, *nowp;
3278 	struct nfsclopen *op, *lop;
3279 	struct nfscllockowner *lp;
3280 	struct nfscllock *lckp;
3281 	struct nfsnode *np;
3282 	int error = 0, ret, gotvp = 0;
3283 
3284 	if (vp == NULL) {
3285 		/*
3286 		 * First, get a vnode for the file. This is needed to do RPCs.
3287 		 */
3288 		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3289 		    dp->nfsdl_fhlen, p, &np);
3290 		if (ret) {
3291 			/*
3292 			 * File isn't open, so nothing to move over to the
3293 			 * server.
3294 			 */
3295 			return (0);
3296 		}
3297 		vp = NFSTOV(np);
3298 		gotvp = 1;
3299 	} else {
3300 		np = VTONFS(vp);
3301 	}
3302 	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3303 	NFSINVALATTRCACHE(np);
3304 
3305 	/*
3306 	 * Ok, if it's a write delegation, flush data to the server, so
3307 	 * that close/open consistency is retained.
3308 	 */
3309 	NFSLOCKNODE(np);
3310 	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
3311 #ifdef APPLE
3312 		OSBitOrAtomic((u_int32_t)NDELEGRECALL, (UInt32 *)&np->n_flag);
3313 #else
3314 		np->n_flag |= NDELEGRECALL;
3315 #endif
3316 		NFSUNLOCKNODE(np);
3317 		(void) ncl_flush(vp, MNT_WAIT, cred, p, 1);
3318 		NFSLOCKNODE(np);
3319 #ifdef APPLE
3320 		OSBitAndAtomic((int32_t)~(NMODIFIED | NDELEGRECALL), (UInt32 *)&np->n_flag);
3321 #else
3322 		np->n_flag &= ~(NMODIFIED | NDELEGRECALL);
3323 #endif
3324 	}
3325 	NFSUNLOCKNODE(np);
3326 
3327 	/*
3328 	 * Now, for each openowner with opens issued locally, move them
3329 	 * over to state against the server.
3330 	 */
3331 	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3332 		lop = LIST_FIRST(&lowp->nfsow_open);
3333 		if (lop != NULL) {
3334 			if (LIST_NEXT(lop, nfso_list) != NULL)
3335 				panic("nfsdlg mult opens");
3336 			/*
3337 			 * Look for the same openowner against the server.
3338 			 */
3339 			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3340 				if (!NFSBCMP(lowp->nfsow_owner,
3341 				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3342 					newnfs_copycred(&dp->nfsdl_cred, cred);
3343 					ret = nfscl_moveopen(vp, clp, nmp, lop,
3344 					    owp, dp, cred, p);
3345 					if (ret == NFSERR_STALECLIENTID ||
3346 					    ret == NFSERR_STALEDONTRECOVER) {
3347 						if (gotvp)
3348 							vrele(vp);
3349 						return (ret);
3350 					}
3351 					if (ret) {
3352 						nfscl_freeopen(lop, 1);
3353 						if (!error)
3354 							error = ret;
3355 					}
3356 					break;
3357 				}
3358 			}
3359 
3360 			/*
3361 			 * If no openowner found, create one and get an open
3362 			 * for it.
3363 			 */
3364 			if (owp == NULL) {
3365 				MALLOC(nowp, struct nfsclowner *,
3366 				    sizeof (struct nfsclowner), M_NFSCLOWNER,
3367 				    M_WAITOK);
3368 				nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3369 				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3370 				    dp->nfsdl_fhlen, NULL);
3371 				newnfs_copycred(&dp->nfsdl_cred, cred);
3372 				ret = nfscl_moveopen(vp, clp, nmp, lop,
3373 				    owp, dp, cred, p);
3374 				if (ret) {
3375 					nfscl_freeopenowner(owp, 0);
3376 					if (ret == NFSERR_STALECLIENTID ||
3377 					    ret == NFSERR_STALEDONTRECOVER) {
3378 						if (gotvp)
3379 							vrele(vp);
3380 						return (ret);
3381 					}
3382 					if (ret) {
3383 						nfscl_freeopen(lop, 1);
3384 						if (!error)
3385 							error = ret;
3386 					}
3387 				}
3388 			}
3389 		}
3390 	}
3391 
3392 	/*
3393 	 * Now, get byte range locks for any locks done locally.
3394 	 */
3395 	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3396 		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
3397 			newnfs_copycred(&dp->nfsdl_cred, cred);
3398 			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
3399 			if (ret == NFSERR_STALESTATEID ||
3400 			    ret == NFSERR_STALEDONTRECOVER ||
3401 			    ret == NFSERR_STALECLIENTID) {
3402 				if (gotvp)
3403 					vrele(vp);
3404 				return (ret);
3405 			}
3406 			if (ret && !error)
3407 				error = ret;
3408 		}
3409 	}
3410 	if (gotvp)
3411 		vrele(vp);
3412 	return (error);
3413 }
3414 
3415 /*
3416  * Move a locally issued open over to an owner on the state list.
3417  * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
3418  * returns with it unlocked.
3419  */
3420 static int
3421 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3422     struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
3423     struct ucred *cred, NFSPROC_T *p)
3424 {
3425 	struct nfsclopen *op, *nop;
3426 	struct nfscldeleg *ndp;
3427 	struct nfsnode *np;
3428 	int error = 0, newone;
3429 
3430 	/*
3431 	 * First, look for an appropriate open, If found, just increment the
3432 	 * opencnt in it.
3433 	 */
3434 	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3435 		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
3436 		    op->nfso_fhlen == lop->nfso_fhlen &&
3437 		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
3438 			op->nfso_opencnt += lop->nfso_opencnt;
3439 			nfscl_freeopen(lop, 1);
3440 			return (0);
3441 		}
3442 	}
3443 
3444 	/* No appropriate open, so we have to do one against the server. */
3445 	np = VTONFS(vp);
3446 	MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
3447 	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
3448 	newone = 0;
3449 	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
3450 	    lop->nfso_fh, lop->nfso_fhlen, &newone);
3451 	ndp = dp;
3452 	error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
3453 	    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
3454 	    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
3455 	if (error) {
3456 		if (newone)
3457 			nfscl_freeopen(op, 0);
3458 	} else {
3459 		if (newone)
3460 			newnfs_copyincred(cred, &op->nfso_cred);
3461 		op->nfso_mode |= lop->nfso_mode;
3462 		op->nfso_opencnt += lop->nfso_opencnt;
3463 		nfscl_freeopen(lop, 1);
3464 	}
3465 	if (nop != NULL)
3466 		FREE((caddr_t)nop, M_NFSCLOPEN);
3467 	if (ndp != NULL) {
3468 		/*
3469 		 * What should I do with the returned delegation, since the
3470 		 * delegation is being recalled? For now, just printf and
3471 		 * through it away.
3472 		 */
3473 		printf("Moveopen returned deleg\n");
3474 		FREE((caddr_t)ndp, M_NFSCLDELEG);
3475 	}
3476 	return (error);
3477 }
3478 
3479 /*
3480  * Recall all delegations on this client.
3481  */
3482 static void
3483 nfscl_totalrecall(struct nfsclclient *clp)
3484 {
3485 	struct nfscldeleg *dp;
3486 
3487 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
3488 		dp->nfsdl_flags |= NFSCLDL_RECALL;
3489 }
3490 
3491 /*
3492  * Relock byte ranges. Called for delegation recall and state expiry.
3493  */
3494 static int
3495 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3496     struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
3497     NFSPROC_T *p)
3498 {
3499 	struct nfscllockowner *nlp;
3500 	struct nfsfh *nfhp;
3501 	u_int64_t off, len;
3502 	u_int32_t clidrev = 0;
3503 	int error, newone, donelocally;
3504 
3505 	off = lop->nfslo_first;
3506 	len = lop->nfslo_end - lop->nfslo_first;
3507 	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
3508 	    clp, 1, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
3509 	    &donelocally);
3510 	if (error || donelocally)
3511 		return (error);
3512 	if (nmp->nm_clp != NULL)
3513 		clidrev = nmp->nm_clp->nfsc_clientidrev;
3514 	else
3515 		clidrev = 0;
3516 	nfhp = VTONFS(vp)->n_fhp;
3517 	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
3518 	    nfhp->nfh_len, nlp, newone, 0, off,
3519 	    len, lop->nfslo_type, cred, p);
3520 	if (error)
3521 		nfscl_freelockowner(nlp, 0);
3522 	return (error);
3523 }
3524 
3525 /*
3526  * Called to re-open a file. Basically get a vnode for the file handle
3527  * and then call nfsrpc_openrpc() to do the rest.
3528  */
3529 static int
3530 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
3531     u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
3532     struct ucred *cred, NFSPROC_T *p)
3533 {
3534 	struct nfsnode *np;
3535 	vnode_t vp;
3536 	int error;
3537 
3538 	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
3539 	if (error)
3540 		return (error);
3541 	vp = NFSTOV(np);
3542 	if (np->n_v4 != NULL) {
3543 		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
3544 		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
3545 		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
3546 		    cred, p);
3547 	} else {
3548 		error = EINVAL;
3549 	}
3550 	vrele(vp);
3551 	return (error);
3552 }
3553 
3554 /*
3555  * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
3556  * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
3557  * fail.
3558  */
3559 static int
3560 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
3561     u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
3562     u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
3563     int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
3564 {
3565 	int error;
3566 
3567 	do {
3568 		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
3569 		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
3570 		    0, 0);
3571 		if (error == NFSERR_DELAY)
3572 			(void) nfs_catnap(PZERO, "nfstryop");
3573 	} while (error == NFSERR_DELAY);
3574 	if (error == EAUTH || error == EACCES) {
3575 		/* Try again using system credentials */
3576 		newnfs_setroot(cred);
3577 		do {
3578 		    error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
3579 			newfhlen, mode, op, name, namelen, ndpp, reclaim,
3580 			delegtype, cred, p, 1, 0);
3581 		    if (error == NFSERR_DELAY)
3582 			(void) nfs_catnap(PZERO, "nfstryop");
3583 		} while (error == NFSERR_DELAY);
3584 	}
3585 	return (error);
3586 }
3587 
3588 /*
3589  * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
3590  * NFSERR_DELAY. Also, retry with system credentials, if the provided
3591  * cred don't work.
3592  */
3593 static int
3594 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
3595     int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
3596     u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
3597 {
3598 	struct nfsrv_descript nfsd, *nd = &nfsd;
3599 	int error;
3600 
3601 	do {
3602 		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
3603 		    reclaim, off, len, type, cred, p, 0);
3604 		if (!error && nd->nd_repstat == NFSERR_DELAY)
3605 			(void) nfs_catnap(PZERO, "nfstrylck");
3606 	} while (!error && nd->nd_repstat == NFSERR_DELAY);
3607 	if (!error)
3608 		error = nd->nd_repstat;
3609 	if (error == EAUTH || error == EACCES) {
3610 		/* Try again using root credentials */
3611 		newnfs_setroot(cred);
3612 		do {
3613 			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
3614 			    newone, reclaim, off, len, type, cred, p, 1);
3615 			if (!error && nd->nd_repstat == NFSERR_DELAY)
3616 				(void) nfs_catnap(PZERO, "nfstrylck");
3617 		} while (!error && nd->nd_repstat == NFSERR_DELAY);
3618 		if (!error)
3619 			error = nd->nd_repstat;
3620 	}
3621 	return (error);
3622 }
3623 
3624 /*
3625  * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
3626  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3627  * credentials fail.
3628  */
3629 static int
3630 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
3631     struct nfsmount *nmp, NFSPROC_T *p)
3632 {
3633 	int error;
3634 
3635 	do {
3636 		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
3637 		if (error == NFSERR_DELAY)
3638 			(void) nfs_catnap(PZERO, "nfstrydp");
3639 	} while (error == NFSERR_DELAY);
3640 	if (error == EAUTH || error == EACCES) {
3641 		/* Try again using system credentials */
3642 		newnfs_setroot(cred);
3643 		do {
3644 			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
3645 			if (error == NFSERR_DELAY)
3646 				(void) nfs_catnap(PZERO, "nfstrydp");
3647 		} while (error == NFSERR_DELAY);
3648 	}
3649 	return (error);
3650 }
3651 
3652 /*
3653  * Try a close against the server. Just call nfsrpc_closerpc(),
3654  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3655  * credentials fail.
3656  */
3657 APPLESTATIC int
3658 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
3659     struct nfsmount *nmp, NFSPROC_T *p)
3660 {
3661 	struct nfsrv_descript nfsd, *nd = &nfsd;
3662 	int error;
3663 
3664 	do {
3665 		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
3666 		if (error == NFSERR_DELAY)
3667 			(void) nfs_catnap(PZERO, "nfstrycl");
3668 	} while (error == NFSERR_DELAY);
3669 	if (error == EAUTH || error == EACCES) {
3670 		/* Try again using system credentials */
3671 		newnfs_setroot(cred);
3672 		do {
3673 			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
3674 			if (error == NFSERR_DELAY)
3675 				(void) nfs_catnap(PZERO, "nfstrycl");
3676 		} while (error == NFSERR_DELAY);
3677 	}
3678 	return (error);
3679 }
3680 
3681 /*
3682  * Decide if a delegation on a file permits close without flushing writes
3683  * to the server. This might be a big performance win in some environments.
3684  * (Not useful until the client does caching on local stable storage.)
3685  */
3686 APPLESTATIC int
3687 nfscl_mustflush(vnode_t vp)
3688 {
3689 	struct nfsclclient *clp;
3690 	struct nfscldeleg *dp;
3691 	struct nfsnode *np;
3692 	struct nfsmount *nmp;
3693 
3694 	np = VTONFS(vp);
3695 	nmp = VFSTONFS(vnode_mount(vp));
3696 	if (!NFSHASNFSV4(nmp))
3697 		return (1);
3698 	NFSLOCKCLSTATE();
3699 	clp = nfscl_findcl(nmp);
3700 	if (clp == NULL) {
3701 		NFSUNLOCKCLSTATE();
3702 		return (1);
3703 	}
3704 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3705 	if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_RECALL))
3706 	     == NFSCLDL_WRITE &&
3707 	    (dp->nfsdl_sizelimit >= np->n_size ||
3708 	     !NFSHASSTRICT3530(nmp))) {
3709 		NFSUNLOCKCLSTATE();
3710 		return (0);
3711 	}
3712 	NFSUNLOCKCLSTATE();
3713 	return (1);
3714 }
3715 
3716 /*
3717  * See if a (write) delegation exists for this file.
3718  */
3719 APPLESTATIC int
3720 nfscl_nodeleg(vnode_t vp, int writedeleg)
3721 {
3722 	struct nfsclclient *clp;
3723 	struct nfscldeleg *dp;
3724 	struct nfsnode *np;
3725 	struct nfsmount *nmp;
3726 
3727 	np = VTONFS(vp);
3728 	nmp = VFSTONFS(vnode_mount(vp));
3729 	if (!NFSHASNFSV4(nmp))
3730 		return (1);
3731 	NFSLOCKCLSTATE();
3732 	clp = nfscl_findcl(nmp);
3733 	if (clp == NULL) {
3734 		NFSUNLOCKCLSTATE();
3735 		return (1);
3736 	}
3737 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3738 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_RECALL) == 0 &&
3739 	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE)
3740 	     == NFSCLDL_WRITE)) {
3741 		NFSUNLOCKCLSTATE();
3742 		return (0);
3743 	}
3744 	NFSUNLOCKCLSTATE();
3745 	return (1);
3746 }
3747 
3748 /*
3749  * Look for an associated delegation that should be DelegReturned.
3750  */
3751 APPLESTATIC int
3752 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
3753 {
3754 	struct nfsclclient *clp;
3755 	struct nfscldeleg *dp;
3756 	struct nfsclowner *owp;
3757 	struct nfscllockowner *lp;
3758 	struct nfsmount *nmp;
3759 	struct ucred *cred;
3760 	struct nfsnode *np;
3761 	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3762 
3763 	nmp = VFSTONFS(vnode_mount(vp));
3764 	np = VTONFS(vp);
3765 	NFSLOCKCLSTATE();
3766 	/*
3767 	 * Loop around waiting for:
3768 	 * - outstanding I/O operations on delegations to complete
3769 	 * - for a delegation on vp that has state, lock the client and
3770 	 *   do a recall
3771 	 * - return delegation with no state
3772 	 */
3773 	while (1) {
3774 		clp = nfscl_findcl(nmp);
3775 		if (clp == NULL) {
3776 			NFSUNLOCKCLSTATE();
3777 			return (retcnt);
3778 		}
3779 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3780 		    np->n_fhp->nfh_len);
3781 		if (dp != NULL) {
3782 		    /*
3783 		     * Wait for outstanding I/O ops to be done.
3784 		     */
3785 		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3786 			if (igotlock) {
3787 			    nfsv4_unlock(&clp->nfsc_lock, 0);
3788 			    igotlock = 0;
3789 			}
3790 			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3791 			(void) nfsmsleep(&dp->nfsdl_rwlock,
3792 			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3793 			continue;
3794 		    }
3795 		    needsrecall = 0;
3796 		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3797 			if (!LIST_EMPTY(&owp->nfsow_open)) {
3798 			    needsrecall = 1;
3799 			    break;
3800 			}
3801 		    }
3802 		    if (!needsrecall) {
3803 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3804 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
3805 				needsrecall = 1;
3806 				break;
3807 			    }
3808 			}
3809 		    }
3810 		    if (needsrecall && !triedrecall) {
3811 			islept = 0;
3812 			while (!igotlock) {
3813 			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
3814 				&islept, NFSCLSTATEMUTEXPTR);
3815 			    if (islept)
3816 				break;
3817 			}
3818 			if (islept)
3819 			    continue;
3820 			NFSUNLOCKCLSTATE();
3821 			cred = newnfs_getcred();
3822 			newnfs_copycred(&dp->nfsdl_cred, cred);
3823 			(void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p);
3824 			NFSFREECRED(cred);
3825 			triedrecall = 1;
3826 			NFSLOCKCLSTATE();
3827 			nfsv4_unlock(&clp->nfsc_lock, 0);
3828 			igotlock = 0;
3829 			continue;
3830 		    }
3831 		    *stp = dp->nfsdl_stateid;
3832 		    retcnt = 1;
3833 		    nfscl_cleandeleg(dp);
3834 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
3835 		}
3836 		if (igotlock)
3837 		    nfsv4_unlock(&clp->nfsc_lock, 0);
3838 		NFSUNLOCKCLSTATE();
3839 		return (retcnt);
3840 	}
3841 }
3842 
3843 /*
3844  * Look for associated delegation(s) that should be DelegReturned.
3845  */
3846 APPLESTATIC int
3847 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
3848     nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
3849 {
3850 	struct nfsclclient *clp;
3851 	struct nfscldeleg *dp;
3852 	struct nfsclowner *owp;
3853 	struct nfscllockowner *lp;
3854 	struct nfsmount *nmp;
3855 	struct ucred *cred;
3856 	struct nfsnode *np;
3857 	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3858 
3859 	nmp = VFSTONFS(vnode_mount(fvp));
3860 	*gotfdp = 0;
3861 	*gottdp = 0;
3862 	NFSLOCKCLSTATE();
3863 	/*
3864 	 * Loop around waiting for:
3865 	 * - outstanding I/O operations on delegations to complete
3866 	 * - for a delegation on fvp that has state, lock the client and
3867 	 *   do a recall
3868 	 * - return delegation(s) with no state.
3869 	 */
3870 	while (1) {
3871 		clp = nfscl_findcl(nmp);
3872 		if (clp == NULL) {
3873 			NFSUNLOCKCLSTATE();
3874 			return (retcnt);
3875 		}
3876 		np = VTONFS(fvp);
3877 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3878 		    np->n_fhp->nfh_len);
3879 		if (dp != NULL && *gotfdp == 0) {
3880 		    /*
3881 		     * Wait for outstanding I/O ops to be done.
3882 		     */
3883 		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3884 			if (igotlock) {
3885 			    nfsv4_unlock(&clp->nfsc_lock, 0);
3886 			    igotlock = 0;
3887 			}
3888 			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3889 			(void) nfsmsleep(&dp->nfsdl_rwlock,
3890 			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3891 			continue;
3892 		    }
3893 		    needsrecall = 0;
3894 		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3895 			if (!LIST_EMPTY(&owp->nfsow_open)) {
3896 			    needsrecall = 1;
3897 			    break;
3898 			}
3899 		    }
3900 		    if (!needsrecall) {
3901 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3902 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
3903 				needsrecall = 1;
3904 				break;
3905 			    }
3906 			}
3907 		    }
3908 		    if (needsrecall && !triedrecall) {
3909 			islept = 0;
3910 			while (!igotlock) {
3911 			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
3912 				&islept, NFSCLSTATEMUTEXPTR);
3913 			    if (islept)
3914 				break;
3915 			}
3916 			if (islept)
3917 			    continue;
3918 			NFSUNLOCKCLSTATE();
3919 			cred = newnfs_getcred();
3920 			newnfs_copycred(&dp->nfsdl_cred, cred);
3921 			(void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p);
3922 			NFSFREECRED(cred);
3923 			triedrecall = 1;
3924 			NFSLOCKCLSTATE();
3925 			nfsv4_unlock(&clp->nfsc_lock, 0);
3926 			igotlock = 0;
3927 			continue;
3928 		    }
3929 		    *fstp = dp->nfsdl_stateid;
3930 		    retcnt++;
3931 		    *gotfdp = 1;
3932 		    nfscl_cleandeleg(dp);
3933 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
3934 		}
3935 		if (igotlock) {
3936 		    nfsv4_unlock(&clp->nfsc_lock, 0);
3937 		    igotlock = 0;
3938 		}
3939 		if (tvp != NULL) {
3940 		    np = VTONFS(tvp);
3941 		    dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3942 			np->n_fhp->nfh_len);
3943 		    if (dp != NULL && *gottdp == 0) {
3944 			/*
3945 			 * Wait for outstanding I/O ops to be done.
3946 			 */
3947 			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3948 			    dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3949 			    (void) nfsmsleep(&dp->nfsdl_rwlock,
3950 				NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3951 			    continue;
3952 			}
3953 			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3954 			    if (!LIST_EMPTY(&owp->nfsow_open)) {
3955 				NFSUNLOCKCLSTATE();
3956 				return (retcnt);
3957 			    }
3958 			}
3959 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3960 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
3961 				NFSUNLOCKCLSTATE();
3962 				return (retcnt);
3963 			    }
3964 			}
3965 			*tstp = dp->nfsdl_stateid;
3966 			retcnt++;
3967 			*gottdp = 1;
3968 			nfscl_cleandeleg(dp);
3969 			nfscl_freedeleg(&clp->nfsc_deleg, dp);
3970 		    }
3971 		}
3972 		NFSUNLOCKCLSTATE();
3973 		return (retcnt);
3974 	}
3975 }
3976 
3977 /*
3978  * Get a reference on the clientid associated with the mount point.
3979  * Return 1 if success, 0 otherwise.
3980  */
3981 APPLESTATIC int
3982 nfscl_getref(struct nfsmount *nmp)
3983 {
3984 	struct nfsclclient *clp;
3985 
3986 	NFSLOCKCLSTATE();
3987 	clp = nfscl_findcl(nmp);
3988 	if (clp == NULL) {
3989 		NFSUNLOCKCLSTATE();
3990 		return (0);
3991 	}
3992 	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR);
3993 	NFSUNLOCKCLSTATE();
3994 	return (1);
3995 }
3996 
3997 /*
3998  * Release a reference on a clientid acquired with the above call.
3999  */
4000 APPLESTATIC void
4001 nfscl_relref(struct nfsmount *nmp)
4002 {
4003 	struct nfsclclient *clp;
4004 
4005 	NFSLOCKCLSTATE();
4006 	clp = nfscl_findcl(nmp);
4007 	if (clp == NULL) {
4008 		NFSUNLOCKCLSTATE();
4009 		return;
4010 	}
4011 	nfsv4_relref(&clp->nfsc_lock);
4012 	NFSUNLOCKCLSTATE();
4013 }
4014 
4015 /*
4016  * Save the size attribute in the delegation, since the nfsnode
4017  * is going away.
4018  */
4019 APPLESTATIC void
4020 nfscl_reclaimnode(vnode_t vp)
4021 {
4022 	struct nfsclclient *clp;
4023 	struct nfscldeleg *dp;
4024 	struct nfsnode *np = VTONFS(vp);
4025 	struct nfsmount *nmp;
4026 
4027 	nmp = VFSTONFS(vnode_mount(vp));
4028 	if (!NFSHASNFSV4(nmp))
4029 		return;
4030 	NFSLOCKCLSTATE();
4031 	clp = nfscl_findcl(nmp);
4032 	if (clp == NULL) {
4033 		NFSUNLOCKCLSTATE();
4034 		return;
4035 	}
4036 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4037 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4038 		dp->nfsdl_size = np->n_size;
4039 	NFSUNLOCKCLSTATE();
4040 }
4041 
4042 /*
4043  * Get the saved size attribute in the delegation, since it is a
4044  * newly allocated nfsnode.
4045  */
4046 APPLESTATIC void
4047 nfscl_newnode(vnode_t vp)
4048 {
4049 	struct nfsclclient *clp;
4050 	struct nfscldeleg *dp;
4051 	struct nfsnode *np = VTONFS(vp);
4052 	struct nfsmount *nmp;
4053 
4054 	nmp = VFSTONFS(vnode_mount(vp));
4055 	if (!NFSHASNFSV4(nmp))
4056 		return;
4057 	NFSLOCKCLSTATE();
4058 	clp = nfscl_findcl(nmp);
4059 	if (clp == NULL) {
4060 		NFSUNLOCKCLSTATE();
4061 		return;
4062 	}
4063 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4064 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4065 		np->n_size = dp->nfsdl_size;
4066 	NFSUNLOCKCLSTATE();
4067 }
4068 
4069 /*
4070  * If there is a valid write delegation for this file, set the modtime
4071  * to the local clock time.
4072  */
4073 APPLESTATIC void
4074 nfscl_delegmodtime(vnode_t vp)
4075 {
4076 	struct nfsclclient *clp;
4077 	struct nfscldeleg *dp;
4078 	struct nfsnode *np = VTONFS(vp);
4079 	struct nfsmount *nmp;
4080 
4081 	nmp = VFSTONFS(vnode_mount(vp));
4082 	if (!NFSHASNFSV4(nmp))
4083 		return;
4084 	NFSLOCKCLSTATE();
4085 	clp = nfscl_findcl(nmp);
4086 	if (clp == NULL) {
4087 		NFSUNLOCKCLSTATE();
4088 		return;
4089 	}
4090 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4091 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4092 		NFSGETNANOTIME(&dp->nfsdl_modtime);
4093 		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4094 	}
4095 	NFSUNLOCKCLSTATE();
4096 }
4097 
4098 /*
4099  * If there is a valid write delegation for this file with a modtime set,
4100  * put that modtime in mtime.
4101  */
4102 APPLESTATIC void
4103 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4104 {
4105 	struct nfsclclient *clp;
4106 	struct nfscldeleg *dp;
4107 	struct nfsnode *np = VTONFS(vp);
4108 	struct nfsmount *nmp;
4109 
4110 	nmp = VFSTONFS(vnode_mount(vp));
4111 	if (!NFSHASNFSV4(nmp))
4112 		return;
4113 	NFSLOCKCLSTATE();
4114 	clp = nfscl_findcl(nmp);
4115 	if (clp == NULL) {
4116 		NFSUNLOCKCLSTATE();
4117 		return;
4118 	}
4119 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4120 	if (dp != NULL &&
4121 	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4122 	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4123 		*mtime = dp->nfsdl_modtime;
4124 	NFSUNLOCKCLSTATE();
4125 }
4126 
4127 static int
4128 nfscl_errmap(struct nfsrv_descript *nd)
4129 {
4130 	short *defaulterrp, *errp;
4131 
4132 	if (!nd->nd_repstat)
4133 		return (0);
4134 	if (nd->nd_procnum == NFSPROC_NOOP)
4135 		return (txdr_unsigned(nd->nd_repstat & 0xffff));
4136 	if (nd->nd_repstat == EBADRPC)
4137 		return (txdr_unsigned(NFSERR_BADXDR));
4138 	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4139 	    nd->nd_repstat == NFSERR_OPILLEGAL)
4140 		return (txdr_unsigned(nd->nd_repstat));
4141 	errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4142 	while (*++errp)
4143 		if (*errp == (short)nd->nd_repstat)
4144 			return (txdr_unsigned(nd->nd_repstat));
4145 	return (txdr_unsigned(*defaulterrp));
4146 }
4147 
4148