xref: /freebsd/sys/fs/nfsclient/nfs_clstate.c (revision 10b59a9b4add0320d52c15ce057dd697261e7dfc)
1 /*-
2  * Copyright (c) 2009 Rick Macklem, University of Guelph
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * These functions implement the client side state handling for NFSv4.
33  * NFSv4 state handling:
34  * - A lockowner is used to determine lock contention, so it
35  *   corresponds directly to a Posix pid. (1 to 1 mapping)
36  * - The correct granularity of an OpenOwner is not nearly so
37  *   obvious. An OpenOwner does the following:
38  *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
39  *   - is used to check for Open/Share contention (not applicable to
40  *     this client, since all Opens are Deny_None)
41  *   As such, I considered both extreme.
42  *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
43  *   all Open, Close and Lock (with a new lockowner) Ops.
44  *   1 OpenOwner for each Open - This one results in an OpenConfirm for
45  *   every Open, for most servers.
46  *   So, I chose to use the same mapping as I did for LockOwnwers.
47  *   The main concern here is that you can end up with multiple Opens
48  *   for the same File Handle, but on different OpenOwners (opens
49  *   inherited from parents, grandparents...) and you do not know
50  *   which of these the vnodeop close applies to. This is handled by
51  *   delaying the Close Op(s) until all of the Opens have been closed.
52  *   (It is not yet obvious if this is the correct granularity.)
53  * - How the code handles serialization:
54  *   - For the ClientId, it uses an exclusive lock while getting its
55  *     SetClientId and during recovery. Otherwise, it uses a shared
56  *     lock via a reference count.
57  *   - For the rest of the data structures, it uses an SMP mutex
58  *     (once the nfs client is SMP safe) and doesn't sleep while
59  *     manipulating the linked lists.
60  *   - The serialization of Open/Close/Lock/LockU falls out in the
61  *     "wash", since OpenOwners and LockOwners are both mapped from
62  *     Posix pid. In other words, there is only one Posix pid using
63  *     any given owner, so that owner is serialized. (If you change
64  *     the granularity of the OpenOwner, then code must be added to
65  *     serialize Ops on the OpenOwner.)
66  * - When to get rid of OpenOwners and LockOwners.
67  *   - When a process exits, it calls nfscl_cleanup(), which goes
68  *     through the client list looking for all Open and Lock Owners.
69  *     When one is found, it is marked "defunct" or in the case of
70  *     an OpenOwner without any Opens, freed.
71  *     The renew thread scans for defunct Owners and gets rid of them,
72  *     if it can. The LockOwners will also be deleted when the
73  *     associated Open is closed.
74  *   - If the LockU or Close Op(s) fail during close in a way
75  *     that could be recovered upon retry, they are relinked to the
76  *     ClientId's defunct open list and retried by the renew thread
77  *     until they succeed or an unmount/recovery occurs.
78  *     (Since we are done with them, they do not need to be recovered.)
79  */
80 
#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstats newnfsstats;
extern struct nfsreqhead nfsd_reqq;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
#endif	/* !APPLEKEXT */

/* Count of delegations currently held; compared against the highwater mark. */
static int nfscl_delegcnt = 0;
/* Forward declarations for the file-local helper functions. */
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(u_int32_t);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
146 
/*
 * Per-callback-operation lists of NFS error codes, each terminated by 0.
 * They are used (presumably via nfscl_errmap()) to map/validate the errors
 * that may be returned for a callback operation.
 */
static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

/* Indexed by callback op number; the first entries are placeholders. */
static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

/* Address family to use for this client, based on the clientid flags. */
#define	NETFAMILY(clp) \
		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
181 
182 /*
183  * Called for an open operation.
184  * If the nfhp argument is NULL, just get an openowner.
185  */
186 APPLESTATIC int
187 nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
188     struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
189     struct nfsclopen **opp, int *newonep, int *retp, int lockit)
190 {
191 	struct nfsclclient *clp;
192 	struct nfsclowner *owp, *nowp;
193 	struct nfsclopen *op = NULL, *nop = NULL;
194 	struct nfscldeleg *dp;
195 	struct nfsclownerhead *ohp;
196 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
197 	int ret;
198 
199 	if (newonep != NULL)
200 		*newonep = 0;
201 	if (opp != NULL)
202 		*opp = NULL;
203 	if (owpp != NULL)
204 		*owpp = NULL;
205 
206 	/*
207 	 * Might need one or both of these, so MALLOC them now, to
208 	 * avoid a tsleep() in MALLOC later.
209 	 */
210 	MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
211 	    M_NFSCLOWNER, M_WAITOK);
212 	if (nfhp != NULL)
213 	    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
214 		fhlen - 1, M_NFSCLOPEN, M_WAITOK);
215 	ret = nfscl_getcl(vp, cred, p, &clp);
216 	if (ret != 0) {
217 		FREE((caddr_t)nowp, M_NFSCLOWNER);
218 		if (nop != NULL)
219 			FREE((caddr_t)nop, M_NFSCLOPEN);
220 		return (ret);
221 	}
222 
223 	/*
224 	 * Get the Open iff it already exists.
225 	 * If none found, add the new one or return error, depending upon
226 	 * "create".
227 	 */
228 	nfscl_filllockowner(p->td_proc, own, F_POSIX);
229 	NFSLOCKCLSTATE();
230 	dp = NULL;
231 	/* First check the delegation list */
232 	if (nfhp != NULL && usedeleg) {
233 		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
234 			if (dp->nfsdl_fhlen == fhlen &&
235 			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
236 				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
237 				    (dp->nfsdl_flags & NFSCLDL_WRITE))
238 					break;
239 				dp = NULL;
240 				break;
241 			}
242 		}
243 	}
244 
245 	if (dp != NULL)
246 		ohp = &dp->nfsdl_owner;
247 	else
248 		ohp = &clp->nfsc_owner;
249 	/* Now, search for an openowner */
250 	LIST_FOREACH(owp, ohp, nfsow_list) {
251 		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
252 			break;
253 	}
254 
255 	/*
256 	 * Create a new open, as required.
257 	 */
258 	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
259 	    newonep);
260 
261 	/*
262 	 * Serialize modifications to the open owner for multiple threads
263 	 * within the same process using a read/write sleep lock.
264 	 */
265 	if (lockit)
266 		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
267 	NFSUNLOCKCLSTATE();
268 	if (nowp != NULL)
269 		FREE((caddr_t)nowp, M_NFSCLOWNER);
270 	if (nop != NULL)
271 		FREE((caddr_t)nop, M_NFSCLOPEN);
272 	if (owpp != NULL)
273 		*owpp = owp;
274 	if (opp != NULL)
275 		*opp = op;
276 	if (retp != NULL) {
277 		if (nfhp != NULL && dp != NULL && nop == NULL)
278 			/* new local open on delegation */
279 			*retp = NFSCLOPEN_SETCRED;
280 		else
281 			*retp = NFSCLOPEN_OK;
282 	}
283 
284 	/*
285 	 * Now, check the mode on the open and return the appropriate
286 	 * value.
287 	 */
288 	if (op != NULL && (amode & ~(op->nfso_mode))) {
289 		op->nfso_mode |= amode;
290 		if (retp != NULL && dp == NULL)
291 			*retp = NFSCLOPEN_DOOPEN;
292 	}
293 	return (0);
294 }
295 
296 /*
297  * Create a new open, as required.
298  */
299 static void
300 nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
301     struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
302     struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
303     int *newonep)
304 {
305 	struct nfsclowner *owp = *owpp, *nowp;
306 	struct nfsclopen *op, *nop;
307 
308 	if (nowpp != NULL)
309 		nowp = *nowpp;
310 	else
311 		nowp = NULL;
312 	if (nopp != NULL)
313 		nop = *nopp;
314 	else
315 		nop = NULL;
316 	if (owp == NULL && nowp != NULL) {
317 		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
318 		LIST_INIT(&nowp->nfsow_open);
319 		nowp->nfsow_clp = clp;
320 		nowp->nfsow_seqid = 0;
321 		nowp->nfsow_defunct = 0;
322 		nfscl_lockinit(&nowp->nfsow_rwlock);
323 		if (dp != NULL) {
324 			newnfsstats.cllocalopenowners++;
325 			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
326 		} else {
327 			newnfsstats.clopenowners++;
328 			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
329 		}
330 		owp = *owpp = nowp;
331 		*nowpp = NULL;
332 		if (newonep != NULL)
333 			*newonep = 1;
334 	}
335 
336 	 /* If an fhp has been specified, create an Open as well. */
337 	if (fhp != NULL) {
338 		/* and look for the correct open, based upon FH */
339 		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
340 			if (op->nfso_fhlen == fhlen &&
341 			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
342 				break;
343 		}
344 		if (op == NULL && nop != NULL) {
345 			nop->nfso_own = owp;
346 			nop->nfso_mode = 0;
347 			nop->nfso_opencnt = 0;
348 			nop->nfso_posixlock = 1;
349 			nop->nfso_fhlen = fhlen;
350 			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
351 			LIST_INIT(&nop->nfso_lock);
352 			nop->nfso_stateid.seqid = 0;
353 			nop->nfso_stateid.other[0] = 0;
354 			nop->nfso_stateid.other[1] = 0;
355 			nop->nfso_stateid.other[2] = 0;
356 			if (dp != NULL) {
357 				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
358 				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
359 				    nfsdl_list);
360 				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
361 				newnfsstats.cllocalopens++;
362 			} else {
363 				newnfsstats.clopens++;
364 			}
365 			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
366 			*opp = nop;
367 			*nopp = NULL;
368 			if (newonep != NULL)
369 				*newonep = 1;
370 		} else {
371 			*opp = op;
372 		}
373 	}
374 }
375 
376 /*
377  * Called to find/add a delegation to a client.
378  */
379 APPLESTATIC int
380 nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
381     int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
382 {
383 	struct nfscldeleg *dp = *dpp, *tdp;
384 
385 	/*
386 	 * First, if we have received a Read delegation for a file on a
387 	 * read/write file system, just return it, because they aren't
388 	 * useful, imho.
389 	 */
390 	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
391 	    (dp->nfsdl_flags & NFSCLDL_READ)) {
392 		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
393 		FREE((caddr_t)dp, M_NFSCLDELEG);
394 		*dpp = NULL;
395 		return (0);
396 	}
397 
398 	/* Look for the correct deleg, based upon FH */
399 	NFSLOCKCLSTATE();
400 	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
401 	if (tdp == NULL) {
402 		if (dp == NULL) {
403 			NFSUNLOCKCLSTATE();
404 			return (NFSERR_BADSTATEID);
405 		}
406 		*dpp = NULL;
407 		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
408 		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
409 		    nfsdl_hash);
410 		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
411 		newnfsstats.cldelegates++;
412 		nfscl_delegcnt++;
413 	} else {
414 		/*
415 		 * Delegation already exists, what do we do if a new one??
416 		 */
417 		if (dp != NULL) {
418 			printf("Deleg already exists!\n");
419 			FREE((caddr_t)dp, M_NFSCLDELEG);
420 			*dpp = NULL;
421 		} else {
422 			*dpp = tdp;
423 		}
424 	}
425 	NFSUNLOCKCLSTATE();
426 	return (0);
427 }
428 
429 /*
430  * Find a delegation for this file handle. Return NULL upon failure.
431  */
432 static struct nfscldeleg *
433 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
434 {
435 	struct nfscldeleg *dp;
436 
437 	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
438 	    if (dp->nfsdl_fhlen == fhlen &&
439 		!NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
440 		break;
441 	}
442 	return (dp);
443 }
444 
445 /*
446  * Get a stateid for an I/O operation. First, look for an open and iff
447  * found, return either a lockowner stateid or the open stateid.
448  * If no Open is found, just return error and the special stateid of all zeros.
449  */
450 APPLESTATIC int
451 nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
452     struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
453     void **lckpp)
454 {
455 	struct nfsclclient *clp;
456 	struct nfsclowner *owp;
457 	struct nfsclopen *op = NULL;
458 	struct nfscllockowner *lp;
459 	struct nfscldeleg *dp;
460 	struct nfsnode *np;
461 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
462 	int error, done;
463 
464 	*lckpp = NULL;
465 	/*
466 	 * Initially, just set the special stateid of all zeros.
467 	 */
468 	stateidp->seqid = 0;
469 	stateidp->other[0] = 0;
470 	stateidp->other[1] = 0;
471 	stateidp->other[2] = 0;
472 	if (vnode_vtype(vp) != VREG)
473 		return (EISDIR);
474 	np = VTONFS(vp);
475 	NFSLOCKCLSTATE();
476 	clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
477 	if (clp == NULL) {
478 		NFSUNLOCKCLSTATE();
479 		return (EACCES);
480 	}
481 
482 	/*
483 	 * Wait for recovery to complete.
484 	 */
485 	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
486 		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
487 		    PZERO, "nfsrecvr", NULL);
488 
489 	/*
490 	 * First, look for a delegation.
491 	 */
492 	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
493 		if (dp->nfsdl_fhlen == fhlen &&
494 		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
495 			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
496 			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
497 				stateidp->seqid = dp->nfsdl_stateid.seqid;
498 				stateidp->other[0] = dp->nfsdl_stateid.other[0];
499 				stateidp->other[1] = dp->nfsdl_stateid.other[1];
500 				stateidp->other[2] = dp->nfsdl_stateid.other[2];
501 				if (!(np->n_flag & NDELEGRECALL)) {
502 					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
503 					    nfsdl_list);
504 					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
505 					    nfsdl_list);
506 					dp->nfsdl_timestamp = NFSD_MONOSEC +
507 					    120;
508 					dp->nfsdl_rwlock.nfslock_usecnt++;
509 					*lckpp = (void *)&dp->nfsdl_rwlock;
510 				}
511 				NFSUNLOCKCLSTATE();
512 				return (0);
513 			}
514 			break;
515 		}
516 	}
517 
518 	if (p != NULL) {
519 		/*
520 		 * If p != NULL, we want to search the parentage tree
521 		 * for a matching OpenOwner and use that.
522 		 */
523 		nfscl_filllockowner(p->td_proc, own, F_POSIX);
524 		lp = NULL;
525 		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
526 		    mode, &lp, &op);
527 		if (error == 0 && lp != NULL) {
528 			stateidp->seqid =
529 			    lp->nfsl_stateid.seqid;
530 			stateidp->other[0] =
531 			    lp->nfsl_stateid.other[0];
532 			stateidp->other[1] =
533 			    lp->nfsl_stateid.other[1];
534 			stateidp->other[2] =
535 			    lp->nfsl_stateid.other[2];
536 			NFSUNLOCKCLSTATE();
537 			return (0);
538 		}
539 	}
540 	if (op == NULL) {
541 		/* If not found, just look for any OpenOwner that will work. */
542 		done = 0;
543 		owp = LIST_FIRST(&clp->nfsc_owner);
544 		while (!done && owp != NULL) {
545 			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
546 				if (op->nfso_fhlen == fhlen &&
547 				    !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
548 				    (mode & op->nfso_mode) == mode) {
549 					done = 1;
550 					break;
551 				}
552 			}
553 			if (!done)
554 				owp = LIST_NEXT(owp, nfsow_list);
555 		}
556 		if (!done) {
557 			NFSUNLOCKCLSTATE();
558 			return (ENOENT);
559 		}
560 		/* for read aheads or write behinds, use the open cred */
561 		newnfs_copycred(&op->nfso_cred, cred);
562 	}
563 
564 	/*
565 	 * No lock stateid, so return the open stateid.
566 	 */
567 	stateidp->seqid = op->nfso_stateid.seqid;
568 	stateidp->other[0] = op->nfso_stateid.other[0];
569 	stateidp->other[1] = op->nfso_stateid.other[1];
570 	stateidp->other[2] = op->nfso_stateid.other[2];
571 	NFSUNLOCKCLSTATE();
572 	return (0);
573 }
574 
575 /*
576  * Search for a matching file, mode and, optionally, lockowner.
577  */
578 static int
579 nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
580     u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
581     struct nfscllockowner **lpp, struct nfsclopen **opp)
582 {
583 	struct nfsclowner *owp;
584 	struct nfsclopen *op, *rop, *rop2;
585 	struct nfscllockowner *lp;
586 	int keep_looping;
587 
588 	if (lpp != NULL)
589 		*lpp = NULL;
590 	/*
591 	 * rop will be set to the open to be returned. There are three
592 	 * variants of this, all for an open of the correct file:
593 	 * 1 - A match of lockown.
594 	 * 2 - A match of the openown, when no lockown match exists.
595 	 * 3 - A match for any open, if no openown or lockown match exists.
596 	 * Looking for #2 over #3 probably isn't necessary, but since
597 	 * RFC3530 is vague w.r.t. the relationship between openowners and
598 	 * lockowners, I think this is the safer way to go.
599 	 */
600 	rop = NULL;
601 	rop2 = NULL;
602 	keep_looping = 1;
603 	/* Search the client list */
604 	owp = LIST_FIRST(ohp);
605 	while (owp != NULL && keep_looping != 0) {
606 		/* and look for the correct open */
607 		op = LIST_FIRST(&owp->nfsow_open);
608 		while (op != NULL && keep_looping != 0) {
609 			if (op->nfso_fhlen == fhlen &&
610 			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
611 			    && (op->nfso_mode & mode) == mode) {
612 				if (lpp != NULL) {
613 					/* Now look for a matching lockowner. */
614 					LIST_FOREACH(lp, &op->nfso_lock,
615 					    nfsl_list) {
616 						if (!NFSBCMP(lp->nfsl_owner,
617 						    lockown,
618 						    NFSV4CL_LOCKNAMELEN)) {
619 							*lpp = lp;
620 							rop = op;
621 							keep_looping = 0;
622 							break;
623 						}
624 					}
625 				}
626 				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
627 				    openown, NFSV4CL_LOCKNAMELEN)) {
628 					rop = op;
629 					if (lpp == NULL)
630 						keep_looping = 0;
631 				}
632 				if (rop2 == NULL)
633 					rop2 = op;
634 			}
635 			op = LIST_NEXT(op, nfso_list);
636 		}
637 		owp = LIST_NEXT(owp, nfsow_list);
638 	}
639 	if (rop == NULL)
640 		rop = rop2;
641 	if (rop == NULL)
642 		return (EBADF);
643 	*opp = rop;
644 	return (0);
645 }
646 
647 /*
648  * Release use of an open owner. Called when open operations are done
649  * with the open owner.
650  */
651 APPLESTATIC void
652 nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
653     __unused int candelete, int unlocked)
654 {
655 
656 	if (owp == NULL)
657 		return;
658 	NFSLOCKCLSTATE();
659 	if (!unlocked)
660 		nfscl_lockunlock(&owp->nfsow_rwlock);
661 	nfscl_clrelease(owp->nfsow_clp);
662 	NFSUNLOCKCLSTATE();
663 }
664 
665 /*
666  * Release use of an open structure under an open owner.
667  */
668 APPLESTATIC void
669 nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
670 {
671 	struct nfsclclient *clp;
672 	struct nfsclowner *owp;
673 
674 	if (op == NULL)
675 		return;
676 	NFSLOCKCLSTATE();
677 	owp = op->nfso_own;
678 	nfscl_lockunlock(&owp->nfsow_rwlock);
679 	clp = owp->nfsow_clp;
680 	if (error && candelete && op->nfso_opencnt == 0)
681 		nfscl_freeopen(op, 0);
682 	nfscl_clrelease(clp);
683 	NFSUNLOCKCLSTATE();
684 }
685 
686 /*
687  * Called to get a clientid structure. It will optionally lock the
688  * client data structures to do the SetClientId/SetClientId_confirm,
689  * but will release that lock and return the clientid with a refernce
690  * count on it.
691  * If the "cred" argument is NULL, a new clientid should not be created.
692  * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
693  * be done.
694  * It always clpp with a reference count on it, unless returning an error.
695  */
696 APPLESTATIC int
697 nfscl_getcl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
698     struct nfsclclient **clpp)
699 {
700 	struct nfsclclient *clp;
701 	struct nfsclclient *newclp = NULL;
702 	struct nfscllockowner *lp, *nlp;
703 	struct mount *mp;
704 	struct nfsmount *nmp;
705 	char uuid[HOSTUUIDLEN];
706 	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
707 	u_int16_t idlen = 0;
708 
709 	mp = vnode_mount(vp);
710 	nmp = VFSTONFS(mp);
711 	if (cred != NULL) {
712 		getcredhostuuid(cred, uuid, sizeof uuid);
713 		idlen = strlen(uuid);
714 		if (idlen > 0)
715 			idlen += sizeof (u_int64_t);
716 		else
717 			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
718 		MALLOC(newclp, struct nfsclclient *,
719 		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
720 		    M_WAITOK);
721 	}
722 	NFSLOCKCLSTATE();
723 	/*
724 	 * If a forced dismount is already in progress, don't
725 	 * allocate a new clientid and get out now. For the case where
726 	 * clp != NULL, this is a harmless optimization.
727 	 */
728 	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
729 		NFSUNLOCKCLSTATE();
730 		if (newclp != NULL)
731 			free(newclp, M_NFSCLCLIENT);
732 		return (EBADF);
733 	}
734 	clp = nmp->nm_clp;
735 	if (clp == NULL) {
736 		if (newclp == NULL) {
737 			NFSUNLOCKCLSTATE();
738 			return (EACCES);
739 		}
740 		clp = newclp;
741 		NFSBZERO((caddr_t)clp, sizeof(struct nfsclclient) + idlen - 1);
742 		clp->nfsc_idlen = idlen;
743 		LIST_INIT(&clp->nfsc_owner);
744 		TAILQ_INIT(&clp->nfsc_deleg);
745 		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
746 			LIST_INIT(&clp->nfsc_deleghash[i]);
747 		LIST_INIT(&clp->nfsc_defunctlockowner);
748 		clp->nfsc_flags = NFSCLFLAGS_INITED;
749 		clp->nfsc_clientidrev = 1;
750 		clp->nfsc_cbident = nfscl_nextcbident();
751 		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
752 		    clp->nfsc_idlen);
753 		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
754 		nmp->nm_clp = clp;
755 		clp->nfsc_nmp = nmp;
756 		NFSUNLOCKCLSTATE();
757 		nfscl_start_renewthread(clp);
758 	} else {
759 		NFSUNLOCKCLSTATE();
760 		if (newclp != NULL)
761 			FREE((caddr_t)newclp, M_NFSCLCLIENT);
762 	}
763 	NFSLOCKCLSTATE();
764 	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
765 	    (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0)
766 		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
767 		    NFSCLSTATEMUTEXPTR, mp);
768 	if (!igotlock)
769 		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
770 	if (igotlock == 0 && (mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
771 		/*
772 		 * Both nfsv4_lock() and nfsv4_getref() know to check
773 		 * for MNTK_UNMOUNTF and return without sleeping to
774 		 * wait for the exclusive lock to be released, since it
775 		 * might be held by nfscl_umount() and we need to get out
776 		 * now for that case and not wait until nfscl_umount()
777 		 * releases it.
778 		 */
779 		NFSUNLOCKCLSTATE();
780 		return (EBADF);
781 	}
782 	NFSUNLOCKCLSTATE();
783 
784 	/*
785 	 * If it needs a clientid, do the setclientid now.
786 	 */
787 	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
788 		if (!igotlock)
789 			panic("nfscl_clget");
790 		if (p == NULL || cred == NULL) {
791 			NFSLOCKCLSTATE();
792 			nfsv4_unlock(&clp->nfsc_lock, 0);
793 			NFSUNLOCKCLSTATE();
794 			return (EACCES);
795 		}
796 		/* get rid of defunct lockowners */
797 		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
798 		    nlp) {
799 			nfscl_freelockowner(lp, 0);
800 		}
801 		/*
802 		 * If RFC3530 Sec. 14.2.33 is taken literally,
803 		 * NFSERR_CLIDINUSE will be returned persistently for the
804 		 * case where a new mount of the same file system is using
805 		 * a different principal. In practice, NFSERR_CLIDINUSE is
806 		 * only returned when there is outstanding unexpired state
807 		 * on the clientid. As such, try for twice the lease
808 		 * interval, if we know what that is. Otherwise, make a
809 		 * wild ass guess.
810 		 * The case of returning NFSERR_STALECLIENTID is far less
811 		 * likely, but might occur if there is a significant delay
812 		 * between doing the SetClientID and SetClientIDConfirm Ops,
813 		 * such that the server throws away the clientid before
814 		 * receiving the SetClientIDConfirm.
815 		 */
816 		if (clp->nfsc_renew > 0)
817 			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
818 		else
819 			clidinusedelay = 120;
820 		trystalecnt = 3;
821 		do {
822 			error = nfsrpc_setclient(VFSTONFS(vnode_mount(vp)),
823 			    clp, cred, p);
824 			if (error == NFSERR_STALECLIENTID ||
825 			    error == NFSERR_STALEDONTRECOVER ||
826 			    error == NFSERR_CLIDINUSE) {
827 				(void) nfs_catnap(PZERO, error, "nfs_setcl");
828 			}
829 		} while (((error == NFSERR_STALECLIENTID ||
830 		     error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
831 		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
832 		if (error) {
833 			NFSLOCKCLSTATE();
834 			nfsv4_unlock(&clp->nfsc_lock, 0);
835 			NFSUNLOCKCLSTATE();
836 			return (error);
837 		}
838 		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
839 	}
840 	if (igotlock) {
841 		NFSLOCKCLSTATE();
842 		nfsv4_unlock(&clp->nfsc_lock, 1);
843 		NFSUNLOCKCLSTATE();
844 	}
845 
846 	*clpp = clp;
847 	return (0);
848 }
849 
850 /*
851  * Get a reference to a clientid and return it, if valid.
852  */
853 APPLESTATIC struct nfsclclient *
854 nfscl_findcl(struct nfsmount *nmp)
855 {
856 	struct nfsclclient *clp;
857 
858 	clp = nmp->nm_clp;
859 	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
860 		return (NULL);
861 	return (clp);
862 }
863 
864 /*
865  * Release the clientid structure. It may be locked or reference counted.
866  */
867 static void
868 nfscl_clrelease(struct nfsclclient *clp)
869 {
870 
871 	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
872 		nfsv4_unlock(&clp->nfsc_lock, 0);
873 	else
874 		nfsv4_relref(&clp->nfsc_lock);
875 }
876 
877 /*
878  * External call for nfscl_clrelease.
879  */
880 APPLESTATIC void
881 nfscl_clientrelease(struct nfsclclient *clp)
882 {
883 
884 	NFSLOCKCLSTATE();
885 	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
886 		nfsv4_unlock(&clp->nfsc_lock, 0);
887 	else
888 		nfsv4_relref(&clp->nfsc_lock);
889 	NFSUNLOCKCLSTATE();
890 }
891 
892 /*
893  * Called when wanting to lock a byte region.
894  */
895 APPLESTATIC int
896 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
897     short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
898     int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
899     struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
900 {
901 	struct nfscllockowner *lp;
902 	struct nfsclopen *op;
903 	struct nfsclclient *clp;
904 	struct nfscllockowner *nlp;
905 	struct nfscllock *nlop, *otherlop;
906 	struct nfscldeleg *dp = NULL, *ldp = NULL;
907 	struct nfscllockownerhead *lhp = NULL;
908 	struct nfsnode *np;
909 	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
910 	u_int8_t *openownp;
911 	int error = 0, ret, donelocally = 0;
912 	u_int32_t mode;
913 
914 	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
915 	mode = 0;
916 	np = VTONFS(vp);
917 	*lpp = NULL;
918 	lp = NULL;
919 	*newonep = 0;
920 	*donelocallyp = 0;
921 
922 	/*
923 	 * Might need these, so MALLOC them now, to
924 	 * avoid a tsleep() in MALLOC later.
925 	 */
926 	MALLOC(nlp, struct nfscllockowner *,
927 	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
928 	MALLOC(otherlop, struct nfscllock *,
929 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
930 	MALLOC(nlop, struct nfscllock *,
931 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
932 	nlop->nfslo_type = type;
933 	nlop->nfslo_first = off;
934 	if (len == NFS64BITSSET) {
935 		nlop->nfslo_end = NFS64BITSSET;
936 	} else {
937 		nlop->nfslo_end = off + len;
938 		if (nlop->nfslo_end <= nlop->nfslo_first)
939 			error = NFSERR_INVAL;
940 	}
941 
942 	if (!error) {
943 		if (recovery)
944 			clp = rclp;
945 		else
946 			error = nfscl_getcl(vp, cred, p, &clp);
947 	}
948 	if (error) {
949 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
950 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
951 		FREE((caddr_t)nlop, M_NFSCLLOCK);
952 		return (error);
953 	}
954 
955 	op = NULL;
956 	if (recovery) {
957 		ownp = rownp;
958 		openownp = ropenownp;
959 	} else {
960 		nfscl_filllockowner(id, own, flags);
961 		ownp = own;
962 		nfscl_filllockowner(p->td_proc, openown, F_POSIX);
963 		openownp = openown;
964 	}
965 	if (!recovery) {
966 		NFSLOCKCLSTATE();
967 		/*
968 		 * First, search for a delegation. If one exists for this file,
969 		 * the lock can be done locally against it, so long as there
970 		 * isn't a local lock conflict.
971 		 */
972 		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
973 		    np->n_fhp->nfh_len);
974 		/* Just sanity check for correct type of delegation */
975 		if (dp != NULL && ((dp->nfsdl_flags &
976 		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
977 		     (type == F_WRLCK &&
978 		      (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
979 			dp = NULL;
980 	}
981 	if (dp != NULL) {
982 		/* Now, find an open and maybe a lockowner. */
983 		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
984 		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
985 		if (ret)
986 			ret = nfscl_getopen(&clp->nfsc_owner,
987 			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
988 			    ownp, mode, NULL, &op);
989 		if (!ret) {
990 			lhp = &dp->nfsdl_lock;
991 			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
992 			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
993 			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
994 			donelocally = 1;
995 		} else {
996 			dp = NULL;
997 		}
998 	}
999 	if (!donelocally) {
1000 		/*
1001 		 * Get the related Open and maybe lockowner.
1002 		 */
1003 		error = nfscl_getopen(&clp->nfsc_owner,
1004 		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1005 		    ownp, mode, &lp, &op);
1006 		if (!error)
1007 			lhp = &op->nfso_lock;
1008 	}
1009 	if (!error && !recovery)
1010 		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
1011 		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
1012 	if (error) {
1013 		if (!recovery) {
1014 			nfscl_clrelease(clp);
1015 			NFSUNLOCKCLSTATE();
1016 		}
1017 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1018 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
1019 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1020 		return (error);
1021 	}
1022 
1023 	/*
1024 	 * Ok, see if a lockowner exists and create one, as required.
1025 	 */
1026 	if (lp == NULL)
1027 		LIST_FOREACH(lp, lhp, nfsl_list) {
1028 			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
1029 				break;
1030 		}
1031 	if (lp == NULL) {
1032 		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
1033 		if (recovery)
1034 			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
1035 			    NFSV4CL_LOCKNAMELEN);
1036 		else
1037 			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
1038 			    NFSV4CL_LOCKNAMELEN);
1039 		nlp->nfsl_seqid = 0;
1040 		nlp->nfsl_defunct = 0;
1041 		nlp->nfsl_inprog = NULL;
1042 		nfscl_lockinit(&nlp->nfsl_rwlock);
1043 		LIST_INIT(&nlp->nfsl_lock);
1044 		if (donelocally) {
1045 			nlp->nfsl_open = NULL;
1046 			newnfsstats.cllocallockowners++;
1047 		} else {
1048 			nlp->nfsl_open = op;
1049 			newnfsstats.cllockowners++;
1050 		}
1051 		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
1052 		lp = nlp;
1053 		nlp = NULL;
1054 		*newonep = 1;
1055 	}
1056 
1057 	/*
1058 	 * Now, update the byte ranges for locks.
1059 	 */
1060 	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1061 	if (!ret)
1062 		donelocally = 1;
1063 	if (donelocally) {
1064 		*donelocallyp = 1;
1065 		if (!recovery)
1066 			nfscl_clrelease(clp);
1067 	} else {
1068 		/*
1069 		 * Serial modifications on the lock owner for multiple threads
1070 		 * for the same process using a read/write lock.
1071 		 */
1072 		if (!recovery)
1073 			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1074 	}
1075 	if (!recovery)
1076 		NFSUNLOCKCLSTATE();
1077 
1078 	if (nlp)
1079 		FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1080 	if (nlop)
1081 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1082 	if (otherlop)
1083 		FREE((caddr_t)otherlop, M_NFSCLLOCK);
1084 
1085 	*lpp = lp;
1086 	return (0);
1087 }
1088 
1089 /*
1090  * Called to unlock a byte range, for LockU.
1091  */
1092 APPLESTATIC int
1093 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1094     __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1095     struct nfsclclient *clp, void *id, int flags,
1096     struct nfscllockowner **lpp, int *dorpcp)
1097 {
1098 	struct nfscllockowner *lp;
1099 	struct nfsclowner *owp;
1100 	struct nfsclopen *op;
1101 	struct nfscllock *nlop, *other_lop = NULL;
1102 	struct nfscldeleg *dp;
1103 	struct nfsnode *np;
1104 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1105 	int ret = 0, fnd;
1106 
1107 	np = VTONFS(vp);
1108 	*lpp = NULL;
1109 	*dorpcp = 0;
1110 
1111 	/*
1112 	 * Might need these, so MALLOC them now, to
1113 	 * avoid a tsleep() in MALLOC later.
1114 	 */
1115 	MALLOC(nlop, struct nfscllock *,
1116 	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1117 	nlop->nfslo_type = F_UNLCK;
1118 	nlop->nfslo_first = off;
1119 	if (len == NFS64BITSSET) {
1120 		nlop->nfslo_end = NFS64BITSSET;
1121 	} else {
1122 		nlop->nfslo_end = off + len;
1123 		if (nlop->nfslo_end <= nlop->nfslo_first) {
1124 			FREE((caddr_t)nlop, M_NFSCLLOCK);
1125 			return (NFSERR_INVAL);
1126 		}
1127 	}
1128 	if (callcnt == 0) {
1129 		MALLOC(other_lop, struct nfscllock *,
1130 		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1131 		*other_lop = *nlop;
1132 	}
1133 	nfscl_filllockowner(id, own, flags);
1134 	dp = NULL;
1135 	NFSLOCKCLSTATE();
1136 	if (callcnt == 0)
1137 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1138 		    np->n_fhp->nfh_len);
1139 
1140 	/*
1141 	 * First, unlock any local regions on a delegation.
1142 	 */
1143 	if (dp != NULL) {
1144 		/* Look for this lockowner. */
1145 		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1146 			if (!NFSBCMP(lp->nfsl_owner, own,
1147 			    NFSV4CL_LOCKNAMELEN))
1148 				break;
1149 		}
1150 		if (lp != NULL)
1151 			/* Use other_lop, so nlop is still available */
1152 			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1153 	}
1154 
1155 	/*
1156 	 * Now, find a matching open/lockowner that hasn't already been done,
1157 	 * as marked by nfsl_inprog.
1158 	 */
1159 	lp = NULL;
1160 	fnd = 0;
1161 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1162 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1163 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1164 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1165 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1166 			if (lp->nfsl_inprog == NULL &&
1167 			    !NFSBCMP(lp->nfsl_owner, own,
1168 			     NFSV4CL_LOCKNAMELEN)) {
1169 				fnd = 1;
1170 				break;
1171 			}
1172 		    }
1173 		    if (fnd)
1174 			break;
1175 		}
1176 	    }
1177 	    if (fnd)
1178 		break;
1179 	}
1180 
1181 	if (lp != NULL) {
1182 		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1183 		if (ret)
1184 			*dorpcp = 1;
1185 		/*
1186 		 * Serial modifications on the lock owner for multiple
1187 		 * threads for the same process using a read/write lock.
1188 		 */
1189 		lp->nfsl_inprog = p;
1190 		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1191 		*lpp = lp;
1192 	}
1193 	NFSUNLOCKCLSTATE();
1194 	if (nlop)
1195 		FREE((caddr_t)nlop, M_NFSCLLOCK);
1196 	if (other_lop)
1197 		FREE((caddr_t)other_lop, M_NFSCLLOCK);
1198 	return (0);
1199 }
1200 
1201 /*
1202  * Release all lockowners marked in progess for this process and file.
1203  */
1204 APPLESTATIC void
1205 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
1206     void *id, int flags)
1207 {
1208 	struct nfsclowner *owp;
1209 	struct nfsclopen *op;
1210 	struct nfscllockowner *lp;
1211 	struct nfsnode *np;
1212 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1213 
1214 	np = VTONFS(vp);
1215 	nfscl_filllockowner(id, own, flags);
1216 	NFSLOCKCLSTATE();
1217 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1218 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1219 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1220 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1221 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1222 			if (lp->nfsl_inprog == p &&
1223 			    !NFSBCMP(lp->nfsl_owner, own,
1224 			    NFSV4CL_LOCKNAMELEN)) {
1225 			    lp->nfsl_inprog = NULL;
1226 			    nfscl_lockunlock(&lp->nfsl_rwlock);
1227 			}
1228 		    }
1229 		}
1230 	    }
1231 	}
1232 	nfscl_clrelease(clp);
1233 	NFSUNLOCKCLSTATE();
1234 }
1235 
1236 /*
1237  * Called to find out if any bytes within the byte range specified are
1238  * write locked by the calling process. Used to determine if flushing
1239  * is required before a LockU.
1240  * If in doubt, return 1, so the flush will occur.
1241  */
1242 APPLESTATIC int
1243 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1244     struct ucred *cred, NFSPROC_T *p, void *id, int flags)
1245 {
1246 	struct nfsclowner *owp;
1247 	struct nfscllockowner *lp;
1248 	struct nfsclopen *op;
1249 	struct nfsclclient *clp;
1250 	struct nfscllock *lop;
1251 	struct nfscldeleg *dp;
1252 	struct nfsnode *np;
1253 	u_int64_t off, end;
1254 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1255 	int error = 0;
1256 
1257 	np = VTONFS(vp);
1258 	switch (fl->l_whence) {
1259 	case SEEK_SET:
1260 	case SEEK_CUR:
1261 		/*
1262 		 * Caller is responsible for adding any necessary offset
1263 		 * when SEEK_CUR is used.
1264 		 */
1265 		off = fl->l_start;
1266 		break;
1267 	case SEEK_END:
1268 		off = np->n_size + fl->l_start;
1269 		break;
1270 	default:
1271 		return (1);
1272 	};
1273 	if (fl->l_len != 0) {
1274 		end = off + fl->l_len;
1275 		if (end < off)
1276 			return (1);
1277 	} else {
1278 		end = NFS64BITSSET;
1279 	}
1280 
1281 	error = nfscl_getcl(vp, cred, p, &clp);
1282 	if (error)
1283 		return (1);
1284 	nfscl_filllockowner(id, own, flags);
1285 	NFSLOCKCLSTATE();
1286 
1287 	/*
1288 	 * First check the delegation locks.
1289 	 */
1290 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1291 	if (dp != NULL) {
1292 		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1293 			if (!NFSBCMP(lp->nfsl_owner, own,
1294 			    NFSV4CL_LOCKNAMELEN))
1295 				break;
1296 		}
1297 		if (lp != NULL) {
1298 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1299 				if (lop->nfslo_first >= end)
1300 					break;
1301 				if (lop->nfslo_end <= off)
1302 					continue;
1303 				if (lop->nfslo_type == F_WRLCK) {
1304 					nfscl_clrelease(clp);
1305 					NFSUNLOCKCLSTATE();
1306 					return (1);
1307 				}
1308 			}
1309 		}
1310 	}
1311 
1312 	/*
1313 	 * Now, check state against the server.
1314 	 */
1315 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1316 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1317 		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1318 		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1319 		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1320 			if (!NFSBCMP(lp->nfsl_owner, own,
1321 			    NFSV4CL_LOCKNAMELEN))
1322 			    break;
1323 		    }
1324 		    if (lp != NULL) {
1325 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1326 			    if (lop->nfslo_first >= end)
1327 				break;
1328 			    if (lop->nfslo_end <= off)
1329 				continue;
1330 			    if (lop->nfslo_type == F_WRLCK) {
1331 				nfscl_clrelease(clp);
1332 				NFSUNLOCKCLSTATE();
1333 				return (1);
1334 			    }
1335 			}
1336 		    }
1337 		}
1338 	    }
1339 	}
1340 	nfscl_clrelease(clp);
1341 	NFSUNLOCKCLSTATE();
1342 	return (0);
1343 }
1344 
1345 /*
1346  * Release a byte range lock owner structure.
1347  */
1348 APPLESTATIC void
1349 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1350 {
1351 	struct nfsclclient *clp;
1352 
1353 	if (lp == NULL)
1354 		return;
1355 	NFSLOCKCLSTATE();
1356 	clp = lp->nfsl_open->nfso_own->nfsow_clp;
1357 	if (error != 0 && candelete &&
1358 	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1359 		nfscl_freelockowner(lp, 0);
1360 	else
1361 		nfscl_lockunlock(&lp->nfsl_rwlock);
1362 	nfscl_clrelease(clp);
1363 	NFSUNLOCKCLSTATE();
1364 }
1365 
1366 /*
1367  * Free up an open structure and any associated byte range lock structures.
1368  */
1369 APPLESTATIC void
1370 nfscl_freeopen(struct nfsclopen *op, int local)
1371 {
1372 
1373 	LIST_REMOVE(op, nfso_list);
1374 	nfscl_freealllocks(&op->nfso_lock, local);
1375 	FREE((caddr_t)op, M_NFSCLOPEN);
1376 	if (local)
1377 		newnfsstats.cllocalopens--;
1378 	else
1379 		newnfsstats.clopens--;
1380 }
1381 
1382 /*
1383  * Free up all lock owners and associated locks.
1384  */
1385 static void
1386 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1387 {
1388 	struct nfscllockowner *lp, *nlp;
1389 
1390 	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1391 		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1392 			panic("nfscllckw");
1393 		nfscl_freelockowner(lp, local);
1394 	}
1395 }
1396 
1397 /*
1398  * Called for an Open when NFSERR_EXPIRED is received from the server.
1399  * If there are no byte range locks nor a Share Deny lost, try to do a
1400  * fresh Open. Otherwise, free the open.
1401  */
1402 static int
1403 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1404     struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1405 {
1406 	struct nfscllockowner *lp;
1407 	struct nfscldeleg *dp;
1408 	int mustdelete = 0, error;
1409 
1410 	/*
1411 	 * Look for any byte range lock(s).
1412 	 */
1413 	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1414 		if (!LIST_EMPTY(&lp->nfsl_lock)) {
1415 			mustdelete = 1;
1416 			break;
1417 		}
1418 	}
1419 
1420 	/*
1421 	 * If no byte range lock(s) nor a Share deny, try to re-open.
1422 	 */
1423 	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1424 		newnfs_copycred(&op->nfso_cred, cred);
1425 		dp = NULL;
1426 		error = nfsrpc_reopen(nmp, op->nfso_fh,
1427 		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1428 		if (error) {
1429 			mustdelete = 1;
1430 			if (dp != NULL) {
1431 				FREE((caddr_t)dp, M_NFSCLDELEG);
1432 				dp = NULL;
1433 			}
1434 		}
1435 		if (dp != NULL)
1436 			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1437 			    op->nfso_fhlen, cred, p, &dp);
1438 	}
1439 
1440 	/*
1441 	 * If a byte range lock or Share deny or couldn't re-open, free it.
1442 	 */
1443 	if (mustdelete)
1444 		nfscl_freeopen(op, 0);
1445 	return (mustdelete);
1446 }
1447 
1448 /*
1449  * Free up an open owner structure.
1450  */
1451 static void
1452 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1453 {
1454 
1455 	LIST_REMOVE(owp, nfsow_list);
1456 	FREE((caddr_t)owp, M_NFSCLOWNER);
1457 	if (local)
1458 		newnfsstats.cllocalopenowners--;
1459 	else
1460 		newnfsstats.clopenowners--;
1461 }
1462 
1463 /*
1464  * Free up a byte range lock owner structure.
1465  */
1466 APPLESTATIC void
1467 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1468 {
1469 	struct nfscllock *lop, *nlop;
1470 
1471 	LIST_REMOVE(lp, nfsl_list);
1472 	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1473 		nfscl_freelock(lop, local);
1474 	}
1475 	FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
1476 	if (local)
1477 		newnfsstats.cllocallockowners--;
1478 	else
1479 		newnfsstats.cllockowners--;
1480 }
1481 
1482 /*
1483  * Free up a byte range lock structure.
1484  */
1485 APPLESTATIC void
1486 nfscl_freelock(struct nfscllock *lop, int local)
1487 {
1488 
1489 	LIST_REMOVE(lop, nfslo_list);
1490 	FREE((caddr_t)lop, M_NFSCLLOCK);
1491 	if (local)
1492 		newnfsstats.cllocallocks--;
1493 	else
1494 		newnfsstats.cllocks--;
1495 }
1496 
1497 /*
1498  * Clean out the state related to a delegation.
1499  */
1500 static void
1501 nfscl_cleandeleg(struct nfscldeleg *dp)
1502 {
1503 	struct nfsclowner *owp, *nowp;
1504 	struct nfsclopen *op;
1505 
1506 	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1507 		op = LIST_FIRST(&owp->nfsow_open);
1508 		if (op != NULL) {
1509 			if (LIST_NEXT(op, nfso_list) != NULL)
1510 				panic("nfscleandel");
1511 			nfscl_freeopen(op, 1);
1512 		}
1513 		nfscl_freeopenowner(owp, 1);
1514 	}
1515 	nfscl_freealllocks(&dp->nfsdl_lock, 1);
1516 }
1517 
1518 /*
1519  * Free a delegation.
1520  */
1521 static void
1522 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
1523 {
1524 
1525 	TAILQ_REMOVE(hdp, dp, nfsdl_list);
1526 	LIST_REMOVE(dp, nfsdl_hash);
1527 	FREE((caddr_t)dp, M_NFSCLDELEG);
1528 	newnfsstats.cldelegates--;
1529 	nfscl_delegcnt--;
1530 }
1531 
1532 /*
1533  * Free up all state related to this client structure.
1534  */
1535 static void
1536 nfscl_cleanclient(struct nfsclclient *clp)
1537 {
1538 	struct nfsclowner *owp, *nowp;
1539 	struct nfsclopen *op, *nop;
1540 	struct nfscllockowner *lp, *nlp;
1541 
1542 
1543 	/* get rid of defunct lockowners */
1544 	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
1545 		nfscl_freelockowner(lp, 0);
1546 	}
1547 
1548 	/* Now, all the OpenOwners, etc. */
1549 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1550 		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1551 			nfscl_freeopen(op, 0);
1552 		}
1553 		nfscl_freeopenowner(owp, 0);
1554 	}
1555 }
1556 
1557 /*
1558  * Called when an NFSERR_EXPIRED is received from the server.
1559  */
1560 static void
1561 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1562     struct ucred *cred, NFSPROC_T *p)
1563 {
1564 	struct nfsclowner *owp, *nowp, *towp;
1565 	struct nfsclopen *op, *nop, *top;
1566 	struct nfscldeleg *dp, *ndp;
1567 	int ret, printed = 0;
1568 
1569 	/*
1570 	 * First, merge locally issued Opens into the list for the server.
1571 	 */
1572 	dp = TAILQ_FIRST(&clp->nfsc_deleg);
1573 	while (dp != NULL) {
1574 	    ndp = TAILQ_NEXT(dp, nfsdl_list);
1575 	    owp = LIST_FIRST(&dp->nfsdl_owner);
1576 	    while (owp != NULL) {
1577 		nowp = LIST_NEXT(owp, nfsow_list);
1578 		op = LIST_FIRST(&owp->nfsow_open);
1579 		if (op != NULL) {
1580 		    if (LIST_NEXT(op, nfso_list) != NULL)
1581 			panic("nfsclexp");
1582 		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1583 			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1584 			    NFSV4CL_LOCKNAMELEN))
1585 			    break;
1586 		    }
1587 		    if (towp != NULL) {
1588 			/* Merge opens in */
1589 			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1590 			    if (top->nfso_fhlen == op->nfso_fhlen &&
1591 				!NFSBCMP(top->nfso_fh, op->nfso_fh,
1592 				 op->nfso_fhlen)) {
1593 				top->nfso_mode |= op->nfso_mode;
1594 				top->nfso_opencnt += op->nfso_opencnt;
1595 				break;
1596 			    }
1597 			}
1598 			if (top == NULL) {
1599 			    /* Just add the open to the owner list */
1600 			    LIST_REMOVE(op, nfso_list);
1601 			    op->nfso_own = towp;
1602 			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1603 			    newnfsstats.cllocalopens--;
1604 			    newnfsstats.clopens++;
1605 			}
1606 		    } else {
1607 			/* Just add the openowner to the client list */
1608 			LIST_REMOVE(owp, nfsow_list);
1609 			owp->nfsow_clp = clp;
1610 			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1611 			newnfsstats.cllocalopenowners--;
1612 			newnfsstats.clopenowners++;
1613 			newnfsstats.cllocalopens--;
1614 			newnfsstats.clopens++;
1615 		    }
1616 		}
1617 		owp = nowp;
1618 	    }
1619 	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1620 		printed = 1;
1621 		printf("nfsv4 expired locks lost\n");
1622 	    }
1623 	    nfscl_cleandeleg(dp);
1624 	    nfscl_freedeleg(&clp->nfsc_deleg, dp);
1625 	    dp = ndp;
1626 	}
1627 	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1628 	    panic("nfsclexp");
1629 
1630 	/*
1631 	 * Now, try and reopen against the server.
1632 	 */
1633 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1634 		owp->nfsow_seqid = 0;
1635 		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1636 			ret = nfscl_expireopen(clp, op, nmp, cred, p);
1637 			if (ret && !printed) {
1638 				printed = 1;
1639 				printf("nfsv4 expired locks lost\n");
1640 			}
1641 		}
1642 		if (LIST_EMPTY(&owp->nfsow_open))
1643 			nfscl_freeopenowner(owp, 0);
1644 	}
1645 }
1646 
1647 #ifndef	__FreeBSD__
1648 /*
1649  * Called from exit() upon process termination.
1650  */
1651 APPLESTATIC void
1652 nfscl_cleanup(NFSPROC_T *p)
1653 {
1654 	struct nfsclclient *clp;
1655 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
1656 
1657 	if (!nfscl_inited)
1658 		return;
1659 	nfscl_filllockowner(p->td_proc, own, F_POSIX);
1660 
1661 	NFSLOCKCLSTATE();
1662 	/*
1663 	 * Loop through all the clientids, looking for the OpenOwners.
1664 	 */
1665 	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
1666 		nfscl_cleanup_common(clp, own);
1667 	NFSUNLOCKCLSTATE();
1668 }
1669 #endif	/* !__FreeBSD__ */
1670 
1671 /*
1672  * Common code used by nfscl_cleanup() and nfscl_cleanupkext().
1673  * Must be called with CLSTATE lock held.
1674  */
1675 static void
1676 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1677 {
1678 	struct nfsclowner *owp, *nowp;
1679 	struct nfsclopen *op;
1680 	struct nfscllockowner *lp, *nlp;
1681 	struct nfscldeleg *dp;
1682 
1683 	/* First, get rid of local locks on delegations. */
1684 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1685 		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1686 		    if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1687 			if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1688 			    panic("nfscllckw");
1689 			nfscl_freelockowner(lp, 1);
1690 		    }
1691 		}
1692 	}
1693 	owp = LIST_FIRST(&clp->nfsc_owner);
1694 	while (owp != NULL) {
1695 		nowp = LIST_NEXT(owp, nfsow_list);
1696 		if (!NFSBCMP(owp->nfsow_owner, own,
1697 		    NFSV4CL_LOCKNAMELEN)) {
1698 			/*
1699 			 * If there are children that haven't closed the
1700 			 * file descriptors yet, the opens will still be
1701 			 * here. For that case, let the renew thread clear
1702 			 * out the OpenOwner later.
1703 			 */
1704 			if (LIST_EMPTY(&owp->nfsow_open))
1705 				nfscl_freeopenowner(owp, 0);
1706 			else
1707 				owp->nfsow_defunct = 1;
1708 		} else {
1709 			/* look for lockowners on other opens */
1710 			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1711 				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1712 					if (!NFSBCMP(lp->nfsl_owner, own,
1713 					    NFSV4CL_LOCKNAMELEN))
1714 						lp->nfsl_defunct = 1;
1715 				}
1716 			}
1717 		}
1718 		owp = nowp;
1719 	}
1720 
1721 	/* and check the defunct list */
1722 	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
1723 		if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN))
1724 		    lp->nfsl_defunct = 1;
1725 	}
1726 }
1727 
1728 #if defined(APPLEKEXT) || defined(__FreeBSD__)
1729 /*
1730  * Simulate the call nfscl_cleanup() by looking for open owners associated
1731  * with processes that no longer exist, since a call to nfscl_cleanup()
1732  * can't be patched into exit().
1733  */
1734 static void
1735 nfscl_cleanupkext(struct nfsclclient *clp)
1736 {
1737 	struct nfsclowner *owp, *nowp;
1738 	struct nfscllockowner *lp;
1739 
1740 	NFSPROCLISTLOCK();
1741 	NFSLOCKCLSTATE();
1742 	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1743 		if (nfscl_procdoesntexist(owp->nfsow_owner))
1744 			nfscl_cleanup_common(clp, owp->nfsow_owner);
1745 	}
1746 
1747 	/* and check the defunct list */
1748 	LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) {
1749 		if (nfscl_procdoesntexist(lp->nfsl_owner))
1750 			lp->nfsl_defunct = 1;
1751 	}
1752 	NFSUNLOCKCLSTATE();
1753 	NFSPROCLISTUNLOCK();
1754 }
1755 #endif	/* APPLEKEXT || __FreeBSD__ */
1756 
/*
 * Dummy store target, written while holding the CLSTATE mutex in
 * nfscl_umount(), so that a set MNTK_UNMOUNTF flag becomes visible
 * to other threads that acquire the mutex.
 */
static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
1758 /*
1759  * Called from nfs umount to free up the clientid.
1760  */
1761 APPLESTATIC void
1762 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1763 {
1764 	struct nfsclclient *clp;
1765 	struct ucred *cred;
1766 	int igotlock;
1767 
1768 	/*
1769 	 * For the case that matters, this is the thread that set
1770 	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
1771 	 * done to ensure that any thread executing nfscl_getcl() after
1772 	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
1773 	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
1774 	 * explanation, courtesy of Alan Cox.
1775 	 * What follows is a snippet from Alan Cox's email at:
1776 	 * http://docs.FreeBSD.org/cgi/
1777 	 *     mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
1778 	 *
1779 	 * 1. Set MNTK_UNMOUNTF
1780 	 * 2. Acquire a standard FreeBSD mutex "m".
1781 	 * 3. Update some data structures.
1782 	 * 4. Release mutex "m".
1783 	 *
1784 	 * Then, other threads that acquire "m" after step 4 has occurred will
1785 	 * see MNTK_UNMOUNTF as set.  But, other threads that beat thread X to
1786 	 * step 2 may or may not see MNTK_UNMOUNTF as set.
1787 	 */
1788 	NFSLOCKCLSTATE();
1789 	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
1790 		fake_global++;
1791 		NFSUNLOCKCLSTATE();
1792 		NFSLOCKCLSTATE();
1793 	}
1794 
1795 	clp = nmp->nm_clp;
1796 	if (clp != NULL) {
1797 		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1798 			panic("nfscl umount");
1799 
1800 		/*
1801 		 * First, handshake with the nfscl renew thread, to terminate
1802 		 * it.
1803 		 */
1804 		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1805 		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1806 			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
1807 			    "nfsclumnt", hz);
1808 
1809 		/*
1810 		 * Now, get the exclusive lock on the client state, so
1811 		 * that no uses of the state are still in progress.
1812 		 */
1813 		do {
1814 			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1815 			    NFSCLSTATEMUTEXPTR, NULL);
1816 		} while (!igotlock);
1817 		NFSUNLOCKCLSTATE();
1818 
1819 		/*
1820 		 * Free up all the state. It will expire on the server, but
1821 		 * maybe we should do a SetClientId/SetClientIdConfirm so
1822 		 * the server throws it away?
1823 		 */
1824 		LIST_REMOVE(clp, nfsc_list);
1825 		nfscl_delegreturnall(clp, p);
1826 		cred = newnfs_getcred();
1827 		(void) nfsrpc_setclient(nmp, clp, cred, p);
1828 		nfscl_cleanclient(clp);
1829 		nmp->nm_clp = NULL;
1830 		NFSFREECRED(cred);
1831 		FREE((caddr_t)clp, M_NFSCLCLIENT);
1832 	} else
1833 		NFSUNLOCKCLSTATE();
1834 }
1835 
1836 /*
1837  * This function is called when a server replies with NFSERR_STALECLIENTID
1838  * or NFSERR_STALESTATEID. It traverses the clientid lists, doing Opens
1839  * and Locks with reclaim. If these fail, it deletes the corresponding state.
1840  */
1841 static void
1842 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1843 {
1844 	struct nfsclowner *owp, *nowp;
1845 	struct nfsclopen *op, *nop;
1846 	struct nfscllockowner *lp, *nlp;
1847 	struct nfscllock *lop, *nlop;
1848 	struct nfscldeleg *dp, *ndp, *tdp;
1849 	struct nfsmount *nmp;
1850 	struct ucred *tcred;
1851 	struct nfsclopenhead extra_open;
1852 	struct nfscldeleghead extra_deleg;
1853 	struct nfsreq *rep;
1854 	u_int64_t len;
1855 	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1856 	int igotlock = 0, error, trycnt, firstlock, s;
1857 
1858 	/*
1859 	 * First, lock the client structure, so everyone else will
1860 	 * block when trying to use state.
1861 	 */
1862 	NFSLOCKCLSTATE();
1863 	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
1864 	do {
1865 		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1866 		    NFSCLSTATEMUTEXPTR, NULL);
1867 	} while (!igotlock);
1868 	NFSUNLOCKCLSTATE();
1869 
1870 	nmp = clp->nfsc_nmp;
1871 	if (nmp == NULL)
1872 		panic("nfscl recover");
1873 	trycnt = 5;
1874 	do {
1875 		error = nfsrpc_setclient(nmp, clp, cred, p);
1876 	} while ((error == NFSERR_STALECLIENTID ||
1877 	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
1878 	if (error) {
1879 		nfscl_cleanclient(clp);
1880 		NFSLOCKCLSTATE();
1881 		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
1882 		    NFSCLFLAGS_RECOVER | NFSCLFLAGS_RECVRINPROG);
1883 		wakeup(&clp->nfsc_flags);
1884 		nfsv4_unlock(&clp->nfsc_lock, 0);
1885 		NFSUNLOCKCLSTATE();
1886 		return;
1887 	}
1888 	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
1889 	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
1890 
1891 	/*
1892 	 * Mark requests already queued on the server, so that they don't
1893 	 * initiate another recovery cycle. Any requests already in the
1894 	 * queue that handle state information will have the old stale
1895 	 * clientid/stateid and will get a NFSERR_STALESTATEID or
1896 	 * NFSERR_STALECLIENTID reply from the server. This will be
1897 	 * translated to NFSERR_STALEDONTRECOVER when R_DONTRECOVER is set.
1898 	 */
1899 	s = splsoftclock();
1900 	NFSLOCKREQ();
1901 	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
1902 		if (rep->r_nmp == nmp)
1903 			rep->r_flags |= R_DONTRECOVER;
1904 	}
1905 	NFSUNLOCKREQ();
1906 	splx(s);
1907 
1908 	/* get rid of defunct lockowners */
1909 	LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) {
1910 		nfscl_freelockowner(lp, 0);
1911 	}
1912 
1913 	/*
1914 	 * Now, mark all delegations "need reclaim".
1915 	 */
1916 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
1917 		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
1918 
1919 	TAILQ_INIT(&extra_deleg);
1920 	LIST_INIT(&extra_open);
1921 	/*
1922 	 * Now traverse the state lists, doing Open and Lock Reclaims.
1923 	 */
1924 	tcred = newnfs_getcred();
1925 	owp = LIST_FIRST(&clp->nfsc_owner);
1926 	while (owp != NULL) {
1927 	    nowp = LIST_NEXT(owp, nfsow_list);
1928 	    owp->nfsow_seqid = 0;
1929 	    op = LIST_FIRST(&owp->nfsow_open);
1930 	    while (op != NULL) {
1931 		nop = LIST_NEXT(op, nfso_list);
1932 		if (error != NFSERR_NOGRACE) {
1933 		    /* Search for a delegation to reclaim with the open */
1934 		    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1935 			if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1936 			    continue;
1937 			if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1938 			    mode = NFSV4OPEN_ACCESSWRITE;
1939 			    delegtype = NFSV4OPEN_DELEGATEWRITE;
1940 			} else {
1941 			    mode = NFSV4OPEN_ACCESSREAD;
1942 			    delegtype = NFSV4OPEN_DELEGATEREAD;
1943 			}
1944 			if ((op->nfso_mode & mode) == mode &&
1945 			    op->nfso_fhlen == dp->nfsdl_fhlen &&
1946 			    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
1947 			    break;
1948 		    }
1949 		    ndp = dp;
1950 		    if (dp == NULL)
1951 			delegtype = NFSV4OPEN_DELEGATENONE;
1952 		    newnfs_copycred(&op->nfso_cred, tcred);
1953 		    error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
1954 			op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
1955 			op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
1956 			tcred, p);
1957 		    if (!error) {
1958 			/* Handle any replied delegation */
1959 			if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
1960 			    || NFSMNT_RDONLY(nmp->nm_mountp))) {
1961 			    if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
1962 				mode = NFSV4OPEN_ACCESSWRITE;
1963 			    else
1964 				mode = NFSV4OPEN_ACCESSREAD;
1965 			    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1966 				if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1967 				    continue;
1968 				if ((op->nfso_mode & mode) == mode &&
1969 				    op->nfso_fhlen == dp->nfsdl_fhlen &&
1970 				    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
1971 				    op->nfso_fhlen)) {
1972 				    dp->nfsdl_stateid = ndp->nfsdl_stateid;
1973 				    dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
1974 				    dp->nfsdl_ace = ndp->nfsdl_ace;
1975 				    dp->nfsdl_change = ndp->nfsdl_change;
1976 				    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
1977 				    if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
1978 					dp->nfsdl_flags |= NFSCLDL_RECALL;
1979 				    FREE((caddr_t)ndp, M_NFSCLDELEG);
1980 				    ndp = NULL;
1981 				    break;
1982 				}
1983 			    }
1984 			}
1985 			if (ndp != NULL)
1986 			    TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
1987 
1988 			/* and reclaim all byte range locks */
1989 			lp = LIST_FIRST(&op->nfso_lock);
1990 			while (lp != NULL) {
1991 			    nlp = LIST_NEXT(lp, nfsl_list);
1992 			    lp->nfsl_seqid = 0;
1993 			    firstlock = 1;
1994 			    lop = LIST_FIRST(&lp->nfsl_lock);
1995 			    while (lop != NULL) {
1996 				nlop = LIST_NEXT(lop, nfslo_list);
1997 				if (lop->nfslo_end == NFS64BITSSET)
1998 				    len = NFS64BITSSET;
1999 				else
2000 				    len = lop->nfslo_end - lop->nfslo_first;
2001 				if (error != NFSERR_NOGRACE)
2002 				    error = nfscl_trylock(nmp, NULL,
2003 					op->nfso_fh, op->nfso_fhlen, lp,
2004 					firstlock, 1, lop->nfslo_first, len,
2005 					lop->nfslo_type, tcred, p);
2006 				if (error != 0)
2007 				    nfscl_freelock(lop, 0);
2008 				else
2009 				    firstlock = 0;
2010 				lop = nlop;
2011 			    }
2012 			    /* If no locks, but a lockowner, just delete it. */
2013 			    if (LIST_EMPTY(&lp->nfsl_lock))
2014 				nfscl_freelockowner(lp, 0);
2015 			    lp = nlp;
2016 			}
2017 		    } else {
2018 			nfscl_freeopen(op, 0);
2019 		    }
2020 		}
2021 		op = nop;
2022 	    }
2023 	    owp = nowp;
2024 	}
2025 
2026 	/*
	 * Now, try to get any delegations not yet reclaimed by cobbling
	 * together an appropriate open.
2029 	 */
2030 	nowp = NULL;
2031 	dp = TAILQ_FIRST(&clp->nfsc_deleg);
2032 	while (dp != NULL) {
2033 	    ndp = TAILQ_NEXT(dp, nfsdl_list);
2034 	    if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2035 		if (nowp == NULL) {
2036 		    MALLOC(nowp, struct nfsclowner *,
2037 			sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2038 		    /*
		     * Name must be as long as the largest possible
		     * NFSV4CL_LOCKNAMELEN. 12 for now.
2041 		     */
2042 		    NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2043 			NFSV4CL_LOCKNAMELEN);
2044 		    LIST_INIT(&nowp->nfsow_open);
2045 		    nowp->nfsow_clp = clp;
2046 		    nowp->nfsow_seqid = 0;
2047 		    nowp->nfsow_defunct = 0;
2048 		    nfscl_lockinit(&nowp->nfsow_rwlock);
2049 		}
2050 		nop = NULL;
2051 		if (error != NFSERR_NOGRACE) {
2052 		    MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
2053 			dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2054 		    nop->nfso_own = nowp;
2055 		    if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2056 			nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2057 			delegtype = NFSV4OPEN_DELEGATEWRITE;
2058 		    } else {
2059 			nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2060 			delegtype = NFSV4OPEN_DELEGATEREAD;
2061 		    }
2062 		    nop->nfso_opencnt = 0;
2063 		    nop->nfso_posixlock = 1;
2064 		    nop->nfso_fhlen = dp->nfsdl_fhlen;
2065 		    NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2066 		    LIST_INIT(&nop->nfso_lock);
2067 		    nop->nfso_stateid.seqid = 0;
2068 		    nop->nfso_stateid.other[0] = 0;
2069 		    nop->nfso_stateid.other[1] = 0;
2070 		    nop->nfso_stateid.other[2] = 0;
2071 		    newnfs_copycred(&dp->nfsdl_cred, tcred);
2072 		    newnfs_copyincred(tcred, &nop->nfso_cred);
2073 		    tdp = NULL;
2074 		    error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2075 			nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2076 			nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2077 			delegtype, tcred, p);
2078 		    if (tdp != NULL) {
2079 			if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2080 			    mode = NFSV4OPEN_ACCESSWRITE;
2081 			else
2082 			    mode = NFSV4OPEN_ACCESSREAD;
2083 			if ((nop->nfso_mode & mode) == mode &&
2084 			    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2085 			    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2086 			    nop->nfso_fhlen)) {
2087 			    dp->nfsdl_stateid = tdp->nfsdl_stateid;
2088 			    dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2089 			    dp->nfsdl_ace = tdp->nfsdl_ace;
2090 			    dp->nfsdl_change = tdp->nfsdl_change;
2091 			    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2092 			    if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2093 				dp->nfsdl_flags |= NFSCLDL_RECALL;
2094 			    FREE((caddr_t)tdp, M_NFSCLDELEG);
2095 			} else {
2096 			    TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2097 			}
2098 		    }
2099 		}
2100 		if (error) {
2101 		    if (nop != NULL)
2102 			FREE((caddr_t)nop, M_NFSCLOPEN);
2103 		    /*
2104 		     * Couldn't reclaim it, so throw the state
2105 		     * away. Ouch!!
2106 		     */
2107 		    nfscl_cleandeleg(dp);
2108 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
2109 		} else {
2110 		    LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2111 		}
2112 	    }
2113 	    dp = ndp;
2114 	}
2115 
2116 	/*
2117 	 * Now, get rid of extra Opens and Delegations.
2118 	 */
2119 	LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2120 		do {
2121 			newnfs_copycred(&op->nfso_cred, tcred);
2122 			error = nfscl_tryclose(op, tcred, nmp, p);
2123 			if (error == NFSERR_GRACE)
2124 				(void) nfs_catnap(PZERO, error, "nfsexcls");
2125 		} while (error == NFSERR_GRACE);
2126 		LIST_REMOVE(op, nfso_list);
2127 		FREE((caddr_t)op, M_NFSCLOPEN);
2128 	}
2129 	if (nowp != NULL)
2130 		FREE((caddr_t)nowp, M_NFSCLOWNER);
2131 
2132 	TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2133 		do {
2134 			newnfs_copycred(&dp->nfsdl_cred, tcred);
2135 			error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2136 			if (error == NFSERR_GRACE)
2137 				(void) nfs_catnap(PZERO, error, "nfsexdlg");
2138 		} while (error == NFSERR_GRACE);
2139 		TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2140 		FREE((caddr_t)dp, M_NFSCLDELEG);
2141 	}
2142 
2143 	NFSLOCKCLSTATE();
2144 	clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2145 	wakeup(&clp->nfsc_flags);
2146 	nfsv4_unlock(&clp->nfsc_lock, 0);
2147 	NFSUNLOCKCLSTATE();
2148 	NFSFREECRED(tcred);
2149 }
2150 
2151 /*
2152  * This function is called when a server replies with NFSERR_EXPIRED.
2153  * It deletes all state for the client and does a fresh SetClientId/confirm.
2154  * XXX Someday it should post a signal to the process(es) that hold the
2155  * state, so they know that lock state has been lost.
2156  */
APPLESTATIC int
nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
{
	struct nfscllockowner *lp, *nlp;
	struct nfsmount *nmp;
	struct ucred *cred;
	int igotlock = 0, error, trycnt;

	/*
	 * If the clientid has gone away or a new SetClientid has already
	 * been done, just return ok.
	 */
	if (clp == NULL || clidrev != clp->nfsc_clientidrev)
		return (0);

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
	 * that only one thread does the work.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
	/*
	 * If EXPIREIT has been cleared while we slept, another thread has
	 * already completed the recovery, so there is nothing left to do.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (0);
	}
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl expired");
	cred = newnfs_getcred();
	trycnt = 5;
	/* Retry the fresh SetClientID a few times on stale replies. */
	do {
		error = nfsrpc_setclient(nmp, clp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		/*
		 * Clear out any state.
		 */
		nfscl_cleanclient(clp);
		NFSLOCKCLSTATE();
		clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
		    NFSCLFLAGS_RECOVER);
	} else {
		/* get rid of defunct lockowners */
		LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list,
		    nlp) {
			nfscl_freelockowner(lp, 0);
		}

		/*
		 * Expire the state for the client.
		 */
		nfscl_expireclient(clp, nmp, cred, p);
		NFSLOCKCLSTATE();
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	}
	/* Recovery done; wake up anyone waiting on the flag word. */
	clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
	wakeup(&clp->nfsc_flags);
	nfsv4_unlock(&clp->nfsc_lock, 0);
	NFSUNLOCKCLSTATE();
	NFSFREECRED(cred);
	return (error);
}
2231 
2232 /*
2233  * This function inserts a lock in the list after insert_lop.
2234  */
2235 static void
2236 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2237     struct nfscllock *insert_lop, int local)
2238 {
2239 
2240 	if ((struct nfscllockowner *)insert_lop == lp)
2241 		LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2242 	else
2243 		LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2244 	if (local)
2245 		newnfsstats.cllocallocks++;
2246 	else
2247 		newnfsstats.cllocks++;
2248 }
2249 
2250 /*
2251  * This function updates the locking for a lock owner and given file. It
2252  * maintains a list of lock ranges ordered on increasing file offset that
2253  * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2254  * It always adds new_lop to the list and sometimes uses the one pointed
2255  * at by other_lopp.
2256  * Returns 1 if the locks were modified, 0 otherwise.
2257  */
static int
nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
    struct nfscllock **other_lopp, int local)
{
	struct nfscllock *new_lop = *new_lopp;
	struct nfscllock *lop, *tlop, *ilop;
	struct nfscllock *other_lop;
	int unlock = 0, modified = 0;
	u_int64_t tmp;

	/*
	 * Work down the list until the lock is merged.
	 */
	if (new_lop->nfslo_type == F_UNLCK)
		unlock = 1;
	/*
	 * ilop tracks the insertion point. The lock owner itself (cast)
	 * means "insert at the head" -- see nfscl_insertlock().
	 */
	ilop = (struct nfscllock *)lp;
	lop = LIST_FIRST(&lp->nfsl_lock);
	while (lop != NULL) {
	    /*
	     * Only check locks for this file that aren't before the start of
	     * new lock's range.
	     */
	    if (lop->nfslo_end >= new_lop->nfslo_first) {
		if (new_lop->nfslo_end < lop->nfslo_first) {
		    /*
		     * If the new lock ends before the start of the
		     * current lock's range, no merge, just insert
		     * the new lock.
		     */
		    break;
		}
		if (new_lop->nfslo_type == lop->nfslo_type ||
		    (new_lop->nfslo_first <= lop->nfslo_first &&
		     new_lop->nfslo_end >= lop->nfslo_end)) {
		    /*
		     * This lock can be absorbed by the new lock/unlock.
		     * This happens when it covers the entire range
		     * of the old lock or is contiguous
		     * with the old lock and is of the same type or an
		     * unlock.
		     */
		    if (new_lop->nfslo_type != lop->nfslo_type ||
			new_lop->nfslo_first != lop->nfslo_first ||
			new_lop->nfslo_end != lop->nfslo_end)
			modified = 1;
		    /* Grow the new lock to cover the absorbed range. */
		    if (lop->nfslo_first < new_lop->nfslo_first)
			new_lop->nfslo_first = lop->nfslo_first;
		    if (lop->nfslo_end > new_lop->nfslo_end)
			new_lop->nfslo_end = lop->nfslo_end;
		    tlop = lop;
		    lop = LIST_NEXT(lop, nfslo_list);
		    nfscl_freelock(tlop, local);
		    continue;
		}

		/*
		 * All these cases are for contiguous locks that are not the
		 * same type, so they can't be merged.
		 */
		if (new_lop->nfslo_first <= lop->nfslo_first) {
		    /*
		     * This case is where the new lock overlaps with the
		     * first part of the old lock. Move the start of the
		     * old lock to just past the end of the new lock. The
		     * new lock will be inserted in front of the old, since
		     * ilop hasn't been updated. (We are done now.)
		     */
		    if (lop->nfslo_first != new_lop->nfslo_end) {
			lop->nfslo_first = new_lop->nfslo_end;
			modified = 1;
		    }
		    break;
		}
		if (new_lop->nfslo_end >= lop->nfslo_end) {
		    /*
		     * This case is where the new lock overlaps with the
		     * end of the old lock's range. Move the old lock's
		     * end to just before the new lock's first and insert
		     * the new lock after the old lock.
		     * Might not be done yet, since the new lock could
		     * overlap further locks with higher ranges.
		     */
		    if (lop->nfslo_end != new_lop->nfslo_first) {
			lop->nfslo_end = new_lop->nfslo_first;
			modified = 1;
		    }
		    ilop = lop;
		    lop = LIST_NEXT(lop, nfslo_list);
		    continue;
		}
		/*
		 * The final case is where the new lock's range is in the
		 * middle of the current lock's and splits the current lock
		 * up. Use *other_lopp to handle the second part of the
		 * split old lock range. (We are done now.)
		 * For unlock, we use new_lop as other_lop and tmp, since
		 * other_lop and new_lop are the same for this case.
		 * We noted the unlock case above, so we don't need
		 * new_lop->nfslo_type any longer.
		 */
		tmp = new_lop->nfslo_first;
		if (unlock) {
		    other_lop = new_lop;
		    *new_lopp = NULL;
		} else {
		    other_lop = *other_lopp;
		    *other_lopp = NULL;
		}
		/* other_lop becomes the tail half of the split old lock. */
		other_lop->nfslo_first = new_lop->nfslo_end;
		other_lop->nfslo_end = lop->nfslo_end;
		other_lop->nfslo_type = lop->nfslo_type;
		lop->nfslo_end = tmp;
		nfscl_insertlock(lp, other_lop, lop, local);
		ilop = lop;
		modified = 1;
		break;
	    }
	    ilop = lop;
	    lop = LIST_NEXT(lop, nfslo_list);
	    if (lop == NULL)
		break;
	}

	/*
	 * Insert the new lock in the list at the appropriate place.
	 */
	if (!unlock) {
		nfscl_insertlock(lp, new_lop, ilop, local);
		*new_lopp = NULL;
		modified = 1;
	}
	return (modified);
}
2391 
2392 /*
2393  * This function must be run as a kernel thread.
2394  * It does Renew Ops and recovery, when required.
2395  */
APPLESTATIC void
nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp, *olp;
	struct nfscldeleghead dh;
	struct nfscllockownerhead lh;
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;
	u_int32_t clidrev;
	int error, cbpathdown, islept, igotlock, ret, clearok;
	uint32_t recover_done_time = 0;

	cred = newnfs_getcred();
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
	NFSUNLOCKCLSTATE();
	for(;;) {
		newnfs_setroot(cred);
		cbpathdown = 0;
		if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
			/*
			 * Only allow one recover within 1/2 of the lease
			 * duration (nfsc_renew).
			 */
			if (recover_done_time < NFSD_MONOSEC) {
				recover_done_time = NFSD_MONOSEC +
				    clp->nfsc_renew;
				nfscl_recover(clp, cred, p);
			} else {
				NFSLOCKCLSTATE();
				clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
				NFSUNLOCKCLSTATE();
			}
		}
		/*
		 * Do a Renew RPC when the lease is due and a clientid is
		 * held; initiate recovery on a stale/expired reply.
		 */
		if (clp->nfsc_expire <= NFSD_MONOSEC &&
		    (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
			clidrev = clp->nfsc_clientidrev;
			error = nfsrpc_renew(clp, cred, p);
			if (error == NFSERR_CBPATHDOWN)
			    cbpathdown = 1;
			else if (error == NFSERR_STALECLIENTID) {
			    NFSLOCKCLSTATE();
			    clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
			    NFSUNLOCKCLSTATE();
			} else if (error == NFSERR_EXPIRED)
			    (void) nfscl_hasexpired(clp, clidrev, p);
		}

		/* lh/dh collect items to release after dropping the mutex. */
		LIST_INIT(&lh);
		TAILQ_INIT(&dh);
		NFSLOCKCLSTATE();
		if (cbpathdown)
			/* It's a Total Recall! */
			nfscl_totalrecall(clp);

		/*
		 * Now, handle defunct owners.
		 */
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (owp != NULL) {
		    nowp = LIST_NEXT(owp, nfsow_list);
		    if (LIST_EMPTY(&owp->nfsow_open)) {
			if (owp->nfsow_defunct)
			    nfscl_freeopenowner(owp, 0);
		    } else {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    lp = LIST_FIRST(&op->nfso_lock);
			    while (lp != NULL) {
				nlp = LIST_NEXT(lp, nfsl_list);
				if (lp->nfsl_defunct &&
				    LIST_EMPTY(&lp->nfsl_lock)) {
				    /*
				     * Move one lock owner per distinct owner
				     * name onto lh; free duplicates now.
				     */
				    LIST_FOREACH(olp, &lh, nfsl_list) {
					if (!NFSBCMP(olp->nfsl_owner,
					    lp->nfsl_owner,NFSV4CL_LOCKNAMELEN))
					    break;
				    }
				    if (olp == NULL) {
					LIST_REMOVE(lp, nfsl_list);
					LIST_INSERT_HEAD(&lh, lp, nfsl_list);
				    } else {
					nfscl_freelockowner(lp, 0);
				    }
				}
				lp = nlp;
			    }
			}
		    }
		    owp = nowp;
		}

		/* also search the defunct list */
		lp = LIST_FIRST(&clp->nfsc_defunctlockowner);
		while (lp != NULL) {
		    nlp = LIST_NEXT(lp, nfsl_list);
		    if (lp->nfsl_defunct) {
			LIST_FOREACH(olp, &lh, nfsl_list) {
			    if (!NFSBCMP(olp->nfsl_owner, lp->nfsl_owner,
				NFSV4CL_LOCKNAMELEN))
				break;
			}
			if (olp == NULL) {
			    LIST_REMOVE(lp, nfsl_list);
			    LIST_INSERT_HEAD(&lh, lp, nfsl_list);
			} else {
			    nfscl_freelockowner(lp, 0);
			}
		    }
		    lp = nlp;
		}
		/* and release defunct lock owners */
		LIST_FOREACH_SAFE(lp, &lh, nfsl_list, nlp) {
		    nfscl_freelockowner(lp, 0);
		}

		/*
		 * Do the recall on any delegations. To avoid trouble, always
		 * come back up here after having slept.
		 */
		igotlock = 0;
tryagain:
		dp = TAILQ_FIRST(&clp->nfsc_deleg);
		while (dp != NULL) {
			ndp = TAILQ_NEXT(dp, nfsdl_list);
			if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				    if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				    }
				    dp->nfsdl_rwlock.nfslock_lock |=
					NFSV4LOCK_WANTED;
				    (void) nfsmsleep(&dp->nfsdl_rwlock,
					NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
					NULL);
				    /* Slept, so rescan from the start. */
				    goto tryagain;
				}
				while (!igotlock) {
				    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					&islept, NFSCLSTATEMUTEXPTR, NULL);
				    if (islept)
					goto tryagain;
				}
				NFSUNLOCKCLSTATE();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
				    NULL, cred, p, 1);
				if (!ret) {
				    /* Recall worked; queue for DelegReturn. */
				    nfscl_cleandeleg(dp);
				    TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					nfsdl_list);
				    LIST_REMOVE(dp, nfsdl_hash);
				    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
				    nfscl_delegcnt--;
				    newnfsstats.cldelegates--;
				}
				NFSLOCKCLSTATE();
			}
			dp = ndp;
		}

		/*
		 * Clear out old delegations, if we are above the high water
		 * mark. Only clear out ones with no state related to them.
		 * The tailq list is in LRU order.
		 */
		dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
		while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
		    ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
		    if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
			dp->nfsdl_rwlock.nfslock_lock == 0 &&
			dp->nfsdl_timestamp < NFSD_MONOSEC &&
			(dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
			  NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
			clearok = 1;
			/* Keep the delegation if any opens hang off it. */
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			    op = LIST_FIRST(&owp->nfsow_open);
			    if (op != NULL) {
				clearok = 0;
				break;
			    }
			}
			/* ... or if any byte range locks hang off it. */
			if (clearok) {
			    LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
				if (!LIST_EMPTY(&lp->nfsl_lock)) {
				    clearok = 0;
				    break;
				}
			    }
			}
			if (clearok) {
			    TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			    LIST_REMOVE(dp, nfsdl_hash);
			    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
			    nfscl_delegcnt--;
			    newnfsstats.cldelegates--;
			}
		    }
		    dp = ndp;
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();

		/*
		 * Delegreturn any delegations cleaned out or recalled.
		 */
		TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
			TAILQ_REMOVE(&dh, dp, nfsdl_list);
			FREE((caddr_t)dp, M_NFSCLDELEG);
		}

#if defined(APPLEKEXT) || defined(__FreeBSD__)
		/*
		 * Simulate the calls to nfscl_cleanup() when a process
		 * exits, since the call can't be patched into exit().
		 */
		{
			struct timespec mytime;
			static time_t prevsec = 0;

			/* Run the cleanup at most once per second. */
			NFSGETNANOTIME(&mytime);
			if (prevsec != mytime.tv_sec) {
				prevsec = mytime.tv_sec;
				nfscl_cleanupkext(clp);
			}
		}
#endif	/* APPLEKEXT || __FreeBSD__ */

		/*
		 * Sleep for up to hz ticks (about one second), unless a
		 * recovery is pending; wake-ups come via wakeup(clp).
		 */
		NFSLOCKCLSTATE();
		if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
			    hz);
		if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
			clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
			NFSUNLOCKCLSTATE();
			NFSFREECRED(cred);
			wakeup((caddr_t)clp);
			return;
		}
		NFSUNLOCKCLSTATE();
	}
}
2646 
2647 /*
2648  * Initiate state recovery. Called when NFSERR_STALECLIENTID or
2649  * NFSERR_STALESTATEID is received.
2650  */
2651 APPLESTATIC void
2652 nfscl_initiate_recovery(struct nfsclclient *clp)
2653 {
2654 
2655 	if (clp == NULL)
2656 		return;
2657 	NFSLOCKCLSTATE();
2658 	clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2659 	NFSUNLOCKCLSTATE();
2660 	wakeup((caddr_t)clp);
2661 }
2662 
2663 /*
2664  * Dump out the state stuff for debugging.
2665  */
2666 APPLESTATIC void
2667 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2668     int lockowner, int locks)
2669 {
2670 	struct nfsclclient *clp;
2671 	struct nfsclowner *owp;
2672 	struct nfsclopen *op;
2673 	struct nfscllockowner *lp;
2674 	struct nfscllock *lop;
2675 	struct nfscldeleg *dp;
2676 
2677 	clp = nmp->nm_clp;
2678 	if (clp == NULL) {
2679 		printf("nfscl dumpstate NULL clp\n");
2680 		return;
2681 	}
2682 	NFSLOCKCLSTATE();
2683 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2684 	  LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2685 	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2686 		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2687 		    owp->nfsow_owner[0], owp->nfsow_owner[1],
2688 		    owp->nfsow_owner[2], owp->nfsow_owner[3],
2689 		    owp->nfsow_seqid);
2690 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2691 		if (opens)
2692 		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2693 			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2694 			op->nfso_stateid.other[2], op->nfso_opencnt,
2695 			op->nfso_fh[12]);
2696 		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2697 		    if (lockowner)
2698 			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2699 			    lp->nfsl_owner[0], lp->nfsl_owner[1],
2700 			    lp->nfsl_owner[2], lp->nfsl_owner[3],
2701 			    lp->nfsl_seqid,
2702 			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2703 			    lp->nfsl_stateid.other[2]);
2704 		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2705 			if (locks)
2706 #ifdef __FreeBSD__
2707 			    printf("lck typ=%d fst=%ju end=%ju\n",
2708 				lop->nfslo_type, (intmax_t)lop->nfslo_first,
2709 				(intmax_t)lop->nfslo_end);
2710 #else
2711 			    printf("lck typ=%d fst=%qd end=%qd\n",
2712 				lop->nfslo_type, lop->nfslo_first,
2713 				lop->nfslo_end);
2714 #endif
2715 		    }
2716 		}
2717 	    }
2718 	  }
2719 	}
2720 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2721 	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2722 		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2723 		    owp->nfsow_owner[0], owp->nfsow_owner[1],
2724 		    owp->nfsow_owner[2], owp->nfsow_owner[3],
2725 		    owp->nfsow_seqid);
2726 	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2727 		if (opens)
2728 		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2729 			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2730 			op->nfso_stateid.other[2], op->nfso_opencnt,
2731 			op->nfso_fh[12]);
2732 		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2733 		    if (lockowner)
2734 			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2735 			    lp->nfsl_owner[0], lp->nfsl_owner[1],
2736 			    lp->nfsl_owner[2], lp->nfsl_owner[3],
2737 			    lp->nfsl_seqid,
2738 			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2739 			    lp->nfsl_stateid.other[2]);
2740 		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2741 			if (locks)
2742 #ifdef __FreeBSD__
2743 			    printf("lck typ=%d fst=%ju end=%ju\n",
2744 				lop->nfslo_type, (intmax_t)lop->nfslo_first,
2745 				(intmax_t)lop->nfslo_end);
2746 #else
2747 			    printf("lck typ=%d fst=%qd end=%qd\n",
2748 				lop->nfslo_type, lop->nfslo_first,
2749 				lop->nfslo_end);
2750 #endif
2751 		    }
2752 		}
2753 	    }
2754 	}
2755 	NFSUNLOCKCLSTATE();
2756 }
2757 
2758 /*
2759  * Check for duplicate open owners and opens.
2760  * (Only used as a diagnostic aid.)
2761  */
APPLESTATIC void
nfscl_dupopen(vnode_t vp, int dupopens)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *owp2;
	struct nfsclopen *op, *op2;
	struct nfsfh *nfhp;

	clp = VFSTONFS(vnode_mount(vp))->nm_clp;
	if (clp == NULL) {
		printf("nfscl dupopen NULL clp\n");
		return;
	}
	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();

	/*
	 * First, search for duplicate owners.
	 * These should never happen!
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		if (owp != owp2 &&
		    !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * Drop the mutex before dumping, since
			 * nfscl_dumpstate() acquires it itself.
			 */
			NFSUNLOCKCLSTATE();
			printf("DUP OWNER\n");
			nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
			return;
		}
	    }
	}

	/*
	 * Now, search for duplicate stateids.
	 * These shouldn't happen, either.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			/* All-zero stateids are ignored (not yet issued). */
			if (op != op2 &&
			    (op->nfso_stateid.other[0] != 0 ||
			     op->nfso_stateid.other[1] != 0 ||
			     op->nfso_stateid.other[2] != 0) &&
			    op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
			    op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
			    op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
			    NFSUNLOCKCLSTATE();
			    printf("DUP STATEID\n");
			    nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
				0);
			    return;
			}
		    }
		}
	    }
	}

	/*
	 * Now search for duplicate opens.
	 * Duplicate opens for the same owner
	 * should never occur. Other duplicates are
	 * possible and are checked for if "dupopens"
	 * is true.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		if (nfhp->nfh_len == op2->nfso_fhlen &&
		    !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
		    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
				!NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
				(!NFSBCMP(op->nfso_own->nfsow_owner,
				 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
				 dupopens)) {
				/* Same owner: a real bug; else informational. */
				if (!NFSBCMP(op->nfso_own->nfsow_owner,
				    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
				    NFSUNLOCKCLSTATE();
				    printf("BADDUP OPEN\n");
				} else {
				    NFSUNLOCKCLSTATE();
				    printf("DUP OPEN\n");
				}
				nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
				    0, 0);
				return;
			    }
			}
		    }
		}
	    }
	}
	NFSUNLOCKCLSTATE();
}
2858 
2859 /*
2860  * During close, find an open that needs to be dereferenced and
2861  * dereference it. If there are no more opens for this file,
2862  * log a message to that effect.
2863  * Opens aren't actually Close'd until VOP_INACTIVE() is performed
2864  * on the file's vnode.
2865  * This is the safe way, since it is difficult to identify
2866  * which open the close is for and I/O can be performed after the
2867  * close(2) system call when a file is mmap'd.
2868  * If it returns 0 for success, there will be a referenced
2869  * clp returned via clpp.
2870  */
APPLESTATIC int
nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	int error, notdecr;

	/* Get the clientid for this mount; it is returned via *clpp. */
	error = nfscl_getcl(vp, NULL, NULL, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nfhp = VTONFS(vp)->n_fhp;
	notdecr = 1;	/* cleared once one opencnt has been decremented */
	NFSLOCKCLSTATE();
	/*
	 * First, look for one under a delegation that was locally issued
	 * and just decrement the opencnt for it. Since all my Opens against
	 * the server are DENY_NONE, I don't see a problem with hanging
	 * onto them. (It is much easier to use one of the extant Opens
	 * that I already have on the server when a Delegation is recalled
	 * than to do fresh Opens.) Someday, I might need to rethink this, but.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				/*
				 * Since a delegation is for a file, there
				 * should never be more than one open for
				 * each openowner.
				 */
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfscdeleg opens");
				if (notdecr && op->nfso_opencnt > 0) {
					notdecr = 0;
					op->nfso_opencnt--;
					break;
				}
			}
		}
	}

	/* Now process the opens against the server. */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			/* Only opens matching this vnode's file handle. */
			if (op->nfso_fhlen == nfhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
			    nfhp->nfh_len)) {
				/* Found an open, decrement cnt if possible */
				if (notdecr && op->nfso_opencnt > 0) {
					notdecr = 0;
					op->nfso_opencnt--;
				}
				/*
				 * There are more opens, so just return.
				 */
				if (op->nfso_opencnt > 0) {
					NFSUNLOCKCLSTATE();
					return (0);
				}
			}
		}
	}
	NFSUNLOCKCLSTATE();
	/* No open with a non-zero count was found for this file. */
	if (notdecr)
		printf("nfscl: never fnd open\n");
	return (0);
}
2944 
/*
 * Close out all Opens for the file handle of vp. Opens issued locally
 * under a delegation are freed first (their opencnt must already be 0,
 * per the KASSERTs below); the remaining Opens against the server are
 * closed via nfsrpc_doclose(). On success, clp is returned via clpp.
 */
APPLESTATIC int
nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	int error;

	error = nfscl_getcl(vp, NULL, NULL, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();
	/*
	 * First get rid of the local Open structures, which should be no
	 * longer in use.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				KASSERT((op->nfso_opencnt == 0),
				    ("nfscl: bad open cnt on deleg"));
				nfscl_freeopen(op, 1);
			}
			nfscl_freeopenowner(owp, 1);
		}
	}

	/* Now process the opens against the server. */
lookformore:
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL) {
			if (op->nfso_fhlen == nfhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
			    nfhp->nfh_len)) {
				/* Found an open, close it. */
				KASSERT((op->nfso_opencnt == 0),
				    ("nfscl: bad open cnt on server"));
				/*
				 * The state mutex is dropped across the RPC,
				 * so restart the scan from the top since the
				 * lists may have changed while unlocked.
				 */
				NFSUNLOCKCLSTATE();
				nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
				    p);
				NFSLOCKCLSTATE();
				goto lookformore;
			}
			op = LIST_NEXT(op, nfso_list);
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}
3002 
3003 /*
3004  * Return all delegations on this client.
3005  * (Must be called with client sleep lock.)
3006  */
3007 static void
3008 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
3009 {
3010 	struct nfscldeleg *dp, *ndp;
3011 	struct ucred *cred;
3012 
3013 	cred = newnfs_getcred();
3014 	TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3015 		nfscl_cleandeleg(dp);
3016 		(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3017 		nfscl_freedeleg(&clp->nfsc_deleg, dp);
3018 	}
3019 	NFSFREECRED(cred);
3020 }
3021 
3022 /*
3023  * Do a callback RPC.
3024  */
APPLESTATIC void
nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
{
	int i, op;
	u_int32_t *tl;
	struct nfsclclient *clp;
	struct nfscldeleg *dp = NULL;
	int numops, taglen = -1, error = 0, trunc, ret = 0;
	u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident;
	u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
	vnode_t vp = NULL;
	struct nfsnode *np;
	struct vattr va;
	struct nfsfh *nfhp;
	mount_t mp;
	nfsattrbit_t attrbits, rattrbits;
	nfsv4stateid_t stateid;

	nfsrvd_rephead(nd);
	/* Parse the callback compound's tag and echo it in the reply. */
	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
	taglen = fxdr_unsigned(int, *tl);
	if (taglen < 0) {
		error = EBADRPC;
		goto nfsmout;
	}
	if (taglen <= NFSV4_SMALLSTR)
		tagstr = tag;
	else
		tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
	error = nfsrv_mtostr(nd, tagstr, taglen);
	if (error) {
		if (taglen > NFSV4_SMALLSTR)
			free(tagstr, M_TEMP);
		taglen = -1;
		goto nfsmout;
	}
	/*
	 * NOTE(review): the reply echoes "tag", but when taglen >
	 * NFSV4_SMALLSTR the string was read into the malloc'd "tagstr",
	 * so this copies taglen bytes from the smaller stack buffer --
	 * verify this is intended.
	 */
	(void) nfsm_strtom(nd, tag, taglen);
	if (taglen > NFSV4_SMALLSTR) {
		free(tagstr, M_TEMP);
	}
	/* Reserve space for the reply's op count; filled in at the end. */
	NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
	NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	minorvers = fxdr_unsigned(u_int32_t, *tl++);
	if (minorvers != NFSV4_MINORVERSION)
		nd->nd_repstat = NFSERR_MINORVERMISMATCH;
	cbident = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_repstat)
		numops = 0;
	else
		numops = fxdr_unsigned(int, *tl);
	/*
	 * Loop around doing the sub ops.
	 */
	for (i = 0; i < numops; i++) {
		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
		*repp++ = *tl;
		op = fxdr_unsigned(int, *tl);
		/* Only CB_GETATTR and CB_RECALL are handled. */
		if (op < NFSV4OP_CBGETATTR || op > NFSV4OP_CBRECALL) {
		    nd->nd_repstat = NFSERR_OPILLEGAL;
		    *repp = nfscl_errmap(nd);
		    retops++;
		    break;
		}
		nd->nd_procnum = op;
		newnfsstats.cbrpccnt[nd->nd_procnum]++;
		switch (op) {
		case NFSV4OP_CBGETATTR:
			clp = NULL;
			error = nfsm_getfh(nd, &nfhp);
			if (!error)
				error = nfsrv_getattrbits(nd, &attrbits,
				    NULL, NULL);
			if (!error) {
				mp = nfscl_getmnt(cbident);
				if (mp == NULL)
					error = NFSERR_SERVERFAULT;
			}
			if (!error) {
				/* A delegation must exist for the file. */
				dp = NULL;
				NFSLOCKCLSTATE();
				clp = nfscl_findcl(VFSTONFS(mp));
				if (clp != NULL)
					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
					    nfhp->nfh_len);
				NFSUNLOCKCLSTATE();
				if (dp == NULL)
					error = NFSERR_SERVERFAULT;
			}
			if (!error) {
				/* ret != 0: no vnode; use delegation attrs. */
				ret = nfscl_ngetreopen(mp, nfhp->nfh_fh,
				    nfhp->nfh_len, p, &np);
				if (!ret)
					vp = NFSTOV(np);
			}
			if (nfhp != NULL)
				FREE((caddr_t)nfhp, M_NFSFH);
			if (!error) {
				/*
				 * Reply with only the size and change
				 * attributes, as requested by the server.
				 */
				NFSZERO_ATTRBIT(&rattrbits);
				if (NFSISSET_ATTRBIT(&attrbits,
				    NFSATTRBIT_SIZE)) {
					if (!ret)
						va.va_size = np->n_size;
					else
						va.va_size = dp->nfsdl_size;
					NFSSETBIT_ATTRBIT(&rattrbits,
					    NFSATTRBIT_SIZE);
				}
				if (NFSISSET_ATTRBIT(&attrbits,
				    NFSATTRBIT_CHANGE)) {
					va.va_filerev = dp->nfsdl_change;
					if (ret || (np->n_flag & NDELEGMOD))
						va.va_filerev++;
					NFSSETBIT_ATTRBIT(&rattrbits,
					    NFSATTRBIT_CHANGE);
				}
				(void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
				    NULL, 0, &rattrbits, NULL, NULL, 0, 0, 0, 0,
				    (uint64_t)0);
				if (!ret)
					vrele(vp);
			}
			break;
		case NFSV4OP_CBRECALL:
			clp = NULL;
			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
			    NFSX_UNSIGNED);
			stateid.seqid = *tl++;
			NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
			    NFSX_STATEIDOTHER);
			tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
			/* truncate flag is parsed but not used below */
			trunc = fxdr_unsigned(int, *tl);
			error = nfsm_getfh(nd, &nfhp);
			if (!error) {
				mp = nfscl_getmnt(cbident);
				if (mp == NULL)
					error = NFSERR_SERVERFAULT;
			}
			if (!error) {
				/*
				 * Mark the delegation recalled and wake the
				 * sleeper on clp to return it, unless a
				 * DelegReturn is already in progress.
				 */
				NFSLOCKCLSTATE();
				clp = nfscl_findcl(VFSTONFS(mp));
				if (clp != NULL) {
					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
					    nfhp->nfh_len);
					if (dp != NULL && (dp->nfsdl_flags &
					    NFSCLDL_DELEGRET) == 0) {
						dp->nfsdl_flags |=
						    NFSCLDL_RECALL;
						wakeup((caddr_t)clp);
					}
				} else {
					error = NFSERR_SERVERFAULT;
				}
				NFSUNLOCKCLSTATE();
			}
			if (nfhp != NULL)
				FREE((caddr_t)nfhp, M_NFSFH);
			break;
		};
		if (error) {
			/* Map XDR parsing failures to NFSERR_BADXDR. */
			if (error == EBADRPC || error == NFSERR_BADXDR) {
				nd->nd_repstat = NFSERR_BADXDR;
			} else {
				nd->nd_repstat = error;
			}
			error = 0;
		}
		retops++;
		if (nd->nd_repstat) {
			*repp = nfscl_errmap(nd);
			break;
		} else
			*repp = 0;	/* NFS4_OK */
	}
nfsmout:
	if (error) {
		if (error == EBADRPC || error == NFSERR_BADXDR)
			nd->nd_repstat = NFSERR_BADXDR;
		else
			printf("nfsv4 comperr1=%d\n", error);
	}
	/* taglen == -1 means the tag was never parsed into the reply. */
	if (taglen == -1) {
		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		*tl++ = 0;
		*tl = 0;
	} else {
		*retopsp = txdr_unsigned(retops);
	}
	*nd->nd_errp = nfscl_errmap(nd);
}
3215 
3216 /*
3217  * Generate the next cbident value. Basically just increment a static value
3218  * and then check that it isn't already in the list, if it has wrapped around.
3219  */
3220 static u_int32_t
3221 nfscl_nextcbident(void)
3222 {
3223 	struct nfsclclient *clp;
3224 	int matched;
3225 	static u_int32_t nextcbident = 0;
3226 	static int haswrapped = 0;
3227 
3228 	nextcbident++;
3229 	if (nextcbident == 0)
3230 		haswrapped = 1;
3231 	if (haswrapped) {
3232 		/*
3233 		 * Search the clientid list for one already using this cbident.
3234 		 */
3235 		do {
3236 			matched = 0;
3237 			NFSLOCKCLSTATE();
3238 			LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3239 				if (clp->nfsc_cbident == nextcbident) {
3240 					matched = 1;
3241 					break;
3242 				}
3243 			}
3244 			NFSUNLOCKCLSTATE();
3245 			if (matched == 1)
3246 				nextcbident++;
3247 		} while (matched);
3248 	}
3249 	return (nextcbident);
3250 }
3251 
3252 /*
3253  * Get the mount point related to a given cbident.
3254  */
3255 static mount_t
3256 nfscl_getmnt(u_int32_t cbident)
3257 {
3258 	struct nfsclclient *clp;
3259 	struct nfsmount *nmp;
3260 
3261 	NFSLOCKCLSTATE();
3262 	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3263 		if (clp->nfsc_cbident == cbident)
3264 			break;
3265 	}
3266 	if (clp == NULL) {
3267 		NFSUNLOCKCLSTATE();
3268 		return (NULL);
3269 	}
3270 	nmp = clp->nfsc_nmp;
3271 	NFSUNLOCKCLSTATE();
3272 	return (nmp->nm_mountp);
3273 }
3274 
3275 /*
3276  * Search for a lock conflict locally on the client. A conflict occurs if
3277  * - not same owner and overlapping byte range and at least one of them is
3278  *   a write lock or this is an unlock.
3279  */
3280 static int
3281 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3282     struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3283     struct nfscllock **lopp)
3284 {
3285 	struct nfsclowner *owp;
3286 	struct nfsclopen *op;
3287 	int ret;
3288 
3289 	if (dp != NULL) {
3290 		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3291 		if (ret)
3292 			return (ret);
3293 	}
3294 	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3295 		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3296 			if (op->nfso_fhlen == fhlen &&
3297 			    !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3298 				ret = nfscl_checkconflict(&op->nfso_lock, nlop,
3299 				    own, lopp);
3300 				if (ret)
3301 					return (ret);
3302 			}
3303 		}
3304 	}
3305 	return (0);
3306 }
3307 
3308 static int
3309 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3310     u_int8_t *own, struct nfscllock **lopp)
3311 {
3312 	struct nfscllockowner *lp;
3313 	struct nfscllock *lop;
3314 
3315 	LIST_FOREACH(lp, lhp, nfsl_list) {
3316 		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3317 			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3318 				if (lop->nfslo_first >= nlop->nfslo_end)
3319 					break;
3320 				if (lop->nfslo_end <= nlop->nfslo_first)
3321 					continue;
3322 				if (lop->nfslo_type == F_WRLCK ||
3323 				    nlop->nfslo_type == F_WRLCK ||
3324 				    nlop->nfslo_type == F_UNLCK) {
3325 					if (lopp != NULL)
3326 						*lopp = lop;
3327 					return (NFSERR_DENIED);
3328 				}
3329 			}
3330 		}
3331 	}
3332 	return (0);
3333 }
3334 
3335 /*
3336  * Check for a local conflicting lock.
3337  */
3338 APPLESTATIC int
3339 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3340     u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
3341 {
3342 	struct nfscllock *lop, nlck;
3343 	struct nfscldeleg *dp;
3344 	struct nfsnode *np;
3345 	u_int8_t own[NFSV4CL_LOCKNAMELEN];
3346 	int error;
3347 
3348 	nlck.nfslo_type = fl->l_type;
3349 	nlck.nfslo_first = off;
3350 	if (len == NFS64BITSSET) {
3351 		nlck.nfslo_end = NFS64BITSSET;
3352 	} else {
3353 		nlck.nfslo_end = off + len;
3354 		if (nlck.nfslo_end <= nlck.nfslo_first)
3355 			return (NFSERR_INVAL);
3356 	}
3357 	np = VTONFS(vp);
3358 	nfscl_filllockowner(id, own, flags);
3359 	NFSLOCKCLSTATE();
3360 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3361 	error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3362 	    &nlck, own, dp, &lop);
3363 	if (error != 0) {
3364 		fl->l_whence = SEEK_SET;
3365 		fl->l_start = lop->nfslo_first;
3366 		if (lop->nfslo_end == NFS64BITSSET)
3367 			fl->l_len = 0;
3368 		else
3369 			fl->l_len = lop->nfslo_end - lop->nfslo_first;
3370 		fl->l_pid = (pid_t)0;
3371 		fl->l_type = lop->nfslo_type;
3372 		error = -1;			/* no RPC required */
3373 	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3374 	    fl->l_type == F_RDLCK)) {
3375 		/*
3376 		 * The delegation ensures that there isn't a conflicting
3377 		 * lock on the server, so return -1 to indicate an RPC
3378 		 * isn't required.
3379 		 */
3380 		fl->l_type = F_UNLCK;
3381 		error = -1;
3382 	}
3383 	NFSUNLOCKCLSTATE();
3384 	return (error);
3385 }
3386 
3387 /*
3388  * Handle Recall of a delegation.
3389  * The clp must be exclusive locked when this is called.
3390  */
static int
nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
    int called_from_renewthread)
{
	struct nfsclowner *owp, *lowp, *nowp;
	struct nfsclopen *op, *lop;
	struct nfscllockowner *lp;
	struct nfscllock *lckp;
	struct nfsnode *np;
	int error = 0, ret, gotvp = 0;

	if (vp == NULL) {
		/*
		 * First, get a vnode for the file. This is needed to do RPCs.
		 */
		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
		    dp->nfsdl_fhlen, p, &np);
		if (ret) {
			/*
			 * File isn't open, so nothing to move over to the
			 * server.
			 */
			return (0);
		}
		vp = NFSTOV(np);
		gotvp = 1;	/* remember to vrele() before returning */
	} else {
		np = VTONFS(vp);
	}
	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;

	/*
	 * Ok, if it's a write delegation, flush data to the server, so
	 * that close/open consistency is retained.
	 */
	ret = 0;
	NFSLOCKNODE(np);
	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
		np->n_flag |= NDELEGRECALL;
		NFSUNLOCKNODE(np);
		ret = ncl_flush(vp, MNT_WAIT, cred, p, 1,
		    called_from_renewthread);
		NFSLOCKNODE(np);
		np->n_flag &= ~NDELEGRECALL;
	}
	NFSINVALATTRCACHE(np);
	NFSUNLOCKNODE(np);
	if (ret == EIO && called_from_renewthread != 0) {
		/*
		 * If the flush failed with EIO for the renew thread,
		 * return now, so that the dirty buffer will be flushed
		 * later.
		 */
		if (gotvp != 0)
			vrele(vp);
		return (ret);
	}

	/*
	 * Now, for each openowner with opens issued locally, move them
	 * over to state against the server.
	 */
	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
		lop = LIST_FIRST(&lowp->nfsow_open);
		if (lop != NULL) {
			/* A delegation holds at most one open per owner. */
			if (LIST_NEXT(lop, nfso_list) != NULL)
				panic("nfsdlg mult opens");
			/*
			 * Look for the same openowner against the server.
			 */
			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
				if (!NFSBCMP(lowp->nfsow_owner,
				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
					newnfs_copycred(&dp->nfsdl_cred, cred);
					ret = nfscl_moveopen(vp, clp, nmp, lop,
					    owp, dp, cred, p);
					/* Stale state: give up immediately. */
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER) {
						if (gotvp)
							vrele(vp);
						return (ret);
					}
					/* Otherwise record the first error. */
					if (ret) {
						nfscl_freeopen(lop, 1);
						if (!error)
							error = ret;
					}
					break;
				}
			}

			/*
			 * If no openowner found, create one and get an open
			 * for it.
			 */
			if (owp == NULL) {
				MALLOC(nowp, struct nfsclowner *,
				    sizeof (struct nfsclowner), M_NFSCLOWNER,
				    M_WAITOK);
				nfscl_newopen(clp, NULL, &owp, &nowp, &op,
				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
				    dp->nfsdl_fhlen, NULL);
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_moveopen(vp, clp, nmp, lop,
				    owp, dp, cred, p);
				if (ret) {
					nfscl_freeopenowner(owp, 0);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER) {
						if (gotvp)
							vrele(vp);
						return (ret);
					}
					/*
					 * NOTE(review): this inner test is
					 * always true here; it mirrors the
					 * branch above.
					 */
					if (ret) {
						nfscl_freeopen(lop, 1);
						if (!error)
							error = ret;
					}
				}
			}
		}
	}

	/*
	 * Now, get byte range locks for any locks done locally.
	 */
	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
			if (ret == NFSERR_STALESTATEID ||
			    ret == NFSERR_STALEDONTRECOVER ||
			    ret == NFSERR_STALECLIENTID) {
				if (gotvp)
					vrele(vp);
				return (ret);
			}
			/* Remember the first failure, but keep going. */
			if (ret && !error)
				error = ret;
		}
	}
	if (gotvp)
		vrele(vp);
	return (error);
}
3537 
3538 /*
3539  * Move a locally issued open over to an owner on the state list.
3540  * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
3541  * returns with it unlocked.
3542  */
static int
nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclopen *op, *nop;
	struct nfscldeleg *ndp;
	struct nfsnode *np;
	int error = 0, newone;

	/*
	 * First, look for an appropriate open, If found, just increment the
	 * opencnt in it.
	 */
	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		/* Needs the same file handle and at least lop's mode bits. */
		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
		    op->nfso_fhlen == lop->nfso_fhlen &&
		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
			op->nfso_opencnt += lop->nfso_opencnt;
			nfscl_freeopen(lop, 1);
			return (0);
		}
	}

	/* No appropriate open, so we have to do one against the server. */
	np = VTONFS(vp);
	MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	newone = 0;
	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
	    lop->nfso_fh, lop->nfso_fhlen, &newone);
	ndp = dp;
	error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
	    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
	    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
	if (error) {
		if (newone)
			nfscl_freeopen(op, 0);
	} else {
		if (newone)
			newnfs_copyincred(cred, &op->nfso_cred);
		/* Fold the local open's mode and count into op. */
		op->nfso_mode |= lop->nfso_mode;
		op->nfso_opencnt += lop->nfso_opencnt;
		nfscl_freeopen(lop, 1);
	}
	/* nop is NULL if nfscl_newopen() consumed it. */
	if (nop != NULL)
		FREE((caddr_t)nop, M_NFSCLOPEN);
	if (ndp != NULL) {
		/*
		 * What should I do with the returned delegation, since the
		 * delegation is being recalled? For now, just printf and
		 * throw it away.
		 */
		printf("Moveopen returned deleg\n");
		FREE((caddr_t)ndp, M_NFSCLDELEG);
	}
	return (error);
}
3601 
3602 /*
3603  * Recall all delegations on this client.
3604  */
3605 static void
3606 nfscl_totalrecall(struct nfsclclient *clp)
3607 {
3608 	struct nfscldeleg *dp;
3609 
3610 	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
3611 		if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
3612 			dp->nfsdl_flags |= NFSCLDL_RECALL;
3613 	}
3614 }
3615 
3616 /*
3617  * Relock byte ranges. Called for delegation recall and state expiry.
3618  */
3619 static int
3620 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3621     struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
3622     NFSPROC_T *p)
3623 {
3624 	struct nfscllockowner *nlp;
3625 	struct nfsfh *nfhp;
3626 	u_int64_t off, len;
3627 	u_int32_t clidrev = 0;
3628 	int error, newone, donelocally;
3629 
3630 	off = lop->nfslo_first;
3631 	len = lop->nfslo_end - lop->nfslo_first;
3632 	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
3633 	    clp, 1, NULL, 0, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
3634 	    &donelocally);
3635 	if (error || donelocally)
3636 		return (error);
3637 	if (nmp->nm_clp != NULL)
3638 		clidrev = nmp->nm_clp->nfsc_clientidrev;
3639 	else
3640 		clidrev = 0;
3641 	nfhp = VTONFS(vp)->n_fhp;
3642 	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
3643 	    nfhp->nfh_len, nlp, newone, 0, off,
3644 	    len, lop->nfslo_type, cred, p);
3645 	if (error)
3646 		nfscl_freelockowner(nlp, 0);
3647 	return (error);
3648 }
3649 
3650 /*
3651  * Called to re-open a file. Basically get a vnode for the file handle
3652  * and then call nfsrpc_openrpc() to do the rest.
3653  */
3654 static int
3655 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
3656     u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
3657     struct ucred *cred, NFSPROC_T *p)
3658 {
3659 	struct nfsnode *np;
3660 	vnode_t vp;
3661 	int error;
3662 
3663 	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
3664 	if (error)
3665 		return (error);
3666 	vp = NFSTOV(np);
3667 	if (np->n_v4 != NULL) {
3668 		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
3669 		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
3670 		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
3671 		    cred, p);
3672 	} else {
3673 		error = EINVAL;
3674 	}
3675 	vrele(vp);
3676 	return (error);
3677 }
3678 
3679 /*
3680  * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
3681  * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
3682  * fail.
3683  */
3684 static int
3685 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
3686     u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
3687     u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
3688     int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
3689 {
3690 	int error;
3691 
3692 	do {
3693 		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
3694 		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
3695 		    0, 0);
3696 		if (error == NFSERR_DELAY)
3697 			(void) nfs_catnap(PZERO, error, "nfstryop");
3698 	} while (error == NFSERR_DELAY);
3699 	if (error == EAUTH || error == EACCES) {
3700 		/* Try again using system credentials */
3701 		newnfs_setroot(cred);
3702 		do {
3703 		    error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
3704 			newfhlen, mode, op, name, namelen, ndpp, reclaim,
3705 			delegtype, cred, p, 1, 0);
3706 		    if (error == NFSERR_DELAY)
3707 			(void) nfs_catnap(PZERO, error, "nfstryop");
3708 		} while (error == NFSERR_DELAY);
3709 	}
3710 	return (error);
3711 }
3712 
3713 /*
3714  * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
3715  * NFSERR_DELAY. Also, retry with system credentials, if the provided
3716  * cred don't work.
3717  */
3718 static int
3719 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
3720     int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
3721     u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
3722 {
3723 	struct nfsrv_descript nfsd, *nd = &nfsd;
3724 	int error;
3725 
3726 	do {
3727 		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
3728 		    reclaim, off, len, type, cred, p, 0);
3729 		if (!error && nd->nd_repstat == NFSERR_DELAY)
3730 			(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
3731 			    "nfstrylck");
3732 	} while (!error && nd->nd_repstat == NFSERR_DELAY);
3733 	if (!error)
3734 		error = nd->nd_repstat;
3735 	if (error == EAUTH || error == EACCES) {
3736 		/* Try again using root credentials */
3737 		newnfs_setroot(cred);
3738 		do {
3739 			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
3740 			    newone, reclaim, off, len, type, cred, p, 1);
3741 			if (!error && nd->nd_repstat == NFSERR_DELAY)
3742 				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
3743 				    "nfstrylck");
3744 		} while (!error && nd->nd_repstat == NFSERR_DELAY);
3745 		if (!error)
3746 			error = nd->nd_repstat;
3747 	}
3748 	return (error);
3749 }
3750 
3751 /*
3752  * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
3753  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3754  * credentials fail.
3755  */
3756 static int
3757 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
3758     struct nfsmount *nmp, NFSPROC_T *p)
3759 {
3760 	int error;
3761 
3762 	do {
3763 		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
3764 		if (error == NFSERR_DELAY)
3765 			(void) nfs_catnap(PZERO, error, "nfstrydp");
3766 	} while (error == NFSERR_DELAY);
3767 	if (error == EAUTH || error == EACCES) {
3768 		/* Try again using system credentials */
3769 		newnfs_setroot(cred);
3770 		do {
3771 			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
3772 			if (error == NFSERR_DELAY)
3773 				(void) nfs_catnap(PZERO, error, "nfstrydp");
3774 		} while (error == NFSERR_DELAY);
3775 	}
3776 	return (error);
3777 }
3778 
3779 /*
3780  * Try a close against the server. Just call nfsrpc_closerpc(),
3781  * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3782  * credentials fail.
3783  */
3784 APPLESTATIC int
3785 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
3786     struct nfsmount *nmp, NFSPROC_T *p)
3787 {
3788 	struct nfsrv_descript nfsd, *nd = &nfsd;
3789 	int error;
3790 
3791 	do {
3792 		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
3793 		if (error == NFSERR_DELAY)
3794 			(void) nfs_catnap(PZERO, error, "nfstrycl");
3795 	} while (error == NFSERR_DELAY);
3796 	if (error == EAUTH || error == EACCES) {
3797 		/* Try again using system credentials */
3798 		newnfs_setroot(cred);
3799 		do {
3800 			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
3801 			if (error == NFSERR_DELAY)
3802 				(void) nfs_catnap(PZERO, error, "nfstrycl");
3803 		} while (error == NFSERR_DELAY);
3804 	}
3805 	return (error);
3806 }
3807 
3808 /*
3809  * Decide if a delegation on a file permits close without flushing writes
3810  * to the server. This might be a big performance win in some environments.
3811  * (Not useful until the client does caching on local stable storage.)
3812  */
3813 APPLESTATIC int
3814 nfscl_mustflush(vnode_t vp)
3815 {
3816 	struct nfsclclient *clp;
3817 	struct nfscldeleg *dp;
3818 	struct nfsnode *np;
3819 	struct nfsmount *nmp;
3820 
3821 	np = VTONFS(vp);
3822 	nmp = VFSTONFS(vnode_mount(vp));
3823 	if (!NFSHASNFSV4(nmp))
3824 		return (1);
3825 	NFSLOCKCLSTATE();
3826 	clp = nfscl_findcl(nmp);
3827 	if (clp == NULL) {
3828 		NFSUNLOCKCLSTATE();
3829 		return (1);
3830 	}
3831 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3832 	if (dp != NULL && (dp->nfsdl_flags &
3833 	    (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
3834 	     NFSCLDL_WRITE &&
3835 	    (dp->nfsdl_sizelimit >= np->n_size ||
3836 	     !NFSHASSTRICT3530(nmp))) {
3837 		NFSUNLOCKCLSTATE();
3838 		return (0);
3839 	}
3840 	NFSUNLOCKCLSTATE();
3841 	return (1);
3842 }
3843 
3844 /*
3845  * See if a (write) delegation exists for this file.
3846  */
3847 APPLESTATIC int
3848 nfscl_nodeleg(vnode_t vp, int writedeleg)
3849 {
3850 	struct nfsclclient *clp;
3851 	struct nfscldeleg *dp;
3852 	struct nfsnode *np;
3853 	struct nfsmount *nmp;
3854 
3855 	np = VTONFS(vp);
3856 	nmp = VFSTONFS(vnode_mount(vp));
3857 	if (!NFSHASNFSV4(nmp))
3858 		return (1);
3859 	NFSLOCKCLSTATE();
3860 	clp = nfscl_findcl(nmp);
3861 	if (clp == NULL) {
3862 		NFSUNLOCKCLSTATE();
3863 		return (1);
3864 	}
3865 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3866 	if (dp != NULL &&
3867 	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
3868 	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
3869 	     NFSCLDL_WRITE)) {
3870 		NFSUNLOCKCLSTATE();
3871 		return (0);
3872 	}
3873 	NFSUNLOCKCLSTATE();
3874 	return (1);
3875 }
3876 
3877 /*
3878  * Look for an associated delegation that should be DelegReturned.
3879  */
APPLESTATIC int
nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vnode_mount(vp));
	np = VTONFS(vp);
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on vp that has state, lock the client and
	 *   do a recall
	 * - return delegation with no state
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL) {
		    /*
		     * Wait for outstanding I/O ops to be done.
		     */
		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
			/* Drop the client lock before sleeping. */
			if (igotlock) {
			    nfsv4_unlock(&clp->nfsc_lock, 0);
			    igotlock = 0;
			}
			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
			(void) nfsmsleep(&dp->nfsdl_rwlock,
			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
			continue;
		    }
		    /*
		     * A delegation with any local open or byte range
		     * lock state must be recalled before it can be
		     * returned.
		     */
		    needsrecall = 0;
		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			if (!LIST_EMPTY(&owp->nfsow_open)) {
			    needsrecall = 1;
			    break;
			}
		    }
		    if (!needsrecall) {
			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
				needsrecall = 1;
				break;
			    }
			}
		    }
		    if (needsrecall && !triedrecall) {
			dp->nfsdl_flags |= NFSCLDL_DELEGRET;
			/* Acquire the exclusive client lock, retrying
			 * from the top if the acquisition slept. */
			islept = 0;
			while (!igotlock) {
			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
				&islept, NFSCLSTATEMUTEXPTR, NULL);
			    if (islept)
				break;
			}
			if (islept)
			    continue;
			NFSUNLOCKCLSTATE();
			cred = newnfs_getcred();
			newnfs_copycred(&dp->nfsdl_cred, cred);
			(void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
			NFSFREECRED(cred);
			triedrecall = 1;
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			igotlock = 0;
			continue;
		    }
		    /* Hand the stateid back and free the delegation. */
		    *stp = dp->nfsdl_stateid;
		    retcnt = 1;
		    nfscl_cleandeleg(dp);
		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
		}
		if (igotlock)
		    nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}
3972 
3973 /*
3974  * Look for associated delegation(s) that should be DelegReturned.
3975  */
3976 APPLESTATIC int
3977 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
3978     nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
3979 {
3980 	struct nfsclclient *clp;
3981 	struct nfscldeleg *dp;
3982 	struct nfsclowner *owp;
3983 	struct nfscllockowner *lp;
3984 	struct nfsmount *nmp;
3985 	struct ucred *cred;
3986 	struct nfsnode *np;
3987 	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3988 
3989 	nmp = VFSTONFS(vnode_mount(fvp));
3990 	*gotfdp = 0;
3991 	*gottdp = 0;
3992 	NFSLOCKCLSTATE();
3993 	/*
3994 	 * Loop around waiting for:
3995 	 * - outstanding I/O operations on delegations to complete
3996 	 * - for a delegation on fvp that has state, lock the client and
3997 	 *   do a recall
3998 	 * - return delegation(s) with no state.
3999 	 */
4000 	while (1) {
4001 		clp = nfscl_findcl(nmp);
4002 		if (clp == NULL) {
4003 			NFSUNLOCKCLSTATE();
4004 			return (retcnt);
4005 		}
4006 		np = VTONFS(fvp);
4007 		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4008 		    np->n_fhp->nfh_len);
4009 		if (dp != NULL && *gotfdp == 0) {
4010 		    /*
4011 		     * Wait for outstanding I/O ops to be done.
4012 		     */
4013 		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4014 			if (igotlock) {
4015 			    nfsv4_unlock(&clp->nfsc_lock, 0);
4016 			    igotlock = 0;
4017 			}
4018 			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4019 			(void) nfsmsleep(&dp->nfsdl_rwlock,
4020 			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4021 			continue;
4022 		    }
4023 		    needsrecall = 0;
4024 		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4025 			if (!LIST_EMPTY(&owp->nfsow_open)) {
4026 			    needsrecall = 1;
4027 			    break;
4028 			}
4029 		    }
4030 		    if (!needsrecall) {
4031 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4032 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
4033 				needsrecall = 1;
4034 				break;
4035 			    }
4036 			}
4037 		    }
4038 		    if (needsrecall && !triedrecall) {
4039 			dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4040 			islept = 0;
4041 			while (!igotlock) {
4042 			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4043 				&islept, NFSCLSTATEMUTEXPTR, NULL);
4044 			    if (islept)
4045 				break;
4046 			}
4047 			if (islept)
4048 			    continue;
4049 			NFSUNLOCKCLSTATE();
4050 			cred = newnfs_getcred();
4051 			newnfs_copycred(&dp->nfsdl_cred, cred);
4052 			(void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
4053 			NFSFREECRED(cred);
4054 			triedrecall = 1;
4055 			NFSLOCKCLSTATE();
4056 			nfsv4_unlock(&clp->nfsc_lock, 0);
4057 			igotlock = 0;
4058 			continue;
4059 		    }
4060 		    *fstp = dp->nfsdl_stateid;
4061 		    retcnt++;
4062 		    *gotfdp = 1;
4063 		    nfscl_cleandeleg(dp);
4064 		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
4065 		}
4066 		if (igotlock) {
4067 		    nfsv4_unlock(&clp->nfsc_lock, 0);
4068 		    igotlock = 0;
4069 		}
4070 		if (tvp != NULL) {
4071 		    np = VTONFS(tvp);
4072 		    dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4073 			np->n_fhp->nfh_len);
4074 		    if (dp != NULL && *gottdp == 0) {
4075 			/*
4076 			 * Wait for outstanding I/O ops to be done.
4077 			 */
4078 			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4079 			    dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4080 			    (void) nfsmsleep(&dp->nfsdl_rwlock,
4081 				NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4082 			    continue;
4083 			}
4084 			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4085 			    if (!LIST_EMPTY(&owp->nfsow_open)) {
4086 				NFSUNLOCKCLSTATE();
4087 				return (retcnt);
4088 			    }
4089 			}
4090 			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4091 			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
4092 				NFSUNLOCKCLSTATE();
4093 				return (retcnt);
4094 			    }
4095 			}
4096 			*tstp = dp->nfsdl_stateid;
4097 			retcnt++;
4098 			*gottdp = 1;
4099 			nfscl_cleandeleg(dp);
4100 			nfscl_freedeleg(&clp->nfsc_deleg, dp);
4101 		    }
4102 		}
4103 		NFSUNLOCKCLSTATE();
4104 		return (retcnt);
4105 	}
4106 }
4107 
4108 /*
4109  * Get a reference on the clientid associated with the mount point.
4110  * Return 1 if success, 0 otherwise.
4111  */
4112 APPLESTATIC int
4113 nfscl_getref(struct nfsmount *nmp)
4114 {
4115 	struct nfsclclient *clp;
4116 
4117 	NFSLOCKCLSTATE();
4118 	clp = nfscl_findcl(nmp);
4119 	if (clp == NULL) {
4120 		NFSUNLOCKCLSTATE();
4121 		return (0);
4122 	}
4123 	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
4124 	NFSUNLOCKCLSTATE();
4125 	return (1);
4126 }
4127 
4128 /*
4129  * Release a reference on a clientid acquired with the above call.
4130  */
4131 APPLESTATIC void
4132 nfscl_relref(struct nfsmount *nmp)
4133 {
4134 	struct nfsclclient *clp;
4135 
4136 	NFSLOCKCLSTATE();
4137 	clp = nfscl_findcl(nmp);
4138 	if (clp == NULL) {
4139 		NFSUNLOCKCLSTATE();
4140 		return;
4141 	}
4142 	nfsv4_relref(&clp->nfsc_lock);
4143 	NFSUNLOCKCLSTATE();
4144 }
4145 
4146 /*
4147  * Save the size attribute in the delegation, since the nfsnode
4148  * is going away.
4149  */
4150 APPLESTATIC void
4151 nfscl_reclaimnode(vnode_t vp)
4152 {
4153 	struct nfsclclient *clp;
4154 	struct nfscldeleg *dp;
4155 	struct nfsnode *np = VTONFS(vp);
4156 	struct nfsmount *nmp;
4157 
4158 	nmp = VFSTONFS(vnode_mount(vp));
4159 	if (!NFSHASNFSV4(nmp))
4160 		return;
4161 	NFSLOCKCLSTATE();
4162 	clp = nfscl_findcl(nmp);
4163 	if (clp == NULL) {
4164 		NFSUNLOCKCLSTATE();
4165 		return;
4166 	}
4167 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4168 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4169 		dp->nfsdl_size = np->n_size;
4170 	NFSUNLOCKCLSTATE();
4171 }
4172 
4173 /*
4174  * Get the saved size attribute in the delegation, since it is a
4175  * newly allocated nfsnode.
4176  */
4177 APPLESTATIC void
4178 nfscl_newnode(vnode_t vp)
4179 {
4180 	struct nfsclclient *clp;
4181 	struct nfscldeleg *dp;
4182 	struct nfsnode *np = VTONFS(vp);
4183 	struct nfsmount *nmp;
4184 
4185 	nmp = VFSTONFS(vnode_mount(vp));
4186 	if (!NFSHASNFSV4(nmp))
4187 		return;
4188 	NFSLOCKCLSTATE();
4189 	clp = nfscl_findcl(nmp);
4190 	if (clp == NULL) {
4191 		NFSUNLOCKCLSTATE();
4192 		return;
4193 	}
4194 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4195 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4196 		np->n_size = dp->nfsdl_size;
4197 	NFSUNLOCKCLSTATE();
4198 }
4199 
4200 /*
4201  * If there is a valid write delegation for this file, set the modtime
4202  * to the local clock time.
4203  */
4204 APPLESTATIC void
4205 nfscl_delegmodtime(vnode_t vp)
4206 {
4207 	struct nfsclclient *clp;
4208 	struct nfscldeleg *dp;
4209 	struct nfsnode *np = VTONFS(vp);
4210 	struct nfsmount *nmp;
4211 
4212 	nmp = VFSTONFS(vnode_mount(vp));
4213 	if (!NFSHASNFSV4(nmp))
4214 		return;
4215 	NFSLOCKCLSTATE();
4216 	clp = nfscl_findcl(nmp);
4217 	if (clp == NULL) {
4218 		NFSUNLOCKCLSTATE();
4219 		return;
4220 	}
4221 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4222 	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4223 		NFSGETNANOTIME(&dp->nfsdl_modtime);
4224 		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4225 	}
4226 	NFSUNLOCKCLSTATE();
4227 }
4228 
4229 /*
4230  * If there is a valid write delegation for this file with a modtime set,
4231  * put that modtime in mtime.
4232  */
4233 APPLESTATIC void
4234 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4235 {
4236 	struct nfsclclient *clp;
4237 	struct nfscldeleg *dp;
4238 	struct nfsnode *np = VTONFS(vp);
4239 	struct nfsmount *nmp;
4240 
4241 	nmp = VFSTONFS(vnode_mount(vp));
4242 	if (!NFSHASNFSV4(nmp))
4243 		return;
4244 	NFSLOCKCLSTATE();
4245 	clp = nfscl_findcl(nmp);
4246 	if (clp == NULL) {
4247 		NFSUNLOCKCLSTATE();
4248 		return;
4249 	}
4250 	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4251 	if (dp != NULL &&
4252 	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4253 	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4254 		*mtime = dp->nfsdl_modtime;
4255 	NFSUNLOCKCLSTATE();
4256 }
4257 
4258 static int
4259 nfscl_errmap(struct nfsrv_descript *nd)
4260 {
4261 	short *defaulterrp, *errp;
4262 
4263 	if (!nd->nd_repstat)
4264 		return (0);
4265 	if (nd->nd_procnum == NFSPROC_NOOP)
4266 		return (txdr_unsigned(nd->nd_repstat & 0xffff));
4267 	if (nd->nd_repstat == EBADRPC)
4268 		return (txdr_unsigned(NFSERR_BADXDR));
4269 	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4270 	    nd->nd_repstat == NFSERR_OPILLEGAL)
4271 		return (txdr_unsigned(nd->nd_repstat));
4272 	errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4273 	while (*++errp)
4274 		if (*errp == (short)nd->nd_repstat)
4275 			return (txdr_unsigned(nd->nd_repstat));
4276 	return (txdr_unsigned(*defaulterrp));
4277 }
4278 
4279