1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2009 Rick Macklem, University of Guelph
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 */
29
30 #include <sys/cdefs.h>
31 /*
32 * These functions implement the client side state handling for NFSv4.
33 * NFSv4 state handling:
34 * - A lockowner is used to determine lock contention, so it
35 * corresponds directly to a Posix pid. (1 to 1 mapping)
36 * - The correct granularity of an OpenOwner is not nearly so
37 * obvious. An OpenOwner does the following:
38 * - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
39 * - is used to check for Open/Share contention (not applicable to
40 * this client, since all Opens are Deny_None)
41 * As such, I considered both extremes.
42 * 1 OpenOwner per ClientID - Simple to manage, but fully serializes
43 * all Open, Close and Lock (with a new lockowner) Ops.
44 * 1 OpenOwner for each Open - This one results in an OpenConfirm for
45 * every Open, for most servers.
46 * So, I chose to use the same mapping as I did for LockOwners.
47 * The main concern here is that you can end up with multiple Opens
48 * for the same File Handle, but on different OpenOwners (opens
49 * inherited from parents, grandparents...) and you do not know
50 * which of these the vnodeop close applies to. This is handled by
51 * delaying the Close Op(s) until all of the Opens have been closed.
52 * (It is not yet obvious if this is the correct granularity.)
53 * - How the code handles serialization:
54 * - For the ClientId, it uses an exclusive lock while getting its
55 * SetClientId and during recovery. Otherwise, it uses a shared
56 * lock via a reference count.
57 * - For the rest of the data structures, it uses an SMP mutex
58 * (once the nfs client is SMP safe) and doesn't sleep while
59 * manipulating the linked lists.
60 * - The serialization of Open/Close/Lock/LockU falls out in the
61 * "wash", since OpenOwners and LockOwners are both mapped from
62 * Posix pid. In other words, there is only one Posix pid using
63 * any given owner, so that owner is serialized. (If you change
64 * the granularity of the OpenOwner, then code must be added to
65 * serialize Ops on the OpenOwner.)
66 * - When to get rid of OpenOwners and LockOwners.
67 * - The function nfscl_cleanup_common() is executed after a process exits.
68 * It goes through the client list looking for all Open and Lock Owners.
69 * When one is found, it is marked "defunct" or in the case of
70 * an OpenOwner without any Opens, freed.
71 * The renew thread scans for defunct Owners and gets rid of them,
72 * if it can. The LockOwners will also be deleted when the
73 * associated Open is closed.
74 * - If the LockU or Close Op(s) fail during close in a way
75 * that could be recovered upon retry, they are relinked to the
76 * ClientId's defunct open list and retried by the renew thread
77 * until they succeed or an unmount/recovery occurs.
78 * (Since we are done with them, they do not need to be recovered.)
79 */
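/*
 * Sketch of the typical flow (derived from the functions below): a vnode
 * open calls nfscl_open() to find/create the OpenOwner and Open, performs
 * the Open RPC when required, and subsequent I/O operations call
 * nfscl_getstateid() to select a delegation, lock or open stateid, in
 * that order of preference.
 */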
80
81 #include <fs/nfs/nfsport.h>
82
83 /*
84 * Global variables
85 */
86 extern struct nfsstatsv1 nfsstatsv1;
87 extern struct nfsreqhead nfsd_reqq;
88 extern u_int32_t newnfs_false, newnfs_true;
89 extern int nfscl_debuglevel;
90 extern int nfscl_enablecallb;
91 extern int nfs_numnfscbd;
92 NFSREQSPINLOCK;
93 NFSCLSTATEMUTEX;
94 int nfscl_inited = 0;
95 struct nfsclhead nfsclhead; /* Head of clientid list */
96
97 static int nfscl_getopen(struct nfsclownerhead *, struct nfsclopenhash *,
98 u_int8_t *, int, u_int8_t *, u_int8_t *, u_int32_t,
99 struct nfscllockowner **, struct nfsclopen **);
100 static bool nfscl_checkown(struct nfsclowner *, struct nfsclopen *, uint8_t *,
101 uint8_t *, struct nfscllockowner **, struct nfsclopen **,
102 struct nfsclopen **);
103 static void nfscl_clrelease(struct nfsclclient *);
104 static void nfscl_unlinkopen(struct nfsclopen *);
105 static void nfscl_cleanclient(struct nfsclclient *);
106 static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
107 struct ucred *, NFSPROC_T *);
108 static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
109 struct nfsmount *, struct ucred *, NFSPROC_T *);
110 static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
111 NFSPROC_T *);
112 static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
113 struct nfscllock *, int);
114 static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
115 struct nfscllock **, int);
116 static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *,
117 struct nfscldeleghead *);
118 static u_int32_t nfscl_nextcbident(void);
119 static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
120 static struct nfsclclient *nfscl_getclnt(u_int32_t);
121 static struct nfsclclient *nfscl_getclntsess(uint8_t *);
122 static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
123 int);
124 static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
125 int, struct nfsclrecalllayout **, struct nfscllayout **);
126 static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
127 static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
128 int);
129 static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
130 static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
131 u_int8_t *, struct nfscllock **);
132 static void nfscl_freealllocks(struct nfscllockownerhead *, int);
133 static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
134 struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
135 static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
136 struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
137 struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
138 static int nfscl_moveopen(vnode_t , struct nfsclclient *,
139 struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
140 struct nfscldeleg *, struct ucred *, NFSPROC_T *);
141 static void nfscl_totalrecall(struct nfsclclient *);
142 static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
143 struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
144 static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
145 u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
146 struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
147 static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
148 int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
149 struct ucred *, NFSPROC_T *);
150 static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
151 struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
152 static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *,
153 bool);
154 static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
155 static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
156 static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
157 struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int,
158 vnode_t *);
159 static void nfscl_freeopenowner(struct nfsclowner *, int);
160 static void nfscl_cleandeleg(struct nfscldeleg *);
161 static void nfscl_emptylockowner(struct nfscllockowner *,
162 struct nfscllockownerfhhead *);
163 static void nfscl_mergeflayouts(struct nfsclflayouthead *,
164 struct nfsclflayouthead *);
165 static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
166 uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
167 static int nfscl_seq(uint32_t, uint32_t);
168 static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
169 struct ucred *, NFSPROC_T *);
170 static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
171 struct ucred *, NFSPROC_T *);
172
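/*
 * Error tables for the callback operations. Each array lists the errors
 * that may be replied for that operation, terminated by a 0 entry, and
 * nfscl_cberrmap[] is indexed by the callback operation number
 * (see nfscl_errmap()).
 */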
173 static short nfscberr_null[] = {
174 0,
175 0,
176 };
177
178 static short nfscberr_getattr[] = {
179 NFSERR_RESOURCE,
180 NFSERR_BADHANDLE,
181 NFSERR_BADXDR,
182 NFSERR_RESOURCE,
183 NFSERR_SERVERFAULT,
184 0,
185 };
186
187 static short nfscberr_recall[] = {
188 NFSERR_RESOURCE,
189 NFSERR_BADHANDLE,
190 NFSERR_BADSTATEID,
191 NFSERR_BADXDR,
192 NFSERR_RESOURCE,
193 NFSERR_SERVERFAULT,
194 0,
195 };
196
197 static short *nfscl_cberrmap[] = {
198 nfscberr_null,
199 nfscberr_null,
200 nfscberr_null,
201 nfscberr_getattr,
202 nfscberr_recall
203 };
204
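/* Address family in use by the clientid (IPv6 iff NFSCLFLAGS_AFINET6). */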
205 #define NETFAMILY(clp) \
206 (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
207
208 /*
209 * Called for an open operation.
210 * If the nfhp argument is NULL, just get an openowner.
211 */
212 int
213 nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
214 struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
215 struct nfsclopen **opp, int *newonep, int *retp, int lockit, bool firstref)
216 {
217 struct nfsclclient *clp;
218 struct nfsclowner *owp, *nowp;
219 struct nfsclopen *op = NULL, *nop = NULL;
220 struct nfscldeleg *dp;
221 struct nfsclownerhead *ohp;
222 u_int8_t own[NFSV4CL_LOCKNAMELEN];
223 int ret;
224
225 if (newonep != NULL)
226 *newonep = 0;
227 if (opp != NULL)
228 *opp = NULL;
229 if (owpp != NULL)
230 *owpp = NULL;
231
232 /*
233 * Might need one or both of these, so MALLOC them now, to
234 * avoid a tsleep() in MALLOC later.
235 */
236 nowp = malloc(sizeof (struct nfsclowner),
237 M_NFSCLOWNER, M_WAITOK);
238 if (nfhp != NULL) {
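/*
 * struct nfsclopen is assumed to end in a one byte array holding the
 * file handle, hence the fhlen - 1 extra bytes allocated here.
 */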
239 nop = malloc(sizeof (struct nfsclopen) +
240 fhlen - 1, M_NFSCLOPEN, M_WAITOK);
241 nop->nfso_hash.le_prev = NULL;
242 }
243 ret = nfscl_getcl(vp->v_mount, cred, p, false, firstref, &clp);
244 if (ret != 0) {
245 free(nowp, M_NFSCLOWNER);
246 if (nop != NULL)
247 free(nop, M_NFSCLOPEN);
248 return (ret);
249 }
250
251 /*
252 * Get the Open iff it already exists.
253 * If none found, add the new one or return error, depending upon
254 * "create".
255 */
256 NFSLOCKCLSTATE();
257 dp = NULL;
258 /* First check the delegation list */
259 if (nfhp != NULL && usedeleg) {
260 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
261 if (dp->nfsdl_fhlen == fhlen &&
262 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
263 if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
264 (dp->nfsdl_flags & NFSCLDL_WRITE))
265 break;
266 dp = NULL;
267 break;
268 }
269 }
270 }
271
272 /* For NFSv4.1/4.2 and this option, use a single open_owner. */
273 if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
274 nfscl_filllockowner(NULL, own, F_POSIX);
275 else
276 nfscl_filllockowner(p->td_proc, own, F_POSIX);
277 if (dp != NULL)
278 ohp = &dp->nfsdl_owner;
279 else
280 ohp = &clp->nfsc_owner;
281 /* Now, search for an openowner */
282 LIST_FOREACH(owp, ohp, nfsow_list) {
283 if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
284 break;
285 }
286
287 /*
288 * Create a new open, as required.
289 */
290 nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
291 cred, newonep);
292
293 /*
294 * Now, check the mode on the open and return the appropriate
295 * value.
296 */
297 if (retp != NULL) {
298 if (nfhp != NULL && dp != NULL && nop == NULL)
299 /* new local open on delegation */
300 *retp = NFSCLOPEN_SETCRED;
301 else
302 *retp = NFSCLOPEN_OK;
303 }
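/*
 * If amode adds access bits not covered by the existing open, the
 * caller must do another Open RPC (NFSCLOPEN_DOOPEN), unless the
 * open is backed by a delegation.
 */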
304 if (op != NULL && (amode & ~(op->nfso_mode))) {
305 op->nfso_mode |= amode;
306 if (retp != NULL && dp == NULL)
307 *retp = NFSCLOPEN_DOOPEN;
308 }
309
310 /*
311 * Serialize modifications to the open owner for multiple threads
312 * within the same process using a read/write sleep lock.
313 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
314 * by acquiring a shared lock. The close operations still use an
315 * exclusive lock for this case.
316 */
317 if (lockit != 0) {
318 if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
319 /*
320 * Get a shared lock on the OpenOwner, but first
321 * wait for any pending exclusive lock, so that the
322 * exclusive locker gets priority.
323 */
324 nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
325 NFSCLSTATEMUTEXPTR, NULL);
326 nfsv4_getref(&owp->nfsow_rwlock, NULL,
327 NFSCLSTATEMUTEXPTR, NULL);
328 } else
329 nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
330 }
331 NFSUNLOCKCLSTATE();
332 if (nowp != NULL)
333 free(nowp, M_NFSCLOWNER);
334 if (nop != NULL)
335 free(nop, M_NFSCLOPEN);
336 if (owpp != NULL)
337 *owpp = owp;
338 if (opp != NULL)
339 *opp = op;
340 return (0);
341 }
342
343 /*
344 * Create a new open, as required.
345 */
346 static void
347 nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
348 struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
349 struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
350 struct ucred *cred, int *newonep)
351 {
352 struct nfsclowner *owp = *owpp, *nowp;
353 struct nfsclopen *op, *nop;
354
355 if (nowpp != NULL)
356 nowp = *nowpp;
357 else
358 nowp = NULL;
359 if (nopp != NULL)
360 nop = *nopp;
361 else
362 nop = NULL;
363 if (owp == NULL && nowp != NULL) {
364 NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
365 LIST_INIT(&nowp->nfsow_open);
366 nowp->nfsow_clp = clp;
367 nowp->nfsow_seqid = 0;
368 nowp->nfsow_defunct = 0;
369 nfscl_lockinit(&nowp->nfsow_rwlock);
370 if (dp != NULL) {
371 nfsstatsv1.cllocalopenowners++;
372 LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
373 } else {
374 nfsstatsv1.clopenowners++;
375 LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
376 }
377 owp = *owpp = nowp;
378 *nowpp = NULL;
379 if (newonep != NULL)
380 *newonep = 1;
381 }
382
383 /* If an fhp has been specified, create an Open as well. */
384 if (fhp != NULL) {
385 /* and look for the correct open, based upon FH */
386 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
387 if (op->nfso_fhlen == fhlen &&
388 !NFSBCMP(op->nfso_fh, fhp, fhlen))
389 break;
390 }
391 if (op == NULL && nop != NULL) {
392 nop->nfso_own = owp;
393 nop->nfso_mode = 0;
394 nop->nfso_opencnt = 0;
395 nop->nfso_posixlock = 1;
396 nop->nfso_fhlen = fhlen;
397 NFSBCOPY(fhp, nop->nfso_fh, fhlen);
398 LIST_INIT(&nop->nfso_lock);
399 nop->nfso_stateid.seqid = 0;
400 nop->nfso_stateid.other[0] = 0;
401 nop->nfso_stateid.other[1] = 0;
402 nop->nfso_stateid.other[2] = 0;
403 KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
404 newnfs_copyincred(cred, &nop->nfso_cred);
405 if (dp != NULL) {
406 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
407 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
408 nfsdl_list);
409 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
410 nfsstatsv1.cllocalopens++;
411 } else {
412 LIST_INSERT_HEAD(NFSCLOPENHASH(clp, fhp, fhlen),
413 nop, nfso_hash);
414 nfsstatsv1.clopens++;
415 }
416 LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
417 *opp = nop;
418 *nopp = NULL;
419 if (newonep != NULL)
420 *newonep = 1;
421 } else {
422 *opp = op;
423 }
424 }
425 }
426
427 /*
428 * Called to find/add a delegation to a client.
429 */
430 int
431 nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
432 int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg *dp)
433 {
434 struct nfscldeleg *tdp;
435 struct nfsmount *nmp;
436
437 KASSERT(mp != NULL, ("nfscl_deleg: mp NULL"));
438 nmp = VFSTONFS(mp);
439
440 /*
441 * Since a delegation might be added to the mount,
442 * set NFSMNTP_DELEGISSUED now. If a delegation already
443 * exists, setting this flag is harmless.
444 */
445 NFSLOCKMNT(nmp);
446 nmp->nm_privflag |= NFSMNTP_DELEGISSUED;
447 NFSUNLOCKMNT(nmp);
448
449 /* Look for the correct deleg, based upon FH */
450 NFSLOCKCLSTATE();
451 tdp = nfscl_finddeleg(clp, nfhp, fhlen);
452 if (tdp == NULL) {
453 if (dp == NULL) {
454 NFSUNLOCKCLSTATE();
455 return (NFSERR_BADSTATEID);
456 }
457 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
458 LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
459 nfsdl_hash);
460 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
461 nfsstatsv1.cldelegates++;
462 clp->nfsc_delegcnt++;
463 } else {
464 /*
465 * A delegation already exists. If the new one is a Write
466 * delegation and the old one a Read delegation, return the
467 * Read delegation. Otherwise, return the new delegation.
468 */
469 if (dp != NULL) {
470 if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0 &&
471 (tdp->nfsdl_flags & NFSCLDL_READ) != 0) {
472 TAILQ_REMOVE(&clp->nfsc_deleg, tdp, nfsdl_list);
473 LIST_REMOVE(tdp, nfsdl_hash);
474 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
475 nfsdl_list);
476 LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp,
477 fhlen), dp, nfsdl_hash);
478 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
479 } else {
480 tdp = dp; /* Return this one. */
481 }
482 } else {
483 tdp = NULL;
484 }
485 }
486 NFSUNLOCKCLSTATE();
487 if (tdp != NULL) {
488 nfscl_trydelegreturn(tdp, cred, nmp, p);
489 free(tdp, M_NFSCLDELEG);
490 }
491 return (0);
492 }
493
494 /*
495 * Find a delegation for this file handle. Return NULL upon failure.
496 */
497 static struct nfscldeleg *
498 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
499 {
500 struct nfscldeleg *dp;
501
502 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
503 if (dp->nfsdl_fhlen == fhlen &&
504 !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
505 break;
506 }
507 return (dp);
508 }
509
510 /*
511 * Get a stateid for an I/O operation. First, look for an open and iff
512 * found, return either a lockowner stateid or the open stateid.
513 * If no Open is found, just return error and the special stateid of all zeros.
514 */
515 int
516 nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
517 int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
518 void **lckpp)
519 {
520 struct nfsclclient *clp;
521 struct nfsclopen *op = NULL, *top;
522 struct nfsclopenhash *oph;
523 struct nfscllockowner *lp;
524 struct nfscldeleg *dp;
525 struct nfsnode *np;
526 struct nfsmount *nmp;
527 struct nfscred ncr;
528 u_int8_t own[NFSV4CL_LOCKNAMELEN], lockown[NFSV4CL_LOCKNAMELEN];
529 int error;
530 bool done;
531
532 *lckpp = NULL;
533 /*
534 * Initially, just set the special stateid of all zeros.
535 * (Don't do this for a DS, since the special stateid can't be used.)
536 */
537 if (fords == 0) {
538 stateidp->seqid = 0;
539 stateidp->other[0] = 0;
540 stateidp->other[1] = 0;
541 stateidp->other[2] = 0;
542 }
543 if (vp->v_type != VREG)
544 return (EISDIR);
545 np = VTONFS(vp);
546 nmp = VFSTONFS(vp->v_mount);
547
548 /*
549 * For "oneopenown" mounts, first check for a cached open in the
550 * NFS vnode, that can be used as a stateid. This can only be
551 * done if no delegations have been issued to the mount and no
552 * byte range file locking has been done for the file.
553 */
554 if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp) && fords == 0) {
555 NFSLOCKMNT(nmp);
556 NFSLOCKNODE(np);
557 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0 &&
558 (np->n_flag & NMIGHTBELOCKED) == 0 &&
559 np->n_openstateid != NULL) {
560 stateidp->seqid = 0;
561 stateidp->other[0] =
562 np->n_openstateid->nfso_stateid.other[0];
563 stateidp->other[1] =
564 np->n_openstateid->nfso_stateid.other[1];
565 stateidp->other[2] =
566 np->n_openstateid->nfso_stateid.other[2];
567 NFSUNLOCKNODE(np);
568 NFSUNLOCKMNT(nmp);
569 return (0);
570 }
571 NFSUNLOCKNODE(np);
572 NFSUNLOCKMNT(nmp);
573 }
574
575 NFSLOCKCLSTATE();
576 clp = nfscl_findcl(nmp);
577 if (clp == NULL) {
578 NFSUNLOCKCLSTATE();
579 return (EACCES);
580 }
581
582 /*
583 * Wait for recovery to complete.
584 */
585 while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
586 (void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
587 PZERO, "nfsrecvr", NULL);
588
589 /*
590 * First, look for a delegation.
591 */
592 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
593 if (dp->nfsdl_fhlen == fhlen &&
594 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
595 if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
596 (dp->nfsdl_flags & NFSCLDL_WRITE)) {
597 if (NFSHASNFSV4N(nmp))
598 stateidp->seqid = 0;
599 else
600 stateidp->seqid =
601 dp->nfsdl_stateid.seqid;
602 stateidp->other[0] = dp->nfsdl_stateid.other[0];
603 stateidp->other[1] = dp->nfsdl_stateid.other[1];
604 stateidp->other[2] = dp->nfsdl_stateid.other[2];
605 if (!(np->n_flag & NDELEGRECALL)) {
606 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
607 nfsdl_list);
608 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
609 nfsdl_list);
610 dp->nfsdl_timestamp = NFSD_MONOSEC +
611 120;
612 dp->nfsdl_rwlock.nfslock_usecnt++;
613 *lckpp = (void *)&dp->nfsdl_rwlock;
614 }
615 NFSUNLOCKCLSTATE();
616 return (0);
617 }
618 break;
619 }
620 }
621
622 if (p != NULL) {
623 /*
624 * If p != NULL, we want to search the parentage tree
625 * for a matching OpenOwner and use that.
626 */
627 if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
628 nfscl_filllockowner(NULL, own, F_POSIX);
629 else
630 nfscl_filllockowner(p->td_proc, own, F_POSIX);
631 nfscl_filllockowner(p->td_proc, lockown, F_POSIX);
632 lp = NULL;
633 error = nfscl_getopen(NULL, clp->nfsc_openhash, nfhp, fhlen,
634 own, lockown, mode, &lp, &op);
635 if (error == 0 && lp != NULL && fords == 0) {
636 /* Don't return a lock stateid for a DS. */
637 if (NFSHASNFSV4N(nmp))
638 stateidp->seqid = 0;
639 else
640 stateidp->seqid = lp->nfsl_stateid.seqid;
641 stateidp->other[0] =
642 lp->nfsl_stateid.other[0];
643 stateidp->other[1] =
644 lp->nfsl_stateid.other[1];
645 stateidp->other[2] =
646 lp->nfsl_stateid.other[2];
647 NFSUNLOCKCLSTATE();
648 return (0);
649 }
650 }
651 if (op == NULL) {
652 /* If not found, just look for any OpenOwner that will work. */
653 top = NULL;
654 done = false;
655 oph = NFSCLOPENHASH(clp, nfhp, fhlen);
656 LIST_FOREACH(op, oph, nfso_hash) {
657 if (op->nfso_fhlen == fhlen &&
658 !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
659 if (top == NULL && (op->nfso_mode &
660 NFSV4OPEN_ACCESSWRITE) != 0 &&
661 (mode & NFSV4OPEN_ACCESSREAD) != 0)
662 top = op;
663 if ((mode & op->nfso_mode) == mode) {
664 /* LRU order the hash list. */
665 LIST_REMOVE(op, nfso_hash);
666 LIST_INSERT_HEAD(oph, op, nfso_hash);
667 done = true;
668 break;
669 }
670 }
671 }
672 if (!done) {
673 NFSCL_DEBUG(2, "openmode top=%p\n", top);
674 if (top == NULL || NFSHASOPENMODE(nmp)) {
675 NFSUNLOCKCLSTATE();
676 return (ENOENT);
677 } else
678 op = top;
679 }
680 /*
681 * For read aheads or write behinds, use the open cred.
682 * A read ahead or write behind is indicated by p == NULL.
683 */
684 if (p == NULL)
685 memcpy(&ncr, &op->nfso_cred, sizeof(ncr));
686 }
687
688 /*
689 * No lock stateid, so return the open stateid.
690 */
691 if (NFSHASNFSV4N(nmp))
692 stateidp->seqid = 0;
693 else
694 stateidp->seqid = op->nfso_stateid.seqid;
695 stateidp->other[0] = op->nfso_stateid.other[0];
696 stateidp->other[1] = op->nfso_stateid.other[1];
697 stateidp->other[2] = op->nfso_stateid.other[2];
698 NFSUNLOCKCLSTATE();
699 if (p == NULL)
700 newnfs_copycred(&ncr, cred);
701 return (0);
702 }
703
704 /*
705 * Search for a matching file, mode and, optionally, lockowner.
706 */
707 static int
708 nfscl_getopen(struct nfsclownerhead *ohp, struct nfsclopenhash *ohashp,
709 u_int8_t *nfhp, int fhlen, u_int8_t *openown, u_int8_t *lockown,
710 u_int32_t mode, struct nfscllockowner **lpp, struct nfsclopen **opp)
711 {
712 struct nfsclowner *owp;
713 struct nfsclopen *op, *rop, *rop2;
714 struct nfsclopenhash *oph;
715 bool keep_looping;
716
717 KASSERT(ohp == NULL || ohashp == NULL, ("nfscl_getopen: "
718 "only one of ohp and ohashp can be set"));
719 if (lpp != NULL)
720 *lpp = NULL;
721 /*
722 * rop will be set to the open to be returned. There are three
723 * variants of this, all for an open of the correct file:
724 * 1 - A match of lockown.
725 * 2 - A match of the openown, when no lockown match exists.
726 * 3 - A match for any open, if no openown or lockown match exists.
727 * Looking for #2 over #3 probably isn't necessary, but since
728 * RFC3530 is vague w.r.t. the relationship between openowners and
729 * lockowners, I think this is the safer way to go.
730 */
731 rop = NULL;
732 rop2 = NULL;
733 keep_looping = true;
734 /* Search the client list */
735 if (ohashp == NULL) {
736 /* Search the local opens on the delegation. */
737 LIST_FOREACH(owp, ohp, nfsow_list) {
738 /* and look for the correct open */
739 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
740 if (op->nfso_fhlen == fhlen &&
741 !NFSBCMP(op->nfso_fh, nfhp, fhlen)
742 && (op->nfso_mode & mode) == mode)
743 keep_looping = nfscl_checkown(owp, op, openown,
744 lockown, lpp, &rop, &rop2);
745 if (!keep_looping)
746 break;
747 }
748 if (!keep_looping)
749 break;
750 }
751 } else {
752 /* Search for matching opens on the hash list. */
753 oph = &ohashp[NFSCLOPENHASHFUNC(nfhp, fhlen)];
754 LIST_FOREACH(op, oph, nfso_hash) {
755 if (op->nfso_fhlen == fhlen &&
756 !NFSBCMP(op->nfso_fh, nfhp, fhlen)
757 && (op->nfso_mode & mode) == mode)
758 keep_looping = nfscl_checkown(op->nfso_own, op,
759 openown, lockown, lpp, &rop, &rop2);
760 if (!keep_looping) {
761 /* LRU order the hash list. */
762 LIST_REMOVE(op, nfso_hash);
763 LIST_INSERT_HEAD(oph, op, nfso_hash);
764 break;
765 }
766 }
767 }
768 if (rop == NULL)
769 rop = rop2;
770 if (rop == NULL)
771 return (EBADF);
772 *opp = rop;
773 return (0);
774 }
775
776 /* Check for an owner match; returns false once the search can stop. */
777 static bool
778 nfscl_checkown(struct nfsclowner *owp, struct nfsclopen *op, uint8_t *openown,
779 uint8_t *lockown, struct nfscllockowner **lpp, struct nfsclopen **ropp,
780 struct nfsclopen **ropp2)
781 {
782 struct nfscllockowner *lp;
783 bool keep_looping;
784
785 keep_looping = true;
786 if (lpp != NULL) {
787 /* Now look for a matching lockowner. */
788 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
789 if (!NFSBCMP(lp->nfsl_owner, lockown,
790 NFSV4CL_LOCKNAMELEN)) {
791 *lpp = lp;
792 *ropp = op;
793 return (false);
794 }
795 }
796 }
797 if (*ropp == NULL && !NFSBCMP(owp->nfsow_owner, openown,
798 NFSV4CL_LOCKNAMELEN)) {
799 *ropp = op;
800 if (lpp == NULL)
801 keep_looping = false;
802 }
803 if (*ropp2 == NULL)
804 *ropp2 = op;
805 return (keep_looping);
806 }
807
808 /*
809 * Release use of an open owner. Called when open operations are done
810 * with the open owner.
811 */
812 void
813 nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
814 __unused int error, __unused int candelete, int unlocked)
815 {
816
817 if (owp == NULL)
818 return;
819 NFSLOCKCLSTATE();
820 if (unlocked == 0) {
821 if (NFSHASONEOPENOWN(nmp))
822 nfsv4_relref(&owp->nfsow_rwlock);
823 else
824 nfscl_lockunlock(&owp->nfsow_rwlock);
825 }
826 nfscl_clrelease(owp->nfsow_clp);
827 NFSUNLOCKCLSTATE();
828 }
829
830 /*
831 * Release use of an open structure under an open owner.
832 */
833 void
834 nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
835 int candelete)
836 {
837 struct nfsclclient *clp;
838 struct nfsclowner *owp;
839
840 if (op == NULL)
841 return;
842 NFSLOCKCLSTATE();
843 owp = op->nfso_own;
844 if (NFSHASONEOPENOWN(nmp))
845 nfsv4_relref(&owp->nfsow_rwlock);
846 else
847 nfscl_lockunlock(&owp->nfsow_rwlock);
848 clp = owp->nfsow_clp;
849 if (error && candelete && op->nfso_opencnt == 0)
850 nfscl_freeopen(op, 0, true);
851 nfscl_clrelease(clp);
852 NFSUNLOCKCLSTATE();
853 }
854
855 /*
856 * Called to get a clientid structure. It will optionally lock the
857 * client data structures to do the SetClientId/SetClientId_confirm,
858 * but will release that lock and return the clientid with a reference
859 * count on it.
860 * If the "cred" argument is NULL, a new clientid should not be created.
861 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
862 * be done.
863 * It always returns *clpp with a reference count on it, unless returning an error.
864 */
865 int
866 nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
867 bool tryminvers, bool firstref, struct nfsclclient **clpp)
868 {
869 struct nfsclclient *clp;
870 struct nfsclclient *newclp = NULL;
871 struct nfsmount *nmp;
872 char uuid[HOSTUUIDLEN];
873 int igotlock = 0, error, trystalecnt, clidinusedelay, i;
874 u_int16_t idlen = 0;
875
876 nmp = VFSTONFS(mp);
877 if (cred != NULL) {
878 getcredhostuuid(cred, uuid, sizeof uuid);
879 idlen = strlen(uuid);
880 if (idlen > 0)
881 idlen += sizeof (u_int64_t);
882 else
883 idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
884 newclp = malloc(
885 sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
886 M_WAITOK | M_ZERO);
887 }
888 NFSLOCKCLSTATE();
889 /*
890 * If a forced dismount is already in progress, don't
891 * allocate a new clientid and get out now. For the case where
892 * clp != NULL, this is a harmless optimization.
893 */
894 if (NFSCL_FORCEDISM(mp)) {
895 NFSUNLOCKCLSTATE();
896 if (newclp != NULL)
897 free(newclp, M_NFSCLCLIENT);
898 return (EBADF);
899 }
900 clp = nmp->nm_clp;
901 if (clp == NULL) {
902 if (newclp == NULL) {
903 NFSUNLOCKCLSTATE();
904 return (EACCES);
905 }
906 clp = newclp;
907 clp->nfsc_idlen = idlen;
908 LIST_INIT(&clp->nfsc_owner);
909 TAILQ_INIT(&clp->nfsc_deleg);
910 TAILQ_INIT(&clp->nfsc_layout);
911 LIST_INIT(&clp->nfsc_devinfo);
912 for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
913 LIST_INIT(&clp->nfsc_deleghash[i]);
914 for (i = 0; i < NFSCLOPENHASHSIZE; i++)
915 LIST_INIT(&clp->nfsc_openhash[i]);
916 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
917 LIST_INIT(&clp->nfsc_layouthash[i]);
918 clp->nfsc_flags = NFSCLFLAGS_INITED;
919 clp->nfsc_delegcnt = 0;
920 clp->nfsc_deleghighwater = NFSCLDELEGHIGHWATER;
921 clp->nfsc_layoutcnt = 0;
922 clp->nfsc_layouthighwater = NFSCLLAYOUTHIGHWATER;
923 clp->nfsc_clientidrev = 1;
924 clp->nfsc_cbident = nfscl_nextcbident();
925 nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
926 clp->nfsc_idlen);
927 LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
928 nmp->nm_clp = clp;
929 clp->nfsc_nmp = nmp;
930 } else {
931 if (newclp != NULL)
932 free(newclp, M_NFSCLCLIENT);
933 }
934 while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
935 !NFSCL_FORCEDISM(mp))
936 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
937 NFSCLSTATEMUTEXPTR, mp);
938 if (igotlock == 0) {
939 /*
940 * Call nfsv4_lock() with "iwantlock == 0" on the firstref so
941 * that it will wait for a pending exclusive lock request.
942 * This gives the exclusive lock request priority over this
943 * shared lock request.
944 * An exclusive lock on nfsc_lock is used mainly for server
945 * crash recoveries and delegation recalls.
946 */
947 if (firstref)
948 nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR,
949 mp);
950 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
951 }
952 if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
953 /*
954 * Both nfsv4_lock() and nfsv4_getref() know to check
955 * for NFSCL_FORCEDISM() and return without sleeping to
956 * wait for the exclusive lock to be released, since it
957 * might be held by nfscl_umount() and we need to get out
958 * now for that case and not wait until nfscl_umount()
959 * releases it.
960 */
961 NFSUNLOCKCLSTATE();
962 return (EBADF);
963 }
964 NFSUNLOCKCLSTATE();
965
966 /*
967 * If it needs a clientid, do the setclientid now.
968 */
969 if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
970 if (!igotlock)
971 panic("nfscl_clget");
972 if (p == NULL || cred == NULL) {
973 NFSLOCKCLSTATE();
974 nfsv4_unlock(&clp->nfsc_lock, 0);
975 NFSUNLOCKCLSTATE();
976 return (EACCES);
977 }
978 /*
979 * If RFC3530 Sec. 14.2.33 is taken literally,
980 * NFSERR_CLIDINUSE will be returned persistently for the
981 * case where a new mount of the same file system is using
982 * a different principal. In practice, NFSERR_CLIDINUSE is
983 * only returned when there is outstanding unexpired state
984 * on the clientid. As such, try for twice the lease
985 * interval, if we know what that is. Otherwise, make a
986 * wild ass guess.
987 * The case of returning NFSERR_STALECLIENTID is far less
988 * likely, but might occur if there is a significant delay
989 * between doing the SetClientID and SetClientIDConfirm Ops,
990 * such that the server throws away the clientid before
991 * receiving the SetClientIDConfirm.
992 */
993 if (clp->nfsc_renew > 0)
994 clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
995 else
996 clidinusedelay = 120;
997 trystalecnt = 3;
998 do {
999 error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
1000 if (error == NFSERR_STALECLIENTID ||
1001 error == NFSERR_STALEDONTRECOVER ||
1002 error == NFSERR_BADSESSION ||
1003 error == NFSERR_CLIDINUSE) {
1004 (void) nfs_catnap(PZERO, error, "nfs_setcl");
1005 } else if (error == NFSERR_MINORVERMISMATCH &&
1006 tryminvers) {
1007 if (nmp->nm_minorvers > 0)
1008 nmp->nm_minorvers--;
1009 else
1010 tryminvers = false;
1011 }
1012 } while (((error == NFSERR_STALECLIENTID ||
1013 error == NFSERR_BADSESSION ||
1014 error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
1015 (error == NFSERR_CLIDINUSE && --clidinusedelay > 0) ||
1016 (error == NFSERR_MINORVERMISMATCH && tryminvers));
1017 if (error) {
1018 NFSLOCKCLSTATE();
1019 nfsv4_unlock(&clp->nfsc_lock, 0);
1020 NFSUNLOCKCLSTATE();
1021 return (error);
1022 }
1023 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
1024 }
1025 if (igotlock) {
1026 NFSLOCKCLSTATE();
1027 nfsv4_unlock(&clp->nfsc_lock, 1);
1028 NFSUNLOCKCLSTATE();
1029 }
1030
1031 *clpp = clp;
1032 return (0);
1033 }
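/*
 * Sketch of typical use: acquire with nfscl_getcl(vp->v_mount, cred, p,
 * false, true, &clp) and drop the reference with nfscl_clrelease(clp)
 * while holding the NFSCLSTATE mutex (or via nfscl_clientrelease(clp)),
 * as done in nfscl_checkwritelocked() below.
 */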
1034
1035 /*
1036 * Get a reference to a clientid and return it, if valid.
1037 */
1038 struct nfsclclient *
1039 nfscl_findcl(struct nfsmount *nmp)
1040 {
1041 struct nfsclclient *clp;
1042
1043 clp = nmp->nm_clp;
1044 if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
1045 return (NULL);
1046 return (clp);
1047 }
1048
1049 /*
1050 * Release the clientid structure. It may be locked or reference counted.
1051 */
1052 static void
1053 nfscl_clrelease(struct nfsclclient *clp)
1054 {
1055
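/* Drop either the exclusive lock or a shared reference, as held. */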
1056 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
1057 nfsv4_unlock(&clp->nfsc_lock, 0);
1058 else
1059 nfsv4_relref(&clp->nfsc_lock);
1060 }
1061
1062 /*
1063 * External call for nfscl_clrelease.
1064 */
1065 void
1066 nfscl_clientrelease(struct nfsclclient *clp)
1067 {
1068
1069 NFSLOCKCLSTATE();
1070 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
1071 nfsv4_unlock(&clp->nfsc_lock, 0);
1072 else
1073 nfsv4_relref(&clp->nfsc_lock);
1074 NFSUNLOCKCLSTATE();
1075 }
1076
1077 /*
1078 * Called when wanting to lock a byte region.
1079 */
1080 int
1081 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1082 short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
1083 int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
1084 struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
1085 {
1086 struct nfscllockowner *lp;
1087 struct nfsclopen *op;
1088 struct nfsclclient *clp;
1089 struct nfscllockowner *nlp;
1090 struct nfscllock *nlop, *otherlop;
1091 struct nfscldeleg *dp = NULL, *ldp = NULL;
1092 struct nfscllockownerhead *lhp = NULL;
1093 struct nfsnode *np;
1094 u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
1095 u_int8_t *openownp;
1096 int error = 0, ret, donelocally = 0;
1097 u_int32_t mode;
1098
1099 /* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
1100 mode = 0;
1101 np = VTONFS(vp);
1102 *lpp = NULL;
1103 lp = NULL;
1104 *newonep = 0;
1105 *donelocallyp = 0;
1106
1107 /*
1108 * Might need these, so MALLOC them now, to
1109 * avoid a tsleep() in MALLOC later.
1110 */
1111 nlp = malloc(
1112 sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
1113 otherlop = malloc(
1114 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1115 nlop = malloc(
1116 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1117 nlop->nfslo_type = type;
1118 nlop->nfslo_first = off;
1119 if (len == NFS64BITSSET) {
1120 nlop->nfslo_end = NFS64BITSSET;
1121 } else {
1122 nlop->nfslo_end = off + len;
1123 if (nlop->nfslo_end <= nlop->nfslo_first)
1124 error = NFSERR_INVAL;
1125 }
1126
1127 if (!error) {
1128 if (recovery)
1129 clp = rclp;
1130 else
1131 error = nfscl_getcl(vp->v_mount, cred, p, false, true,
1132 &clp);
1133 }
1134 if (error) {
1135 free(nlp, M_NFSCLLOCKOWNER);
1136 free(otherlop, M_NFSCLLOCK);
1137 free(nlop, M_NFSCLLOCK);
1138 return (error);
1139 }
1140
1141 op = NULL;
1142 if (recovery) {
1143 ownp = rownp;
1144 openownp = ropenownp;
1145 } else {
1146 nfscl_filllockowner(id, own, flags);
1147 ownp = own;
1148 if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
1149 nfscl_filllockowner(NULL, openown, F_POSIX);
1150 else
1151 nfscl_filllockowner(p->td_proc, openown, F_POSIX);
1152 openownp = openown;
1153 }
1154 if (!recovery) {
1155 NFSLOCKCLSTATE();
1156 /*
1157 * First, search for a delegation. If one exists for this file,
1158 * the lock can be done locally against it, so long as there
1159 * isn't a local lock conflict.
1160 */
1161 ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1162 np->n_fhp->nfh_len);
1163 /* Just sanity check for correct type of delegation */
1164 if (dp != NULL && ((dp->nfsdl_flags &
1165 (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
1166 (type == F_WRLCK &&
1167 (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
1168 dp = NULL;
1169 }
1170 if (dp != NULL) {
1171 /* Now, find an open and maybe a lockowner. */
1172 ret = nfscl_getopen(&dp->nfsdl_owner, NULL, np->n_fhp->nfh_fh,
1173 np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
1174 if (ret)
1175 ret = nfscl_getopen(NULL, clp->nfsc_openhash,
1176 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1177 ownp, mode, NULL, &op);
1178 if (!ret) {
1179 lhp = &dp->nfsdl_lock;
1180 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
1181 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
1182 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
1183 donelocally = 1;
1184 } else {
1185 dp = NULL;
1186 }
1187 }
1188 if (!donelocally) {
1189 /*
1190 * Get the related Open and maybe lockowner.
1191 */
1192 error = nfscl_getopen(NULL, clp->nfsc_openhash,
1193 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1194 ownp, mode, &lp, &op);
1195 if (!error)
1196 lhp = &op->nfso_lock;
1197 }
1198 if (!error && !recovery)
1199 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
1200 np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
1201 if (error) {
1202 if (!recovery) {
1203 nfscl_clrelease(clp);
1204 NFSUNLOCKCLSTATE();
1205 }
1206 free(nlp, M_NFSCLLOCKOWNER);
1207 free(otherlop, M_NFSCLLOCK);
1208 free(nlop, M_NFSCLLOCK);
1209 return (error);
1210 }
1211
1212 /*
1213 * Ok, see if a lockowner exists and create one, as required.
1214 */
1215 if (lp == NULL)
1216 LIST_FOREACH(lp, lhp, nfsl_list) {
1217 if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
1218 break;
1219 }
1220 if (lp == NULL) {
1221 NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
1222 if (recovery)
1223 NFSBCOPY(ropenownp, nlp->nfsl_openowner,
1224 NFSV4CL_LOCKNAMELEN);
1225 else
1226 NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
1227 NFSV4CL_LOCKNAMELEN);
1228 nlp->nfsl_seqid = 0;
1229 nlp->nfsl_lockflags = flags;
1230 nlp->nfsl_inprog = NULL;
1231 nfscl_lockinit(&nlp->nfsl_rwlock);
1232 LIST_INIT(&nlp->nfsl_lock);
1233 if (donelocally) {
1234 nlp->nfsl_open = NULL;
1235 nfsstatsv1.cllocallockowners++;
1236 } else {
1237 nlp->nfsl_open = op;
1238 nfsstatsv1.cllockowners++;
1239 }
1240 LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
1241 lp = nlp;
1242 nlp = NULL;
1243 *newonep = 1;
1244 }
1245
1246 /*
1247 * Now, update the byte ranges for locks.
1248 */
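/*
 * nfscl_updatelock() returns 0 when the new range merged into locks
 * already held, so no Lock RPC against the server is required.
 */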
1249 ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1250 if (!ret)
1251 donelocally = 1;
1252 if (donelocally) {
1253 *donelocallyp = 1;
1254 if (!recovery)
1255 nfscl_clrelease(clp);
1256 } else {
1257 /*
1258 * Serial modifications on the lock owner for multiple threads
1259 * for the same process using a read/write lock.
1260 */
1261 if (!recovery)
1262 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1263 }
1264 if (!recovery)
1265 NFSUNLOCKCLSTATE();
1266
1267 if (nlp)
1268 free(nlp, M_NFSCLLOCKOWNER);
1269 if (nlop)
1270 free(nlop, M_NFSCLLOCK);
1271 if (otherlop)
1272 free(otherlop, M_NFSCLLOCK);
1273
1274 *lpp = lp;
1275 return (0);
1276 }
1277
1278 /*
1279 * Called to unlock a byte range, for LockU.
1280 */
1281 int
1282 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1283 __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1284 struct nfsclclient *clp, void *id, int flags,
1285 struct nfscllockowner **lpp, int *dorpcp)
1286 {
1287 struct nfscllockowner *lp;
1288 struct nfsclopen *op;
1289 struct nfscllock *nlop, *other_lop = NULL;
1290 struct nfscldeleg *dp;
1291 struct nfsnode *np;
1292 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1293 int ret = 0, fnd;
1294
1295 np = VTONFS(vp);
1296 *lpp = NULL;
1297 *dorpcp = 0;
1298
1299 /*
1300 * Might need these, so MALLOC them now, to
1301 * avoid a tsleep() in MALLOC later.
1302 */
1303 nlop = malloc(
1304 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1305 nlop->nfslo_type = F_UNLCK;
1306 nlop->nfslo_first = off;
1307 if (len == NFS64BITSSET) {
1308 nlop->nfslo_end = NFS64BITSSET;
1309 } else {
1310 nlop->nfslo_end = off + len;
1311 if (nlop->nfslo_end <= nlop->nfslo_first) {
1312 free(nlop, M_NFSCLLOCK);
1313 return (NFSERR_INVAL);
1314 }
1315 }
1316 if (callcnt == 0) {
1317 other_lop = malloc(
1318 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1319 *other_lop = *nlop;
1320 }
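/*
 * other_lop is only needed on the first call (callcnt == 0), when any
 * local locks held on a delegation must be released as well.
 */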
1321 nfscl_filllockowner(id, own, flags);
1322 dp = NULL;
1323 NFSLOCKCLSTATE();
1324 if (callcnt == 0)
1325 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1326 np->n_fhp->nfh_len);
1327
1328 /*
1329 * First, unlock any local regions on a delegation.
1330 */
1331 if (dp != NULL) {
1332 /* Look for this lockowner. */
1333 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1334 if (!NFSBCMP(lp->nfsl_owner, own,
1335 NFSV4CL_LOCKNAMELEN))
1336 break;
1337 }
1338 if (lp != NULL)
1339 /* Use other_lop, so nlop is still available */
1340 (void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1341 }
1342
1343 /*
1344 * Now, find a matching open/lockowner that hasn't already been done,
1345 * as marked by nfsl_inprog.
1346 */
1347 lp = NULL;
1348 fnd = 0;
1349 LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
1350 np->n_fhp->nfh_len), nfso_hash) {
1351 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1352 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1353 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1354 if (lp->nfsl_inprog == NULL &&
1355 !NFSBCMP(lp->nfsl_owner, own,
1356 NFSV4CL_LOCKNAMELEN)) {
1357 fnd = 1;
1358 break;
1359 }
1360 }
1361 }
1362 if (fnd)
1363 break;
1364 }
1365
1366 if (lp != NULL) {
1367 ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1368 if (ret)
1369 *dorpcp = 1;
1370 /*
1371 * Serial modifications on the lock owner for multiple
1372 * threads for the same process using a read/write lock.
1373 */
1374 lp->nfsl_inprog = p;
1375 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1376 *lpp = lp;
1377 }
1378 NFSUNLOCKCLSTATE();
1379 if (nlop)
1380 free(nlop, M_NFSCLLOCK);
1381 if (other_lop)
1382 free(other_lop, M_NFSCLLOCK);
1383 return (0);
1384 }
1385
1386 /*
1387 * Release all lockowners marked in progress for this process and file.
1388 */
1389 void
1390 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
1391 void *id, int flags)
1392 {
1393 struct nfsclopen *op;
1394 struct nfscllockowner *lp;
1395 struct nfsnode *np;
1396 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1397
1398 np = VTONFS(vp);
1399 nfscl_filllockowner(id, own, flags);
1400 NFSLOCKCLSTATE();
1401 LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
1402 np->n_fhp->nfh_len), nfso_hash) {
1403 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1404 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1405 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1406 if (lp->nfsl_inprog == p &&
1407 !NFSBCMP(lp->nfsl_owner, own,
1408 NFSV4CL_LOCKNAMELEN)) {
1409 lp->nfsl_inprog = NULL;
1410 nfscl_lockunlock(&lp->nfsl_rwlock);
1411 }
1412 }
1413 }
1414 }
1415 nfscl_clrelease(clp);
1416 NFSUNLOCKCLSTATE();
1417 }
1418
1419 /*
1420 * Called to find out if any bytes within the byte range specified are
1421 * write locked by the calling process. Used to determine if flushing
1422 * is required before a LockU.
1423 * If in doubt, return 1, so the flush will occur.
1424 */
1425 int
1426 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1427 struct ucred *cred, NFSPROC_T *p, void *id, int flags)
1428 {
1429 struct nfscllockowner *lp;
1430 struct nfsclopen *op;
1431 struct nfsclclient *clp;
1432 struct nfscllock *lop;
1433 struct nfscldeleg *dp;
1434 struct nfsnode *np;
1435 u_int64_t off, end;
1436 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1437 int error = 0;
1438
1439 np = VTONFS(vp);
1440 switch (fl->l_whence) {
1441 case SEEK_SET:
1442 case SEEK_CUR:
1443 /*
1444 * Caller is responsible for adding any necessary offset
1445 * when SEEK_CUR is used.
1446 */
1447 off = fl->l_start;
1448 break;
1449 case SEEK_END:
1450 off = np->n_size + fl->l_start;
1451 break;
1452 default:
1453 return (1);
1454 }
1455 if (fl->l_len != 0) {
1456 end = off + fl->l_len;
1457 if (end < off)
1458 return (1);
1459 } else {
1460 end = NFS64BITSSET;
1461 }
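/* A POSIX l_len of 0 means lock to eof, encoded here as NFS64BITSSET. */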
1462
1463 error = nfscl_getcl(vp->v_mount, cred, p, false, true, &clp);
1464 if (error)
1465 return (1);
1466 nfscl_filllockowner(id, own, flags);
1467 NFSLOCKCLSTATE();
1468
1469 /*
1470 * First check the delegation locks.
1471 */
1472 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1473 if (dp != NULL) {
1474 /* No need to flush if it is a write delegation. */
1475 if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0) {
1476 nfscl_clrelease(clp);
1477 NFSUNLOCKCLSTATE();
1478 return (0);
1479 }
1480 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1481 if (!NFSBCMP(lp->nfsl_owner, own,
1482 NFSV4CL_LOCKNAMELEN))
1483 break;
1484 }
1485 if (lp != NULL) {
1486 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1487 if (lop->nfslo_first >= end)
1488 break;
1489 if (lop->nfslo_end <= off)
1490 continue;
1491 if (lop->nfslo_type == F_WRLCK) {
1492 nfscl_clrelease(clp);
1493 NFSUNLOCKCLSTATE();
1494 return (1);
1495 }
1496 }
1497 }
1498 }
1499
1500 /*
1501 * Now, check state against the server.
1502 */
1503 LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
1504 np->n_fhp->nfh_len), nfso_hash) {
1505 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1506 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1507 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1508 if (!NFSBCMP(lp->nfsl_owner, own,
1509 NFSV4CL_LOCKNAMELEN))
1510 break;
1511 }
1512 if (lp != NULL) {
1513 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1514 if (lop->nfslo_first >= end)
1515 break;
1516 if (lop->nfslo_end <= off)
1517 continue;
1518 if (lop->nfslo_type == F_WRLCK) {
1519 nfscl_clrelease(clp);
1520 NFSUNLOCKCLSTATE();
1521 return (1);
1522 }
1523 }
1524 }
1525 }
1526 }
1527 nfscl_clrelease(clp);
1528 NFSUNLOCKCLSTATE();
1529 return (0);
1530 }
1531
1532 /*
1533 * Release a byte range lock owner structure.
1534 */
1535 void
1536 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1537 {
1538 struct nfsclclient *clp;
1539
1540 if (lp == NULL)
1541 return;
1542 NFSLOCKCLSTATE();
1543 clp = lp->nfsl_open->nfso_own->nfsow_clp;
1544 if (error != 0 && candelete &&
1545 (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1546 nfscl_freelockowner(lp, 0);
1547 else
1548 nfscl_lockunlock(&lp->nfsl_rwlock);
1549 nfscl_clrelease(clp);
1550 NFSUNLOCKCLSTATE();
1551 }
1552
1553 /*
1554 * Unlink the open structure.
1555 */
1556 static void
1557 nfscl_unlinkopen(struct nfsclopen *op)
1558 {
1559
1560 LIST_REMOVE(op, nfso_list);
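/* Local opens on a delegation are never hashed, so le_prev stays NULL. */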
1561 if (op->nfso_hash.le_prev != NULL)
1562 LIST_REMOVE(op, nfso_hash);
1563 }
1564
1565 /*
1566 * Free up an open structure and any associated byte range lock structures.
1567 */
1568 void
1569 nfscl_freeopen(struct nfsclopen *op, int local, bool unlink)
1570 {
1571
1572 if (unlink)
1573 nfscl_unlinkopen(op);
1574 nfscl_freealllocks(&op->nfso_lock, local);
1575 free(op, M_NFSCLOPEN);
1576 if (local)
1577 nfsstatsv1.cllocalopens--;
1578 else
1579 nfsstatsv1.clopens--;
1580 }
1581
1582 /*
1583 * Free up all lock owners and associated locks.
1584 */
1585 static void
1586 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1587 {
1588 struct nfscllockowner *lp, *nlp;
1589
1590 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1591 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1592 panic("nfscllckw");
1593 nfscl_freelockowner(lp, local);
1594 }
1595 }
1596
1597 /*
1598 * Called for an Open when NFSERR_EXPIRED is received from the server.
1599 * If there are no byte range locks and no Share Deny that would be lost,
1600 * try to do a fresh Open. Otherwise, free the open.
1601 */
1602 static int
1603 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1604 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1605 {
1606 struct nfscllockowner *lp;
1607 struct nfscldeleg *dp;
1608 int mustdelete = 0, error;
1609
1610 /*
1611 * Look for any byte range lock(s).
1612 */
1613 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1614 if (!LIST_EMPTY(&lp->nfsl_lock)) {
1615 mustdelete = 1;
1616 break;
1617 }
1618 }
1619
1620 /*
1621 * If no byte range lock(s) nor a Share deny, try to re-open.
1622 */
1623 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1624 newnfs_copycred(&op->nfso_cred, cred);
1625 dp = NULL;
1626 error = nfsrpc_reopen(nmp, op->nfso_fh,
1627 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1628 if (error) {
1629 mustdelete = 1;
1630 if (dp != NULL) {
1631 free(dp, M_NFSCLDELEG);
1632 dp = NULL;
1633 }
1634 }
1635 if (dp != NULL)
1636 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1637 op->nfso_fhlen, cred, p, dp);
1638 }
1639
1640 /*
1641 * If a byte range lock or Share deny or couldn't re-open, free it.
1642 */
1643 if (mustdelete)
1644 nfscl_freeopen(op, 0, true);
1645 return (mustdelete);
1646 }
1647
1648 /*
1649 * Free up an open owner structure.
1650 */
1651 static void
1652 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1653 {
1654 int owned;
1655
1656 /*
1657 * Make sure the NFSCLSTATE mutex is held, to avoid races with
1658 * calls in nfscl_renewthread() that do not hold a reference
1659 * count on the nfsclclient and just the mutex.
1660 * The mutex will not be held for calls done with the exclusive
1661 * nfsclclient lock held, in particular, nfscl_hasexpired()
1662 * and nfscl_recalldeleg() might do this.
1663 */
1664 owned = mtx_owned(NFSCLSTATEMUTEXPTR);
1665 if (owned == 0)
1666 NFSLOCKCLSTATE();
1667 LIST_REMOVE(owp, nfsow_list);
1668 if (owned == 0)
1669 NFSUNLOCKCLSTATE();
1670 free(owp, M_NFSCLOWNER);
1671 if (local)
1672 nfsstatsv1.cllocalopenowners--;
1673 else
1674 nfsstatsv1.clopenowners--;
1675 }
1676
1677 /*
1678 * Free up a byte range lock owner structure.
1679 */
1680 void
1681 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1682 {
1683 struct nfscllock *lop, *nlop;
1684 int owned;
1685
1686 /*
1687 * Make sure the NFSCLSTATE mutex is held, to avoid races with
1688 * calls in nfscl_renewthread() that do not hold a reference
1689 * count on the nfsclclient and just the mutex.
1690 * The mutex will not be held for calls done with the exclusive
1691 * nfsclclient lock held, in particular, nfscl_hasexpired()
1692 * and nfscl_recalldeleg() might do this.
1693 */
1694 owned = mtx_owned(NFSCLSTATEMUTEXPTR);
1695 if (owned == 0)
1696 NFSLOCKCLSTATE();
1697 LIST_REMOVE(lp, nfsl_list);
1698 if (owned == 0)
1699 NFSUNLOCKCLSTATE();
1700 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1701 nfscl_freelock(lop, local);
1702 }
1703 free(lp, M_NFSCLLOCKOWNER);
1704 if (local)
1705 nfsstatsv1.cllocallockowners--;
1706 else
1707 nfsstatsv1.cllockowners--;
1708 }
1709
1710 /*
1711 * Free up a byte range lock structure.
1712 */
1713 void
1714 nfscl_freelock(struct nfscllock *lop, int local)
1715 {
1716
1717 LIST_REMOVE(lop, nfslo_list);
1718 free(lop, M_NFSCLLOCK);
1719 if (local)
1720 nfsstatsv1.cllocallocks--;
1721 else
1722 nfsstatsv1.cllocks--;
1723 }
1724
1725 /*
1726 * Clean out the state related to a delegation.
1727 */
1728 static void
1729 nfscl_cleandeleg(struct nfscldeleg *dp)
1730 {
1731 struct nfsclowner *owp, *nowp;
1732 struct nfsclopen *op;
1733
1734 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1735 op = LIST_FIRST(&owp->nfsow_open);
1736 if (op != NULL) {
1737 if (LIST_NEXT(op, nfso_list) != NULL)
1738 panic("nfscleandel");
1739 nfscl_freeopen(op, 1, true);
1740 }
1741 nfscl_freeopenowner(owp, 1);
1742 }
1743 nfscl_freealllocks(&dp->nfsdl_lock, 1);
1744 }
1745
1746 /*
1747 * Free a delegation.
1748 */
1749 static void
1750 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp, bool freeit)
1751 {
1752
1753 TAILQ_REMOVE(hdp, dp, nfsdl_list);
1754 LIST_REMOVE(dp, nfsdl_hash);
1755 dp->nfsdl_clp->nfsc_delegcnt--;
1756 if (freeit)
1757 free(dp, M_NFSCLDELEG);
1758 nfsstatsv1.cldelegates--;
1759 }
1760
1761 /*
1762 * Free up all state related to this client structure.
1763 */
1764 static void
1765 nfscl_cleanclient(struct nfsclclient *clp)
1766 {
1767 struct nfsclowner *owp, *nowp;
1768 struct nfsclopen *op, *nop;
1769 struct nfscllayout *lyp, *nlyp;
1770 struct nfscldevinfo *dip, *ndip;
1771
1772 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
1773 nfscl_freelayout(lyp);
1774
1775 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
1776 nfscl_freedevinfo(dip);
1777
1778 /* Now, all the OpenOwners, etc. */
1779 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1780 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1781 nfscl_freeopen(op, 0, true);
1782 }
1783 nfscl_freeopenowner(owp, 0);
1784 }
1785 }
1786
1787 /*
1788 * Called when an NFSERR_EXPIRED is received from the server.
1789 */
1790 static void
1791 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1792 struct ucred *cred, NFSPROC_T *p)
1793 {
1794 struct nfsclowner *owp, *nowp, *towp;
1795 struct nfsclopen *op, *nop, *top;
1796 struct nfscldeleg *dp, *ndp;
1797 int ret, printed = 0;
1798
1799 /*
1800 * First, merge locally issued Opens into the list for the server.
1801 */
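/*
 * Concretely: for each open held under a delegation, if an
 * openowner with the same owner name already exists on the
 * client's list, fold the open's mode and open count into a
 * matching open there (or move the open across); otherwise,
 * move the entire openowner onto the client's list.
 */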
1802 dp = TAILQ_FIRST(&clp->nfsc_deleg);
1803 while (dp != NULL) {
1804 ndp = TAILQ_NEXT(dp, nfsdl_list);
1805 owp = LIST_FIRST(&dp->nfsdl_owner);
1806 while (owp != NULL) {
1807 nowp = LIST_NEXT(owp, nfsow_list);
1808 op = LIST_FIRST(&owp->nfsow_open);
1809 if (op != NULL) {
1810 if (LIST_NEXT(op, nfso_list) != NULL)
1811 panic("nfsclexp");
1812 LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1813 if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1814 NFSV4CL_LOCKNAMELEN))
1815 break;
1816 }
1817 if (towp != NULL) {
1818 /* Merge opens in */
1819 LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1820 if (top->nfso_fhlen == op->nfso_fhlen &&
1821 !NFSBCMP(top->nfso_fh, op->nfso_fh,
1822 op->nfso_fhlen)) {
1823 top->nfso_mode |= op->nfso_mode;
1824 top->nfso_opencnt += op->nfso_opencnt;
1825 break;
1826 }
1827 }
1828 if (top == NULL) {
1829 /* Just add the open to the owner list */
1830 LIST_REMOVE(op, nfso_list);
1831 op->nfso_own = towp;
1832 LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1833 LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
1834 op->nfso_fhlen), op, nfso_hash);
1835 nfsstatsv1.cllocalopens--;
1836 nfsstatsv1.clopens++;
1837 }
1838 } else {
1839 /* Just add the openowner to the client list */
1840 LIST_REMOVE(owp, nfsow_list);
1841 owp->nfsow_clp = clp;
1842 LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1843 LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
1844 op->nfso_fhlen), op, nfso_hash);
1845 nfsstatsv1.cllocalopenowners--;
1846 nfsstatsv1.clopenowners++;
1847 nfsstatsv1.cllocalopens--;
1848 nfsstatsv1.clopens++;
1849 }
1850 }
1851 owp = nowp;
1852 }
1853 if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1854 printed = 1;
1855 printf("nfsv4 expired locks lost\n");
1856 }
1857 nfscl_cleandeleg(dp);
1858 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
1859 dp = ndp;
1860 }
1861 if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1862 panic("nfsclexp");
1863
1864 /*
1865 * Now, try and reopen against the server.
1866 */
1867 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1868 owp->nfsow_seqid = 0;
1869 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1870 ret = nfscl_expireopen(clp, op, nmp, cred, p);
1871 if (ret && !printed) {
1872 printed = 1;
1873 printf("nfsv4 expired locks lost\n");
1874 }
1875 }
1876 if (LIST_EMPTY(&owp->nfsow_open))
1877 nfscl_freeopenowner(owp, 0);
1878 }
1879 }
1880
1881 /*
1882 * This function must be called after the process represented by "own" has
1883 * exited. Must be called with CLSTATE lock held.
1884 */
1885 static void
1886 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1887 {
1888 struct nfsclowner *owp, *nowp;
1889 struct nfscllockowner *lp;
1890 struct nfscldeleg *dp;
1891
1892 /* First, get rid of local locks on delegations. */
1893 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1894 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1895 if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1896 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1897 panic("nfscllckw");
1898 nfscl_freelockowner(lp, 1);
1899 break;
1900 }
1901 }
1902 }
1903 owp = LIST_FIRST(&clp->nfsc_owner);
1904 while (owp != NULL) {
1905 nowp = LIST_NEXT(owp, nfsow_list);
1906 if (!NFSBCMP(owp->nfsow_owner, own,
1907 NFSV4CL_LOCKNAMELEN)) {
1908 /*
1909 * If there are children that haven't closed the
1910 * file descriptors yet, the opens will still be
1911 * here. For that case, let the renew thread clear
1912 * out the OpenOwner later.
1913 */
1914 if (LIST_EMPTY(&owp->nfsow_open))
1915 nfscl_freeopenowner(owp, 0);
1916 else
1917 owp->nfsow_defunct = 1;
1918 break;
1919 }
1920 owp = nowp;
1921 }
1922 }
1923
1924 /*
1925 * Find open/lock owners for processes that have exited.
1926 */
1927 static void
1928 nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
1929 {
1930 struct nfsclowner *owp, *nowp;
1931 struct nfsclopen *op;
1932 struct nfscllockowner *lp, *nlp;
1933 struct nfscldeleg *dp;
1934 uint8_t own[NFSV4CL_LOCKNAMELEN];
1935
1936 /*
1937 * All the pidhash locks must be acquired, since they are sx locks
1938 * and must be acquired before the mutexes. The pid(s) that will
1939 * be used aren't known yet, so all the locks need to be acquired.
1940 * Fortunately, this function is only performed once per second.
1941 */
1942 pidhash_slockall();
1943 NFSLOCKCLSTATE();
1944 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1945 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1946 LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
1947 if (LIST_EMPTY(&lp->nfsl_lock))
1948 nfscl_emptylockowner(lp, lhp);
1949 }
1950 }
1951 if (nfscl_procdoesntexist(owp->nfsow_owner)) {
1952 memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
1953 nfscl_cleanup_common(clp, own);
1954 }
1955 }
1956
1957 /*
1958 * For the single open_owner case, these lock owners need to be
1959 * checked separately to see if their processes still exist.
1960 * This is because nfscl_procdoesntexist() never returns true for
1961 * the single open_owner, so the loop above never calls
1962 * nfscl_cleanup_common() for it.
1963 */
1964 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1965 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1966 if (nfscl_procdoesntexist(lp->nfsl_owner)) {
1967 memcpy(own, lp->nfsl_owner,
1968 NFSV4CL_LOCKNAMELEN);
1969 nfscl_cleanup_common(clp, own);
1970 }
1971 }
1972 }
1973 NFSUNLOCKCLSTATE();
1974 pidhash_sunlockall();
1975 }
1976
1977 /*
1978 * Take the empty lock owner and move it to the local lhp list if the
1979 * associated process no longer exists.
1980 */
1981 static void
1982 nfscl_emptylockowner(struct nfscllockowner *lp,
1983 struct nfscllockownerfhhead *lhp)
1984 {
1985 struct nfscllockownerfh *lfhp, *mylfhp;
1986 struct nfscllockowner *nlp;
1987 int fnd_it;
1988
1989 /* If not a Posix lock owner, just return. */
1990 if ((lp->nfsl_lockflags & F_POSIX) == 0)
1991 return;
1992
1993 fnd_it = 0;
1994 mylfhp = NULL;
1995 /*
1996 * First, search to see if this lock owner is already in the list.
1997 * If it is, then the associated process no longer exists.
1998 */
1999 SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
2000 if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
2001 !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
2002 lfhp->nfslfh_len))
2003 mylfhp = lfhp;
2004 LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
2005 if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
2006 NFSV4CL_LOCKNAMELEN))
2007 fnd_it = 1;
2008 }
2009 /* If not found, check if process still exists. */
2010 if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
2011 return;
2012
2013 /* Move the lock owner over to the local list. */
2014 if (mylfhp == NULL) {
2015 mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
2016 M_NOWAIT);
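/*
 * The M_NOWAIT allocation can fail under memory pressure.
 * If it does, just skip this lock owner; it should be found
 * again on a later once-per-second cleanup pass.
 */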
2017 if (mylfhp == NULL)
2018 return;
2019 mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
2020 NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
2021 mylfhp->nfslfh_len);
2022 LIST_INIT(&mylfhp->nfslfh_lock);
2023 SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
2024 }
2025 LIST_REMOVE(lp, nfsl_list);
2026 LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
2027 }
2028
2029 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */
2030 /*
2031 * Called from nfs umount to free up the clientid.
2032 */
2033 void
2034 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p, struct nfscldeleghead *dhp)
2035 {
2036 struct nfsclclient *clp;
2037 struct ucred *cred;
2038 int igotlock;
2039
2040 /*
2041 * For the case that matters, this is the thread that set
2042 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
2043 * done to ensure that any thread executing nfscl_getcl() after
2044 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
2045 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
2046 * explanation, courtesy of Alan Cox.
2047 * What follows is a snippet from Alan Cox's email at:
2048 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
2049 *
2050 * 1. Set MNTK_UNMOUNTF
2051 * 2. Acquire a standard FreeBSD mutex "m".
2052 * 3. Update some data structures.
2053 * 4. Release mutex "m".
2054 *
2055 * Then, other threads that acquire "m" after step 4 has occurred will
2056 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
2057 * step 2 may or may not see MNTK_UNMOUNTF as set.
2058 */
2059 NFSLOCKCLSTATE();
2060 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
2061 fake_global++;
2062 NFSUNLOCKCLSTATE();
2063 NFSLOCKCLSTATE();
2064 }
2065
2066 clp = nmp->nm_clp;
2067 if (clp != NULL) {
2068 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
2069 panic("nfscl umount");
2070
2071 /*
2072 * First, handshake with the nfscl renew thread, to terminate
2073 * it.
2074 */
2075 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
2076 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
2077 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
2078 "nfsclumnt", hz);
2079
2080 /*
2081 * Now, get the exclusive lock on the client state, so
2082 * that no uses of the state are still in progress.
2083 */
2084 do {
2085 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2086 NFSCLSTATEMUTEXPTR, NULL);
2087 } while (!igotlock);
2088 NFSUNLOCKCLSTATE();
2089
2090 /*
2091 * Free up all the state. It will expire on the server, but
2092 * maybe we should do a SetClientId/SetClientIdConfirm so
2093 * the server throws it away?
2094 */
2095 LIST_REMOVE(clp, nfsc_list);
2096 nfscl_delegreturnall(clp, p, dhp);
2097 cred = newnfs_getcred();
2098 if (NFSHASNFSV4N(nmp)) {
2099 nfsrpc_destroysession(nmp, NULL, cred, p);
2100 nfsrpc_destroyclient(nmp, clp, cred, p);
2101 } else
2102 nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
2103 nfscl_cleanclient(clp);
2104 nmp->nm_clp = NULL;
2105 NFSFREECRED(cred);
2106 free(clp, M_NFSCLCLIENT);
2107 } else
2108 NFSUNLOCKCLSTATE();
2109 }
2110
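/*
 * A minimal user-space sketch of the visibility pattern described in
 * the comment above, assuming POSIX threads. The names are purely
 * illustrative and the block is deliberately not compiled:
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static bool unmounting;			/* Analogue of MNTK_UNMOUNTF. */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

/* The unmount path performs steps 1-4 from the comment above. */
static void
publisher(void)
{

	unmounting = true;		/* 1. Set the flag. */
	pthread_mutex_lock(&m);		/* 2. Acquire "m". */
	/* 3. Update some data structures. */
	pthread_mutex_unlock(&m);	/* 4. Release "m". */
}

/* Any thread that acquires "m" after step 4 must see the flag set. */
static bool
observer(void)
{
	bool seen;

	pthread_mutex_lock(&m);
	seen = unmounting;
	pthread_mutex_unlock(&m);
	return (seen);
}
#endif
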
2111 /*
2112 * This function is called when a server replies with NFSERR_STALECLIENTID,
2113 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
2114 * doing Opens and Locks with reclaim. If these fail, it deletes the
2115 * corresponding state.
2116 */
2117 static void
2118 nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred,
2119 NFSPROC_T *p)
2120 {
2121 struct nfsclowner *owp, *nowp;
2122 struct nfsclopen *op, *nop;
2123 struct nfscllockowner *lp, *nlp;
2124 struct nfscllock *lop, *nlop;
2125 struct nfscldeleg *dp, *ndp, *tdp;
2126 struct nfsmount *nmp;
2127 struct ucred *tcred;
2128 struct nfsclopenhead extra_open;
2129 struct nfscldeleghead extra_deleg;
2130 struct nfsreq *rep;
2131 u_int64_t len;
2132 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
2133 int i, igotlock = 0, error, trycnt, firstlock;
2134 struct nfscllayout *lyp, *nlyp;
2135 bool recovered_one;
2136
2137 /*
2138 * First, lock the client structure, so everyone else will
2139 * block when trying to use state.
2140 */
2141 NFSLOCKCLSTATE();
2142 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2143 do {
2144 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2145 NFSCLSTATEMUTEXPTR, NULL);
2146 } while (!igotlock);
2147 NFSUNLOCKCLSTATE();
2148
2149 nmp = clp->nfsc_nmp;
2150 if (nmp == NULL)
2151 panic("nfscl recover");
2152
2153 /*
2154 * For now, just get rid of all layouts. There may be a need
2155 * to do LayoutCommit Ops with reclaim == true later.
2156 */
2157 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
2158 nfscl_freelayout(lyp);
2159 TAILQ_INIT(&clp->nfsc_layout);
2160 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
2161 LIST_INIT(&clp->nfsc_layouthash[i]);
2162
2163 trycnt = 5;
2164 tcred = NULL;
2165 do {
2166 error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p);
2167 } while ((error == NFSERR_STALECLIENTID ||
2168 error == NFSERR_BADSESSION ||
2169 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2170 if (error) {
2171 NFSLOCKCLSTATE();
2172 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
2173 NFSCLFLAGS_RECVRINPROG);
2174 wakeup(&clp->nfsc_flags);
2175 nfsv4_unlock(&clp->nfsc_lock, 0);
2176 NFSUNLOCKCLSTATE();
2177 return;
2178 }
2179 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2180 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2181
2182 /*
2183 * Mark requests already queued on the server, so that they don't
2184 * initiate another recovery cycle. Any requests already in the
2185 * queue that handle state information will have the old stale
2186 * clientid/stateid and will get a NFSERR_STALESTATEID,
2187 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
2188 * This will be translated to NFSERR_STALEDONTRECOVER when
2189 * R_DONTRECOVER is set.
2190 */
2191 NFSLOCKREQ();
2192 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
2193 if (rep->r_nmp == nmp)
2194 rep->r_flags |= R_DONTRECOVER;
2195 }
2196 NFSUNLOCKREQ();
2197
2198 /*
2199 * If nfsrpc_setclient() returns *retokp == true,
2200 * no more recovery is needed.
2201 */
2202 if (*retokp)
2203 goto out;
2204
2205 /*
2206 * Now, mark all delegations "need reclaim".
2207 */
2208 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
2209 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
2210
2211 TAILQ_INIT(&extra_deleg);
2212 LIST_INIT(&extra_open);
2213 /*
2214 * Now traverse the state lists, doing Open and Lock Reclaims.
2215 */
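/*
 * For each open: if an unreclaimed delegation matches the open's
 * file handle and mode, reclaim the open with that delegation
 * type and fix up the delegation from the reply; afterwards,
 * reclaim every byte range lock held under the open. Locks or
 * opens that cannot be reclaimed are freed, so that state is lost.
 */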
2216 tcred = newnfs_getcred();
2217 recovered_one = false;
2218 owp = LIST_FIRST(&clp->nfsc_owner);
2219 while (owp != NULL) {
2220 nowp = LIST_NEXT(owp, nfsow_list);
2221 owp->nfsow_seqid = 0;
2222 op = LIST_FIRST(&owp->nfsow_open);
2223 while (op != NULL) {
2224 nop = LIST_NEXT(op, nfso_list);
2225 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2226 /* Search for a delegation to reclaim with the open */
2227 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2228 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2229 continue;
2230 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2231 mode = NFSV4OPEN_ACCESSWRITE;
2232 delegtype = NFSV4OPEN_DELEGATEWRITE;
2233 } else {
2234 mode = NFSV4OPEN_ACCESSREAD;
2235 delegtype = NFSV4OPEN_DELEGATEREAD;
2236 }
2237 if ((op->nfso_mode & mode) == mode &&
2238 op->nfso_fhlen == dp->nfsdl_fhlen &&
2239 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
2240 break;
2241 }
2242 ndp = dp;
2243 if (dp == NULL)
2244 delegtype = NFSV4OPEN_DELEGATENONE;
2245 newnfs_copycred(&op->nfso_cred, tcred);
2246 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
2247 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
2248 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
2249 tcred, p);
2250 if (!error) {
2251 recovered_one = true;
2252 /* Handle any replied delegation */
2253 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
2254 || NFSMNT_RDONLY(nmp->nm_mountp))) {
2255 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
2256 mode = NFSV4OPEN_ACCESSWRITE;
2257 else
2258 mode = NFSV4OPEN_ACCESSREAD;
2259 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2260 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2261 continue;
2262 if ((op->nfso_mode & mode) == mode &&
2263 op->nfso_fhlen == dp->nfsdl_fhlen &&
2264 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
2265 op->nfso_fhlen)) {
2266 dp->nfsdl_stateid = ndp->nfsdl_stateid;
2267 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
2268 dp->nfsdl_ace = ndp->nfsdl_ace;
2269 dp->nfsdl_change = ndp->nfsdl_change;
2270 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2271 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
2272 dp->nfsdl_flags |= NFSCLDL_RECALL;
2273 free(ndp, M_NFSCLDELEG);
2274 ndp = NULL;
2275 break;
2276 }
2277 }
2278 }
2279 if (ndp != NULL)
2280 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
2281
2282 /* and reclaim all byte range locks */
2283 lp = LIST_FIRST(&op->nfso_lock);
2284 while (lp != NULL) {
2285 nlp = LIST_NEXT(lp, nfsl_list);
2286 lp->nfsl_seqid = 0;
2287 firstlock = 1;
2288 lop = LIST_FIRST(&lp->nfsl_lock);
2289 while (lop != NULL) {
2290 nlop = LIST_NEXT(lop, nfslo_list);
2291 if (lop->nfslo_end == NFS64BITSSET)
2292 len = NFS64BITSSET;
2293 else
2294 len = lop->nfslo_end - lop->nfslo_first;
2295 error = nfscl_trylock(nmp, NULL,
2296 op->nfso_fh, op->nfso_fhlen, lp,
2297 firstlock, 1, lop->nfslo_first, len,
2298 lop->nfslo_type, tcred, p);
2299 if (error != 0)
2300 nfscl_freelock(lop, 0);
2301 else
2302 firstlock = 0;
2303 lop = nlop;
2304 }
2305 /* If no locks, but a lockowner, just delete it. */
2306 if (LIST_EMPTY(&lp->nfsl_lock))
2307 nfscl_freelockowner(lp, 0);
2308 lp = nlp;
2309 }
2310 } else if (error == NFSERR_NOGRACE && !recovered_one &&
2311 NFSHASNFSV4N(nmp)) {
2312 /*
2313 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
2314 * actually end up here, since the client will do
2315 * a recovery for NFSERR_BADSESSION, but will get
2316 * an NFSERR_NOGRACE reply for the first "reclaim"
2317 * attempt.
2318 * So, call nfscl_expireclient() to recover the
2319 * opens as best we can and then do a reclaim
2320 * complete and return.
2321 */
2322 nfsrpc_reclaimcomplete(nmp, cred, p);
2323 nfscl_expireclient(clp, nmp, tcred, p);
2324 goto out;
2325 }
2326 }
2327 if (error != 0 && error != NFSERR_BADSESSION)
2328 nfscl_freeopen(op, 0, true);
2329 op = nop;
2330 }
2331 owp = nowp;
2332 }
2333
2334 /*
2335 * Now, try and get any delegations not yet reclaimed by cobbling
2336 * together an appropriate open.
2337 */
2338 nowp = NULL;
2339 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2340 while (dp != NULL) {
2341 ndp = TAILQ_NEXT(dp, nfsdl_list);
2342 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2343 if (nowp == NULL) {
2344 nowp = malloc(
2345 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2346 /*
2347 * Name must be as long as the largest possible
2348 * NFSV4CL_LOCKNAMELEN (12 for now).
2349 */
2350 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2351 NFSV4CL_LOCKNAMELEN);
2352 LIST_INIT(&nowp->nfsow_open);
2353 nowp->nfsow_clp = clp;
2354 nowp->nfsow_seqid = 0;
2355 nowp->nfsow_defunct = 0;
2356 nfscl_lockinit(&nowp->nfsow_rwlock);
2357 }
2358 nop = NULL;
2359 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2360 nop = malloc(sizeof (struct nfsclopen) +
2361 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2362 nop->nfso_own = nowp;
2363 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2364 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2365 delegtype = NFSV4OPEN_DELEGATEWRITE;
2366 } else {
2367 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2368 delegtype = NFSV4OPEN_DELEGATEREAD;
2369 }
2370 nop->nfso_opencnt = 0;
2371 nop->nfso_posixlock = 1;
2372 nop->nfso_fhlen = dp->nfsdl_fhlen;
2373 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2374 LIST_INIT(&nop->nfso_lock);
2375 nop->nfso_stateid.seqid = 0;
2376 nop->nfso_stateid.other[0] = 0;
2377 nop->nfso_stateid.other[1] = 0;
2378 nop->nfso_stateid.other[2] = 0;
2379 newnfs_copycred(&dp->nfsdl_cred, tcred);
2380 newnfs_copyincred(tcred, &nop->nfso_cred);
2381 tdp = NULL;
2382 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2383 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2384 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2385 delegtype, tcred, p);
2386 if (tdp != NULL) {
2387 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2388 mode = NFSV4OPEN_ACCESSWRITE;
2389 else
2390 mode = NFSV4OPEN_ACCESSREAD;
2391 if ((nop->nfso_mode & mode) == mode &&
2392 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2393 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2394 nop->nfso_fhlen)) {
2395 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2396 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2397 dp->nfsdl_ace = tdp->nfsdl_ace;
2398 dp->nfsdl_change = tdp->nfsdl_change;
2399 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2400 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2401 dp->nfsdl_flags |= NFSCLDL_RECALL;
2402 free(tdp, M_NFSCLDELEG);
2403 } else {
2404 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2405 }
2406 }
2407 }
2408 if (error) {
2409 if (nop != NULL)
2410 free(nop, M_NFSCLOPEN);
2411 if (error == NFSERR_NOGRACE && !recovered_one &&
2412 NFSHASNFSV4N(nmp)) {
2413 /*
2414 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
2415 * actually end up here, since the client will do
2416 * a recovery for NFSERR_BADSESSION, but will get
2417 * an NFSERR_NOGRACE reply for the first "reclaim"
2418 * attempt.
2419 * So, call nfscl_expireclient() to recover the
2420 * opens as best we can and then do a reclaim
2421 * complete and return.
2422 */
2423 nfsrpc_reclaimcomplete(nmp, cred, p);
2424 nfscl_expireclient(clp, nmp, tcred, p);
2425 free(nowp, M_NFSCLOWNER);
2426 goto out;
2427 }
2428 /*
2429 * Couldn't reclaim it, so throw the state
2430 * away. Ouch!!
2431 */
2432 nfscl_cleandeleg(dp);
2433 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
2434 } else {
2435 recovered_one = true;
2436 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2437 }
2438 }
2439 dp = ndp;
2440 }
2441
2442 /*
2443 * Now, get rid of extra Opens and Delegations.
2444 */
2445 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2446 do {
2447 newnfs_copycred(&op->nfso_cred, tcred);
2448 error = nfscl_tryclose(op, tcred, nmp, p, true);
2449 if (error == NFSERR_GRACE)
2450 (void) nfs_catnap(PZERO, error, "nfsexcls");
2451 } while (error == NFSERR_GRACE);
2452 LIST_REMOVE(op, nfso_list);
2453 free(op, M_NFSCLOPEN);
2454 }
2455 if (nowp != NULL)
2456 free(nowp, M_NFSCLOWNER);
2457
2458 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2459 do {
2460 newnfs_copycred(&dp->nfsdl_cred, tcred);
2461 error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2462 if (error == NFSERR_GRACE)
2463 (void) nfs_catnap(PZERO, error, "nfsexdlg");
2464 } while (error == NFSERR_GRACE);
2465 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2466 free(dp, M_NFSCLDELEG);
2467 }
2468
2469 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
2470 if (NFSHASNFSV4N(nmp))
2471 (void)nfsrpc_reclaimcomplete(nmp, cred, p);
2472
2473 out:
2474 NFSLOCKCLSTATE();
2475 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2476 wakeup(&clp->nfsc_flags);
2477 nfsv4_unlock(&clp->nfsc_lock, 0);
2478 NFSUNLOCKCLSTATE();
2479 if (tcred != NULL)
2480 NFSFREECRED(tcred);
2481 }
2482
2483 /*
2484 * This function is called when a server replies with NFSERR_EXPIRED.
2485 * It deletes all state for the client and does a fresh SetClientId/confirm.
2486 * XXX Someday it should post a signal to the process(es) that hold the
2487 * state, so they know that lock state has been lost.
2488 */
2489 int
2490 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
2491 {
2492 struct nfsmount *nmp;
2493 struct ucred *cred;
2494 int igotlock = 0, error, trycnt;
2495
2496 /*
2497 * If the clientid has gone away or a new SetClientid has already
2498 * been done, just return ok.
2499 */
2500 if (clp == NULL || clidrev != clp->nfsc_clientidrev)
2501 return (0);
2502
2503 /*
2504 * First, lock the client structure, so everyone else will
2505 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
2506 * that only one thread does the work.
2507 */
2508 NFSLOCKCLSTATE();
2509 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
2510 do {
2511 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2512 NFSCLSTATEMUTEXPTR, NULL);
2513 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
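/*
 * If NFSCLFLAGS_EXPIREIT is now clear, another thread completed
 * the expiry handling while this one slept, so there is nothing
 * left to do here.
 */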
2514 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
2515 if (igotlock)
2516 nfsv4_unlock(&clp->nfsc_lock, 0);
2517 NFSUNLOCKCLSTATE();
2518 return (0);
2519 }
2520 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2521 NFSUNLOCKCLSTATE();
2522
2523 nmp = clp->nfsc_nmp;
2524 if (nmp == NULL)
2525 panic("nfscl expired");
2526 cred = newnfs_getcred();
2527 trycnt = 5;
2528 do {
2529 error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
2530 } while ((error == NFSERR_STALECLIENTID ||
2531 error == NFSERR_BADSESSION ||
2532 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2533 if (error) {
2534 NFSLOCKCLSTATE();
2535 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2536 } else {
2537 /*
2538 * Expire the state for the client.
2539 */
2540 nfscl_expireclient(clp, nmp, cred, p);
2541 NFSLOCKCLSTATE();
2542 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2543 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2544 }
2545 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
2546 wakeup(&clp->nfsc_flags);
2547 nfsv4_unlock(&clp->nfsc_lock, 0);
2548 NFSUNLOCKCLSTATE();
2549 NFSFREECRED(cred);
2550 return (error);
2551 }
2552
2553 /*
2554 * This function inserts a lock in the list after insert_lop.
2555 */
2556 static void
2557 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2558 struct nfscllock *insert_lop, int local)
2559 {
2560
2561 if ((struct nfscllockowner *)insert_lop == lp)
2562 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2563 else
2564 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2565 if (local)
2566 nfsstatsv1.cllocallocks++;
2567 else
2568 nfsstatsv1.cllocks++;
2569 }
2570
2571 /*
2572 * This function updates the locking for a lock owner and given file. It
2573 * maintains a list of lock ranges ordered on increasing file offset that
2574 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2575 * It always adds new_lop to the list and sometimes uses the one pointed
2576 * at by other_lopp.
2577 * Returns 1 if the locks were modified, 0 otherwise.
2578 */
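/*
 * Worked example (illustrative): with an existing write lock over
 * bytes 0-99 and a new read lock for bytes 40-59, the new lock
 * splits the old one, leaving three non-overlapping ranges:
 *	write 0-39, read 40-59, write 60-99
 * The second half of the split comes from *other_lopp. An unlock
 * of bytes 40-59 instead absorbs that range, leaving write locks
 * over 0-39 and 60-99 only.
 */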
2579 static int
2580 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2581 struct nfscllock **other_lopp, int local)
2582 {
2583 struct nfscllock *new_lop = *new_lopp;
2584 struct nfscllock *lop, *tlop, *ilop;
2585 struct nfscllock *other_lop;
2586 int unlock = 0, modified = 0;
2587 u_int64_t tmp;
2588
2589 /*
2590 * Work down the list until the lock is merged.
2591 */
2592 if (new_lop->nfslo_type == F_UNLCK)
2593 unlock = 1;
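/*
 * ilop starts out as the list head, cast to a lock pointer, so
 * that nfscl_insertlock() can detect an insertion at the front
 * of the list via its pointer comparison.
 */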
2594 ilop = (struct nfscllock *)lp;
2595 lop = LIST_FIRST(&lp->nfsl_lock);
2596 while (lop != NULL) {
2597 /*
2598 * Only check locks for this file that aren't before the start of
2599 * the new lock's range.
2600 */
2601 if (lop->nfslo_end >= new_lop->nfslo_first) {
2602 if (new_lop->nfslo_end < lop->nfslo_first) {
2603 /*
2604 * If the new lock ends before the start of the
2605 * current lock's range, no merge, just insert
2606 * the new lock.
2607 */
2608 break;
2609 }
2610 if (new_lop->nfslo_type == lop->nfslo_type ||
2611 (new_lop->nfslo_first <= lop->nfslo_first &&
2612 new_lop->nfslo_end >= lop->nfslo_end)) {
2613 /*
2614 * This lock can be absorbed by the new lock/unlock.
2615 * This happens when it covers the entire range
2616 * of the old lock or is contiguous
2617 * with the old lock and is of the same type or an
2618 * unlock.
2619 */
2620 if (new_lop->nfslo_type != lop->nfslo_type ||
2621 new_lop->nfslo_first != lop->nfslo_first ||
2622 new_lop->nfslo_end != lop->nfslo_end)
2623 modified = 1;
2624 if (lop->nfslo_first < new_lop->nfslo_first)
2625 new_lop->nfslo_first = lop->nfslo_first;
2626 if (lop->nfslo_end > new_lop->nfslo_end)
2627 new_lop->nfslo_end = lop->nfslo_end;
2628 tlop = lop;
2629 lop = LIST_NEXT(lop, nfslo_list);
2630 nfscl_freelock(tlop, local);
2631 continue;
2632 }
2633
2634 /*
2635 * All these cases are for contiguous locks that are not the
2636 * same type, so they can't be merged.
2637 */
2638 if (new_lop->nfslo_first <= lop->nfslo_first) {
2639 /*
2640 * This case is where the new lock overlaps with the
2641 * first part of the old lock. Move the start of the
2642 * old lock to just past the end of the new lock. The
2643 * new lock will be inserted in front of the old, since
2644 * ilop hasn't been updated. (We are done now.)
2645 */
2646 if (lop->nfslo_first != new_lop->nfslo_end) {
2647 lop->nfslo_first = new_lop->nfslo_end;
2648 modified = 1;
2649 }
2650 break;
2651 }
2652 if (new_lop->nfslo_end >= lop->nfslo_end) {
2653 /*
2654 * This case is where the new lock overlaps with the
2655 * end of the old lock's range. Move the old lock's
2656 * end to just before the new lock's first and insert
2657 * the new lock after the old lock.
2658 * Might not be done yet, since the new lock could
2659 * overlap further locks with higher ranges.
2660 */
2661 if (lop->nfslo_end != new_lop->nfslo_first) {
2662 lop->nfslo_end = new_lop->nfslo_first;
2663 modified = 1;
2664 }
2665 ilop = lop;
2666 lop = LIST_NEXT(lop, nfslo_list);
2667 continue;
2668 }
2669 /*
2670 * The final case is where the new lock's range is in the
2671 * middle of the current lock's and splits the current lock
2672 * up. Use *other_lopp to handle the second part of the
2673 * split old lock range. (We are done now.)
2674 * For unlock, we use new_lop as other_lop and tmp, since
2675 * other_lop and new_lop are the same for this case.
2676 * We noted the unlock case above, so we don't need
2677 * new_lop->nfslo_type any longer.
2678 */
2679 tmp = new_lop->nfslo_first;
2680 if (unlock) {
2681 other_lop = new_lop;
2682 *new_lopp = NULL;
2683 } else {
2684 other_lop = *other_lopp;
2685 *other_lopp = NULL;
2686 }
2687 other_lop->nfslo_first = new_lop->nfslo_end;
2688 other_lop->nfslo_end = lop->nfslo_end;
2689 other_lop->nfslo_type = lop->nfslo_type;
2690 lop->nfslo_end = tmp;
2691 nfscl_insertlock(lp, other_lop, lop, local);
2692 ilop = lop;
2693 modified = 1;
2694 break;
2695 }
2696 ilop = lop;
2697 lop = LIST_NEXT(lop, nfslo_list);
2698 if (lop == NULL)
2699 break;
2700 }
2701
2702 /*
2703 * Insert the new lock in the list at the appropriate place.
2704 */
2705 if (!unlock) {
2706 nfscl_insertlock(lp, new_lop, ilop, local);
2707 *new_lopp = NULL;
2708 modified = 1;
2709 }
2710 return (modified);
2711 }
2712
2713 /*
2714 * This function must be run as a kernel thread.
2715 * It does Renew Ops and recovery, when required.
2716 */
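/*
 * Each pass of the loop below: runs recovery if flagged, renews the
 * MDS lease and any DS session leases, recalls and returns
 * delegations and layouts as required, trims state over the high
 * water marks, and cleans up owners for exited processes, then
 * sleeps for up to a second.
 */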
2717 void
2718 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2719 {
2720 struct nfsclowner *owp, *nowp;
2721 struct nfsclopen *op;
2722 struct nfscllockowner *lp, *nlp;
2723 struct nfscldeleghead dh;
2724 struct nfscldeleg *dp, *ndp;
2725 struct ucred *cred;
2726 u_int32_t clidrev;
2727 int error, cbpathdown, islept, igotlock, ret, clearok;
2728 uint32_t recover_done_time = 0;
2729 time_t mytime;
2730 static time_t prevsec = 0;
2731 struct nfscllockownerfh *lfhp, *nlfhp;
2732 struct nfscllockownerfhhead lfh;
2733 struct nfscllayout *lyp, *nlyp;
2734 struct nfscldevinfo *dip, *ndip;
2735 struct nfscllayouthead rlh;
2736 struct nfsclrecalllayout *recallp;
2737 struct nfsclds *dsp;
2738 bool retok;
2739 struct mount *mp;
2740 vnode_t vp;
2741
2742 cred = newnfs_getcred();
2743 NFSLOCKCLSTATE();
2744 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2745 mp = clp->nfsc_nmp->nm_mountp;
2746 NFSUNLOCKCLSTATE();
2747 for(;;) {
2748 newnfs_setroot(cred);
2749 cbpathdown = 0;
2750 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2751 /*
2752 * Only allow one full recover within 1/2 of the lease
2753 * duration (nfsc_renew).
2754 * retok is value/result. If passed in as true,
2755 * it indicates only a CreateSession operation should
2756 * be attempted.
2757 * If it is returned true, it indicates that the
2758 * recovery only required a CreateSession.
2759 */
2760 retok = true;
2761 if (recover_done_time < NFSD_MONOSEC) {
2762 recover_done_time = NFSD_MONOSEC +
2763 clp->nfsc_renew;
2764 retok = false;
2765 }
2766 NFSCL_DEBUG(1, "Doing recovery, only "
2767 "createsession=%d\n", retok);
2768 nfscl_recover(clp, &retok, cred, p);
2769 }
2770 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2771 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2772 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2773 clidrev = clp->nfsc_clientidrev;
2774 error = nfsrpc_renew(clp, NULL, cred, p);
2775 if (error == NFSERR_CBPATHDOWN)
2776 cbpathdown = 1;
2777 else if (error == NFSERR_STALECLIENTID) {
2778 NFSLOCKCLSTATE();
2779 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2780 NFSUNLOCKCLSTATE();
2781 } else if (error == NFSERR_EXPIRED)
2782 (void) nfscl_hasexpired(clp, clidrev, p);
2783 }
2784
2785 checkdsrenew:
2786 if (NFSHASNFSV4N(clp->nfsc_nmp)) {
2787 /* Do renews for any DS sessions. */
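/*
 * NFSLOCKMNT() is dropped across each Renew RPC, so the
 * session list may change underneath; restart the scan
 * from checkdsrenew after every RPC.
 */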
2788 NFSLOCKMNT(clp->nfsc_nmp);
2789 /* Skip first entry, since the MDS is handled above. */
2790 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
2791 if (dsp != NULL)
2792 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2793 while (dsp != NULL) {
2794 if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
2795 dsp->nfsclds_sess.nfsess_defunct == 0) {
2796 dsp->nfsclds_expire = NFSD_MONOSEC +
2797 clp->nfsc_renew;
2798 NFSUNLOCKMNT(clp->nfsc_nmp);
2799 (void)nfsrpc_renew(clp, dsp, cred, p);
2800 goto checkdsrenew;
2801 }
2802 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2803 }
2804 NFSUNLOCKMNT(clp->nfsc_nmp);
2805 }
2806
2807 TAILQ_INIT(&dh);
2808 NFSLOCKCLSTATE();
2809 if (cbpathdown)
2810 /* It's a Total Recall! */
2811 nfscl_totalrecall(clp);
2812
2813 /*
2814 * Now, handle defunct owners.
2815 */
2816 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
2817 if (LIST_EMPTY(&owp->nfsow_open)) {
2818 if (owp->nfsow_defunct != 0)
2819 nfscl_freeopenowner(owp, 0);
2820 }
2821 }
2822
2823 /*
2824 * Do the recall on any delegations. To avoid trouble, always
2825 * come back up here after having slept.
2826 */
2827 igotlock = 0;
2828 tryagain:
2829 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2830 while (dp != NULL) {
2831 ndp = TAILQ_NEXT(dp, nfsdl_list);
2832 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2833 /*
2834 * Wait for outstanding I/O ops to be done.
2835 */
2836 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2837 if (igotlock) {
2838 nfsv4_unlock(&clp->nfsc_lock, 0);
2839 igotlock = 0;
2840 }
2841 dp->nfsdl_rwlock.nfslock_lock |=
2842 NFSV4LOCK_WANTED;
2843 msleep(&dp->nfsdl_rwlock,
2844 NFSCLSTATEMUTEXPTR, PVFS, "nfscld",
2845 5 * hz);
2846 if (NFSCL_FORCEDISM(mp))
2847 goto terminate;
2848 goto tryagain;
2849 }
2850 while (!igotlock) {
2851 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2852 &islept, NFSCLSTATEMUTEXPTR, mp);
2853 if (igotlock == 0 && NFSCL_FORCEDISM(mp))
2854 goto terminate;
2855 if (islept)
2856 goto tryagain;
2857 }
2858 NFSUNLOCKCLSTATE();
2859 newnfs_copycred(&dp->nfsdl_cred, cred);
2860 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
2861 NULL, cred, p, 1, &vp);
2862 if (!ret) {
2863 nfscl_cleandeleg(dp);
2864 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2865 nfsdl_list);
2866 LIST_REMOVE(dp, nfsdl_hash);
2867 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2868 clp->nfsc_delegcnt--;
2869 nfsstatsv1.cldelegates--;
2870 }
2871 NFSLOCKCLSTATE();
2872 /*
2873 * The nfsc_lock must be released before doing
2874 * vrele(), since it might call nfs_inactive().
2875 * For the unlikely case where the vnode failed
2876 * to be acquired by nfscl_recalldeleg(), a
2877 * VOP_RECLAIM() should be in progress and it
2878 * will return the delegation.
2879 */
2880 nfsv4_unlock(&clp->nfsc_lock, 0);
2881 igotlock = 0;
2882 if (vp != NULL) {
2883 NFSUNLOCKCLSTATE();
2884 vrele(vp);
2885 NFSLOCKCLSTATE();
2886 }
2887 goto tryagain;
2888 }
2889 dp = ndp;
2890 }
2891
2892 /*
2893 * Clear out old delegations, if we are above the high water
2894 * mark. Only clear out ones with no state related to them.
2895 * The tailq list is in LRU order.
2896 */
2897 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2898 while (clp->nfsc_delegcnt > clp->nfsc_deleghighwater &&
2899 dp != NULL) {
2900 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2901 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2902 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2903 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2904 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2905 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
2906 clearok = 1;
2907 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2908 op = LIST_FIRST(&owp->nfsow_open);
2909 if (op != NULL) {
2910 clearok = 0;
2911 break;
2912 }
2913 }
2914 if (clearok) {
2915 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2916 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2917 clearok = 0;
2918 break;
2919 }
2920 }
2921 }
2922 if (clearok) {
2923 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2924 LIST_REMOVE(dp, nfsdl_hash);
2925 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2926 clp->nfsc_delegcnt--;
2927 nfsstatsv1.cldelegates--;
2928 }
2929 }
2930 dp = ndp;
2931 }
2932 if (igotlock)
2933 nfsv4_unlock(&clp->nfsc_lock, 0);
2934
2935 /*
2936 * Do the recall on any layouts. To avoid trouble, always
2937 * come back up here after having slept.
2938 */
2939 TAILQ_INIT(&rlh);
2940 tryagain2:
2941 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
2942 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
2943 /*
2944 * Wait for outstanding I/O ops to be done.
2945 */
2946 if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
2947 (lyp->nfsly_lock.nfslock_lock &
2948 NFSV4LOCK_LOCK) != 0) {
2949 lyp->nfsly_lock.nfslock_lock |=
2950 NFSV4LOCK_WANTED;
2951 msleep(&lyp->nfsly_lock.nfslock_lock,
2952 NFSCLSTATEMUTEXPTR, PVFS, "nfslyp",
2953 5 * hz);
2954 if (NFSCL_FORCEDISM(mp))
2955 goto terminate;
2956 goto tryagain2;
2957 }
2958 /* Move the layout to the recall list. */
2959 TAILQ_REMOVE(&clp->nfsc_layout, lyp,
2960 nfsly_list);
2961 LIST_REMOVE(lyp, nfsly_hash);
2962 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
2963
2964 /* Handle any layout commits. */
2965 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
2966 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
2967 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
2968 NFSUNLOCKCLSTATE();
2969 NFSCL_DEBUG(3, "do layoutcommit\n");
2970 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
2971 cred, p);
2972 NFSLOCKCLSTATE();
2973 goto tryagain2;
2974 }
2975 }
2976 }
2977
2978 /* Now, look for stale layouts. */
2979 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
2980 while (lyp != NULL) {
2981 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
2982 if ((lyp->nfsly_timestamp < NFSD_MONOSEC ||
2983 clp->nfsc_layoutcnt > clp->nfsc_layouthighwater) &&
2984 (lyp->nfsly_flags & (NFSLY_RECALL |
2985 NFSLY_RETONCLOSE)) == 0 &&
2986 lyp->nfsly_lock.nfslock_usecnt == 0 &&
2987 lyp->nfsly_lock.nfslock_lock == 0) {
2988 NFSCL_DEBUG(4, "ret stale lay=%d\n",
2989 clp->nfsc_layoutcnt);
2990 recallp = malloc(sizeof(*recallp),
2991 M_NFSLAYRECALL, M_NOWAIT);
2992 if (recallp == NULL)
2993 break;
2994 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
2995 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
2996 lyp->nfsly_stateid.seqid, 0, 0, NULL,
2997 recallp);
2998 }
2999 lyp = nlyp;
3000 }
3001
3002 /*
3003 * Free up any unreferenced device info structures.
3004 */
3005 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
3006 if (dip->nfsdi_layoutrefs == 0 &&
3007 dip->nfsdi_refcnt == 0) {
3008 NFSCL_DEBUG(4, "freeing devinfo\n");
3009 LIST_REMOVE(dip, nfsdi_list);
3010 nfscl_freedevinfo(dip);
3011 }
3012 }
3013 NFSUNLOCKCLSTATE();
3014
3015 /* Do layout return(s), as required. */
3016 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
3017 TAILQ_REMOVE(&rlh, lyp, nfsly_list);
3018 NFSCL_DEBUG(4, "ret layout\n");
3019 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
3020 if ((lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
3021 NFSLOCKCLSTATE();
3022 lyp->nfsly_flags |= NFSLY_RETURNED;
3023 wakeup(lyp);
3024 NFSUNLOCKCLSTATE();
3025 } else
3026 nfscl_freelayout(lyp);
3027 }
3028
3029 /*
3030 * Delegreturn any delegations cleaned out or recalled.
3031 */
3032 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
3033 newnfs_copycred(&dp->nfsdl_cred, cred);
3034 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3035 TAILQ_REMOVE(&dh, dp, nfsdl_list);
3036 free(dp, M_NFSCLDELEG);
3037 }
3038
3039 SLIST_INIT(&lfh);
3040 /*
3041 * Call nfscl_cleanupkext() once per second to check for
3042 * open/lock owners where the process has exited.
3043 */
3044 mytime = NFSD_MONOSEC;
3045 if (prevsec != mytime) {
3046 prevsec = mytime;
3047 nfscl_cleanupkext(clp, &lfh);
3048 }
3049
3050 /*
3051 * Do a ReleaseLockOwner for all lock owners where the
3052 * associated process no longer exists, as found by
3053 * nfscl_cleanupkext().
3054 */
3055 newnfs_setroot(cred);
3056 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
3057 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
3058 nlp) {
3059 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
3060 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
3061 p);
3062 nfscl_freelockowner(lp, 0);
3063 }
3064 free(lfhp, M_TEMP);
3065 }
3066 SLIST_INIT(&lfh);
3067
3068 NFSLOCKCLSTATE();
3069 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
3070 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
3071 hz);
3072 terminate:
3073 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
3074 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
3075 NFSUNLOCKCLSTATE();
3076 NFSFREECRED(cred);
3077 wakeup((caddr_t)clp);
3078 return;
3079 }
3080 NFSUNLOCKCLSTATE();
3081 }
3082 }
3083
3084 /*
3085 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
3086 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
3087 */
3088 void
3089 nfscl_initiate_recovery(struct nfsclclient *clp)
3090 {
3091
3092 if (clp == NULL)
3093 return;
3094 NFSLOCKCLSTATE();
3095 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
3096 NFSUNLOCKCLSTATE();
3097 wakeup((caddr_t)clp);
3098 }
3099
3100 /*
3101 * Dump out the state stuff for debugging.
3102 */
3103 void
3104 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
3105 int lockowner, int locks)
3106 {
3107 struct nfsclclient *clp;
3108 struct nfsclowner *owp;
3109 struct nfsclopen *op;
3110 struct nfscllockowner *lp;
3111 struct nfscllock *lop;
3112 struct nfscldeleg *dp;
3113
3114 clp = nmp->nm_clp;
3115 if (clp == NULL) {
3116 printf("nfscl dumpstate NULL clp\n");
3117 return;
3118 }
3119 NFSLOCKCLSTATE();
3120 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
3121 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3122 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
3123 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
3124 owp->nfsow_owner[0], owp->nfsow_owner[1],
3125 owp->nfsow_owner[2], owp->nfsow_owner[3],
3126 owp->nfsow_seqid);
3127 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3128 if (opens)
3129 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
3130 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
3131 op->nfso_stateid.other[2], op->nfso_opencnt,
3132 op->nfso_fh[12]);
3133 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
3134 if (lockowner)
3135 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
3136 lp->nfsl_owner[0], lp->nfsl_owner[1],
3137 lp->nfsl_owner[2], lp->nfsl_owner[3],
3138 lp->nfsl_seqid,
3139 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
3140 lp->nfsl_stateid.other[2]);
3141 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3142 if (locks)
3143 #ifdef __FreeBSD__
3144 printf("lck typ=%d fst=%ju end=%ju\n",
3145 lop->nfslo_type, (intmax_t)lop->nfslo_first,
3146 (intmax_t)lop->nfslo_end);
3147 #else
3148 printf("lck typ=%d fst=%qd end=%qd\n",
3149 lop->nfslo_type, lop->nfslo_first,
3150 lop->nfslo_end);
3151 #endif
3152 }
3153 }
3154 }
3155 }
3156 }
3157 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3158 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
3159 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
3160 owp->nfsow_owner[0], owp->nfsow_owner[1],
3161 owp->nfsow_owner[2], owp->nfsow_owner[3],
3162 owp->nfsow_seqid);
3163 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3164 if (opens)
3165 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
3166 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
3167 op->nfso_stateid.other[2], op->nfso_opencnt,
3168 op->nfso_fh[12]);
3169 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
3170 if (lockowner)
3171 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
3172 lp->nfsl_owner[0], lp->nfsl_owner[1],
3173 lp->nfsl_owner[2], lp->nfsl_owner[3],
3174 lp->nfsl_seqid,
3175 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
3176 lp->nfsl_stateid.other[2]);
3177 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3178 if (locks)
3179 #ifdef __FreeBSD__
3180 printf("lck typ=%d fst=%ju end=%ju\n",
3181 lop->nfslo_type, (intmax_t)lop->nfslo_first,
3182 (intmax_t)lop->nfslo_end);
3183 #else
3184 printf("lck typ=%d fst=%qd end=%qd\n",
3185 lop->nfslo_type, lop->nfslo_first,
3186 lop->nfslo_end);
3187 #endif
3188 }
3189 }
3190 }
3191 }
3192 NFSUNLOCKCLSTATE();
3193 }
3194
3195 /*
3196 * Check for duplicate open owners and opens.
3197 * (Only used as a diagnostic aid.)
3198 */
3199 void
3200 nfscl_dupopen(vnode_t vp, int dupopens)
3201 {
3202 struct nfsclclient *clp;
3203 struct nfsclowner *owp, *owp2;
3204 struct nfsclopen *op, *op2;
3205 struct nfsfh *nfhp;
3206
3207 clp = VFSTONFS(vp->v_mount)->nm_clp;
3208 if (clp == NULL) {
3209 printf("nfscl dupopen NULL clp\n");
3210 return;
3211 }
3212 nfhp = VTONFS(vp)->n_fhp;
3213 NFSLOCKCLSTATE();
3214
3215 /*
3216 * First, search for duplicate owners.
3217 * These should never happen!
3218 */
3219 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3220 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3221 if (owp != owp2 &&
3222 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
3223 NFSV4CL_LOCKNAMELEN)) {
3224 NFSUNLOCKCLSTATE();
3225 printf("DUP OWNER\n");
3226 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
3227 return;
3228 }
3229 }
3230 }
3231
3232 /*
3233 * Now, search for duplicate stateids.
3234 * These shouldn't happen, either.
3235 */
3236 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3237 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3238 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3239 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3240 if (op != op2 &&
3241 (op->nfso_stateid.other[0] != 0 ||
3242 op->nfso_stateid.other[1] != 0 ||
3243 op->nfso_stateid.other[2] != 0) &&
3244 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
3245 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
3246 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
3247 NFSUNLOCKCLSTATE();
3248 printf("DUP STATEID\n");
3249 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
3250 return;
3251 }
3252 }
3253 }
3254 }
3255 }
3256
3257 /*
3258 * Now search for duplicate opens.
3259 * Duplicate opens for the same owner
3260 * should never occur. Other duplicates are
3261 * possible and are checked for if "dupopens"
3262 * is true.
3263 */
3264 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3265 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3266 if (nfhp->nfh_len == op2->nfso_fhlen &&
3267 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
3268 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3269 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3270 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
3271 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
3272 (!NFSBCMP(op->nfso_own->nfsow_owner,
3273 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
3274 dupopens)) {
3275 if (!NFSBCMP(op->nfso_own->nfsow_owner,
3276 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3277 NFSUNLOCKCLSTATE();
3278 printf("BADDUP OPEN\n");
3279 } else {
3280 NFSUNLOCKCLSTATE();
3281 printf("DUP OPEN\n");
3282 }
3283 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0,
3284 0);
3285 return;
3286 }
3287 }
3288 }
3289 }
3290 }
3291 }
3292 NFSUNLOCKCLSTATE();
3293 }
3294
3295 /*
3296 * During close, find an open that needs to be dereferenced and
3297 * dereference it. If there are no more opens for this file,
3298 * log a message to that effect.
3299 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
3300 * on the file's vnode.
3301 * This is the safe way, since it is difficult to identify
3302 * which open the close is for and I/O can be performed after the
3303 * close(2) system call when a file is mmap'd.
3304 * If it returns 0 for success, there will be a referenced
3305 * clp returned via clpp.
3306 */
3307 int
3308 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
3309 {
3310 struct nfsclclient *clp;
3311 struct nfsclowner *owp;
3312 struct nfsclopen *op;
3313 struct nfscldeleg *dp;
3314 struct nfsfh *nfhp;
3315 int error, notdecr;
3316
3317 error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
3318 if (error)
3319 return (error);
3320 *clpp = clp;
3321
3322 nfhp = VTONFS(vp)->n_fhp;
3323 notdecr = 1;
3324 NFSLOCKCLSTATE();
3325 /*
3326 * First, look for one under a delegation that was locally issued
3327 * and just decrement the opencnt for it. Since all my Opens against
3328 * the server are DENY_NONE, I don't see a problem with hanging
3329 * onto them. (It is much easier to use one of the extant Opens
3330 * that I already have on the server when a Delegation is recalled
3331 * than to do fresh Opens.) Someday, I might need to rethink this.
3332 */
3333 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3334 if (dp != NULL) {
3335 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3336 op = LIST_FIRST(&owp->nfsow_open);
3337 if (op != NULL) {
3338 /*
3339 * Since a delegation is for a file, there
3340 * should never be more than one open for
3341 * each openowner.
3342 */
3343 if (LIST_NEXT(op, nfso_list) != NULL)
3344 panic("nfscdeleg opens");
3345 if (notdecr && op->nfso_opencnt > 0) {
3346 notdecr = 0;
3347 op->nfso_opencnt--;
3348 break;
3349 }
3350 }
3351 }
3352 }
3353
3354 /* Now process the opens against the server. */
3355 LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
3356 nfso_hash) {
3357 if (op->nfso_fhlen == nfhp->nfh_len &&
3358 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3359 nfhp->nfh_len)) {
3360 /* Found an open, decrement cnt if possible */
3361 if (notdecr && op->nfso_opencnt > 0) {
3362 notdecr = 0;
3363 op->nfso_opencnt--;
3364 }
3365 /*
3366 * There are more opens, so just return.
3367 */
3368 if (op->nfso_opencnt > 0) {
3369 NFSUNLOCKCLSTATE();
3370 return (0);
3371 }
3372 }
3373 }
3374 NFSUNLOCKCLSTATE();
3375 if (notdecr)
3376 printf("nfscl: never fnd open\n");
3377 return (0);
3378 }
3379
3380 int
3381 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
3382 {
3383 struct nfsclclient *clp;
3384 struct nfsmount *nmp;
3385 struct nfsclowner *owp, *nowp;
3386 struct nfsclopen *op, *nop;
3387 struct nfsclopenhead delayed;
3388 struct nfscldeleg *dp;
3389 struct nfsfh *nfhp;
3390 struct nfsclrecalllayout *recallp;
3391 struct nfscllayout *lyp;
3392 int error;
3393
3394 error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
3395 if (error)
3396 return (error);
3397 *clpp = clp;
3398
3399 nmp = VFSTONFS(vp->v_mount);
3400 nfhp = VTONFS(vp)->n_fhp;
3401 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
3402 NFSLOCKCLSTATE();
3403 /*
3404 * First get rid of the local Open structures, which should be no
3405 * longer in use.
3406 */
3407 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3408 if (dp != NULL) {
3409 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
3410 op = LIST_FIRST(&owp->nfsow_open);
3411 if (op != NULL) {
3412 KASSERT((op->nfso_opencnt == 0),
3413 ("nfscl: bad open cnt on deleg"));
3414 nfscl_freeopen(op, 1, true);
3415 }
3416 nfscl_freeopenowner(owp, 1);
3417 }
3418 }
3419
3420 /* Return any layouts marked return on close. */
3421 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp,
3422 &lyp);
3423
3424 /* Now process the opens against the server. */
3425 LIST_INIT(&delayed);
3426 lookformore:
3427 LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
3428 nfso_hash) {
3429 if (op->nfso_fhlen == nfhp->nfh_len &&
3430 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3431 nfhp->nfh_len)) {
3432 /* Found an open, close it. */
3433 #ifdef DIAGNOSTIC
3434 KASSERT((op->nfso_opencnt == 0),
3435 ("nfscl: bad open cnt on server (%d)",
3436 op->nfso_opencnt));
3437 #endif
3438 NFSUNLOCKCLSTATE();
3439 if (NFSHASNFSV4N(nmp))
3440 error = nfsrpc_doclose(nmp, op, p, false, true);
3441 else
3442 error = nfsrpc_doclose(nmp, op, p, true, true);
3443 NFSLOCKCLSTATE();
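/*
 * For NFSERR_DELAY, unhook the open and queue it on the
 * "delayed" list; the close is retried near the end of
 * this function, after the state lock has been released.
 */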
3444 if (error == NFSERR_DELAY) {
3445 nfscl_unlinkopen(op);
3446 op->nfso_own = NULL;
3447 LIST_INSERT_HEAD(&delayed, op, nfso_list);
3448 }
3449 goto lookformore;
3450 }
3451 }
3452 nfscl_clrelease(clp);
3453
3454 /* Now, wait for any layout that is returned upon close. */
3455 if (lyp != NULL) {
3456 while ((lyp->nfsly_flags & NFSLY_RETURNED) == 0) {
3457 if (NFSCL_FORCEDISM(nmp->nm_mountp)) {
3458 lyp = NULL;
3459 break;
3460 }
3461 msleep(lyp, NFSCLSTATEMUTEXPTR, PZERO, "nfslroc", hz);
3462 }
3463 if (lyp != NULL)
3464 nfscl_freelayout(lyp);
3465 }
3466
3467 NFSUNLOCKCLSTATE();
3468 /*
3469 * recallp has been set NULL by nfscl_retoncloselayout() if it was
3470 * used by the function, but calling free() with a NULL pointer is ok.
3471 */
3472 free(recallp, M_NFSLAYRECALL);
3473
3474 /* Now, loop retrying the delayed closes. */
3475 LIST_FOREACH_SAFE(op, &delayed, nfso_list, nop) {
3476 nfsrpc_doclose(nmp, op, p, true, false);
3477 LIST_REMOVE(op, nfso_list);
3478 nfscl_freeopen(op, 0, false);
3479 }
3480 return (0);
3481 }
3482
3483 /*
3484 * Return all delegations on this client.
3485 * (Must be called with client sleep lock.)
3486 */
3487 static void
3488 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p,
3489 struct nfscldeleghead *dhp)
3490 {
3491 struct nfscldeleg *dp, *ndp;
3492 struct ucred *cred;
3493
3494 cred = newnfs_getcred();
3495 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3496 nfscl_cleandeleg(dp);
3497 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3498 if (dhp != NULL) {
3499 nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
3500 TAILQ_INSERT_HEAD(dhp, dp, nfsdl_list);
3501 } else
3502 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
3503 }
3504 NFSFREECRED(cred);
3505 }
3506
3507 /*
3508 * Return any delegation for this vp.
3509 */
3510 void
3511 nfscl_delegreturnvp(vnode_t vp, NFSPROC_T *p)
3512 {
3513 struct nfsclclient *clp;
3514 struct nfscldeleg *dp;
3515 struct ucred *cred;
3516 struct nfsnode *np;
3517 struct nfsmount *nmp;
3518
3519 nmp = VFSTONFS(vp->v_mount);
3520 NFSLOCKMNT(nmp);
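/*
 * If no delegation has ever been issued for this mount,
 * there is nothing to return, so avoid the state scan.
 */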
3521 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
3522 NFSUNLOCKMNT(nmp);
3523 return;
3524 }
3525 NFSUNLOCKMNT(nmp);
3526 np = VTONFS(vp);
3527 cred = newnfs_getcred();
3528 dp = NULL;
3529 NFSLOCKCLSTATE();
3530 clp = nmp->nm_clp;
3531 if (clp != NULL)
3532 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3533 np->n_fhp->nfh_len);
3534 if (dp != NULL) {
3535 nfscl_cleandeleg(dp);
3536 nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
3537 NFSUNLOCKCLSTATE();
3538 newnfs_copycred(&dp->nfsdl_cred, cred);
3539 nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3540 free(dp, M_NFSCLDELEG);
3541 } else
3542 NFSUNLOCKCLSTATE();
3543 NFSFREECRED(cred);
3544 }
3545
3546 /*
3547 * Do a callback RPC.
3548 */
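/*
 * The reply is built as the compound is parsed: each sub-op
 * appends its status via "repp", and "retops" counts completed
 * operations so the count can be back-patched through "retopsp"
 * before the reply is sent.
 */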
3549 void
3550 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
3551 {
3552 int clist, gotseq_ok, i, j, k, op, rcalls;
3553 u_int32_t *tl;
3554 struct nfsclclient *clp;
3555 struct nfscldeleg *dp = NULL;
3556 int numops, taglen = -1, error = 0, trunc __unused;
3557 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
3558 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
3559 vnode_t vp = NULL;
3560 struct nfsnode *np;
3561 struct vattr va;
3562 struct nfsfh *nfhp;
3563 mount_t mp;
3564 nfsattrbit_t attrbits, rattrbits;
3565 nfsv4stateid_t stateid;
3566 uint32_t seqid, slotid = 0, highslot, cachethis __unused;
3567 uint8_t sessionid[NFSX_V4SESSIONID];
3568 struct mbuf *rep;
3569 struct nfscllayout *lyp;
3570 uint64_t filesid[2], len, off;
3571 int changed, gotone, laytype, recalltype;
3572 uint32_t iomode;
3573 struct nfsclrecalllayout *recallp = NULL;
3574 struct nfsclsession *tsep;
3575
3576 gotseq_ok = 0;
3577 nfsrvd_rephead(nd);
3578 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3579 taglen = fxdr_unsigned(int, *tl);
3580 if (taglen < 0 || taglen > NFSV4_OPAQUELIMIT) {
3581 error = EBADRPC;
3582 taglen = -1;
3583 goto nfsmout;
3584 }
3585 if (taglen <= NFSV4_SMALLSTR)
3586 tagstr = tag;
3587 else
3588 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3589 error = nfsrv_mtostr(nd, tagstr, taglen);
3590 if (error) {
3591 if (taglen > NFSV4_SMALLSTR)
3592 free(tagstr, M_TEMP);
3593 taglen = -1;
3594 goto nfsmout;
3595 }
3596 (void) nfsm_strtom(nd, tag, taglen);
3597 if (taglen > NFSV4_SMALLSTR) {
3598 free(tagstr, M_TEMP);
3599 }
3600 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3601 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3602 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3603 if (minorvers != NFSV4_MINORVERSION &&
3604 minorvers != NFSV41_MINORVERSION &&
3605 minorvers != NFSV42_MINORVERSION)
3606 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3607 cbident = fxdr_unsigned(u_int32_t, *tl++);
3608 if (nd->nd_repstat)
3609 numops = 0;
3610 else
3611 numops = fxdr_unsigned(int, *tl);
3612 /*
3613 * Loop around doing the sub ops.
3614 */
3615 for (i = 0; i < numops; i++) {
3616 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3617 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3618 *repp++ = *tl;
3619 op = fxdr_unsigned(int, *tl);
3620 nd->nd_procnum = op;
3621 if (i == 0 && op != NFSV4OP_CBSEQUENCE && minorvers !=
3622 NFSV4_MINORVERSION) {
3623 nd->nd_repstat = NFSERR_OPNOTINSESS;
3624 *repp = nfscl_errmap(nd, minorvers);
3625 retops++;
3626 break;
3627 }
3628 if (op < NFSV4OP_CBGETATTR ||
3629 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3630 (op > NFSV4OP_CBNOTIFYDEVID &&
3631 minorvers == NFSV41_MINORVERSION) ||
3632 (op > NFSV4OP_CBOFFLOAD &&
3633 minorvers == NFSV42_MINORVERSION)) {
3634 nd->nd_repstat = NFSERR_OPILLEGAL;
3635 *repp = nfscl_errmap(nd, minorvers);
3636 retops++;
3637 break;
3638 }
3639 if (op < NFSV42_CBNOPS)
3640 nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
3641 switch (op) {
3642 case NFSV4OP_CBGETATTR:
3643 NFSCL_DEBUG(4, "cbgetattr\n");
3644 mp = NULL;
3645 vp = NULL;
3646 error = nfsm_getfh(nd, &nfhp);
3647 if (!error)
3648 error = nfsrv_getattrbits(nd, &attrbits,
3649 NULL, NULL);
3650 if (!error) {
3651 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3652 &clp);
3653 if (mp == NULL)
3654 error = NFSERR_SERVERFAULT;
3655 }
3656 if (!error) {
3657 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3658 nfhp->nfh_len, p, &np);
3659 if (!error)
3660 vp = NFSTOV(np);
3661 }
3662 if (!error) {
3663 NFSZERO_ATTRBIT(&rattrbits);
3664 NFSLOCKCLSTATE();
3665 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3666 nfhp->nfh_len);
3667 if (dp != NULL) {
3668 if (NFSISSET_ATTRBIT(&attrbits,
3669 NFSATTRBIT_SIZE)) {
3670 if (vp != NULL)
3671 va.va_size = np->n_size;
3672 else
3673 va.va_size =
3674 dp->nfsdl_size;
3675 NFSSETBIT_ATTRBIT(&rattrbits,
3676 NFSATTRBIT_SIZE);
3677 }
3678 if (NFSISSET_ATTRBIT(&attrbits,
3679 NFSATTRBIT_CHANGE)) {
3680 va.va_filerev =
3681 dp->nfsdl_change;
3682 if (vp == NULL ||
3683 (np->n_flag & NDELEGMOD))
3684 va.va_filerev++;
3685 NFSSETBIT_ATTRBIT(&rattrbits,
3686 NFSATTRBIT_CHANGE);
3687 }
3688 } else
3689 error = NFSERR_SERVERFAULT;
3690 NFSUNLOCKCLSTATE();
3691 }
3692 if (vp != NULL)
3693 vrele(vp);
3694 if (mp != NULL)
3695 vfs_unbusy(mp);
3696 if (nfhp != NULL)
3697 free(nfhp, M_NFSFH);
3698 if (!error)
3699 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3700 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3701 (uint64_t)0, NULL);
3702 break;
3703 case NFSV4OP_CBRECALL:
3704 NFSCL_DEBUG(4, "cbrecall\n");
3705 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3706 NFSX_UNSIGNED);
3707 stateid.seqid = *tl++;
3708 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3709 NFSX_STATEIDOTHER);
3710 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3711 trunc = fxdr_unsigned(int, *tl);
3712 error = nfsm_getfh(nd, &nfhp);
3713 if (!error) {
3714 NFSLOCKCLSTATE();
3715 if (minorvers == NFSV4_MINORVERSION)
3716 clp = nfscl_getclnt(cbident);
3717 else
3718 clp = nfscl_getclntsess(sessionid);
3719 if (clp != NULL) {
3720 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3721 nfhp->nfh_len);
3722 if (dp != NULL && (dp->nfsdl_flags &
3723 NFSCLDL_DELEGRET) == 0) {
3724 dp->nfsdl_flags |=
3725 NFSCLDL_RECALL;
3726 wakeup((caddr_t)clp);
3727 }
3728 } else {
3729 error = NFSERR_SERVERFAULT;
3730 }
3731 NFSUNLOCKCLSTATE();
3732 }
3733 if (nfhp != NULL)
3734 free(nfhp, M_NFSFH);
3735 break;
3736 case NFSV4OP_CBLAYOUTRECALL:
3737 NFSCL_DEBUG(4, "cblayrec\n");
3738 nfhp = NULL;
3739 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
3740 laytype = fxdr_unsigned(int, *tl++);
3741 iomode = fxdr_unsigned(uint32_t, *tl++);
3742 if (newnfs_true == *tl++)
3743 changed = 1;
3744 else
3745 changed = 0;
3746 recalltype = fxdr_unsigned(int, *tl);
3747 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
3748 laytype, iomode, changed, recalltype);
3749 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
3750 M_WAITOK);
3751 if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
3752 laytype != NFSLAYOUT_FLEXFILE)
3753 error = NFSERR_NOMATCHLAYOUT;
3754 else if (recalltype == NFSLAYOUTRETURN_FILE) {
3755 error = nfsm_getfh(nd, &nfhp);
3756 NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
3757 if (error != 0)
3758 goto nfsmout;
3759 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
3760 NFSX_STATEID);
3761 off = fxdr_hyper(tl); tl += 2;
3762 len = fxdr_hyper(tl); tl += 2;
3763 stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
3764 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
3765 if (minorvers == NFSV4_MINORVERSION)
3766 error = NFSERR_NOTSUPP;
3767 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
3768 (uintmax_t)off, (uintmax_t)len,
3769 stateid.seqid, error);
3770 if (error == 0) {
3771 NFSLOCKCLSTATE();
3772 clp = nfscl_getclntsess(sessionid);
3773 NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
3774 if (clp != NULL) {
3775 lyp = nfscl_findlayout(clp,
3776 nfhp->nfh_fh,
3777 nfhp->nfh_len);
3778 NFSCL_DEBUG(4, "cblyp=%p\n",
3779 lyp);
3780 if (lyp != NULL &&
3781 (lyp->nfsly_flags &
3782 (NFSLY_FILES |
3783 NFSLY_FLEXFILE)) != 0 &&
3784 !NFSBCMP(stateid.other,
3785 lyp->nfsly_stateid.other,
3786 NFSX_STATEIDOTHER)) {
3787 error =
3788 nfscl_layoutrecall(
3789 recalltype,
3790 lyp, iomode, off,
3791 len, stateid.seqid,
3792 0, 0, NULL,
3793 recallp);
3794 if (error == 0 &&
3795 stateid.seqid >
3796 lyp->nfsly_stateid.seqid)
3797 lyp->nfsly_stateid.seqid =
3798 stateid.seqid;
3799 recallp = NULL;
3800 wakeup(clp);
3801 NFSCL_DEBUG(4,
3802 "aft layrcal=%d "
3803 "layseqid=%d\n",
3804 error,
3805 lyp->nfsly_stateid.seqid);
3806 } else
3807 error =
3808 NFSERR_NOMATCHLAYOUT;
3809 } else
3810 error = NFSERR_NOMATCHLAYOUT;
3811 NFSUNLOCKCLSTATE();
3812 }
3813 free(nfhp, M_NFSFH);
3814 } else if (recalltype == NFSLAYOUTRETURN_FSID) {
3815 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
3816 filesid[0] = fxdr_hyper(tl); tl += 2;
3817 filesid[1] = fxdr_hyper(tl); tl += 2;
3818 gotone = 0;
3819 NFSLOCKCLSTATE();
3820 clp = nfscl_getclntsess(sessionid);
3821 if (clp != NULL) {
3822 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3823 nfsly_list) {
3824 if (lyp->nfsly_filesid[0] ==
3825 filesid[0] &&
3826 lyp->nfsly_filesid[1] ==
3827 filesid[1]) {
3828 error =
3829 nfscl_layoutrecall(
3830 recalltype,
3831 lyp, iomode, 0,
3832 UINT64_MAX,
3833 lyp->nfsly_stateid.seqid,
3834 0, 0, NULL,
3835 recallp);
3836 recallp = NULL;
3837 gotone = 1;
3838 }
3839 }
3840 if (gotone != 0)
3841 wakeup(clp);
3842 else
3843 error = NFSERR_NOMATCHLAYOUT;
3844 } else
3845 error = NFSERR_NOMATCHLAYOUT;
3846 NFSUNLOCKCLSTATE();
3847 } else if (recalltype == NFSLAYOUTRETURN_ALL) {
3848 gotone = 0;
3849 NFSLOCKCLSTATE();
3850 clp = nfscl_getclntsess(sessionid);
3851 if (clp != NULL) {
3852 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3853 nfsly_list) {
3854 error = nfscl_layoutrecall(
3855 recalltype, lyp, iomode, 0,
3856 UINT64_MAX,
3857 lyp->nfsly_stateid.seqid,
3858 0, 0, NULL, recallp);
3859 recallp = NULL;
3860 gotone = 1;
3861 }
3862 if (gotone != 0)
3863 wakeup(clp);
3864 else
3865 error = NFSERR_NOMATCHLAYOUT;
3866 } else
3867 error = NFSERR_NOMATCHLAYOUT;
3868 NFSUNLOCKCLSTATE();
3869 } else
3870 error = NFSERR_NOMATCHLAYOUT;
3871 if (recallp != NULL) {
3872 free(recallp, M_NFSLAYRECALL);
3873 recallp = NULL;
3874 }
3875 break;
3876 case NFSV4OP_CBSEQUENCE:
3877 if (i != 0) {
3878 error = NFSERR_SEQUENCEPOS;
3879 break;
3880 }
3881 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3882 5 * NFSX_UNSIGNED);
3883 bcopy(tl, sessionid, NFSX_V4SESSIONID);
3884 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3885 seqid = fxdr_unsigned(uint32_t, *tl++);
3886 slotid = fxdr_unsigned(uint32_t, *tl++);
3887 highslot = fxdr_unsigned(uint32_t, *tl++);
3888 cachethis = *tl++;
3889 /* Throw away the referring call stuff. */
3890 clist = fxdr_unsigned(int, *tl);
3891 for (j = 0; j < clist; j++) {
3892 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3893 NFSX_UNSIGNED);
3894 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3895 rcalls = fxdr_unsigned(int, *tl);
3896 for (k = 0; k < rcalls; k++) {
3897 NFSM_DISSECT(tl, uint32_t *,
3898 2 * NFSX_UNSIGNED);
3899 }
3900 }
3901 NFSLOCKCLSTATE();
3902 clp = nfscl_getclntsess(sessionid);
3903 if (clp == NULL)
3904 error = NFSERR_SERVERFAULT;
3905 if (error == 0) {
3906 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3907 error = nfsv4_seqsession(seqid, slotid,
3908 highslot, tsep->nfsess_cbslots, &rep,
3909 tsep->nfsess_backslots);
3910 }
3911 NFSUNLOCKCLSTATE();
3912 if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3913 gotseq_ok = 1;
3914 if (rep != NULL) {
3915 /*
3916 * Handle a reply for a retried
3917 * callback. The reply will be
3918 * re-inserted in the session cache
3919 * by the nfsv4_seqsess_cacherep() call
3920 * after out:
3921 */
3922 KASSERT(error == NFSERR_REPLYFROMCACHE,
3923 ("cbsequence: non-NULL rep"));
3924 NFSCL_DEBUG(4, "Got cbretry\n");
3925 m_freem(nd->nd_mreq);
3926 nd->nd_mreq = rep;
3927 rep = NULL;
3928 goto out;
3929 }
3930 NFSM_BUILD(tl, uint32_t *,
3931 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3932 bcopy(sessionid, tl, NFSX_V4SESSIONID);
3933 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3934 *tl++ = txdr_unsigned(seqid);
3935 *tl++ = txdr_unsigned(slotid);
3936 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3937 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3938 }
3939 break;
3940 case NFSV4OP_CBRECALLSLOT:
3941 NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
3942 highslot = fxdr_unsigned(uint32_t, *tl);
3943 NFSLOCKCLSTATE();
3944 clp = nfscl_getclntsess(sessionid);
3945 if (clp == NULL)
3946 error = NFSERR_SERVERFAULT;
3947 if (error == 0) {
3948 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3949 mtx_lock(&tsep->nfsess_mtx);
3950 if ((highslot + 1) < tsep->nfsess_foreslots) {
3951 tsep->nfsess_foreslots = (highslot + 1);
3952 nfs_resetslots(tsep);
3953 }
3954 mtx_unlock(&tsep->nfsess_mtx);
3955 }
3956 NFSUNLOCKCLSTATE();
3957 break;
3958 case NFSV4OP_CBRECALLANY:
3959 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
3960 i = fxdr_unsigned(int, *tl++);
3961 j = fxdr_unsigned(int, *tl);
3962 if (i < 0 || j != 1)
3963 error = NFSERR_BADXDR;
3964 if (error == 0) {
3965 NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
3966 j = fxdr_unsigned(int, *tl);
3967 if (i < 100)
3968 i = 100;
3969 else if (i > 100000)
3970 i = 100000;
3971 NFSLOCKCLSTATE();
3972 clp = nfscl_getclntsess(sessionid);
3973 if (clp == NULL)
3974 error = NFSERR_SERVERFAULT;
3975 if (((j & NFSRCA4_RDATA_DLG) != 0 ||
3976 (j & NFSRCA4_WDATA_DLG) != 0) &&
3977 error == 0 && i <
3978 clp->nfsc_deleghighwater)
3979 clp->nfsc_deleghighwater = i;
3980 if (error == 0 &&
3981 ((!NFSHASFLEXFILE(clp->nfsc_nmp) &&
3982 (j & NFSRCA4_FILE_LAYOUT) != 0 &&
3983 i < clp->nfsc_layouthighwater) ||
3984 (NFSHASFLEXFILE(clp->nfsc_nmp) &&
3985 (j & (NFSRCA4_FF_LAYOUT_READ |
3986 NFSRCA4_FF_LAYOUT_RW)) != 0 &&
3987 i < clp->nfsc_layouthighwater)))
3988 clp->nfsc_layouthighwater = i;
3989 NFSUNLOCKCLSTATE();
3990 }
3991 break;
3992 case NFSV4OP_CBNOTIFY:
3993 case NFSV4OP_CBRECALLOBJAVAIL:
3994 case NFSV4OP_CBNOTIFYLOCK:
3995 /*
3996 * These callbacks are not necessarily optional,
3997 * so I think it is better to reply NFS_OK than
3998 * NFSERR_NOTSUPP.
3999 * All provide information for which the FreeBSD client
4000 * does not currently have a use.
4001 * I am not sure whether any of these could be generated
4002 * by an NFSv4.1/4.2 server for this client.
4003 */
4004 error = 0;
4005 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
4006 break;
4007 case NFSV4OP_CBPUSHDELEG:
4008 error = NFSERR_REJECTDELEG;
4009 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
4010 break;
4011 default:
4012 if (i == 0 && minorvers != NFSV4_MINORVERSION)
4013 error = NFSERR_OPNOTINSESS;
4014 else {
4015 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
4016 error = NFSERR_NOTSUPP;
4017 }
4018 break;
4019 }
4020 if (error) {
4021 if (error == EBADRPC || error == NFSERR_BADXDR) {
4022 nd->nd_repstat = NFSERR_BADXDR;
4023 } else {
4024 nd->nd_repstat = error;
4025 }
4026 error = 0;
4027 }
4028 retops++;
4029 if (nd->nd_repstat) {
4030 *repp = nfscl_errmap(nd, minorvers);
4031 break;
4032 } else
4033 *repp = 0; /* NFS4_OK */
4034 }
4035 nfsmout:
4036 if (recallp != NULL)
4037 free(recallp, M_NFSLAYRECALL);
4038 if (error) {
4039 if (error == EBADRPC || error == NFSERR_BADXDR)
4040 nd->nd_repstat = NFSERR_BADXDR;
4041 else
4042 printf("nfsv4 comperr1=%d\n", error);
4043 }
4044 if (taglen == -1) {
4045 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
4046 *tl++ = 0;
4047 *tl = 0;
4048 } else {
4049 *retopsp = txdr_unsigned(retops);
4050 }
4051 *nd->nd_errp = nfscl_errmap(nd, minorvers);
4052 out:
4053 if (gotseq_ok != 0) {
4054 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
4055 NFSLOCKCLSTATE();
4056 clp = nfscl_getclntsess(sessionid);
4057 if (clp != NULL) {
4058 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
4059 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
4060 NFSERR_OK, &rep);
4061 NFSUNLOCKCLSTATE();
4062 } else {
4063 NFSUNLOCKCLSTATE();
4064 m_freem(rep);
4065 }
4066 }
4067 }
4068
4069 /*
4070 * Generate the next cbident value. Basically just increment a static value
4071 * and, once it has wrapped around, check that the value isn't already in use.
4072 */
4073 static u_int32_t
4074 nfscl_nextcbident(void)
4075 {
4076 struct nfsclclient *clp;
4077 int matched;
4078 static u_int32_t nextcbident = 0;
4079 static int haswrapped = 0;
4080
4081 nextcbident++;
4082 if (nextcbident == 0)
4083 haswrapped = 1;
4084 if (haswrapped) {
4085 /*
4086 * Search the clientid list for one already using this cbident.
4087 */
4088 do {
4089 matched = 0;
4090 NFSLOCKCLSTATE();
4091 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
4092 if (clp->nfsc_cbident == nextcbident) {
4093 matched = 1;
4094 break;
4095 }
4096 }
4097 NFSUNLOCKCLSTATE();
4098 if (matched == 1)
4099 nextcbident++;
4100 } while (matched);
4101 }
4102 return (nextcbident);
4103 }
4104
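/*
 * The same increment-until-wrap pattern, reduced to a standalone
 * sketch with hypothetical names (in_use() stands in for the clientid
 * list search above):
 *
 *	static uint32_t next_id;
 *	static int wrapped;
 *
 *	uint32_t
 *	alloc_id(int (*in_use)(uint32_t))
 *	{
 *		if (++next_id == 0)
 *			wrapped = 1;
 *		if (wrapped)
 *			while (in_use(next_id))
 *				next_id++;
 *		return (next_id);
 *	}
 *
 * Uniqueness is only verified after the counter has wrapped, since
 * before that point every value handed out is new by construction.
 */
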
4105 /*
4106 * Get the mount point related to a given cbident or session and busy it.
4107 */
4108 static mount_t
4109 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
4110 struct nfsclclient **clpp)
4111 {
4112 struct nfsclclient *clp;
4113 mount_t mp;
4114 int error;
4115 struct nfsclsession *tsep;
4116
4117 *clpp = NULL;
4118 NFSLOCKCLSTATE();
4119 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
4120 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
4121 if (minorvers == NFSV4_MINORVERSION) {
4122 if (clp->nfsc_cbident == cbident)
4123 break;
4124 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
4125 NFSX_V4SESSIONID))
4126 break;
4127 }
4128 if (clp == NULL) {
4129 NFSUNLOCKCLSTATE();
4130 return (NULL);
4131 }
4132 mp = clp->nfsc_nmp->nm_mountp;
4133 vfs_ref(mp);
4134 NFSUNLOCKCLSTATE();
4135 error = vfs_busy(mp, 0);
4136 vfs_rel(mp);
4137 if (error != 0)
4138 return (NULL);
4139 *clpp = clp;
4140 return (mp);
4141 }
4142
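/*
 * The vfs_ref()/vfs_busy()/vfs_rel() sequence above is the standard
 * way to pin a mount point found while a mutex is held: take a
 * reference under the lock, drop the lock, then convert the reference
 * into a busy count that holds off unmount. A caller must pair a
 * non-NULL return with vfs_unbusy(), as the CBGETATTR handler does:
 *
 *	mp = nfscl_getmnt(minorvers, sessionid, cbident, &clp);
 *	if (mp != NULL) {
 *		... use clp and mp ...
 *		vfs_unbusy(mp);
 *	}
 */
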
4143 /*
4144 * Get the clientid pointer related to a given cbident.
4145 */
4146 static struct nfsclclient *
4147 nfscl_getclnt(u_int32_t cbident)
4148 {
4149 struct nfsclclient *clp;
4150
4151 LIST_FOREACH(clp, &nfsclhead, nfsc_list)
4152 if (clp->nfsc_cbident == cbident)
4153 break;
4154 return (clp);
4155 }
4156
4157 /*
4158 * Get the clientid pointer related to a given sessionid.
4159 */
4160 static struct nfsclclient *
4161 nfscl_getclntsess(uint8_t *sessionid)
4162 {
4163 struct nfsclclient *clp;
4164 struct nfsclsession *tsep;
4165
4166 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
4167 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
4168 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
4169 NFSX_V4SESSIONID))
4170 break;
4171 }
4172 return (clp);
4173 }
4174
4175 /*
4176 * Search for a lock conflict locally on the client. A conflict occurs if
4177 * the owners differ, the byte ranges overlap and at least one of the locks
4178 * is a write lock or this is an unlock.
4179 */
4180 static int
4181 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
4182 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
4183 struct nfscllock **lopp)
4184 {
4185 struct nfsclopen *op;
4186 int ret;
4187
4188 if (dp != NULL) {
4189 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
4190 if (ret)
4191 return (ret);
4192 }
4193 LIST_FOREACH(op, NFSCLOPENHASH(clp, fhp, fhlen), nfso_hash) {
4194 if (op->nfso_fhlen == fhlen &&
4195 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
4196 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
4197 own, lopp);
4198 if (ret)
4199 return (ret);
4200 }
4201 }
4202 return (0);
4203 }
4204
4205 static int
4206 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
4207 u_int8_t *own, struct nfscllock **lopp)
4208 {
4209 struct nfscllockowner *lp;
4210 struct nfscllock *lop;
4211
4212 LIST_FOREACH(lp, lhp, nfsl_list) {
4213 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
4214 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
4215 if (lop->nfslo_first >= nlop->nfslo_end)
4216 break;
4217 if (lop->nfslo_end <= nlop->nfslo_first)
4218 continue;
4219 if (lop->nfslo_type == F_WRLCK ||
4220 nlop->nfslo_type == F_WRLCK ||
4221 nlop->nfslo_type == F_UNLCK) {
4222 if (lopp != NULL)
4223 *lopp = lop;
4224 return (NFSERR_DENIED);
4225 }
4226 }
4227 }
4228 }
4229 return (0);
4230 }
4231
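/*
 * A worked example of the conflict test above (illustrative values):
 * a read lock [0, 100) held by owner A conflicts with a requested
 * write lock [50, 150) from owner B, since the owners differ, the
 * ranges overlap and one of the locks is a write lock; the same
 * request from owner A itself never conflicts. The overlap condition,
 * with nfslo_first inclusive and nfslo_end exclusive, is
 *
 *	lop->nfslo_first < nlop->nfslo_end &&
 *	    lop->nfslo_end > nlop->nfslo_first
 *
 * which is exactly the inverse of the two early-exit tests in the
 * inner loop of nfscl_checkconflict().
 */
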
4232 /*
4233 * Check for a local conflicting lock.
4234 */
4235 int
4236 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
4237 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
4238 {
4239 struct nfscllock *lop, nlck;
4240 struct nfscldeleg *dp;
4241 struct nfsnode *np;
4242 u_int8_t own[NFSV4CL_LOCKNAMELEN];
4243 int error;
4244
4245 nlck.nfslo_type = fl->l_type;
4246 nlck.nfslo_first = off;
4247 if (len == NFS64BITSSET) {
4248 nlck.nfslo_end = NFS64BITSSET;
4249 } else {
4250 nlck.nfslo_end = off + len;
4251 if (nlck.nfslo_end <= nlck.nfslo_first)
4252 return (NFSERR_INVAL);
4253 }
4254 np = VTONFS(vp);
4255 nfscl_filllockowner(id, own, flags);
4256 NFSLOCKCLSTATE();
4257 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4258 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
4259 &nlck, own, dp, &lop);
4260 if (error != 0) {
4261 fl->l_whence = SEEK_SET;
4262 fl->l_start = lop->nfslo_first;
4263 if (lop->nfslo_end == NFS64BITSSET)
4264 fl->l_len = 0;
4265 else
4266 fl->l_len = lop->nfslo_end - lop->nfslo_first;
4267 fl->l_pid = (pid_t)0;
4268 fl->l_type = lop->nfslo_type;
4269 error = -1; /* no RPC required */
4270 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
4271 fl->l_type == F_RDLCK)) {
4272 /*
4273 * The delegation ensures that there isn't a conflicting
4274 * lock on the server, so return -1 to indicate an RPC
4275 * isn't required.
4276 */
4277 fl->l_type = F_UNLCK;
4278 error = -1;
4279 }
4280 NFSUNLOCKCLSTATE();
4281 return (error);
4282 }
4283
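/*
 * Caller's sketch (do_lockt_rpc() is a hypothetical helper, not part
 * of this file): an F_GETLK style test can try nfscl_lockt() first
 * and only issue a LockT RPC when neither a local conflict nor a
 * covering delegation settles the answer locally:
 *
 *	error = nfscl_lockt(vp, clp, off, len, fl, p, id, flags);
 *	if (error == -1)
 *		error = 0;
 *	else if (error == 0)
 *		error = do_lockt_rpc(vp, fl);
 *
 * The -1 return means fl has already been filled in and no RPC is
 * required; any other non-zero value (e.g. NFSERR_INVAL) is an error.
 */
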
4284 /*
4285 * Handle Recall of a delegation.
4286 * The clp must be exclusive locked when this is called.
4287 */
4288 static int
4289 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
4290 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
4291 int called_from_renewthread, vnode_t *vpp)
4292 {
4293 struct nfsclowner *owp, *lowp, *nowp;
4294 struct nfsclopen *op, *lop;
4295 struct nfscllockowner *lp;
4296 struct nfscllock *lckp;
4297 struct nfsnode *np;
4298 int error = 0, ret;
4299
4300 if (vp == NULL) {
4301 KASSERT(vpp != NULL, ("nfscl_recalldeleg: vpp NULL"));
4302 *vpp = NULL;
4303 /*
4304 * First, get a vnode for the file. This is needed to do RPCs.
4305 */
4306 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
4307 dp->nfsdl_fhlen, p, &np);
4308 if (ret) {
4309 /*
4310 * File isn't open, so nothing to move over to the
4311 * server.
4312 */
4313 return (0);
4314 }
4315 vp = NFSTOV(np);
4316 *vpp = vp;
4317 } else {
4318 np = VTONFS(vp);
4319 }
4320 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
4321
4322 /*
4323 * Ok, if it's a write delegation, flush data to the server, so
4324 * that close/open consistency is retained.
4325 */
4326 ret = 0;
4327 NFSLOCKNODE(np);
4328 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
4329 np->n_flag |= NDELEGRECALL;
4330 NFSUNLOCKNODE(np);
4331 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
4332 NFSLOCKNODE(np);
4333 np->n_flag &= ~NDELEGRECALL;
4334 }
4335 NFSINVALATTRCACHE(np);
4336 NFSUNLOCKNODE(np);
4337 if (ret == EIO && called_from_renewthread != 0) {
4338 /*
4339 * If the flush failed with EIO for the renew thread,
4340 * return now, so that the dirty buffer will be flushed
4341 * later.
4342 */
4343 return (ret);
4344 }
4345
4346 /*
4347 * Now, for each openowner with opens issued locally, move them
4348 * over to state against the server.
4349 */
4350 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
4351 lop = LIST_FIRST(&lowp->nfsow_open);
4352 if (lop != NULL) {
4353 if (LIST_NEXT(lop, nfso_list) != NULL)
4354 panic("nfsdlg mult opens");
4355 /*
4356 * Look for the same openowner against the server.
4357 */
4358 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
4359 if (!NFSBCMP(lowp->nfsow_owner,
4360 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
4361 newnfs_copycred(&dp->nfsdl_cred, cred);
4362 ret = nfscl_moveopen(vp, clp, nmp, lop,
4363 owp, dp, cred, p);
4364 if (ret == NFSERR_STALECLIENTID ||
4365 ret == NFSERR_STALEDONTRECOVER ||
4366 ret == NFSERR_BADSESSION)
4367 return (ret);
4368 if (ret) {
4369 nfscl_freeopen(lop, 1, true);
4370 if (!error)
4371 error = ret;
4372 }
4373 break;
4374 }
4375 }
4376
4377 /*
4378 * If no openowner found, create one and get an open
4379 * for it.
4380 */
4381 if (owp == NULL) {
4382 nowp = malloc(
4383 sizeof (struct nfsclowner), M_NFSCLOWNER,
4384 M_WAITOK);
4385 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
4386 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
4387 dp->nfsdl_fhlen, NULL, NULL);
4388 newnfs_copycred(&dp->nfsdl_cred, cred);
4389 ret = nfscl_moveopen(vp, clp, nmp, lop,
4390 owp, dp, cred, p);
4391 if (ret) {
4392 nfscl_freeopenowner(owp, 0);
4393 if (ret == NFSERR_STALECLIENTID ||
4394 ret == NFSERR_STALEDONTRECOVER ||
4395 ret == NFSERR_BADSESSION)
4396 return (ret);
4397 if (ret) {
4398 nfscl_freeopen(lop, 1, true);
4399 if (!error)
4400 error = ret;
4401 }
4402 }
4403 }
4404 }
4405 }
4406
4407 /*
4408 * Now, get byte range locks for any locks done locally.
4409 */
4410 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4411 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4412 newnfs_copycred(&dp->nfsdl_cred, cred);
4413 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4414 if (ret == NFSERR_STALESTATEID ||
4415 ret == NFSERR_STALEDONTRECOVER ||
4416 ret == NFSERR_STALECLIENTID ||
4417 ret == NFSERR_BADSESSION)
4418 return (ret);
4419 if (ret && !error)
4420 error = ret;
4421 }
4422 }
4423 return (error);
4424 }
4425
4426 /*
4427 * Move a locally issued open over to an owner on the state list.
4428 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
4429 * returns with it unlocked.
4430 */
4431 static int
4432 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4433 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4434 struct ucred *cred, NFSPROC_T *p)
4435 {
4436 struct nfsclopen *op, *nop;
4437 struct nfscldeleg *ndp;
4438 struct nfsnode *np;
4439 int error = 0, newone;
4440
4441 /*
4442 	 * First, look for an appropriate open. If one is found, just increment
4443 	 * the opencnt in it.
4444 */
4445 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4446 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4447 op->nfso_fhlen == lop->nfso_fhlen &&
4448 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4449 op->nfso_opencnt += lop->nfso_opencnt;
4450 nfscl_freeopen(lop, 1, true);
4451 return (0);
4452 }
4453 }
4454
4455 /* No appropriate open, so we have to do one against the server. */
4456 np = VTONFS(vp);
4457 nop = malloc(sizeof (struct nfsclopen) +
4458 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4459 nop->nfso_hash.le_prev = NULL;
4460 newone = 0;
4461 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4462 lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4463 ndp = dp;
4464 if (NFSHASNFSV4N(nmp))
4465 error = nfscl_tryopen(nmp, vp, lop->nfso_fh, lop->nfso_fhlen,
4466 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4467 NULL, 0, &ndp, 0, 0, cred, p);
4468 else
4469 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4470 np->n_v4->n4_fhlen, lop->nfso_fh, lop->nfso_fhlen,
4471 lop->nfso_mode, op, NFS4NODENAME(np->n_v4),
4472 np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4473 if (error) {
4474 if (newone)
4475 nfscl_freeopen(op, 0, true);
4476 } else {
4477 op->nfso_mode |= lop->nfso_mode;
4478 op->nfso_opencnt += lop->nfso_opencnt;
4479 nfscl_freeopen(lop, 1, true);
4480 }
4481 if (nop != NULL)
4482 free(nop, M_NFSCLOPEN);
4483 if (ndp != NULL) {
4484 /*
4485 * What should I do with the returned delegation, since the
4486 * delegation is being recalled? For now, just printf and
4487 	 * throw it away.
4488 */
4489 printf("Moveopen returned deleg\n");
4490 free(ndp, M_NFSCLDELEG);
4491 }
4492 return (error);
4493 }
4494
4495 /*
4496 * Recall all delegations on this client.
4497 */
4498 static void
4499 nfscl_totalrecall(struct nfsclclient *clp)
4500 {
4501 struct nfscldeleg *dp;
4502
4503 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4504 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4505 dp->nfsdl_flags |= NFSCLDL_RECALL;
4506 }
4507 }
4508
4509 /*
4510 * Relock byte ranges. Called for delegation recall and state expiry.
4511 */
4512 static int
4513 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4514 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
4515 NFSPROC_T *p)
4516 {
4517 struct nfscllockowner *nlp;
4518 struct nfsfh *nfhp;
4519 struct nfsnode *np;
4520 u_int64_t off, len;
4521 int error, newone, donelocally;
4522
4523 if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp)) {
4524 np = VTONFS(vp);
4525 NFSLOCKNODE(np);
4526 np->n_flag |= NMIGHTBELOCKED;
4527 NFSUNLOCKNODE(np);
4528 }
4529
4530 off = lop->nfslo_first;
4531 len = lop->nfslo_end - lop->nfslo_first;
4532 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
4533 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
4534 lp->nfsl_openowner, &nlp, &newone, &donelocally);
4535 if (error || donelocally)
4536 return (error);
4537 nfhp = VTONFS(vp)->n_fhp;
4538 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
4539 nfhp->nfh_len, nlp, newone, 0, off,
4540 len, lop->nfslo_type, cred, p);
4541 if (error)
4542 nfscl_freelockowner(nlp, 0);
4543 return (error);
4544 }
4545
4546 /*
4547 * Called to re-open a file. Basically get a vnode for the file handle
4548 * and then call nfsrpc_openrpc() to do the rest.
4549 */
4550 static int
4551 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
4552 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
4553 struct ucred *cred, NFSPROC_T *p)
4554 {
4555 struct nfsnode *np;
4556 vnode_t vp;
4557 int error;
4558
4559 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
4560 if (error)
4561 return (error);
4562 vp = NFSTOV(np);
4563 if (NFSHASNFSV4N(nmp))
4564 error = nfscl_tryopen(nmp, vp, fhp, fhlen, fhp, fhlen, mode, op,
4565 NULL, 0, dpp, 0, 0, cred, p);
4566 else if (np->n_v4 != NULL)
4567 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4568 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
4569 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
4570 cred, p);
4571 else
4572 error = EINVAL;
4573 vrele(vp);
4574 return (error);
4575 }
4576
4577 /*
4578 * Try an open against the server. Just call nfsrpc_openrpc(), retrying
4579 * while it returns NFSERR_DELAY. Also, fall back to system credentials
4580 * if the passed-in credentials fail.
4581 */
4582 static int
4583 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4584 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
4585 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
4586 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
4587 {
4588 int error;
4589 struct nfscldeleg *dp;
4590
4591 dp = *ndpp;
4592 do {
4593 *ndpp = dp; /* *ndpp needs to be set for retries. */
4594 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
4595 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
4596 0, 0);
4597 if (error == NFSERR_DELAY)
4598 (void) nfs_catnap(PZERO, error, "nfstryop");
4599 } while (error == NFSERR_DELAY);
4600 if (error == EAUTH || error == EACCES) {
4601 /* Try again using system credentials */
4602 newnfs_setroot(cred);
4603 do {
4604 *ndpp = dp; /* *ndpp needs to be set for retries. */
4605 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
4606 newfhlen, mode, op, name, namelen, ndpp, reclaim,
4607 delegtype, cred, p, 1, 0);
4608 if (error == NFSERR_DELAY)
4609 (void) nfs_catnap(PZERO, error, "nfstryop");
4610 } while (error == NFSERR_DELAY);
4611 }
4612 return (error);
4613 }
4614
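/*
 * nfscl_tryopen() above, and nfscl_trylock(), nfscl_trydelegreturn()
 * and nfscl_tryclose() below, all share one shape: loop on the RPC
 * while the server replies NFSERR_DELAY, napping between attempts,
 * then run the same loop once more with system credentials if the
 * first pass failed with EAUTH or EACCES. Reduced to a sketch, where
 * do_rpc() stands in for the per-operation RPC function:
 *
 *	do {
 *		error = do_rpc(..., cred, 0);
 *		if (error == NFSERR_DELAY)
 *			(void) nfs_catnap(PZERO, error, "name");
 *	} while (error == NFSERR_DELAY);
 *	if (error == EAUTH || error == EACCES) {
 *		newnfs_setroot(cred);
 *		(repeat the loop with the final argument set to 1)
 *	}
 */
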
4615 /*
4616 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
4617 * NFSERR_DELAY. Also, retry with system credentials if the provided
4618 * credentials don't work.
4619 */
4620 static int
4621 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4622 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4623 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4624 {
4625 struct nfsrv_descript nfsd, *nd = &nfsd;
4626 int error;
4627
4628 do {
4629 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4630 reclaim, off, len, type, cred, p, 0);
4631 if (!error && nd->nd_repstat == NFSERR_DELAY)
4632 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4633 "nfstrylck");
4634 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4635 if (!error)
4636 error = nd->nd_repstat;
4637 if (error == EAUTH || error == EACCES) {
4638 /* Try again using root credentials */
4639 newnfs_setroot(cred);
4640 do {
4641 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4642 newone, reclaim, off, len, type, cred, p, 1);
4643 if (!error && nd->nd_repstat == NFSERR_DELAY)
4644 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4645 "nfstrylck");
4646 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4647 if (!error)
4648 error = nd->nd_repstat;
4649 }
4650 return (error);
4651 }
4652
4653 /*
4654 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4655 * retrying while it returns NFSERR_DELAY. Also, fall back to system
4656 * credentials if the passed-in credentials fail.
4657 */
4658 int
4659 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4660 struct nfsmount *nmp, NFSPROC_T *p)
4661 {
4662 int error;
4663
4664 do {
4665 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4666 if (error == NFSERR_DELAY)
4667 (void) nfs_catnap(PZERO, error, "nfstrydp");
4668 } while (error == NFSERR_DELAY);
4669 if (error == EAUTH || error == EACCES) {
4670 /* Try again using system credentials */
4671 newnfs_setroot(cred);
4672 do {
4673 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4674 if (error == NFSERR_DELAY)
4675 (void) nfs_catnap(PZERO, error, "nfstrydp");
4676 } while (error == NFSERR_DELAY);
4677 }
4678 return (error);
4679 }
4680
4681 /*
4682 * Try a close against the server. Just call nfsrpc_closerpc(),
4683 * retrying while it returns NFSERR_DELAY. Also, fall back to system
4684 * credentials if the passed-in credentials fail.
4685 */
4686 int
4687 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4688 struct nfsmount *nmp, NFSPROC_T *p, bool loop_on_delayed)
4689 {
4690 struct nfsrv_descript nfsd, *nd = &nfsd;
4691 int error;
4692
4693 do {
4694 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4695 if (loop_on_delayed && error == NFSERR_DELAY)
4696 (void) nfs_catnap(PZERO, error, "nfstrycl");
4697 } while (loop_on_delayed && error == NFSERR_DELAY);
4698 if (error == EAUTH || error == EACCES) {
4699 /* Try again using system credentials */
4700 newnfs_setroot(cred);
4701 do {
4702 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4703 if (loop_on_delayed && error == NFSERR_DELAY)
4704 (void) nfs_catnap(PZERO, error, "nfstrycl");
4705 } while (loop_on_delayed && error == NFSERR_DELAY);
4706 }
4707 return (error);
4708 }
4709
4710 /*
4711 * Decide if a delegation on a file permits close without flushing writes
4712 * to the server. This might be a big performance win in some environments.
4713 * (Not useful until the client does caching on local stable storage.)
4714 */
4715 int
4716 nfscl_mustflush(vnode_t vp)
4717 {
4718 struct nfsclclient *clp;
4719 struct nfscldeleg *dp;
4720 struct nfsnode *np;
4721 struct nfsmount *nmp;
4722
4723 np = VTONFS(vp);
4724 nmp = VFSTONFS(vp->v_mount);
4725 if (!NFSHASNFSV4(nmp) || vp->v_type != VREG)
4726 return (1);
4727 NFSLOCKMNT(nmp);
4728 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4729 NFSUNLOCKMNT(nmp);
4730 return (1);
4731 }
4732 NFSUNLOCKMNT(nmp);
4733 NFSLOCKCLSTATE();
4734 clp = nfscl_findcl(nmp);
4735 if (clp == NULL) {
4736 NFSUNLOCKCLSTATE();
4737 return (1);
4738 }
4739 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4740 if (dp != NULL && (dp->nfsdl_flags &
4741 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
4742 NFSCLDL_WRITE &&
4743 (dp->nfsdl_sizelimit >= np->n_size ||
4744 !NFSHASSTRICT3530(nmp))) {
4745 NFSUNLOCKCLSTATE();
4746 return (0);
4747 }
4748 NFSUNLOCKCLSTATE();
4749 return (1);
4750 }
4751
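/*
 * Caller's sketch: a close or fsync path can use the return value to
 * decide whether dirty buffers must be pushed to the server now.
 *
 *	if (nfscl_mustflush(vp))
 *		error = ncl_flush(vp, MNT_WAIT, p, 1, 0);
 *
 * A 0 return is only possible while a write delegation that is not
 * being recalled covers the file, as tested above.
 */
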
4752 /*
4753 * See if a (write) delegation exists for this file.
4754 */
4755 int
4756 nfscl_nodeleg(vnode_t vp, int writedeleg)
4757 {
4758 struct nfsclclient *clp;
4759 struct nfscldeleg *dp;
4760 struct nfsnode *np;
4761 struct nfsmount *nmp;
4762
4763 np = VTONFS(vp);
4764 nmp = VFSTONFS(vp->v_mount);
4765 if (!NFSHASNFSV4(nmp) || vp->v_type != VREG)
4766 return (1);
4767 NFSLOCKMNT(nmp);
4768 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4769 NFSUNLOCKMNT(nmp);
4770 return (1);
4771 }
4772 NFSUNLOCKMNT(nmp);
4773 NFSLOCKCLSTATE();
4774 clp = nfscl_findcl(nmp);
4775 if (clp == NULL) {
4776 NFSUNLOCKCLSTATE();
4777 return (1);
4778 }
4779 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4780 if (dp != NULL &&
4781 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
4782 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
4783 NFSCLDL_WRITE)) {
4784 NFSUNLOCKCLSTATE();
4785 return (0);
4786 }
4787 NFSUNLOCKCLSTATE();
4788 return (1);
4789 }
4790
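/*
 * The function name reads as the question "no delegation?", so 1
 * means none exists (an RPC is needed) and 0 means one does. Sketch:
 *
 *	if (nfscl_nodeleg(vp, 1) == 0) {
 *		... a write delegation covers vp, so writes may be
 *		    handled locally ...
 *	}
 *
 * With writedeleg == 0 any usable delegation satisfies the test; with
 * writedeleg != 0 only a write delegation does.
 */
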
4791 /*
4792 * Look for an associated delegation that should be DelegReturned.
4793 */
4794 int
4795 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
4796 {
4797 struct nfsclclient *clp;
4798 struct nfscldeleg *dp;
4799 struct nfsclowner *owp;
4800 struct nfscllockowner *lp;
4801 struct nfsmount *nmp;
4802 struct mount *mp;
4803 struct ucred *cred;
4804 struct nfsnode *np;
4805 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4806
4807 nmp = VFSTONFS(vp->v_mount);
4808 if (NFSHASPNFS(nmp))
4809 return (retcnt);
4810 NFSLOCKMNT(nmp);
4811 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4812 NFSUNLOCKMNT(nmp);
4813 return (retcnt);
4814 }
4815 NFSUNLOCKMNT(nmp);
4816 np = VTONFS(vp);
4817 mp = nmp->nm_mountp;
4818 NFSLOCKCLSTATE();
4819 /*
4820 * Loop around waiting for:
4821 * - outstanding I/O operations on delegations to complete
4822 * - for a delegation on vp that has state, lock the client and
4823 * do a recall
4824 * - return delegation with no state
4825 */
4826 while (1) {
4827 clp = nfscl_findcl(nmp);
4828 if (clp == NULL) {
4829 NFSUNLOCKCLSTATE();
4830 return (retcnt);
4831 }
4832 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4833 np->n_fhp->nfh_len);
4834 if (dp != NULL) {
4835 /*
4836 * Wait for outstanding I/O ops to be done.
4837 */
4838 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4839 if (igotlock) {
4840 nfsv4_unlock(&clp->nfsc_lock, 0);
4841 igotlock = 0;
4842 }
4843 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4844 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
4845 "nfscld", hz);
4846 if (NFSCL_FORCEDISM(mp)) {
4847 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
4848 NFSUNLOCKCLSTATE();
4849 return (0);
4850 }
4851 continue;
4852 }
4853 needsrecall = 0;
4854 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4855 if (!LIST_EMPTY(&owp->nfsow_open)) {
4856 needsrecall = 1;
4857 break;
4858 }
4859 }
4860 if (!needsrecall) {
4861 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4862 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4863 needsrecall = 1;
4864 break;
4865 }
4866 }
4867 }
4868 if (needsrecall && !triedrecall) {
4869 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4870 islept = 0;
4871 while (!igotlock) {
4872 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4873 &islept, NFSCLSTATEMUTEXPTR, mp);
4874 if (NFSCL_FORCEDISM(mp)) {
4875 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
4876 if (igotlock)
4877 nfsv4_unlock(&clp->nfsc_lock, 0);
4878 NFSUNLOCKCLSTATE();
4879 return (0);
4880 }
4881 if (islept)
4882 break;
4883 }
4884 if (islept)
4885 continue;
4886 NFSUNLOCKCLSTATE();
4887 cred = newnfs_getcred();
4888 newnfs_copycred(&dp->nfsdl_cred, cred);
4889 nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0, NULL);
4890 NFSFREECRED(cred);
4891 triedrecall = 1;
4892 NFSLOCKCLSTATE();
4893 nfsv4_unlock(&clp->nfsc_lock, 0);
4894 igotlock = 0;
4895 continue;
4896 }
4897 *stp = dp->nfsdl_stateid;
4898 retcnt = 1;
4899 nfscl_cleandeleg(dp);
4900 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
4901 }
4902 if (igotlock)
4903 nfsv4_unlock(&clp->nfsc_lock, 0);
4904 NFSUNLOCKCLSTATE();
4905 return (retcnt);
4906 }
4907 }
4908
4909 /*
4910 * Look for associated delegation(s) that should be DelegReturned.
4911 */
4912 int
4913 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
4914 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
4915 {
4916 struct nfsclclient *clp;
4917 struct nfscldeleg *dp;
4918 struct nfsclowner *owp;
4919 struct nfscllockowner *lp;
4920 struct nfsmount *nmp;
4921 struct mount *mp;
4922 struct ucred *cred;
4923 struct nfsnode *np;
4924 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4925
4926 nmp = VFSTONFS(fvp->v_mount);
4927 *gotfdp = 0;
4928 *gottdp = 0;
4929 if (NFSHASPNFS(nmp))
4930 return (retcnt);
4931 NFSLOCKMNT(nmp);
4932 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4933 NFSUNLOCKMNT(nmp);
4934 return (retcnt);
4935 }
4936 NFSUNLOCKMNT(nmp);
4937 mp = nmp->nm_mountp;
4938 NFSLOCKCLSTATE();
4939 /*
4940 * Loop around waiting for:
4941 * - outstanding I/O operations on delegations to complete
4942 * - for a delegation on fvp that has state, lock the client and
4943 * do a recall
4944 * - return delegation(s) with no state.
4945 */
4946 while (1) {
4947 clp = nfscl_findcl(nmp);
4948 if (clp == NULL) {
4949 NFSUNLOCKCLSTATE();
4950 return (retcnt);
4951 }
4952 np = VTONFS(fvp);
4953 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4954 np->n_fhp->nfh_len);
4955 if (dp != NULL && *gotfdp == 0) {
4956 /*
4957 * Wait for outstanding I/O ops to be done.
4958 */
4959 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4960 if (igotlock) {
4961 nfsv4_unlock(&clp->nfsc_lock, 0);
4962 igotlock = 0;
4963 }
4964 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4965 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
4966 "nfscld", hz);
4967 if (NFSCL_FORCEDISM(mp)) {
4968 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
4969 NFSUNLOCKCLSTATE();
4970 *gotfdp = 0;
4971 *gottdp = 0;
4972 return (0);
4973 }
4974 continue;
4975 }
4976 needsrecall = 0;
4977 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4978 if (!LIST_EMPTY(&owp->nfsow_open)) {
4979 needsrecall = 1;
4980 break;
4981 }
4982 }
4983 if (!needsrecall) {
4984 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4985 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4986 needsrecall = 1;
4987 break;
4988 }
4989 }
4990 }
4991 if (needsrecall && !triedrecall) {
4992 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4993 islept = 0;
4994 while (!igotlock) {
4995 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4996 &islept, NFSCLSTATEMUTEXPTR, mp);
4997 if (NFSCL_FORCEDISM(mp)) {
4998 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
4999 if (igotlock)
5000 nfsv4_unlock(&clp->nfsc_lock, 0);
5001 NFSUNLOCKCLSTATE();
5002 *gotfdp = 0;
5003 *gottdp = 0;
5004 return (0);
5005 }
5006 if (islept)
5007 break;
5008 }
5009 if (islept)
5010 continue;
5011 NFSUNLOCKCLSTATE();
5012 cred = newnfs_getcred();
5013 newnfs_copycred(&dp->nfsdl_cred, cred);
5014 nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0, NULL);
5015 NFSFREECRED(cred);
5016 triedrecall = 1;
5017 NFSLOCKCLSTATE();
5018 nfsv4_unlock(&clp->nfsc_lock, 0);
5019 igotlock = 0;
5020 continue;
5021 }
5022 *fstp = dp->nfsdl_stateid;
5023 retcnt++;
5024 *gotfdp = 1;
5025 nfscl_cleandeleg(dp);
5026 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
5027 }
5028 if (igotlock) {
5029 nfsv4_unlock(&clp->nfsc_lock, 0);
5030 igotlock = 0;
5031 }
5032 if (tvp != NULL) {
5033 np = VTONFS(tvp);
5034 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
5035 np->n_fhp->nfh_len);
5036 if (dp != NULL && *gottdp == 0) {
5037 /*
5038 * Wait for outstanding I/O ops to be done.
5039 */
5040 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
5041 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
5042 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
5043 "nfscld", hz);
5044 if (NFSCL_FORCEDISM(mp)) {
5045 NFSUNLOCKCLSTATE();
5046 *gotfdp = 0;
5047 *gottdp = 0;
5048 return (0);
5049 }
5050 continue;
5051 }
5052 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
5053 if (!LIST_EMPTY(&owp->nfsow_open)) {
5054 NFSUNLOCKCLSTATE();
5055 return (retcnt);
5056 }
5057 }
5058 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
5059 if (!LIST_EMPTY(&lp->nfsl_lock)) {
5060 NFSUNLOCKCLSTATE();
5061 return (retcnt);
5062 }
5063 }
5064 *tstp = dp->nfsdl_stateid;
5065 retcnt++;
5066 *gottdp = 1;
5067 nfscl_cleandeleg(dp);
5068 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
5069 }
5070 }
5071 NFSUNLOCKCLSTATE();
5072 return (retcnt);
5073 }
5074 }
5075
5076 /*
5077 * Get a reference on the clientid associated with the mount point.
5078 * Return 1 if success, 0 otherwise.
5079 */
5080 int
5081 nfscl_getref(struct nfsmount *nmp)
5082 {
5083 struct nfsclclient *clp;
5084 int ret;
5085
5086 NFSLOCKCLSTATE();
5087 clp = nfscl_findcl(nmp);
5088 if (clp == NULL) {
5089 NFSUNLOCKCLSTATE();
5090 return (0);
5091 }
5092 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, nmp->nm_mountp);
5093 ret = 1;
5094 if (NFSCL_FORCEDISM(nmp->nm_mountp))
5095 ret = 0;
5096 NFSUNLOCKCLSTATE();
5097 return (ret);
5098 }
5099
5100 /*
5101 * Release a reference on a clientid acquired with the above call.
5102 */
5103 void
5104 nfscl_relref(struct nfsmount *nmp)
5105 {
5106 struct nfsclclient *clp;
5107
5108 NFSLOCKCLSTATE();
5109 clp = nfscl_findcl(nmp);
5110 if (clp == NULL) {
5111 NFSUNLOCKCLSTATE();
5112 return;
5113 }
5114 nfsv4_relref(&clp->nfsc_lock);
5115 NFSUNLOCKCLSTATE();
5116 }
5117
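/*
 * nfscl_getref() and nfscl_relref() above bracket operations that
 * must not race with clientid recovery. A typical sketch (the error
 * value here is the caller's choice):
 *
 *	if (nfscl_getref(nmp) == 0)
 *		return (EIO);
 *	... issue RPCs that depend on the clientid ...
 *	nfscl_relref(nmp);
 */
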
5118 /*
5119 * Save the size attribute in the delegation, since the nfsnode
5120 * is going away.
5121 */
5122 void
5123 nfscl_reclaimnode(vnode_t vp)
5124 {
5125 struct nfsclclient *clp;
5126 struct nfscldeleg *dp;
5127 struct nfsnode *np = VTONFS(vp);
5128 struct nfsmount *nmp;
5129
5130 nmp = VFSTONFS(vp->v_mount);
5131 if (!NFSHASNFSV4(nmp))
5132 return;
5133 NFSLOCKCLSTATE();
5134 clp = nfscl_findcl(nmp);
5135 if (clp == NULL) {
5136 NFSUNLOCKCLSTATE();
5137 return;
5138 }
5139 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5140 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
5141 dp->nfsdl_size = np->n_size;
5142 NFSUNLOCKCLSTATE();
5143 }
5144
5145 /*
5146 * Get the size attribute saved in the delegation, since this is a
5147 * newly allocated nfsnode.
5148 */
5149 void
5150 nfscl_newnode(vnode_t vp)
5151 {
5152 struct nfsclclient *clp;
5153 struct nfscldeleg *dp;
5154 struct nfsnode *np = VTONFS(vp);
5155 struct nfsmount *nmp;
5156
5157 nmp = VFSTONFS(vp->v_mount);
5158 if (!NFSHASNFSV4(nmp))
5159 return;
5160 NFSLOCKCLSTATE();
5161 clp = nfscl_findcl(nmp);
5162 if (clp == NULL) {
5163 NFSUNLOCKCLSTATE();
5164 return;
5165 }
5166 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5167 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
5168 np->n_size = dp->nfsdl_size;
5169 NFSUNLOCKCLSTATE();
5170 }
5171
5172 /*
5173 * If there is a valid write delegation for this file, set the modtime
5174 * to the local clock time.
5175 */
5176 void
5177 nfscl_delegmodtime(struct vnode *vp, struct timespec *mtime)
5178 {
5179 struct nfsclclient *clp;
5180 struct nfscldeleg *dp;
5181 struct nfsnode *np = VTONFS(vp);
5182 struct nfsmount *nmp;
5183
5184 nmp = VFSTONFS(vp->v_mount);
5185 if (!NFSHASNFSV4(nmp))
5186 return;
5187 NFSLOCKMNT(nmp);
5188 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
5189 NFSUNLOCKMNT(nmp);
5190 return;
5191 }
5192 NFSUNLOCKMNT(nmp);
5193 NFSLOCKCLSTATE();
5194 clp = nfscl_findcl(nmp);
5195 if (clp == NULL) {
5196 NFSUNLOCKCLSTATE();
5197 return;
5198 }
5199 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5200 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
5201 if (mtime != NULL)
5202 dp->nfsdl_modtime = *mtime;
5203 else
5204 nanotime(&dp->nfsdl_modtime);
5205 dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
5206 }
5207 NFSUNLOCKCLSTATE();
5208 }
5209
5210 /*
5211 * If there is a valid write delegation for this file with a modtime set,
5212 * put that modtime in mtime.
5213 */
5214 void
5215 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
5216 {
5217 struct nfsclclient *clp;
5218 struct nfscldeleg *dp;
5219 struct nfsnode *np = VTONFS(vp);
5220 struct nfsmount *nmp;
5221
5222 nmp = VFSTONFS(vp->v_mount);
5223 if (!NFSHASNFSV4(nmp))
5224 return;
5225 NFSLOCKMNT(nmp);
5226 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
5227 NFSUNLOCKMNT(nmp);
5228 return;
5229 }
5230 NFSUNLOCKMNT(nmp);
5231 NFSLOCKCLSTATE();
5232 clp = nfscl_findcl(nmp);
5233 if (clp == NULL) {
5234 NFSUNLOCKCLSTATE();
5235 return;
5236 }
5237 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5238 if (dp != NULL &&
5239 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
5240 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
5241 *mtime = dp->nfsdl_modtime;
5242 NFSUNLOCKCLSTATE();
5243 }
5244
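/*
 * The pair nfscl_delegmodtime()/nfscl_deleggetmodtime() above lets
 * writes done under a write delegation keep a locally correct modify
 * time without a SETATTR. Sketch of the flow:
 *
 *	nfscl_delegmodtime(vp, NULL);			(stamp "now" on write)
 *	...
 *	nfscl_deleggetmodtime(vp, &va.va_mtime);	(on getattr)
 *
 * The get side only overwrites the caller's timespec when both
 * NFSCLDL_WRITE and NFSCLDL_MODTIMESET are set on the delegation.
 */
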
5245 static int
5246 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
5247 {
5248 short *defaulterrp, *errp;
5249
5250 if (!nd->nd_repstat)
5251 return (0);
5252 if (nd->nd_procnum == NFSPROC_NOOP)
5253 return (txdr_unsigned(nd->nd_repstat & 0xffff));
5254 if (nd->nd_repstat == EBADRPC)
5255 return (txdr_unsigned(NFSERR_BADXDR));
5256 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
5257 nd->nd_repstat == NFSERR_OPILLEGAL)
5258 return (txdr_unsigned(nd->nd_repstat));
5259 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
5260 minorvers > NFSV4_MINORVERSION) {
5261 /* NFSv4.n error. */
5262 return (txdr_unsigned(nd->nd_repstat));
5263 }
5264 if (nd->nd_procnum < NFSV4OP_CBNOPS)
5265 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
5266 else
5267 return (txdr_unsigned(nd->nd_repstat));
5268 while (*++errp)
5269 if (*errp == (short)nd->nd_repstat)
5270 return (txdr_unsigned(nd->nd_repstat));
5271 return (txdr_unsigned(*defaulterrp));
5272 }
5273
5274 /*
5275 * Called to find/add a layout to a client.
5276 * This function returns the layout with a refcnt (shared lock) upon
5277 * success (returns 0) or with no lock/refcnt on the layout when an
5278 * error is returned.
5279 * If a layout is passed in via lypp, it must already be exclusively locked.
5280 */
5281 int
5282 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
5283 nfsv4stateid_t *stateidp, int layouttype, int retonclose,
5284 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
5285 struct ucred *cred, NFSPROC_T *p)
5286 {
5287 struct nfsclclient *clp;
5288 struct nfscllayout *lyp, *tlyp;
5289 struct nfsclflayout *flp;
5290 struct nfsnode *np = VTONFS(vp);
5291 mount_t mp;
5292 int layout_passed_in;
5293
5294 mp = nmp->nm_mountp;
5295 layout_passed_in = 1;
5296 tlyp = NULL;
5297 lyp = *lypp;
5298 if (lyp == NULL) {
5299 layout_passed_in = 0;
5300 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
5301 M_WAITOK | M_ZERO);
5302 }
5303
5304 NFSLOCKCLSTATE();
5305 clp = nmp->nm_clp;
5306 if (clp == NULL) {
5307 if (layout_passed_in != 0)
5308 nfsv4_unlock(&lyp->nfsly_lock, 0);
5309 NFSUNLOCKCLSTATE();
5310 if (tlyp != NULL)
5311 free(tlyp, M_NFSLAYOUT);
5312 return (EPERM);
5313 }
5314 if (lyp == NULL) {
5315 /*
5316 * Although no lyp was passed in, another thread might have
5317 		 * allocated one. If one is found, just increment its ref
5318 		 * count and return it.
5319 */
5320 lyp = nfscl_findlayout(clp, fhp, fhlen);
5321 if (lyp == NULL) {
5322 lyp = tlyp;
5323 tlyp = NULL;
5324 lyp->nfsly_stateid.seqid = stateidp->seqid;
5325 lyp->nfsly_stateid.other[0] = stateidp->other[0];
5326 lyp->nfsly_stateid.other[1] = stateidp->other[1];
5327 lyp->nfsly_stateid.other[2] = stateidp->other[2];
5328 lyp->nfsly_lastbyte = 0;
5329 LIST_INIT(&lyp->nfsly_flayread);
5330 LIST_INIT(&lyp->nfsly_flayrw);
5331 LIST_INIT(&lyp->nfsly_recall);
5332 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
5333 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
5334 lyp->nfsly_clp = clp;
5335 if (layouttype == NFSLAYOUT_FLEXFILE)
5336 lyp->nfsly_flags = NFSLY_FLEXFILE;
5337 else
5338 lyp->nfsly_flags = NFSLY_FILES;
5339 if (retonclose != 0)
5340 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
5341 lyp->nfsly_fhlen = fhlen;
5342 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
5343 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
5344 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
5345 nfsly_hash);
5346 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
5347 clp->nfsc_layoutcnt++;
5348 nfsstatsv1.cllayouts++;
5349 } else {
5350 if (retonclose != 0)
5351 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
5352 if (stateidp->seqid > lyp->nfsly_stateid.seqid)
5353 lyp->nfsly_stateid.seqid = stateidp->seqid;
5354 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
5355 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
5356 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
5357 }
5358 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
5359 if (NFSCL_FORCEDISM(mp)) {
5360 NFSUNLOCKCLSTATE();
5361 if (tlyp != NULL)
5362 free(tlyp, M_NFSLAYOUT);
5363 return (EPERM);
5364 }
5365 *lypp = lyp;
5366 } else if (stateidp->seqid > lyp->nfsly_stateid.seqid)
5367 lyp->nfsly_stateid.seqid = stateidp->seqid;
5368
5369 /* Merge the new list of File Layouts into the list. */
5370 flp = LIST_FIRST(fhlp);
5371 if (flp != NULL) {
5372 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
5373 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
5374 else
5375 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
5376 }
5377 if (layout_passed_in != 0)
5378 nfsv4_unlock(&lyp->nfsly_lock, 1);
5379 NFSUNLOCKCLSTATE();
5380 if (tlyp != NULL)
5381 free(tlyp, M_NFSLAYOUT);
5382 return (0);
5383 }
5384
5385 /*
5386 * Search for a layout by MDS file handle.
5387 * If one is found, it is returned with a refcnt (shared lock) when
5388 * *retflpp is returned non-NULL, and exclusively locked when *retflpp is
5389 * returned NULL.
5390 */
5391 struct nfscllayout *
5392 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
5393 uint64_t off, uint32_t rwaccess, struct nfsclflayout **retflpp,
5394 int *recalledp)
5395 {
5396 struct nfscllayout *lyp;
5397 mount_t mp;
5398 int error, igotlock;
5399
5400 mp = clp->nfsc_nmp->nm_mountp;
5401 *recalledp = 0;
5402 *retflpp = NULL;
5403 NFSLOCKCLSTATE();
5404 lyp = nfscl_findlayout(clp, fhp, fhlen);
5405 if (lyp != NULL) {
5406 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
5407 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
5408 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
5409 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
5410 error = nfscl_findlayoutforio(lyp, off, rwaccess,
5411 retflpp);
5412 if (error == 0)
5413 nfsv4_getref(&lyp->nfsly_lock, NULL,
5414 NFSCLSTATEMUTEXPTR, mp);
5415 else {
5416 do {
5417 igotlock = nfsv4_lock(&lyp->nfsly_lock,
5418 1, NULL, NFSCLSTATEMUTEXPTR, mp);
5419 } while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
5420 *retflpp = NULL;
5421 }
5422 if (NFSCL_FORCEDISM(mp)) {
5423 lyp = NULL;
5424 *recalledp = 1;
5425 }
5426 } else {
5427 lyp = NULL;
5428 *recalledp = 1;
5429 }
5430 }
5431 NFSUNLOCKCLSTATE();
5432 return (lyp);
5433 }
5434
5435 /*
5436 * Search for a layout by MDS file handle. If one is found, mark it to be
5437 * recalled if it is already marked "return on close".
5438 */
5439 static void
5440 nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
5441 int fhlen, struct nfsclrecalllayout **recallpp, struct nfscllayout **lypp)
5442 {
5443 struct nfscllayout *lyp;
5444 uint32_t iomode;
5445
5446 *lypp = NULL;
5447 if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) ||
5448 nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
5449 (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
5450 return;
5451 lyp = nfscl_findlayout(clp, fhp, fhlen);
5452 if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
5453 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
5454 iomode = 0;
5455 if (!LIST_EMPTY(&lyp->nfsly_flayread))
5456 iomode |= NFSLAYOUTIOMODE_READ;
5457 if (!LIST_EMPTY(&lyp->nfsly_flayrw))
5458 iomode |= NFSLAYOUTIOMODE_RW;
5459 nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
5460 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
5461 *recallpp);
5462 NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
5463 *recallpp = NULL;
5464 }
5465
5466 /* Now, wake up renew thread to do LayoutReturn. */
5467 wakeup(clp);
5468 *lypp = lyp;
5469 }
5470 }
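
/*
 * Note: when a recall has been queued above, *recallpp is set NULL, since
 * the pre-allocated recall structure has been consumed (queued on the
 * layout) and must not be freed or reused by the caller.
 */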

/*
 * Mark the layout to be recalled, recording the error.
 * Also, disable the dsp from further use.
 */
void
nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
    struct nfscllayout *lyp, struct nfsclds *dsp)
{
	struct nfsclrecalllayout *recallp;
	uint32_t iomode;

	printf("DS being disabled, error=%d\n", stat);
	/* Set up the return of the layout. */
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	iomode = 0;
	NFSLOCKCLSTATE();
	if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
		    dp->nfsdi_deviceid, recallp);
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
	} else {
		NFSUNLOCKCLSTATE();
		free(recallp, M_NFSLAYRECALL);
	}

	/* And shut the TCP connection down. */
	nfscl_cancelreqs(dsp);
}

/*
 * Cancel all RPCs for this "dsp" by closing the connection.
 * Also, mark the session as defunct.
 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
 * cannot be shut down.
 */
void
nfscl_cancelreqs(struct nfsclds *dsp)
{
	struct __rpc_client *cl;
	static int non_event;

	NFSLOCKDS(dsp);
	if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
	    dsp->nfsclds_sockp != NULL &&
	    dsp->nfsclds_sockp->nr_client != NULL) {
		dsp->nfsclds_flags |= NFSCLDS_CLOSED;
		cl = dsp->nfsclds_sockp->nr_client;
		dsp->nfsclds_sess.nfsess_defunct = 1;
		NFSUNLOCKDS(dsp);
		CLNT_CLOSE(cl);
		/*
		 * This 1sec sleep is done to reduce the number of reconnect
		 * attempts made on the DS while it has failed.
		 */
		tsleep(&non_event, PVFS, "ndscls", hz);
		return;
	}
	NFSUNLOCKDS(dsp);
}

/*
 * Dereference a layout.
 */
void
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
{

	NFSLOCKCLSTATE();
	if (exclocked != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 0);
	else
		nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	if (dip == NULL)
		dip = nfscl_finddevinfo(clp, deviceid);
	if (dip != NULL)
		dip->nfsdi_refcnt++;
	NFSUNLOCKCLSTATE();
	return (dip);
}
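
/*
 * Note: each reference acquired here is dropped via nfscl_reldevinfo()
 * below (or nfscl_reldevinfo_locked() when the NFSCLSTATE mutex is
 * already held).
 */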

/*
 * Dereference a devinfo structure.
 */
static void
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
{

	dip->nfsdi_refcnt--;
	if (dip->nfsdi_refcnt == 0)
		wakeup(&dip->nfsdi_refcnt);
}

/*
 * Dereference a devinfo structure.
 */
void
nfscl_reldevinfo(struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	nfscl_reldevinfo_locked(dip);
	NFSUNLOCKCLSTATE();
}

/*
 * Find a layout for this file handle. Return NULL upon failure.
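 * Called with the NFSCLSTATE mutex held.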
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscllayout *lyp;

	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
		if (lyp->nfsly_fhlen == fhlen &&
		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
			break;
	return (lyp);
}

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
{
	struct nfscldevinfo *dip;

	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
		    == 0)
			break;
	return (dip);
}

/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
static void
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
{
	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

	flp = LIST_FIRST(fhlp);
	prevflp = NULL;
	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
			prevflp = flp;
			flp = LIST_NEXT(flp, nfsfl_list);
		}
		if (prevflp == NULL)
			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
		else
			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
		prevflp = nflp;
	}
}
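
/*
 * Worked example (hypothetical offsets): merging a new list with offsets
 * { 0, 8192 } into a main list with offsets { 4096 } yields
 * { 0, 4096, 8192 }, since each new entry is inserted after the last
 * existing entry with a smaller offset.
 */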

/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
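 * Returns ENODEV if no clientid is allocated or if the devinfo is not
 * found and dip is NULL; otherwise returns 0.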
 */
int
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
    struct nfsclflayout *flp)
{
	struct nfsclclient *clp;
	struct nfscldevinfo *tdip;
	uint8_t *dev;

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (ENODEV);
	}
	if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
		dev = flp->nfsfl_dev;
	else
		dev = flp->nfsfl_ffm[ind].dev;
	tdip = nfscl_finddevinfo(clp, dev);
	if (tdip != NULL) {
		tdip->nfsdi_layoutrefs++;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = tdip;
		else
			flp->nfsfl_ffm[ind].devp = tdip;
		nfscl_reldevinfo_locked(tdip);
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (0);
	}
	if (dip != NULL) {
		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
		dip->nfsdi_layoutrefs = 1;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = dip;
		else
			flp->nfsfl_ffm[ind].devp = dip;
	}
	NFSUNLOCKCLSTATE();
	if (dip == NULL)
		return (ENODEV);
	return (0);
}

/*
 * Free up a layout structure and associated file layout structure(s).
 */
void
nfscl_freelayout(struct nfscllayout *layp)
{
	struct nfsclflayout *flp, *nflp;
	struct nfsclrecalllayout *rp, *nrp;

	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
		LIST_REMOVE(rp, nfsrecly_list);
		free(rp, M_NFSLAYRECALL);
	}
	layp->nfsly_clp->nfsc_layoutcnt--;
	nfsstatsv1.cllayouts--;
	free(layp, M_NFSLAYOUT);
}

/*
 * Free up a file layout structure.
 */
void
nfscl_freeflayout(struct nfsclflayout *flp)
{
	int i, j;

	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
		for (i = 0; i < flp->nfsfl_fhcnt; i++)
			free(flp->nfsfl_fh[i], M_NFSFH);
		if (flp->nfsfl_devp != NULL)
			flp->nfsfl_devp->nfsdi_layoutrefs--;
	}
	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
			if (flp->nfsfl_ffm[i].devp != NULL)
				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
		}
	free(flp, M_NFSFLAYOUT);
}

/*
 * Free up a file layout devinfo structure.
 */
void
nfscl_freedevinfo(struct nfscldevinfo *dip)
{

	free(dip, M_NFSDEVINFO);
}

/*
 * Mark any layouts that match as recalled.
 */
static int
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
    char *devid, struct nfsclrecalllayout *recallp)
{
	struct nfsclrecalllayout *rp, *orp;

	recallp->nfsrecly_recalltype = recalltype;
	recallp->nfsrecly_iomode = iomode;
	recallp->nfsrecly_stateseqid = stateseqid;
	recallp->nfsrecly_off = off;
	recallp->nfsrecly_len = len;
	recallp->nfsrecly_stat = stat;
	recallp->nfsrecly_op = op;
	if (devid != NULL)
		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
	/*
	 * Order the list with file returns first, followed by fsid and all
	 * returns, each group in increasing stateseqid order.
	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
	 * (I'm not sure this is correct because I find RFC5661 confusing
	 * on this, but hopefully it will work ok.)
	 */
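	/*
	 * For example (illustrative only): given an existing list of
	 * [ FILE/seq 3, FILE/seq 7, FSID/seq 2 ], a new FILE return with
	 * seq 5 is inserted before FILE/seq 7, while a new fsid or all
	 * return is always inserted after every FILE return.
	 */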
	orp = NULL;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		orp = rp;
		if ((recalltype == NFSLAYOUTRETURN_FILE &&
		     (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
		      nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
		    (recalltype != NFSLAYOUTRETURN_FILE &&
		     rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
		     nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
			break;
		}

		/*
		 * Put any error return on all the file returns that will
		 * precede this one.
		 */
		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
		    stat != 0 && rp->nfsrecly_stat == 0) {
			rp->nfsrecly_stat = stat;
			rp->nfsrecly_op = op;
			if (devid != NULL)
				NFSBCOPY(devid, rp->nfsrecly_devid,
				    NFSX_V4DEVICEID);
		}
	}
	if (rp == NULL) {
		if (orp == NULL)
			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
			    nfsrecly_list);
		else
			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
	}
	lyp->nfsly_flags |= NFSLY_RECALL;
	wakeup(lyp->nfsly_clp);
	return (0);
}

/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
static int
nfscl_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		/* seqid2 has wrapped around. */
		return (0);
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		/* seqid1 has wrapped around. */
		return (1);
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}
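
/*
 * Worked examples (illustrative only):
 *	nfscl_seq(5, 7) == 1		(no wrap; 5 precedes 7)
 *	nfscl_seq(0xfffffffe, 2) == 1	(seqid2 wrapped past 0, so
 *					 0xfffffffe still precedes 2)
 *	nfscl_seq(2, 0xfffffffe) == 0	(the reverse case)
 */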

/*
 * Do a layout return for each of the recalls.
 */
static void
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclrecalllayout *rp;
	nfsv4stateid_t stateid;
	int layouttype;

	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
	stateid.seqid = lyp->nfsly_stateid.seqid;
	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
		    lyp->nfsly_fhlen, 0, layouttype,
		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
		    rp->nfsrecly_off, rp->nfsrecly_len,
		    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
		    rp->nfsrecly_devid);
	}
}

/*
 * Do the layout commit for a file layout.
 */
static void
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclflayout *flp;
	uint64_t len;
	int error, layouttype;

	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
		if (layouttype == NFSLAYOUT_FLEXFILE &&
		    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
			NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
			/* If not supported, don't bother doing it. */
			NFSLOCKMNT(nmp);
			nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
			NFSUNLOCKMNT(nmp);
			break;
		} else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
			len = flp->nfsfl_end - flp->nfsfl_off;
			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
			    layouttype, cred, p);
			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
			if (error == NFSERR_NOTSUPP) {
				/* If not supported, don't bother doing it. */
				NFSLOCKMNT(nmp);
				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
				NFSUNLOCKMNT(nmp);
				break;
			}
		}
	}
}
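
/*
 * Note: only RW segments that start at or below nfsly_lastbyte are
 * committed and, once NFSSTA_NOLAYOUTCOMMIT has been set above,
 * nfscl_layoutcommit() below returns without doing anything.
 */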

/*
 * Commit all layouts for a file (vnode).
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
tryagain:
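	/*
	 * Loop until NFSLY_WRITTEN stays clear, since it can be set again
	 * while the commit RPC is in progress with the mutex released.
	 */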
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}
