/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes.
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *   all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *   every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
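
/*
 * Note: the owner strings mentioned above are built by
 * nfscl_filllockowner(), which encodes the lock id (normally the Posix
 * pid) into a fixed length NFSV4CL_LOCKNAMELEN byte name, so that owners
 * can be found with simple NFSBCMP() comparisons of these names.
 */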

#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;
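/*
 * The macros below declare the global locks: the spin lock for the
 * request queue (nfsd_reqq) and the mutex protecting all of the NFSv4
 * client state lists, acquired via NFSLOCKCLSTATE()/NFSUNLOCKCLSTATE().
 */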
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */

static int nfscl_getopen(struct nfsclownerhead *, struct nfsclopenhash *,
    u_int8_t *, int, u_int8_t *, u_int8_t *, u_int32_t,
    struct nfscllockowner **, struct nfsclopen **);
static bool nfscl_checkown(struct nfsclowner *, struct nfsclopen *, uint8_t *,
    uint8_t *, struct nfscllockowner **, struct nfsclopen **,
    struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_unlinkopen(struct nfsclopen *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
    NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *,
    struct nfscldeleghead *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **, struct nfscllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t, struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t, struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t, u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t, u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *,
    bool);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int,
    vnode_t *);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

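/*
 * Tables of the errors that may be returned for each callback operation,
 * each list terminated by a 0 entry.  nfscl_cberrmap[] is indexed by
 * callback operation number and is used by nfscl_errmap() to sanity
 * check the error being replied for that operation.
 */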
static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

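/* Map a clientid's NFSCLFLAGS_AFINET6 flag to the matching address family. */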
#define NETFAMILY(clp) \
		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit, bool firstref)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL) {
		nop = malloc(sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
		nop->nfso_hash.le_prev = NULL;
	}
	ret = nfscl_getcl(vp->v_mount, cred, p, false, firstref, &clp);
	if (ret != 0) {
		free(nowp, M_NFSCLOWNER);
		if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	/* For NFSv4.1/4.2 and this option, use a single open_owner. */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
		nfscl_filllockowner(NULL, own, F_POSIX);
	else
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
	if (dp != NULL)
		ohp = &dp->nfsdl_owner;
	else
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    cred, newonep);

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock.  The close operations still use an
	 * exclusive lock for this case.
	 */
	if (lockit != 0) {
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
			/*
			 * Get a shared lock on the OpenOwner, but first
			 * wait for any pending exclusive lock, so that the
			 * exclusive locker gets priority.
			 */
			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
			nfsv4_getref(&owp->nfsow_rwlock, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} else
			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	}
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	return (0);
}
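
/*
 * A rough sketch of the usual calling pattern (see nfsrpc_open() for the
 * real thing):
 *	error = nfscl_open(vp, fh, fhlen, amode, 1, cred, p, &owp, &op,
 *	    &newone, &ret, 1, true);
 *	if (error == 0 && ret == NFSCLOPEN_DOOPEN)
 *		... do the Open RPC against the server ...
 *	nfscl_openrelease(nmp, op, error, newone);
 * so that the open owner lock/reference acquired above is always released.
 */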

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
			} else {
				LIST_INSERT_HEAD(NFSCLOPENHASH(clp, fhp, fhlen),
				    nop, nfso_hash);
				nfsstatsv1.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}

/*
 * Called to find/add a delegation to a client.
 */
int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg *dp)
{
	struct nfscldeleg *tdp;
	struct nfsmount *nmp;

	KASSERT(mp != NULL, ("nfscl_deleg: mp NULL"));
	nmp = VFSTONFS(mp);

	/*
	 * Since a delegation might be added to the mount,
	 * set NFSMNTP_DELEGISSUED now. If a delegation already
	 * exists, setting this flag is harmless.
	 */
	NFSLOCKMNT(nmp);
	nmp->nm_privflag |= NFSMNTP_DELEGISSUED;
	NFSUNLOCKMNT(nmp);

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		clp->nfsc_delegcnt++;
	} else {
		/*
		 * A delegation already exists. If the new one is a Write
		 * delegation and the old one a Read delegation, return the
		 * Read delegation. Otherwise, return the new delegation.
		 */
		if (dp != NULL) {
			if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0 &&
			    (tdp->nfsdl_flags & NFSCLDL_READ) != 0) {
				TAILQ_REMOVE(&clp->nfsc_deleg, tdp, nfsdl_list);
				LIST_REMOVE(tdp, nfsdl_hash);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp,
				    fhlen), dp, nfsdl_hash);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			} else {
				tdp = dp;	/* Return this one. */
			}
		} else {
			tdp = NULL;
		}
	}
	NFSUNLOCKCLSTATE();
	if (tdp != NULL) {
		nfscl_trydelegreturn(tdp, cred, nmp, p);
		free(tdp, M_NFSCLDELEG);
	}
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclopen *op = NULL, *top;
	struct nfsclopenhash *oph;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	struct nfscred ncr;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], lockown[NFSV4CL_LOCKNAMELEN];
	int error;
	bool done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
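	/* Stateids only apply to regular files. */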
	if (vp->v_type != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);

	/*
	 * For "oneopenown" mounts, first check for a cached open in the
	 * NFS vnode, that can be used as a stateid.  This can only be
	 * done if no delegations have been issued to the mount and no
	 * byte range file locking has been done for the file.
	 */
	if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp) && fords == 0) {
		NFSLOCKMNT(nmp);
		NFSLOCKNODE(np);
		if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0 &&
		    (np->n_flag & NMIGHTBELOCKED) == 0 &&
		    np->n_openstateid != NULL) {
			stateidp->seqid = 0;
			stateidp->other[0] =
			    np->n_openstateid->nfso_stateid.other[0];
			stateidp->other[1] =
			    np->n_openstateid->nfso_stateid.other[1];
			stateidp->other[2] =
			    np->n_openstateid->nfso_stateid.other[2];
			NFSUNLOCKNODE(np);
			NFSUNLOCKMNT(nmp);
			return (0);
		}
		NFSUNLOCKNODE(np);
		NFSUNLOCKMNT(nmp);
	}

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				if (NFSHASNFSV4N(nmp))
					stateidp->seqid = 0;
				else
					stateidp->seqid =
					    dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, lockown, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(NULL, clp->nfsc_openhash, nfhp, fhlen,
		    own, lockown, mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			if (NFSHASNFSV4N(nmp))
				stateidp->seqid = 0;
			else
				stateidp->seqid = lp->nfsl_stateid.seqid;
			stateidp->other[0] =
			    lp->nfsl_stateid.other[0];
			stateidp->other[1] =
			    lp->nfsl_stateid.other[1];
			stateidp->other[2] =
			    lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		top = NULL;
		done = false;
		oph = NFSCLOPENHASH(clp, nfhp, fhlen);
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
				if (top == NULL && (op->nfso_mode &
				    NFSV4OPEN_ACCESSWRITE) != 0 &&
				    (mode & NFSV4OPEN_ACCESSREAD) != 0)
					top = op;
				if ((mode & op->nfso_mode) == mode) {
					/* LRU order the hash list. */
					LIST_REMOVE(op, nfso_hash);
					LIST_INSERT_HEAD(oph, op, nfso_hash);
					done = true;
					break;
				}
			}
		}
		if (!done) {
			NFSCL_DEBUG(2, "openmode top=%p\n", top);
			if (top == NULL || NFSHASOPENMODE(nmp)) {
				NFSUNLOCKCLSTATE();
				return (ENOENT);
			} else
				op = top;
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			memcpy(&ncr, &op->nfso_cred, sizeof(ncr));
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	if (NFSHASNFSV4N(nmp))
		stateidp->seqid = 0;
	else
		stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	if (p == NULL)
		newnfs_copycred(&ncr, cred);
	return (0);
}

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, struct nfsclopenhash *ohashp,
    u_int8_t *nfhp, int fhlen, u_int8_t *openown, u_int8_t *lockown,
    u_int32_t mode, struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfsclopenhash *oph;
	bool keep_looping;

	KASSERT(ohp == NULL || ohashp == NULL, ("nfscl_getopen: "
	    "only one of ohp and ohashp can be set"));
	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = true;
	/* Search the client list */
	if (ohashp == NULL) {
		/* Search the local opens on the delegation. */
		LIST_FOREACH(owp, ohp, nfsow_list) {
			/* and look for the correct open */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
				    && (op->nfso_mode & mode) == mode)
					keep_looping = nfscl_checkown(owp, op,
					    openown, lockown, lpp, &rop, &rop2);
				if (!keep_looping)
					break;
			}
			if (!keep_looping)
				break;
		}
	} else {
		/* Search for matching opens on the hash list. */
		oph = &ohashp[NFSCLOPENHASHFUNC(nfhp, fhlen)];
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode)
				keep_looping = nfscl_checkown(op->nfso_own, op,
				    openown, lockown, lpp, &rop, &rop2);
			if (!keep_looping) {
				/* LRU order the hash list. */
				LIST_REMOVE(op, nfso_hash);
				LIST_INSERT_HEAD(oph, op, nfso_hash);
				break;
			}
		}
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/* Check for an owner match. */
static bool
nfscl_checkown(struct nfsclowner *owp, struct nfsclopen *op, uint8_t *openown,
    uint8_t *lockown, struct nfscllockowner **lpp, struct nfsclopen **ropp,
    struct nfsclopen **ropp2)
{
	struct nfscllockowner *lp;
	bool keep_looping;

	keep_looping = true;
	if (lpp != NULL) {
		/* Now look for a matching lockowner. */
		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, lockown,
			    NFSV4CL_LOCKNAMELEN)) {
				*lpp = lp;
				*ropp = op;
				return (false);
			}
		}
	}
	if (*ropp == NULL && !NFSBCMP(owp->nfsow_owner, openown,
	    NFSV4CL_LOCKNAMELEN)) {
		*ropp = op;
		if (lpp == NULL)
			keep_looping = false;
	}
	if (*ropp2 == NULL)
		*ropp2 = op;
	return (keep_looping);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
void
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (unlocked == 0) {
		if (NFSHASONEOPENOWN(nmp))
			nfsv4_relref(&owp->nfsow_rwlock);
		else
			nfscl_lockunlock(&owp->nfsow_rwlock);
	}
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
void
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
    int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
	else
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0, true);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    bool tryminvers, bool firstref, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		newclp = malloc(
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLOPENHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_openhash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_delegcnt = 0;
		clp->nfsc_deleghighwater = NFSCLDELEGHIGHWATER;
		clp->nfsc_layoutcnt = 0;
		clp->nfsc_layouthighwater = NFSCLLAYOUTHIGHWATER;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
	} else {
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0) {
		/*
		 * Call nfsv4_lock() with "iwantlock == 0" on the firstref so
		 * that it will wait for a pending exclusive lock request.
		 * This gives the exclusive lock request priority over this
		 * shared lock request.
		 * An exclusive lock on nfsc_lock is used mainly for server
		 * crash recoveries and delegation recalls.
		 */
		if (firstref)
			nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR,
			    mp);
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	}
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			} else if (error == NFSERR_MINORVERMISMATCH &&
			    tryminvers) {
				if (nmp->nm_minorvers > 0)
					nmp->nm_minorvers--;
				else
					tryminvers = false;
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0) ||
		    (error == NFSERR_MINORVERMISMATCH && tryminvers));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}
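
/*
 * The shared reference acquired by a successful nfscl_getcl() is dropped
 * by the caller via nfscl_clrelease() (with the state mutex held) or
 * nfscl_clientrelease() below, once the state operation is done.
 */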

/*
 * Get a reference to a clientid and return it, if valid.
 */
struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Called when wanting to lock a byte region.
 */
int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlp = malloc(
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	otherlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vp->v_mount, cred, p, false, true,
			    &clp);
	}
	if (error) {
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, openown, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, NULL, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(NULL, clp->nfsc_openhash,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(NULL, clp->nfsc_openhash,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serial modifications on the lock owner for multiple threads
		 * for the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		free(nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (otherlop)
		free(otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
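
/*
 * A sketch of the expected caller behaviour (cf. nfsrpc_advlock()): when
 * *donelocallyp is 0, the caller does the Lock RPC against the server and
 * then calls nfscl_lockrelease() to drop the exclusive lock owner lock
 * acquired above; when *donelocallyp is 1, the lock was handled entirely
 * locally against a delegation and no RPC is needed.
 */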

/*
 * Called to unlock a byte range, for LockU.
 */
int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		other_lop = malloc(
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lp->nfsl_inprog == NULL &&
				    !NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					fnd = 1;
					break;
				}
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serial modifications on the lock owner for multiple
		 * threads for the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (other_lop)
		free(other_lop, M_NFSCLLOCK);
	return (0);
}
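
/*
 * As with nfscl_getbytelock() above, the caller is expected to do the
 * LockU RPC when *dorpcp is set and to then clear the in progress mark
 * on the lock owner, typically via nfscl_releasealllocks() below.
 */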

/*
 * Release all lockowners marked in progress for this process and file.
 */
void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lp->nfsl_inprog == p &&
				    !NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					lp->nfsl_inprog = NULL;
					nfscl_lockunlock(&lp->nfsl_rwlock);
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vp->v_mount, cred, p, false, true, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		/* No need to flush if it is a write delegation. */
		if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
			return (0);
		}
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (!NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN))
					break;
			}
			if (lp != NULL) {
				LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
					if (lop->nfslo_first >= end)
						break;
					if (lop->nfslo_end <= off)
						continue;
					if (lop->nfslo_type == F_WRLCK) {
						nfscl_clrelease(clp);
						NFSUNLOCKCLSTATE();
						return (1);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Release a byte range lock owner structure.
 */
void
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
{
	struct nfsclclient *clp;

	if (lp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
	else
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Unlink the open structure.
 */
static void
nfscl_unlinkopen(struct nfsclopen *op)
{

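	/*
	 * Local opens on a delegation are never hash linked (see
	 * nfscl_newopen()), so nfso_hash.le_prev is NULL for them.
	 */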
1560 LIST_REMOVE(op, nfso_list);
1561 if (op->nfso_hash.le_prev != NULL)
1562 LIST_REMOVE(op, nfso_hash);
1563 }
1564
1565 /*
1566 * Free up an open structure and any associated byte range lock structures.
1567 */
1568 void
nfscl_freeopen(struct nfsclopen * op,int local,bool unlink)1569 nfscl_freeopen(struct nfsclopen *op, int local, bool unlink)
1570 {
1571
1572 if (unlink)
1573 nfscl_unlinkopen(op);
1574 nfscl_freealllocks(&op->nfso_lock, local);
1575 free(op, M_NFSCLOPEN);
1576 if (local)
1577 nfsstatsv1.cllocalopens--;
1578 else
1579 nfsstatsv1.clopens--;
1580 }
1581
1582 /*
1583 * Free up all lock owners and associated locks.
1584 */
1585 static void
nfscl_freealllocks(struct nfscllockownerhead * lhp,int local)1586 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1587 {
1588 struct nfscllockowner *lp, *nlp;
1589
1590 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1591 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1592 panic("nfscllckw");
1593 nfscl_freelockowner(lp, local);
1594 }
1595 }
1596
1597 /*
1598 * Called for an Open when NFSERR_EXPIRED is received from the server.
1599 * If there are no byte range locks nor a Share Deny lost, try to do a
1600 * fresh Open. Otherwise, free the open.
1601 */
1602 static int
nfscl_expireopen(struct nfsclclient * clp,struct nfsclopen * op,struct nfsmount * nmp,struct ucred * cred,NFSPROC_T * p)1603 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1604 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1605 {
1606 struct nfscllockowner *lp;
1607 struct nfscldeleg *dp;
1608 int mustdelete = 0, error;
1609
1610 /*
1611 * Look for any byte range lock(s).
1612 */
1613 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1614 if (!LIST_EMPTY(&lp->nfsl_lock)) {
1615 mustdelete = 1;
1616 break;
1617 }
1618 }
1619
1620 /*
1621 * If no byte range lock(s) nor a Share deny, try to re-open.
1622 */
1623 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1624 newnfs_copycred(&op->nfso_cred, cred);
1625 dp = NULL;
1626 error = nfsrpc_reopen(nmp, op->nfso_fh,
1627 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1628 if (error) {
1629 mustdelete = 1;
1630 if (dp != NULL) {
1631 free(dp, M_NFSCLDELEG);
1632 dp = NULL;
1633 }
1634 }
1635 if (dp != NULL)
1636 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1637 op->nfso_fhlen, cred, p, dp);
1638 }
1639
1640 /*
1641 * If a byte range lock or Share deny or couldn't re-open, free it.
1642 */
1643 if (mustdelete)
1644 nfscl_freeopen(op, 0, true);
1645 return (mustdelete);
1646 }
1647
1648 /*
1649 * Free up an open owner structure.
1650 */
1651 static void
nfscl_freeopenowner(struct nfsclowner * owp,int local)1652 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1653 {
1654 int owned;
1655
1656 /*
1657 * Make sure the NFSCLSTATE mutex is held, to avoid races with
1658 * calls in nfscl_renewthread() that do not hold a reference
1659 * count on the nfsclclient and just the mutex.
1660 * The mutex will not be held for calls done with the exclusive
1661 * nfsclclient lock held, in particular, nfscl_hasexpired()
1662 * and nfscl_recalldeleg() might do this.
1663 */
1664 owned = mtx_owned(NFSCLSTATEMUTEXPTR);
1665 if (owned == 0)
1666 NFSLOCKCLSTATE();
1667 LIST_REMOVE(owp, nfsow_list);
1668 if (owned == 0)
1669 NFSUNLOCKCLSTATE();
1670 free(owp, M_NFSCLOWNER);
1671 if (local)
1672 nfsstatsv1.cllocalopenowners--;
1673 else
1674 nfsstatsv1.clopenowners--;
1675 }
1676
1677 /*
1678 * Free up a byte range lock owner structure.
1679 */
1680 void
nfscl_freelockowner(struct nfscllockowner * lp,int local)1681 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1682 {
1683 struct nfscllock *lop, *nlop;
1684 int owned;
1685
1686 /*
1687 * Make sure the NFSCLSTATE mutex is held, to avoid races with
1688 * calls in nfscl_renewthread() that do not hold a reference
1689 * count on the nfsclclient and just the mutex.
1690 * The mutex will not be held for calls done with the exclusive
1691 * nfsclclient lock held, in particular, nfscl_hasexpired()
1692 * and nfscl_recalldeleg() might do this.
1693 */
1694 owned = mtx_owned(NFSCLSTATEMUTEXPTR);
1695 if (owned == 0)
1696 NFSLOCKCLSTATE();
1697 LIST_REMOVE(lp, nfsl_list);
1698 if (owned == 0)
1699 NFSUNLOCKCLSTATE();
1700 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1701 nfscl_freelock(lop, local);
1702 }
1703 free(lp, M_NFSCLLOCKOWNER);
1704 if (local)
1705 nfsstatsv1.cllocallockowners--;
1706 else
1707 nfsstatsv1.cllockowners--;
1708 }
1709
1710 /*
1711 * Free up a byte range lock structure.
1712 */
1713 void
nfscl_freelock(struct nfscllock * lop,int local)1714 nfscl_freelock(struct nfscllock *lop, int local)
1715 {
1716
1717 LIST_REMOVE(lop, nfslo_list);
1718 free(lop, M_NFSCLLOCK);
1719 if (local)
1720 nfsstatsv1.cllocallocks--;
1721 else
1722 nfsstatsv1.cllocks--;
1723 }
1724
1725 /*
1726 * Clean out the state related to a delegation.
1727 */
1728 static void
nfscl_cleandeleg(struct nfscldeleg * dp)1729 nfscl_cleandeleg(struct nfscldeleg *dp)
1730 {
1731 struct nfsclowner *owp, *nowp;
1732 struct nfsclopen *op;
1733
1734 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1735 op = LIST_FIRST(&owp->nfsow_open);
1736 if (op != NULL) {
1737 if (LIST_NEXT(op, nfso_list) != NULL)
1738 panic("nfscleandel");
1739 nfscl_freeopen(op, 1, true);
1740 }
1741 nfscl_freeopenowner(owp, 1);
1742 }
1743 nfscl_freealllocks(&dp->nfsdl_lock, 1);
1744 }
1745
1746 /*
1747 * Free a delegation.
1748 */
1749 static void
nfscl_freedeleg(struct nfscldeleghead * hdp,struct nfscldeleg * dp,bool freeit)1750 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp, bool freeit)
1751 {
1752
1753 TAILQ_REMOVE(hdp, dp, nfsdl_list);
1754 LIST_REMOVE(dp, nfsdl_hash);
1755 dp->nfsdl_clp->nfsc_delegcnt--;
1756 if (freeit)
1757 free(dp, M_NFSCLDELEG);
1758 nfsstatsv1.cldelegates--;
1759 }

/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;

	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);

	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
		nfscl_freedevinfo(dip);

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0, true);
		}
		nfscl_freeopenowner(owp, 0);
	}
}

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
	    ndp = TAILQ_NEXT(dp, nfsdl_list);
	    owp = LIST_FIRST(&dp->nfsdl_owner);
	    while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		op = LIST_FIRST(&owp->nfsow_open);
		if (op != NULL) {
		    if (LIST_NEXT(op, nfso_list) != NULL)
			panic("nfsclexp");
		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
			    NFSV4CL_LOCKNAMELEN))
			    break;
		    }
		    if (towp != NULL) {
			/* Merge opens in */
			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
			    if (top->nfso_fhlen == op->nfso_fhlen &&
				!NFSBCMP(top->nfso_fh, op->nfso_fh,
				 op->nfso_fhlen)) {
				top->nfso_mode |= op->nfso_mode;
				top->nfso_opencnt += op->nfso_opencnt;
				break;
			    }
			}
			if (top == NULL) {
			    /* Just add the open to the owner list */
			    LIST_REMOVE(op, nfso_list);
			    op->nfso_own = towp;
			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
			    LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
				op->nfso_fhlen), op, nfso_hash);
			    nfsstatsv1.cllocalopens--;
			    nfsstatsv1.clopens++;
			}
		    } else {
			/* Just add the openowner to the client list */
			LIST_REMOVE(owp, nfsow_list);
			owp->nfsow_clp = clp;
			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
			LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
			    op->nfso_fhlen), op, nfso_hash);
			nfsstatsv1.cllocalopenowners--;
			nfsstatsv1.clopenowners++;
			nfsstatsv1.cllocalopens--;
			nfsstatsv1.clopens++;
		    }
		}
		owp = nowp;
	    }
	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
		printed = 1;
		printf("nfsv4 expired locks lost\n");
	    }
	    nfscl_cleandeleg(dp);
	    nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
	    dp = ndp;
	}
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
		panic("nfsclexp");

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printed = 1;
				printf("nfsv4 expired locks lost\n");
			}
		}
		if (LIST_EMPTY(&owp->nfsow_open))
			nfscl_freeopenowner(owp, 0);
	}
}

/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
					panic("nfscllckw");
				nfscl_freelockowner(lp, 1);
				break;
			}
		}
	}
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
			else
				owp->nfsow_defunct = 1;
			break;
		}
		owp = nowp;
	}
}

/*
 * Find open/lock owners for processes that have exited.
 */
static void
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;
	uint8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * All the pidhash locks must be acquired, since they are sx locks
	 * and must be acquired before the mutexes. The pid(s) that will
	 * be used aren't known yet, so all the locks need to be acquired.
	 * Fortunately, this function is only performed about once per
	 * second.
	 */
	pidhash_slockall();
	NFSLOCKCLSTATE();
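	/*
	 * Scan all open owners: move lock owners with no locks over to
	 * the lhp list via nfscl_emptylockowner() and clean up any open
	 * owner whose process no longer exists.
	 */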
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
			}
		}
		if (nfscl_procdoesntexist(owp->nfsow_owner)) {
			memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
			nfscl_cleanup_common(clp, own);
		}
	}

	/*
	 * For the single open_owner case, these lock owners need to be
	 * checked separately, to see if their processes still exist.
	 * This is because nfscl_procdoesntexist() never returns true for
	 * the single open_owner, so the loop above never calls
	 * nfscl_cleanup_common() for it.
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (nfscl_procdoesntexist(lp->nfsl_owner)) {
				memcpy(own, lp->nfsl_owner,
				    NFSV4CL_LOCKNAMELEN);
				nfscl_cleanup_common(clp, own);
			}
		}
	}
	NFSUNLOCKCLSTATE();
	pidhash_sunlockall();
}

/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
static void
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
{
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;
	int fnd_it;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)
		return;

	fnd_it = 0;
	mylfhp = NULL;
	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
		    lfhp->nfslfh_len))
			mylfhp = lfhp;
		LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
			if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
			    NFSV4CL_LOCKNAMELEN))
				fnd_it = 1;
	}
	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
		return;

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		    M_NOWAIT);
		if (mylfhp == NULL)
			return;
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	}
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
}

static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
void
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p, struct nfscldeleghead *dhp)
{
	struct nfsclclient *clp;
	struct ucred *cred;
	int igotlock;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
	NFSLOCKCLSTATE();
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		fake_global++;
		NFSUNLOCKCLSTATE();
		NFSLOCKCLSTATE();
	}

	clp = nmp->nm_clp;
	if (clp != NULL) {
		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
			panic("nfscl umount");

		/*
		 * First, handshake with the nfscl renew thread, to terminate
		 * it.
		 */
		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
			    "nfsclumnt", hz);

		/*
		 * Now, get the exclusive lock on the client state, so
		 * that no uses of the state are still in progress.
		 */
		do {
			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} while (!igotlock);
		NFSUNLOCKCLSTATE();

		/*
		 * Free up all the state. It will expire on the server, but
		 * maybe we should do a SetClientId/SetClientIdConfirm so
		 * the server throws it away?
		 */
		LIST_REMOVE(clp, nfsc_list);
		nfscl_delegreturnall(clp, p, dhp);
		cred = newnfs_getcred();
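		/*
		 * For NFSv4.1/4.2, explicitly destroy the session and
		 * clientid. Otherwise, do a fresh SetClientId so that
		 * the server can discard the old state.
		 */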
		if (NFSHASNFSV4N(nmp)) {
			nfsrpc_destroysession(nmp, NULL, cred, p);
			nfsrpc_destroyclient(nmp, clp, cred, p);
		} else
			nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
		nfscl_cleanclient(clp);
		nmp->nm_clp = NULL;
		NFSFREECRED(cred);
		free(clp, M_NFSCLCLIENT);
	} else
		NFSUNLOCKCLSTATE();
}

/*
 * This function is called when a server replies with NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
 * doing Opens and Locks with reclaim. If these fail, it deletes the
 * corresponding state.
 */
static void
nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred,
    NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;
	struct nfscllock *lop, *nlop;
	struct nfscldeleg *dp, *ndp, *tdp;
	struct nfsmount *nmp;
	struct ucred *tcred;
	struct nfsclopenhead extra_open;
	struct nfscldeleghead extra_deleg;
	struct nfsreq *rep;
	u_int64_t len;
	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
	int i, igotlock = 0, error, trycnt, firstlock;
	struct nfscllayout *lyp, *nlyp;
	bool recovered_one;

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl recover");

	/*
	 * For now, just get rid of all layouts. There may be a need
	 * to do LayoutCommit Ops with reclaim == true later.
	 */
	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);
	TAILQ_INIT(&clp->nfsc_layout);
	for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_layouthash[i]);

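	/*
	 * Get a new clientid (and session for NFSv4.1/4.2), retrying a few
	 * times if the server replies NFSERR_STALECLIENTID,
	 * NFSERR_BADSESSION or NFSERR_STALEDONTRECOVER.
	 */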
	trycnt = 5;
	tcred = NULL;
	do {
		error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		NFSLOCKCLSTATE();
		clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
		    NFSCLFLAGS_RECVRINPROG);
		wakeup(&clp->nfsc_flags);
		nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return;
	}
	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;

	/*
	 * Mark requests already queued on the server, so that they don't
	 * initiate another recovery cycle. Any requests already in the
	 * queue that handle state information will have the old stale
	 * clientid/stateid and will get a NFSERR_STALESTATEID,
	 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
	 * This will be translated to NFSERR_STALEDONTRECOVER when
	 * R_DONTRECOVER is set.
	 */
	NFSLOCKREQ();
	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
		if (rep->r_nmp == nmp)
			rep->r_flags |= R_DONTRECOVER;
	}
	NFSUNLOCKREQ();

	/*
	 * If nfsrpc_setclient() returns *retokp == true,
	 * no more recovery is needed.
	 */
	if (*retokp)
		goto out;

	/*
	 * Now, mark all delegations "need reclaim".
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;

	TAILQ_INIT(&extra_deleg);
	LIST_INIT(&extra_open);
	/*
	 * Now traverse the state lists, doing Open and Lock Reclaims.
	 */
	tcred = newnfs_getcred();
	recovered_one = false;
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
	    nowp = LIST_NEXT(owp, nfsow_list);
	    owp->nfsow_seqid = 0;
	    op = LIST_FIRST(&owp->nfsow_open);
	    while (op != NULL) {
		nop = LIST_NEXT(op, nfso_list);
		if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
		    /* Search for a delegation to reclaim with the open */
		    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
			if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
			    continue;
			if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
			    mode = NFSV4OPEN_ACCESSWRITE;
			    delegtype = NFSV4OPEN_DELEGATEWRITE;
			} else {
			    mode = NFSV4OPEN_ACCESSREAD;
			    delegtype = NFSV4OPEN_DELEGATEREAD;
			}
			if ((op->nfso_mode & mode) == mode &&
			    op->nfso_fhlen == dp->nfsdl_fhlen &&
			    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
			    break;
		    }
		    ndp = dp;
		    if (dp == NULL)
			delegtype = NFSV4OPEN_DELEGATENONE;
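		    /*
		     * Reclaim the open and, if one was found above, the
		     * delegation that goes with it.
		     */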
		    newnfs_copycred(&op->nfso_cred, tcred);
		    error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
			op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
			op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
			tcred, p);
		    if (!error) {
			recovered_one = true;
			/* Handle any replied delegation */
			if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
			    || NFSMNT_RDONLY(nmp->nm_mountp))) {
			    if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
				mode = NFSV4OPEN_ACCESSWRITE;
			    else
				mode = NFSV4OPEN_ACCESSREAD;
			    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
				if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
				    continue;
				if ((op->nfso_mode & mode) == mode &&
				    op->nfso_fhlen == dp->nfsdl_fhlen &&
				    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
				    op->nfso_fhlen)) {
				    dp->nfsdl_stateid = ndp->nfsdl_stateid;
				    dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
				    dp->nfsdl_ace = ndp->nfsdl_ace;
				    dp->nfsdl_change = ndp->nfsdl_change;
				    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
				    if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
					dp->nfsdl_flags |= NFSCLDL_RECALL;
				    free(ndp, M_NFSCLDELEG);
				    ndp = NULL;
				    break;
				}
			    }
			}
			if (ndp != NULL)
			    TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);

			/* and reclaim all byte range locks */
			lp = LIST_FIRST(&op->nfso_lock);
			while (lp != NULL) {
			    nlp = LIST_NEXT(lp, nfsl_list);
			    lp->nfsl_seqid = 0;
			    firstlock = 1;
			    lop = LIST_FIRST(&lp->nfsl_lock);
			    while (lop != NULL) {
				nlop = LIST_NEXT(lop, nfslo_list);
				if (lop->nfslo_end == NFS64BITSSET)
				    len = NFS64BITSSET;
				else
				    len = lop->nfslo_end - lop->nfslo_first;
				error = nfscl_trylock(nmp, NULL,
				    op->nfso_fh, op->nfso_fhlen, lp,
				    firstlock, 1, lop->nfslo_first, len,
				    lop->nfslo_type, tcred, p);
				if (error != 0)
				    nfscl_freelock(lop, 0);
				else
				    firstlock = 0;
				lop = nlop;
			    }
			    /* If no locks, but a lockowner, just delete it. */
			    if (LIST_EMPTY(&lp->nfsl_lock))
				nfscl_freelockowner(lp, 0);
			    lp = nlp;
			}
		    } else if (error == NFSERR_NOGRACE && !recovered_one &&
			NFSHASNFSV4N(nmp)) {
			/*
			 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
			 * actually end up here, since the client will do
			 * a recovery for NFSERR_BADSESSION, but will get
			 * an NFSERR_NOGRACE reply for the first "reclaim"
			 * attempt.
			 * So, call nfscl_expireclient() to recover the
			 * opens as best we can and then do a reclaim
			 * complete and return.
			 */
			nfsrpc_reclaimcomplete(nmp, cred, p);
			nfscl_expireclient(clp, nmp, tcred, p);
			goto out;
		    }
		}
		if (error != 0 && error != NFSERR_BADSESSION)
		    nfscl_freeopen(op, 0, true);
		op = nop;
	    }
	    owp = nowp;
	}

	/*
	 * Now, try and get any delegations not yet reclaimed by cobbling
	 * together an appropriate open.
	 */
	nowp = NULL;
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
	    ndp = TAILQ_NEXT(dp, nfsdl_list);
	    if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
		if (nowp == NULL) {
		    nowp = malloc(
			sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
		    /*
		     * Name must be as long as the largest possible
		     * NFSV4CL_LOCKNAMELEN (12 for now).
		     */
		    NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
			NFSV4CL_LOCKNAMELEN);
		    LIST_INIT(&nowp->nfsow_open);
		    nowp->nfsow_clp = clp;
		    nowp->nfsow_seqid = 0;
		    nowp->nfsow_defunct = 0;
		    nfscl_lockinit(&nowp->nfsow_rwlock);
		}
		nop = NULL;
		if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
		    nop = malloc(sizeof (struct nfsclopen) +
			dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
		    nop->nfso_own = nowp;
		    if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
			nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
			delegtype = NFSV4OPEN_DELEGATEWRITE;
		    } else {
			nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
			delegtype = NFSV4OPEN_DELEGATEREAD;
		    }
		    nop->nfso_opencnt = 0;
		    nop->nfso_posixlock = 1;
		    nop->nfso_fhlen = dp->nfsdl_fhlen;
		    NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
		    LIST_INIT(&nop->nfso_lock);
		    nop->nfso_stateid.seqid = 0;
		    nop->nfso_stateid.other[0] = 0;
		    nop->nfso_stateid.other[1] = 0;
		    nop->nfso_stateid.other[2] = 0;
		    newnfs_copycred(&dp->nfsdl_cred, tcred);
		    newnfs_copyincred(tcred, &nop->nfso_cred);
		    tdp = NULL;
		    error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
			nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
			nop->nfso_mode, nop, NULL, 0, &tdp, 1,
			delegtype, tcred, p);
		    if (tdp != NULL) {
			if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
			    mode = NFSV4OPEN_ACCESSWRITE;
			else
			    mode = NFSV4OPEN_ACCESSREAD;
			if ((nop->nfso_mode & mode) == mode &&
			    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
			    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
			    nop->nfso_fhlen)) {
			    dp->nfsdl_stateid = tdp->nfsdl_stateid;
			    dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
			    dp->nfsdl_ace = tdp->nfsdl_ace;
			    dp->nfsdl_change = tdp->nfsdl_change;
			    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
			    if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
				dp->nfsdl_flags |= NFSCLDL_RECALL;
			    free(tdp, M_NFSCLDELEG);
			} else {
			    TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
			}
		    }
		}
		if (error) {
		    if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		    if (error == NFSERR_NOGRACE && !recovered_one &&
			NFSHASNFSV4N(nmp)) {
			/*
			 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
			 * actually end up here, since the client will do
			 * a recovery for NFSERR_BADSESSION, but will get
			 * an NFSERR_NOGRACE reply for the first "reclaim"
			 * attempt.
			 * So, call nfscl_expireclient() to recover the
			 * opens as best we can and then do a reclaim
			 * complete and return.
			 */
			nfsrpc_reclaimcomplete(nmp, cred, p);
			nfscl_expireclient(clp, nmp, tcred, p);
			free(nowp, M_NFSCLOWNER);
			goto out;
		    }
		    /*
		     * Couldn't reclaim it, so throw the state
		     * away. Ouch!!
		     */
		    nfscl_cleandeleg(dp);
		    nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
		} else {
		    recovered_one = true;
		    LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
		}
	    }
	    dp = ndp;
	}

	/*
	 * Now, get rid of extra Opens and Delegations.
	 */
	LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
		do {
			newnfs_copycred(&op->nfso_cred, tcred);
			error = nfscl_tryclose(op, tcred, nmp, p, true);
			if (error == NFSERR_GRACE)
				(void) nfs_catnap(PZERO, error, "nfsexcls");
		} while (error == NFSERR_GRACE);
		LIST_REMOVE(op, nfso_list);
		free(op, M_NFSCLOPEN);
	}
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);

	TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
		do {
			newnfs_copycred(&dp->nfsdl_cred, tcred);
			error = nfscl_trydelegreturn(dp, tcred, nmp, p);
			if (error == NFSERR_GRACE)
				(void) nfs_catnap(PZERO, error, "nfsexdlg");
		} while (error == NFSERR_GRACE);
		TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
		free(dp, M_NFSCLDELEG);
	}

	/* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
	if (NFSHASNFSV4N(nmp))
		(void)nfsrpc_reclaimcomplete(nmp, cred, p);

out:
	NFSLOCKCLSTATE();
	clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
	wakeup(&clp->nfsc_flags);
	nfsv4_unlock(&clp->nfsc_lock, 0);
	NFSUNLOCKCLSTATE();
	if (tcred != NULL)
		NFSFREECRED(tcred);
}

/*
 * This function is called when a server replies with NFSERR_EXPIRED.
 * It deletes all state for the client and does a fresh SetClientId/confirm.
 * XXX Someday it should post a signal to the process(es) that hold the
 * state, so they know that lock state has been lost.
 */
int
nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
{
	struct nfsmount *nmp;
	struct ucred *cred;
	int igotlock = 0, error, trycnt;

	/*
	 * If the clientid has gone away or a new SetClientid has already
	 * been done, just return ok.
	 */
	if (clp == NULL || clidrev != clp->nfsc_clientidrev)
		return (0);

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
	 * that only one thread does the work.
	 */
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (0);
	}
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	NFSUNLOCKCLSTATE();

	nmp = clp->nfsc_nmp;
	if (nmp == NULL)
		panic("nfscl expired");
	cred = newnfs_getcred();
	trycnt = 5;
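	/*
	 * Get a fresh clientid via nfsrpc_setclient(), retrying the same
	 * transient errors as nfscl_recover() does.
	 */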
	do {
		error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
	if (error) {
		NFSLOCKCLSTATE();
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	} else {
		/*
		 * Expire the state for the client.
		 */
		nfscl_expireclient(clp, nmp, cred, p);
		NFSLOCKCLSTATE();
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	}
	clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
	wakeup(&clp->nfsc_flags);
	nfsv4_unlock(&clp->nfsc_lock, 0);
	NFSUNLOCKCLSTATE();
	NFSFREECRED(cred);
	return (error);
}

/*
 * This function inserts a lock in the list after insert_lop.
 */
static void
nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
    struct nfscllock *insert_lop, int local)
{

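	/*
	 * insert_lop == lp means insert at the head of the list, since
	 * the lock owner structure doubles as the list head pseudo-entry.
	 */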
	if ((struct nfscllockowner *)insert_lop == lp)
		LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
	else
		LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
	if (local)
		nfsstatsv1.cllocallocks++;
	else
		nfsstatsv1.cllocks++;
}

/*
 * This function updates the locking for a lock owner and given file. It
 * maintains a list of lock ranges ordered on increasing file offset that
 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
 * It always adds new_lop to the list and sometimes uses the one pointed
 * at by other_lopp.
 * Returns 1 if the locks were modified, 0 otherwise.
 */
static int
nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
    struct nfscllock **other_lopp, int local)
{
	struct nfscllock *new_lop = *new_lopp;
	struct nfscllock *lop, *tlop, *ilop;
	struct nfscllock *other_lop;
	int unlock = 0, modified = 0;
	u_int64_t tmp;

	/*
	 * Work down the list until the lock is merged.
	 */
	if (new_lop->nfslo_type == F_UNLCK)
		unlock = 1;
	ilop = (struct nfscllock *)lp;
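	/* ilop tracks the entry that new_lop will be inserted after. */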
	lop = LIST_FIRST(&lp->nfsl_lock);
	while (lop != NULL) {
		/*
		 * Only check locks for this file that aren't before the
		 * start of the new lock's range.
		 */
		if (lop->nfslo_end >= new_lop->nfslo_first) {
			if (new_lop->nfslo_end < lop->nfslo_first) {
				/*
				 * If the new lock ends before the start of the
				 * current lock's range, no merge, just insert
				 * the new lock.
				 */
				break;
			}
			if (new_lop->nfslo_type == lop->nfslo_type ||
			    (new_lop->nfslo_first <= lop->nfslo_first &&
			     new_lop->nfslo_end >= lop->nfslo_end)) {
				/*
				 * This lock can be absorbed by the new lock/unlock.
				 * This happens when it covers the entire range
				 * of the old lock or is contiguous
				 * with the old lock and is of the same type or an
				 * unlock.
				 */
				if (new_lop->nfslo_type != lop->nfslo_type ||
				    new_lop->nfslo_first != lop->nfslo_first ||
				    new_lop->nfslo_end != lop->nfslo_end)
					modified = 1;
				if (lop->nfslo_first < new_lop->nfslo_first)
					new_lop->nfslo_first = lop->nfslo_first;
				if (lop->nfslo_end > new_lop->nfslo_end)
					new_lop->nfslo_end = lop->nfslo_end;
				tlop = lop;
				lop = LIST_NEXT(lop, nfslo_list);
				nfscl_freelock(tlop, local);
				continue;
			}

			/*
			 * All these cases are for contiguous locks that are not the
			 * same type, so they can't be merged.
			 */
			if (new_lop->nfslo_first <= lop->nfslo_first) {
				/*
				 * This case is where the new lock overlaps with the
				 * first part of the old lock. Move the start of the
				 * old lock to just past the end of the new lock. The
				 * new lock will be inserted in front of the old, since
				 * ilop hasn't been updated. (We are done now.)
				 */
				if (lop->nfslo_first != new_lop->nfslo_end) {
					lop->nfslo_first = new_lop->nfslo_end;
					modified = 1;
				}
				break;
			}
			if (new_lop->nfslo_end >= lop->nfslo_end) {
				/*
				 * This case is where the new lock overlaps with the
				 * end of the old lock's range. Move the old lock's
				 * end to just before the new lock's first and insert
				 * the new lock after the old lock.
				 * Might not be done yet, since the new lock could
				 * overlap further locks with higher ranges.
				 */
				if (lop->nfslo_end != new_lop->nfslo_first) {
					lop->nfslo_end = new_lop->nfslo_first;
					modified = 1;
				}
				ilop = lop;
				lop = LIST_NEXT(lop, nfslo_list);
				continue;
			}
			/*
			 * The final case is where the new lock's range is in the
			 * middle of the current lock's and splits the current lock
			 * up. Use *other_lopp to handle the second part of the
			 * split old lock range. (We are done now.)
			 * For unlock, we use new_lop as other_lop and tmp, since
			 * other_lop and new_lop are the same for this case.
			 * We noted the unlock case above, so we don't need
			 * new_lop->nfslo_type any longer.
			 */
			tmp = new_lop->nfslo_first;
			if (unlock) {
				other_lop = new_lop;
				*new_lopp = NULL;
			} else {
				other_lop = *other_lopp;
				*other_lopp = NULL;
			}
			other_lop->nfslo_first = new_lop->nfslo_end;
			other_lop->nfslo_end = lop->nfslo_end;
			other_lop->nfslo_type = lop->nfslo_type;
			lop->nfslo_end = tmp;
			nfscl_insertlock(lp, other_lop, lop, local);
			ilop = lop;
			modified = 1;
			break;
		}
		ilop = lop;
		lop = LIST_NEXT(lop, nfslo_list);
		if (lop == NULL)
			break;
	}

	/*
	 * Insert the new lock in the list at the appropriate place.
	 */
	if (!unlock) {
		nfscl_insertlock(lp, new_lop, ilop, local);
		*new_lopp = NULL;
		modified = 1;
	}
	return (modified);
}

/*
 * This function must be run as a kernel thread.
 * It does Renew Ops and recovery, when required.
 */
void
nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
{
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleghead dh;
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;
	u_int32_t clidrev;
	int error, cbpathdown, islept, igotlock, ret, clearok;
	uint32_t recover_done_time = 0;
	time_t mytime;
	static time_t prevsec = 0;
	struct nfscllockownerfh *lfhp, *nlfhp;
	struct nfscllockownerfhhead lfh;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;
	struct nfscllayouthead rlh;
	struct nfsclrecalllayout *recallp;
	struct nfsclds *dsp;
	bool retok;
	struct mount *mp;
	vnode_t vp;

	cred = newnfs_getcred();
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
	mp = clp->nfsc_nmp->nm_mountp;
	NFSUNLOCKCLSTATE();
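	/*
	 * The loop body runs about once per second, via the mtx_sleep()
	 * near the bottom, until unmount sets NFSCLFLAGS_UMOUNT.
	 */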
	for(;;) {
		newnfs_setroot(cred);
		cbpathdown = 0;
		if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
			/*
			 * Only allow one full recovery within 1/2 of the lease
			 * duration (nfsc_renew).
			 * retok is value/result. If passed in set to true,
			 * it indicates only a CreateSession operation should
			 * be attempted.
			 * If it is returned true, it indicates that the
			 * recovery only required a CreateSession.
			 */
			retok = true;
			if (recover_done_time < NFSD_MONOSEC) {
				recover_done_time = NFSD_MONOSEC +
				    clp->nfsc_renew;
				retok = false;
			}
			NFSCL_DEBUG(1, "Doing recovery, only "
			    "createsession=%d\n", retok);
			nfscl_recover(clp, &retok, cred, p);
		}
		if (clp->nfsc_expire <= NFSD_MONOSEC &&
		    (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
			clidrev = clp->nfsc_clientidrev;
			error = nfsrpc_renew(clp, NULL, cred, p);
			if (error == NFSERR_CBPATHDOWN)
				cbpathdown = 1;
			else if (error == NFSERR_STALECLIENTID) {
				NFSLOCKCLSTATE();
				clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
				NFSUNLOCKCLSTATE();
			} else if (error == NFSERR_EXPIRED)
				(void) nfscl_hasexpired(clp, clidrev, p);
		}

checkdsrenew:
		if (NFSHASNFSV4N(clp->nfsc_nmp)) {
			/* Do renews for any DS sessions. */
			NFSLOCKMNT(clp->nfsc_nmp);
			/* Skip first entry, since the MDS is handled above. */
			dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
			if (dsp != NULL)
				dsp = TAILQ_NEXT(dsp, nfsclds_list);
			while (dsp != NULL) {
				if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
				    dsp->nfsclds_sess.nfsess_defunct == 0) {
					dsp->nfsclds_expire = NFSD_MONOSEC +
					    clp->nfsc_renew;
					NFSUNLOCKMNT(clp->nfsc_nmp);
					(void)nfsrpc_renew(clp, dsp, cred, p);
					goto checkdsrenew;
				}
				dsp = TAILQ_NEXT(dsp, nfsclds_list);
			}
			NFSUNLOCKMNT(clp->nfsc_nmp);
		}

		TAILQ_INIT(&dh);
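		/* Delegations to be returned are collected on dh. */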
		NFSLOCKCLSTATE();
		if (cbpathdown)
			/* It's a Total Recall! */
			nfscl_totalrecall(clp);

		/*
		 * Now, handle defunct owners.
		 */
		LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
			if (LIST_EMPTY(&owp->nfsow_open)) {
				if (owp->nfsow_defunct != 0)
					nfscl_freeopenowner(owp, 0);
			}
		}

		/*
		 * Do the recall on any delegations. To avoid trouble, always
		 * come back up here after having slept.
		 */
		igotlock = 0;
tryagain:
		dp = TAILQ_FIRST(&clp->nfsc_deleg);
		while (dp != NULL) {
			ndp = TAILQ_NEXT(dp, nfsdl_list);
			if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
					if (igotlock) {
						nfsv4_unlock(&clp->nfsc_lock, 0);
						igotlock = 0;
					}
					dp->nfsdl_rwlock.nfslock_lock |=
					    NFSV4LOCK_WANTED;
					msleep(&dp->nfsdl_rwlock,
					    NFSCLSTATEMUTEXPTR, PVFS, "nfscld",
					    5 * hz);
					if (NFSCL_FORCEDISM(mp))
						goto terminate;
					goto tryagain;
				}
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, mp);
					if (igotlock == 0 && NFSCL_FORCEDISM(mp))
						goto terminate;
					if (islept)
						goto tryagain;
				}
				NFSUNLOCKCLSTATE();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
				    NULL, cred, p, 1, &vp);
				if (!ret) {
					nfscl_cleandeleg(dp);
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					LIST_REMOVE(dp, nfsdl_hash);
					TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
					clp->nfsc_delegcnt--;
					nfsstatsv1.cldelegates--;
				}
				NFSLOCKCLSTATE();
				/*
				 * The nfsc_lock must be released before doing
				 * vrele(), since it might call nfs_inactive().
				 * For the unlikely case where the vnode failed
				 * to be acquired by nfscl_recalldeleg(), a
				 * VOP_RECLAIM() should be in progress and it
				 * will return the delegation.
				 */
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				if (vp != NULL) {
					NFSUNLOCKCLSTATE();
					vrele(vp);
					NFSLOCKCLSTATE();
				}
				goto tryagain;
			}
			dp = ndp;
		}

		/*
		 * Clear out old delegations, if we are above the high water
		 * mark. Only clear out ones with no state related to them.
		 * The tailq list is in LRU order.
		 */
		dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
		while (clp->nfsc_delegcnt > clp->nfsc_deleghighwater &&
		    dp != NULL) {
			ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
			if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
			    dp->nfsdl_rwlock.nfslock_lock == 0 &&
			    dp->nfsdl_timestamp < NFSD_MONOSEC &&
			    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
			      NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
				clearok = 1;
				LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
					op = LIST_FIRST(&owp->nfsow_open);
					if (op != NULL) {
						clearok = 0;
						break;
					}
				}
				if (clearok) {
					LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
						if (!LIST_EMPTY(&lp->nfsl_lock)) {
							clearok = 0;
							break;
						}
					}
				}
				if (clearok) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
					LIST_REMOVE(dp, nfsdl_hash);
					TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
					clp->nfsc_delegcnt--;
					nfsstatsv1.cldelegates--;
				}
			}
			dp = ndp;
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);

		/*
		 * Do the recall on any layouts. To avoid trouble, always
		 * come back up here after having slept.
		 */
		TAILQ_INIT(&rlh);
tryagain2:
		TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
			if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
				    (lyp->nfsly_lock.nfslock_lock &
				     NFSV4LOCK_LOCK) != 0) {
					lyp->nfsly_lock.nfslock_lock |=
					    NFSV4LOCK_WANTED;
					msleep(&lyp->nfsly_lock.nfslock_lock,
					    NFSCLSTATEMUTEXPTR, PVFS, "nfslyp",
					    5 * hz);
					if (NFSCL_FORCEDISM(mp))
						goto terminate;
					goto tryagain2;
				}
				/* Move the layout to the recall list. */
				TAILQ_REMOVE(&clp->nfsc_layout, lyp,
				    nfsly_list);
				LIST_REMOVE(lyp, nfsly_hash);
				TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);

				/* Handle any layout commits. */
				if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
				    (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
					lyp->nfsly_flags &= ~NFSLY_WRITTEN;
					NFSUNLOCKCLSTATE();
					NFSCL_DEBUG(3, "do layoutcommit\n");
					nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
					    cred, p);
					NFSLOCKCLSTATE();
					goto tryagain2;
				}
			}
		}

		/* Now, look for stale layouts. */
		lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
		while (lyp != NULL) {
			nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
			if ((lyp->nfsly_timestamp < NFSD_MONOSEC ||
			    clp->nfsc_layoutcnt > clp->nfsc_layouthighwater) &&
			    (lyp->nfsly_flags & (NFSLY_RECALL |
			     NFSLY_RETONCLOSE)) == 0 &&
			    lyp->nfsly_lock.nfslock_usecnt == 0 &&
			    lyp->nfsly_lock.nfslock_lock == 0) {
				NFSCL_DEBUG(4, "ret stale lay=%d\n",
				    clp->nfsc_layoutcnt);
				recallp = malloc(sizeof(*recallp),
				    M_NFSLAYRECALL, M_NOWAIT);
				if (recallp == NULL)
					break;
				(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
				    lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
				    lyp->nfsly_stateid.seqid, 0, 0, NULL,
				    recallp);
			}
			lyp = nlyp;
		}

		/*
		 * Free up any unreferenced device info structures.
		 */
		LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
			if (dip->nfsdi_layoutrefs == 0 &&
			    dip->nfsdi_refcnt == 0) {
				NFSCL_DEBUG(4, "freeing devinfo\n");
				LIST_REMOVE(dip, nfsdi_list);
				nfscl_freedevinfo(dip);
			}
		}
		NFSUNLOCKCLSTATE();

		/* Do layout return(s), as required. */
		TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
			TAILQ_REMOVE(&rlh, lyp, nfsly_list);
			NFSCL_DEBUG(4, "ret layout\n");
			nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
			if ((lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
				NFSLOCKCLSTATE();
				lyp->nfsly_flags |= NFSLY_RETURNED;
				wakeup(lyp);
				NFSUNLOCKCLSTATE();
			} else
				nfscl_freelayout(lyp);
		}

		/*
		 * Delegreturn any delegations cleaned out or recalled.
		 */
		TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
			TAILQ_REMOVE(&dh, dp, nfsdl_list);
			free(dp, M_NFSCLDELEG);
		}

		SLIST_INIT(&lfh);
		/*
		 * Call nfscl_cleanupkext() once per second to check for
		 * open/lock owners where the process has exited.
		 */
		mytime = NFSD_MONOSEC;
		if (prevsec != mytime) {
			prevsec = mytime;
			nfscl_cleanupkext(clp, &lfh);
		}

		/*
		 * Do a ReleaseLockOwner for all lock owners where the
		 * associated process no longer exists, as found by
		 * nfscl_cleanupkext().
		 */
		newnfs_setroot(cred);
		SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
			LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
			    nlp) {
				(void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
				    lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
				    p);
				nfscl_freelockowner(lp, 0);
			}
			free(lfhp, M_TEMP);
		}
		SLIST_INIT(&lfh);

		NFSLOCKCLSTATE();
		if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
			    hz);
terminate:
		if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
			clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
			NFSUNLOCKCLSTATE();
			NFSFREECRED(cred);
			wakeup((caddr_t)clp);
			return;
		}
		NFSUNLOCKCLSTATE();
	}
}

/*
 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
 */
void
nfscl_initiate_recovery(struct nfsclclient *clp)
{

	if (clp == NULL)
		return;
	NFSLOCKCLSTATE();
	clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
	NFSUNLOCKCLSTATE();
	wakeup((caddr_t)clp);
}

/*
 * Dump out the state stuff for debugging.
 */
void
nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
    int lockowner, int locks)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;

	clp = nmp->nm_clp;
	if (clp == NULL) {
		printf("nfscl dumpstate NULL clp\n");
		return;
	}
	NFSLOCKCLSTATE();
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
	  LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
		    owp->nfsow_owner[0], owp->nfsow_owner[1],
		    owp->nfsow_owner[2], owp->nfsow_owner[3],
		    owp->nfsow_seqid);
	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		if (opens)
		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
			op->nfso_stateid.other[2], op->nfso_opencnt,
			op->nfso_fh[12]);
		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		    if (lockowner)
			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
			    lp->nfsl_owner[0], lp->nfsl_owner[1],
			    lp->nfsl_owner[2], lp->nfsl_owner[3],
			    lp->nfsl_seqid,
			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
			    lp->nfsl_stateid.other[2]);
		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
			if (locks)
#ifdef __FreeBSD__
			    printf("lck typ=%d fst=%ju end=%ju\n",
				lop->nfslo_type, (intmax_t)lop->nfslo_first,
				(intmax_t)lop->nfslo_end);
#else
			    printf("lck typ=%d fst=%qd end=%qd\n",
				lop->nfslo_type, lop->nfslo_first,
				lop->nfslo_end);
#endif
		    }
		}
	    }
	  }
	}
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
		    owp->nfsow_owner[0], owp->nfsow_owner[1],
		    owp->nfsow_owner[2], owp->nfsow_owner[3],
		    owp->nfsow_seqid);
	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		if (opens)
		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
			op->nfso_stateid.other[2], op->nfso_opencnt,
			op->nfso_fh[12]);
		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		    if (lockowner)
			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
			    lp->nfsl_owner[0], lp->nfsl_owner[1],
			    lp->nfsl_owner[2], lp->nfsl_owner[3],
			    lp->nfsl_seqid,
			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
			    lp->nfsl_stateid.other[2]);
		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
			if (locks)
#ifdef __FreeBSD__
			    printf("lck typ=%d fst=%ju end=%ju\n",
				lop->nfslo_type, (intmax_t)lop->nfslo_first,
				(intmax_t)lop->nfslo_end);
#else
			    printf("lck typ=%d fst=%qd end=%qd\n",
				lop->nfslo_type, lop->nfslo_first,
				lop->nfslo_end);
#endif
		    }
		}
	    }
	}
	NFSUNLOCKCLSTATE();
}

/*
 * Check for duplicate open owners and opens.
 * (Only used as a diagnostic aid.)
 */
void
nfscl_dupopen(vnode_t vp, int dupopens)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *owp2;
	struct nfsclopen *op, *op2;
	struct nfsfh *nfhp;

	clp = VFSTONFS(vp->v_mount)->nm_clp;
	if (clp == NULL) {
		printf("nfscl dupopen NULL clp\n");
		return;
	}
	nfhp = VTONFS(vp)->n_fhp;
	NFSLOCKCLSTATE();

	/*
	 * First, search for duplicate owners.
	 * These should never happen!
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		if (owp != owp2 &&
		    !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
		    NFSV4CL_LOCKNAMELEN)) {
			NFSUNLOCKCLSTATE();
			printf("DUP OWNER\n");
			nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
			return;
		}
	    }
	}

	/*
	 * Now, search for duplicate stateids.
	 * These shouldn't happen, either.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op != op2 &&
			    (op->nfso_stateid.other[0] != 0 ||
			     op->nfso_stateid.other[1] != 0 ||
			     op->nfso_stateid.other[2] != 0) &&
			    op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
			    op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
			    op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
				NFSUNLOCKCLSTATE();
				printf("DUP STATEID\n");
				nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
				return;
			}
		    }
		}
	    }
	}

	/*
	 * Now search for duplicate opens.
	 * Duplicate opens for the same owner
	 * should never occur. Other duplicates are
	 * possible and are checked for if "dupopens"
	 * is true.
	 */
	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
		if (nfhp->nfh_len == op2->nfso_fhlen &&
		    !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
		    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			    if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
				!NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
				(!NFSBCMP(op->nfso_own->nfsow_owner,
				  op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
				 dupopens)) {
				if (!NFSBCMP(op->nfso_own->nfsow_owner,
				    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
				    NFSUNLOCKCLSTATE();
				    printf("BADDUP OPEN\n");
				} else {
				    NFSUNLOCKCLSTATE();
				    printf("DUP OPEN\n");
				}
				nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0,
				    0);
				return;
			    }
			}
		    }
		}
	    }
	}
	NFSUNLOCKCLSTATE();
}

/*
 * During close, find an open that needs to be dereferenced and
 * dereference it. If there are no more opens for this file,
 * log a message to that effect.
 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
 * on the file's vnode.
 * This is the safe way, since it is difficult to identify
 * which open the close is for and I/O can be performed after the
 * close(2) system call when a file is mmap'd.
 * If it returns 0 for success, there will be a referenced
 * clp returned via clpp.
 */
int
nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	int error, notdecr;

	error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nfhp = VTONFS(vp)->n_fhp;
	notdecr = 1;
	NFSLOCKCLSTATE();
	/*
	 * First, look for one under a delegation that was locally issued
	 * and just decrement the opencnt for it. Since all my Opens against
	 * the server are DENY_NONE, I don't see a problem with hanging
	 * onto them. (It is much easier to use one of the extant Opens
	 * that I already have on the server when a Delegation is recalled
	 * than to do fresh Opens.) Someday, I might need to rethink this.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				/*
				 * Since a delegation is for a file, there
				 * should never be more than one open for
				 * each openowner.
				 */
				if (LIST_NEXT(op, nfso_list) != NULL)
					panic("nfscdeleg opens");
				if (notdecr && op->nfso_opencnt > 0) {
					notdecr = 0;
					op->nfso_opencnt--;
					break;
				}
			}
		}
	}

	/* Now process the opens against the server. */
	LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
	    nfso_hash) {
		if (op->nfso_fhlen == nfhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
		    nfhp->nfh_len)) {
			/* Found an open, decrement cnt if possible */
			if (notdecr && op->nfso_opencnt > 0) {
				notdecr = 0;
				op->nfso_opencnt--;
			}
			/*
			 * There are more opens, so just return.
			 */
			if (op->nfso_opencnt > 0) {
				NFSUNLOCKCLSTATE();
				return (0);
			}
		}
	}
	NFSUNLOCKCLSTATE();
	if (notdecr)
		printf("nfscl: never fnd open\n");
	return (0);
}

int
nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfsmount *nmp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfsclopenhead delayed;
	struct nfscldeleg *dp;
	struct nfsfh *nfhp;
	struct nfsclrecalllayout *recallp;
	struct nfscllayout *lyp;
	int error;

	error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
	if (error)
		return (error);
	*clpp = clp;

	nmp = VFSTONFS(vp->v_mount);
	nfhp = VTONFS(vp)->n_fhp;
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	NFSLOCKCLSTATE();
	/*
	 * First get rid of the local Open structures, which should no
	 * longer be in use.
	 */
	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
			op = LIST_FIRST(&owp->nfsow_open);
			if (op != NULL) {
				KASSERT((op->nfso_opencnt == 0),
				    ("nfscl: bad open cnt on deleg"));
				nfscl_freeopen(op, 1, true);
			}
			nfscl_freeopenowner(owp, 1);
		}
	}

	/* Return any layouts marked return on close. */
	nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp,
	    &lyp);

	/* Now process the opens against the server. */
	LIST_INIT(&delayed);
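	/*
	 * Opens that get an NFSERR_DELAY reply are moved to the local
	 * "delayed" list and retried after the client state is released.
	 */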
lookformore:
	LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
	    nfso_hash) {
		if (op->nfso_fhlen == nfhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
		    nfhp->nfh_len)) {
			/* Found an open, close it. */
#ifdef DIAGNOSTIC
			KASSERT((op->nfso_opencnt == 0),
			    ("nfscl: bad open cnt on server (%d)",
			     op->nfso_opencnt));
#endif
			NFSUNLOCKCLSTATE();
			if (NFSHASNFSV4N(nmp))
				error = nfsrpc_doclose(nmp, op, p, false, true);
			else
				error = nfsrpc_doclose(nmp, op, p, true, true);
			NFSLOCKCLSTATE();
			if (error == NFSERR_DELAY) {
				nfscl_unlinkopen(op);
				op->nfso_own = NULL;
				LIST_INSERT_HEAD(&delayed, op, nfso_list);
			}
			goto lookformore;
		}
	}
	nfscl_clrelease(clp);

	/* Now, wait for any layout that is returned upon close. */
	if (lyp != NULL) {
		while ((lyp->nfsly_flags & NFSLY_RETURNED) == 0) {
			if (NFSCL_FORCEDISM(nmp->nm_mountp)) {
				lyp = NULL;
				break;
			}
			msleep(lyp, NFSCLSTATEMUTEXPTR, PZERO, "nfslroc", hz);
		}
		if (lyp != NULL)
			nfscl_freelayout(lyp);
	}

	NFSUNLOCKCLSTATE();
	/*
	 * recallp has been set NULL by nfscl_retoncloselayout() if it was
	 * used by the function, but calling free() with a NULL pointer is ok.
	 */
	free(recallp, M_NFSLAYRECALL);

	/* Now, loop retrying the delayed closes. */
	LIST_FOREACH_SAFE(op, &delayed, nfso_list, nop) {
		nfsrpc_doclose(nmp, op, p, true, false);
		LIST_REMOVE(op, nfso_list);
		nfscl_freeopen(op, 0, false);
	}
	return (0);
}

/*
 * Return all delegations on this client.
 * (Must be called with client sleep lock.)
 */
static void
nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p,
    struct nfscldeleghead *dhp)
{
	struct nfscldeleg *dp, *ndp;
	struct ucred *cred;

	cred = newnfs_getcred();
	TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
		nfscl_cleandeleg(dp);
		(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
		if (dhp != NULL) {
			nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
			TAILQ_INSERT_HEAD(dhp, dp, nfsdl_list);
		} else
			nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
	}
	NFSFREECRED(cred);
}

/*
 * Return any delegation for this vp.
 */
void
nfscl_delegreturnvp(struct vnode *vp, bool retdeleg, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct ucred *cred;
	struct nfsnode *np;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return;
	}
	NFSUNLOCKMNT(nmp);
	np = VTONFS(vp);
	cred = newnfs_getcred();
	dp = NULL;
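	/*
	 * Look for a delegation for this file and, if it is not already
	 * being recalled or returned, remove it from the client's lists.
	 */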
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp != NULL)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0) {
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
		NFSUNLOCKCLSTATE();
		if (retdeleg) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
		}
		free(dp, M_NFSCLDELEG);
	} else
		NFSUNLOCKCLSTATE();
	NFSFREECRED(cred);
}

/*
 * Do a callback RPC.
 */
void
nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
{
	int clist, gotseq_ok, i, j, k, op, rcalls;
	u_int32_t *tl;
	struct nfsclclient *clp;
	struct nfscldeleg *dp = NULL;
	int numops, taglen = -1, error = 0, trunc __unused;
	u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
	u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
	vnode_t vp = NULL;
	struct nfsnode *np;
	struct vattr va;
	struct nfsfh *nfhp;
	mount_t mp;
	nfsattrbit_t attrbits, rattrbits;
	nfsv4stateid_t stateid;
	uint32_t seqid, slotid = 0, highslot, cachethis __unused;
	uint8_t sessionid[NFSX_V4SESSIONID];
	struct mbuf *rep;
	struct nfscllayout *lyp;
	uint64_t filesid[2], len, off;
	int changed, gotone, laytype, recalltype;
	uint32_t iomode;
	struct nfsclrecalllayout *recallp = NULL;
	struct nfsclsession *tsep;

	gotseq_ok = 0;
	nfsrvd_rephead(nd);
3581 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3582 taglen = fxdr_unsigned(int, *tl);
3583 if (taglen < 0 || taglen > NFSV4_OPAQUELIMIT) {
3584 error = EBADRPC;
3585 taglen = -1;
3586 goto nfsmout;
3587 }
3588 if (taglen <= NFSV4_SMALLSTR)
3589 tagstr = tag;
3590 else
3591 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3592 error = nfsrv_mtostr(nd, tagstr, taglen);
3593 if (error) {
3594 if (taglen > NFSV4_SMALLSTR)
3595 free(tagstr, M_TEMP);
3596 taglen = -1;
3597 goto nfsmout;
3598 }
3599 (void) nfsm_strtom(nd, tag, taglen);
3600 if (taglen > NFSV4_SMALLSTR) {
3601 free(tagstr, M_TEMP);
3602 }
3603 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3604 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3605 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3606 if (minorvers != NFSV4_MINORVERSION &&
3607 minorvers != NFSV41_MINORVERSION &&
3608 minorvers != NFSV42_MINORVERSION)
3609 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3610 cbident = fxdr_unsigned(u_int32_t, *tl++);
3611 if (nd->nd_repstat)
3612 numops = 0;
3613 else
3614 numops = fxdr_unsigned(int, *tl);
3615 /*
3616 * Loop around doing the sub ops.
3617 */
3618 for (i = 0; i < numops; i++) {
3619 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3620 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3621 *repp++ = *tl;
3622 op = fxdr_unsigned(int, *tl);
3623 nd->nd_procnum = op;
3624 if (i == 0 && op != NFSV4OP_CBSEQUENCE && minorvers !=
3625 NFSV4_MINORVERSION) {
3626 nd->nd_repstat = NFSERR_OPNOTINSESS;
3627 *repp = nfscl_errmap(nd, minorvers);
3628 retops++;
3629 break;
3630 }
3631 if (op < NFSV4OP_CBGETATTR ||
3632 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3633 (op > NFSV4OP_CBNOTIFYDEVID &&
3634 minorvers == NFSV41_MINORVERSION) ||
3635 (op > NFSV4OP_CBOFFLOAD &&
3636 minorvers == NFSV42_MINORVERSION)) {
3637 nd->nd_repstat = NFSERR_OPILLEGAL;
3638 *repp = nfscl_errmap(nd, minorvers);
3639 retops++;
3640 break;
3641 }
3642 if (op < NFSV42_CBNOPS)
3643 nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
3644 switch (op) {
3645 case NFSV4OP_CBGETATTR:
3646 NFSCL_DEBUG(4, "cbgetattr\n");
3647 mp = NULL;
3648 vp = NULL;
3649 error = nfsm_getfh(nd, &nfhp);
3650 if (!error)
3651 error = nfsrv_getattrbits(nd, &attrbits,
3652 NULL, NULL);
3653 if (!error) {
3654 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3655 &clp);
3656 if (mp == NULL)
3657 error = NFSERR_SERVERFAULT;
3658 }
3659 if (!error) {
3660 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3661 nfhp->nfh_len, p, &np);
3662 if (!error)
3663 vp = NFSTOV(np);
3664 }
3665 if (!error) {
3666 NFSZERO_ATTRBIT(&rattrbits);
3667 NFSLOCKCLSTATE();
3668 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3669 nfhp->nfh_len);
3670 if (dp != NULL) {
3671 if (NFSISSET_ATTRBIT(&attrbits,
3672 NFSATTRBIT_SIZE)) {
3673 if (vp != NULL)
3674 va.va_size = np->n_size;
3675 else
3676 va.va_size =
3677 dp->nfsdl_size;
3678 NFSSETBIT_ATTRBIT(&rattrbits,
3679 NFSATTRBIT_SIZE);
3680 }
3681 if (NFSISSET_ATTRBIT(&attrbits,
3682 NFSATTRBIT_CHANGE)) {
3683 va.va_filerev =
3684 dp->nfsdl_change;
3685 if (vp == NULL ||
3686 (np->n_flag & NDELEGMOD))
3687 va.va_filerev++;
3688 NFSSETBIT_ATTRBIT(&rattrbits,
3689 NFSATTRBIT_CHANGE);
3690 }
3691 } else
3692 error = NFSERR_SERVERFAULT;
3693 NFSUNLOCKCLSTATE();
3694 }
3695 if (vp != NULL)
3696 vrele(vp);
3697 if (mp != NULL)
3698 vfs_unbusy(mp);
3699 if (nfhp != NULL)
3700 free(nfhp, M_NFSFH);
3701 if (!error)
3702 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3703 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3704 (uint64_t)0, NULL);
3705 break;
3706 case NFSV4OP_CBRECALL:
3707 NFSCL_DEBUG(4, "cbrecall\n");
3708 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3709 NFSX_UNSIGNED);
3710 stateid.seqid = *tl++;
3711 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3712 NFSX_STATEIDOTHER);
3713 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3714 trunc = fxdr_unsigned(int, *tl);
3715 error = nfsm_getfh(nd, &nfhp);
3716 if (!error) {
3717 NFSLOCKCLSTATE();
3718 if (minorvers == NFSV4_MINORVERSION)
3719 clp = nfscl_getclnt(cbident);
3720 else
3721 clp = nfscl_getclntsess(sessionid);
3722 if (clp != NULL)
3723 nfscl_startdelegrecall(clp, nfhp);
3724 else
3725 error = NFSERR_SERVERFAULT;
3726 NFSUNLOCKCLSTATE();
3727 }
3728 if (nfhp != NULL)
3729 free(nfhp, M_NFSFH);
3730 break;
		case NFSV4OP_CBLAYOUTRECALL:
			NFSCL_DEBUG(4, "cblayrec\n");
			nfhp = NULL;
			NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
			laytype = fxdr_unsigned(int, *tl++);
			iomode = fxdr_unsigned(uint32_t, *tl++);
			if (newnfs_true == *tl++)
				changed = 1;
			else
				changed = 0;
			recalltype = fxdr_unsigned(int, *tl);
			NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
			    laytype, iomode, changed, recalltype);
			recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
			    M_WAITOK);
			if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
			    laytype != NFSLAYOUT_FLEXFILE)
				error = NFSERR_NOMATCHLAYOUT;
			else if (recalltype == NFSLAYOUTRETURN_FILE) {
				error = nfsm_getfh(nd, &nfhp);
				NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
				if (error != 0)
					goto nfsmout;
				NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
				    NFSX_STATEID);
				off = fxdr_hyper(tl); tl += 2;
				len = fxdr_hyper(tl); tl += 2;
				stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
				NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
				if (minorvers == NFSV4_MINORVERSION)
					error = NFSERR_NOTSUPP;
				NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
				    (uintmax_t)off, (uintmax_t)len,
				    stateid.seqid, error);
				if (error == 0) {
					NFSLOCKCLSTATE();
					clp = nfscl_getclntsess(sessionid);
					NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
					if (clp != NULL) {
						lyp = nfscl_findlayout(clp,
						    nfhp->nfh_fh,
						    nfhp->nfh_len);
						NFSCL_DEBUG(4, "cblyp=%p\n",
						    lyp);
						if (lyp != NULL &&
						    (lyp->nfsly_flags &
						     (NFSLY_FILES |
						      NFSLY_FLEXFILE)) != 0 &&
						    !NFSBCMP(stateid.other,
						    lyp->nfsly_stateid.other,
						    NFSX_STATEIDOTHER)) {
							error =
							    nfscl_layoutrecall(
							    recalltype,
							    lyp, iomode, off,
							    len, stateid.seqid,
							    0, 0, NULL,
							    recallp);
							if (error == 0 &&
							    stateid.seqid >
							    lyp->nfsly_stateid.seqid)
								lyp->nfsly_stateid.seqid =
								    stateid.seqid;
							recallp = NULL;
							wakeup(clp);
							NFSCL_DEBUG(4,
							    "aft layrcal=%d "
							    "layseqid=%d\n",
							    error,
							    lyp->nfsly_stateid.seqid);
						} else
							error =
							    NFSERR_NOMATCHLAYOUT;
					} else
						error = NFSERR_NOMATCHLAYOUT;
					NFSUNLOCKCLSTATE();
				}
				free(nfhp, M_NFSFH);
			} else if (recalltype == NFSLAYOUTRETURN_FSID) {
				NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
				filesid[0] = fxdr_hyper(tl); tl += 2;
				filesid[1] = fxdr_hyper(tl); tl += 2;
				gotone = 0;
				NFSLOCKCLSTATE();
				clp = nfscl_getclntsess(sessionid);
				if (clp != NULL) {
					TAILQ_FOREACH(lyp, &clp->nfsc_layout,
					    nfsly_list) {
						if (lyp->nfsly_filesid[0] ==
						    filesid[0] &&
						    lyp->nfsly_filesid[1] ==
						    filesid[1]) {
							error =
							    nfscl_layoutrecall(
							    recalltype,
							    lyp, iomode, 0,
							    UINT64_MAX,
							    lyp->nfsly_stateid.seqid,
							    0, 0, NULL,
							    recallp);
							recallp = NULL;
							gotone = 1;
						}
					}
					if (gotone != 0)
						wakeup(clp);
					else
						error = NFSERR_NOMATCHLAYOUT;
				} else
					error = NFSERR_NOMATCHLAYOUT;
				NFSUNLOCKCLSTATE();
			} else if (recalltype == NFSLAYOUTRETURN_ALL) {
				gotone = 0;
				NFSLOCKCLSTATE();
				clp = nfscl_getclntsess(sessionid);
				if (clp != NULL) {
					TAILQ_FOREACH(lyp, &clp->nfsc_layout,
					    nfsly_list) {
						error = nfscl_layoutrecall(
						    recalltype, lyp, iomode, 0,
						    UINT64_MAX,
						    lyp->nfsly_stateid.seqid,
						    0, 0, NULL, recallp);
						recallp = NULL;
						gotone = 1;
					}
					if (gotone != 0)
						wakeup(clp);
					else
						error = NFSERR_NOMATCHLAYOUT;
				} else
					error = NFSERR_NOMATCHLAYOUT;
				NFSUNLOCKCLSTATE();
			} else
				error = NFSERR_NOMATCHLAYOUT;
			if (recallp != NULL) {
				free(recallp, M_NFSLAYRECALL);
				recallp = NULL;
			}
			break;
		case NFSV4OP_CBSEQUENCE:
			if (i != 0) {
				error = NFSERR_SEQUENCEPOS;
				break;
			}
			NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
			    5 * NFSX_UNSIGNED);
			bcopy(tl, sessionid, NFSX_V4SESSIONID);
			tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
			seqid = fxdr_unsigned(uint32_t, *tl++);
			slotid = fxdr_unsigned(uint32_t, *tl++);
			highslot = fxdr_unsigned(uint32_t, *tl++);
			cachethis = *tl++;
			/* Throw away the referring call stuff. */
			clist = fxdr_unsigned(int, *tl);
			for (j = 0; j < clist; j++) {
				NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
				    NFSX_UNSIGNED);
				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
				rcalls = fxdr_unsigned(int, *tl);
				for (k = 0; k < rcalls; k++) {
					NFSM_DISSECT(tl, uint32_t *,
					    2 * NFSX_UNSIGNED);
				}
			}
			NFSLOCKCLSTATE();
			clp = nfscl_getclntsess(sessionid);
			if (clp == NULL)
				error = NFSERR_SERVERFAULT;
			if (error == 0) {
				tsep = nfsmnt_mdssession(clp->nfsc_nmp);
				error = nfsv4_seqsession(seqid, slotid,
				    highslot, tsep->nfsess_cbslots, &rep,
				    tsep->nfsess_backslots);
			}
			NFSUNLOCKCLSTATE();
			if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
				gotseq_ok = 1;
				if (rep != NULL) {
					/*
					 * Handle a reply for a retried
					 * callback. The reply will be
					 * re-inserted in the session cache
					 * by the nfsv4_seqsess_cacherep() call
					 * after out:
					 */
					KASSERT(error == NFSERR_REPLYFROMCACHE,
					    ("cbsequence: non-NULL rep"));
					NFSCL_DEBUG(4, "Got cbretry\n");
					m_freem(nd->nd_mreq);
					nd->nd_mreq = rep;
					rep = NULL;
					goto out;
				}
				NFSM_BUILD(tl, uint32_t *,
				    NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
				bcopy(sessionid, tl, NFSX_V4SESSIONID);
				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
				*tl++ = txdr_unsigned(seqid);
				*tl++ = txdr_unsigned(slotid);
				*tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
				*tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
			}
			break;
		case NFSV4OP_CBRECALLSLOT:
			NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
			highslot = fxdr_unsigned(uint32_t, *tl);
			NFSLOCKCLSTATE();
			clp = nfscl_getclntsess(sessionid);
			if (clp == NULL)
				error = NFSERR_SERVERFAULT;
			if (error == 0) {
				tsep = nfsmnt_mdssession(clp->nfsc_nmp);
				mtx_lock(&tsep->nfsess_mtx);
				if ((highslot + 1) < tsep->nfsess_foreslots) {
					tsep->nfsess_foreslots = (highslot + 1);
					nfs_resetslots(tsep);
				}
				mtx_unlock(&tsep->nfsess_mtx);
			}
			NFSUNLOCKCLSTATE();
			break;
		case NFSV4OP_CBRECALLANY:
			NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
			i = fxdr_unsigned(int, *tl++);
			j = fxdr_unsigned(int, *tl);
			if (i < 0 || j != 1)
				error = NFSERR_BADXDR;
			if (error == 0) {
				NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
				j = fxdr_unsigned(int, *tl);
				if (i < 100)
					i = 100;
				else if (i > 100000)
					i = 100000;
				NFSLOCKCLSTATE();
				clp = nfscl_getclntsess(sessionid);
				if (clp == NULL)
					error = NFSERR_SERVERFAULT;
				if (((j & NFSRCA4_RDATA_DLG) != 0 ||
				     (j & NFSRCA4_WDATA_DLG) != 0) &&
				    error == 0 && i <
				    clp->nfsc_deleghighwater)
					clp->nfsc_deleghighwater = i;
				if (error == 0 &&
				    ((!NFSHASFLEXFILE(clp->nfsc_nmp) &&
				      (j & NFSRCA4_FILE_LAYOUT) != 0 &&
				      i < clp->nfsc_layouthighwater) ||
				     (NFSHASFLEXFILE(clp->nfsc_nmp) &&
				      (j & (NFSRCA4_FF_LAYOUT_READ |
				       NFSRCA4_FF_LAYOUT_RW)) != 0 &&
				      i < clp->nfsc_layouthighwater)))
					clp->nfsc_layouthighwater = i;
				NFSUNLOCKCLSTATE();
			}
			break;
		case NFSV4OP_CBNOTIFY:
		case NFSV4OP_CBRECALLOBJAVAIL:
		case NFSV4OP_CBNOTIFYLOCK:
			/*
			 * These callbacks are not necessarily optional,
			 * so I think it is better to reply NFS_OK than
			 * NFSERR_NOTSUPP.
			 * All provide information for which the FreeBSD client
			 * does not currently have a use.
			 * I am not sure if any of these could be generated
			 * by an NFSv4.1/4.2 server for this client?
			 */
			error = 0;
			NFSCL_DEBUG(1, "unsupp callback %d\n", op);
			break;
		case NFSV4OP_CBPUSHDELEG:
			error = NFSERR_REJECTDELEG;
			NFSCL_DEBUG(1, "unsupp callback %d\n", op);
			break;
		default:
			if (i == 0 && minorvers != NFSV4_MINORVERSION)
				error = NFSERR_OPNOTINSESS;
			else {
				NFSCL_DEBUG(1, "unsupp callback %d\n", op);
				error = NFSERR_NOTSUPP;
			}
			break;
		}
		if (error) {
			if (error == EBADRPC || error == NFSERR_BADXDR) {
				nd->nd_repstat = NFSERR_BADXDR;
			} else {
				nd->nd_repstat = error;
			}
			error = 0;
		}
		retops++;
		if (nd->nd_repstat) {
			*repp = nfscl_errmap(nd, minorvers);
			break;
		} else
			*repp = 0;	/* NFS4_OK */
	}
nfsmout:
	if (recallp != NULL)
		free(recallp, M_NFSLAYRECALL);
	if (error) {
		if (error == EBADRPC || error == NFSERR_BADXDR)
			nd->nd_repstat = NFSERR_BADXDR;
		else
			printf("nfsv4 comperr1=%d\n", error);
	}
	if (taglen == -1) {
		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
		*tl++ = 0;
		*tl = 0;
	} else {
		*retopsp = txdr_unsigned(retops);
	}
	*nd->nd_errp = nfscl_errmap(nd, minorvers);
out:
	if (gotseq_ok != 0) {
		rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
		NFSLOCKCLSTATE();
		clp = nfscl_getclntsess(sessionid);
		if (clp != NULL) {
			tsep = nfsmnt_mdssession(clp->nfsc_nmp);
			nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
			    NFSERR_OK, &rep);
			NFSUNLOCKCLSTATE();
		} else {
			NFSUNLOCKCLSTATE();
			m_freem(rep);
		}
	}
}
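
/*
 * For reference, a sketch of the callback compound that the loop above
 * parses (wire layout per RFC 5661; not generated by this code):
 *
 *	tag, minorversion, callback_ident, numops,
 *	op[0] = CB_SEQUENCE (mandatory first op for 4.1/4.2),
 *	op[1..n] = CB_RECALL, CB_LAYOUTRECALL, ...
 *
 * This is why a 4.1/4.2 compound whose first op is not CB_SEQUENCE gets
 * NFSERR_OPNOTINSESS and why CB_SEQUENCE in any later position gets
 * NFSERR_SEQUENCEPOS.
 */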

/*
 * Generate the next cbident value. Basically just increment a static value
 * and then check that it isn't already in the list, if it has wrapped around.
 */
static u_int32_t
nfscl_nextcbident(void)
{
	struct nfsclclient *clp;
	int matched;
	static u_int32_t nextcbident = 0;
	static int haswrapped = 0;

	nextcbident++;
	if (nextcbident == 0)
		haswrapped = 1;
	if (haswrapped) {
		/*
		 * Search the clientid list for one already using this cbident.
		 */
		do {
			matched = 0;
			NFSLOCKCLSTATE();
			LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
				if (clp->nfsc_cbident == nextcbident) {
					matched = 1;
					break;
				}
			}
			NFSUNLOCKCLSTATE();
			if (matched == 1)
				nextcbident++;
		} while (matched);
	}
	return (nextcbident);
}

/*
 * Get the mount point related to a given cbident or session and busy it.
 */
static mount_t
nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
    struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	mount_t mp;
	int error;
	struct nfsclsession *tsep;

	*clpp = NULL;
	NFSLOCKCLSTATE();
	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
		tsep = nfsmnt_mdssession(clp->nfsc_nmp);
		if (minorvers == NFSV4_MINORVERSION) {
			if (clp->nfsc_cbident == cbident)
				break;
		} else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
		    NFSX_V4SESSIONID))
			break;
	}
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (NULL);
	}
	mp = clp->nfsc_nmp->nm_mountp;
	vfs_ref(mp);
	NFSUNLOCKCLSTATE();
	error = vfs_busy(mp, 0);
	vfs_rel(mp);
	if (error != 0)
		return (NULL);
	*clpp = clp;
	return (mp);
}
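
/*
 * The vfs_ref()/vfs_busy()/vfs_rel() sequence above is the usual pattern
 * for busying a mount point found while holding a mutex: take a hold
 * count under the lock, drop the lock, then convert the hold into a busy
 * reference (vfs_busy() can sleep) and release the temporary hold.
 */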

/*
 * Get the clientid pointer related to a given cbident.
 */
static struct nfsclclient *
nfscl_getclnt(u_int32_t cbident)
{
	struct nfsclclient *clp;

	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
		if (clp->nfsc_cbident == cbident)
			break;
	return (clp);
}

/*
 * Get the clientid pointer related to a given sessionid.
 */
static struct nfsclclient *
nfscl_getclntsess(uint8_t *sessionid)
{
	struct nfsclclient *clp;
	struct nfsclsession *tsep;

	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
		tsep = nfsmnt_mdssession(clp->nfsc_nmp);
		if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
		    NFSX_V4SESSIONID))
			break;
	}
	return (clp);
}

/*
 * Search for a lock conflict locally on the client. A conflict occurs if
 * - the lock is not owned by the same owner, the byte ranges overlap and
 *   at least one of the locks is a write lock or this is an unlock.
 */
static int
nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
    struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
    struct nfscllock **lopp)
{
	struct nfsclopen *op;
	int ret;

	if (dp != NULL) {
		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
		if (ret)
			return (ret);
	}
	LIST_FOREACH(op, NFSCLOPENHASH(clp, fhp, fhlen), nfso_hash) {
		if (op->nfso_fhlen == fhlen &&
		    !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
			ret = nfscl_checkconflict(&op->nfso_lock, nlop,
			    own, lopp);
			if (ret)
				return (ret);
		}
	}
	return (0);
}

static int
nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
    u_int8_t *own, struct nfscllock **lopp)
{
	struct nfscllockowner *lp;
	struct nfscllock *lop;

	LIST_FOREACH(lp, lhp, nfsl_list) {
		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= nlop->nfslo_end)
					break;
				if (lop->nfslo_end <= nlop->nfslo_first)
					continue;
				if (lop->nfslo_type == F_WRLCK ||
				    nlop->nfslo_type == F_WRLCK ||
				    nlop->nfslo_type == F_UNLCK) {
					if (lopp != NULL)
						*lopp = lop;
					return (NFSERR_DENIED);
				}
			}
		}
	}
	return (0);
}
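
/*
 * Worked example for nfscl_checkconflict() (values illustrative): if
 * another owner holds a write lock over bytes [0, 100) and nlop is a
 * read lock over [50, 60), the ranges overlap and one of the two is a
 * write lock, so NFSERR_DENIED is returned with *lopp set to the held
 * lock. A read lock over [100, 200) would not conflict, since
 * lop->nfslo_end <= nlop->nfslo_first means the ranges do not overlap.
 */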

/*
 * Check for a local conflicting lock.
 */
int
nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
    u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
{
	struct nfscllock *lop, nlck;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error;

	nlck.nfslo_type = fl->l_type;
	nlck.nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlck.nfslo_end = NFS64BITSSET;
	} else {
		nlck.nfslo_end = off + len;
		if (nlck.nfslo_end <= nlck.nfslo_first)
			return (NFSERR_INVAL);
	}
	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
	    &nlck, own, dp, &lop);
	if (error != 0) {
		fl->l_whence = SEEK_SET;
		fl->l_start = lop->nfslo_first;
		if (lop->nfslo_end == NFS64BITSSET)
			fl->l_len = 0;
		else
			fl->l_len = lop->nfslo_end - lop->nfslo_first;
		fl->l_pid = (pid_t)0;
		fl->l_type = lop->nfslo_type;
		error = -1;	/* no RPC required */
	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
	    fl->l_type == F_RDLCK)) {
		/*
		 * The delegation ensures that there isn't a conflicting
		 * lock on the server, so return -1 to indicate an RPC
		 * isn't required.
		 */
		fl->l_type = F_UNLCK;
		error = -1;
	}
	NFSUNLOCKCLSTATE();
	return (error);
}
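
/*
 * Caller sketch (an assumption for illustration; the advisory-lock
 * vnode operation is the real caller): an F_GETLK request might do
 *
 *	error = nfscl_lockt(vp, clp, off, len, fl, p, id, flags);
 *	if (error == -1)
 *		error = 0;		(fl was filled in locally)
 *	else if (error == 0)
 *		error = nfsrpc_lockt(...);	(must ask the server)
 */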

/*
 * Handle Recall of a delegation.
 * The clp must be exclusive locked when this is called.
 */
static int
nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
    int called_from_renewthread, vnode_t *vpp)
{
	struct nfsclowner *owp, *lowp, *nowp;
	struct nfsclopen *op, *lop;
	struct nfscllockowner *lp;
	struct nfscllock *lckp;
	struct nfsnode *np;
	int error = 0, ret;

	if (vp == NULL) {
		KASSERT(vpp != NULL, ("nfscl_recalldeleg: vpp NULL"));
		*vpp = NULL;
		/*
		 * First, get a vnode for the file. This is needed to do RPCs.
		 */
		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
		    dp->nfsdl_fhlen, p, &np);
		if (ret) {
			/*
			 * File isn't open, so nothing to move over to the
			 * server.
			 */
			return (0);
		}
		vp = NFSTOV(np);
		*vpp = vp;
	} else {
		np = VTONFS(vp);
	}
	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;

	/*
	 * Ok, if it's a write delegation, flush data to the server, so
	 * that close/open consistency is retained.
	 */
	ret = 0;
	NFSLOCKNODE(np);
	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
		np->n_flag |= NDELEGRECALL;
		NFSUNLOCKNODE(np);
		ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
		NFSLOCKNODE(np);
		np->n_flag &= ~NDELEGRECALL;
	}
	NFSINVALATTRCACHE(np);
	NFSUNLOCKNODE(np);
	if (ret == EIO && called_from_renewthread != 0) {
		/*
		 * If the flush failed with EIO for the renew thread,
		 * return now, so that the dirty buffer will be flushed
		 * later.
		 */
		return (ret);
	}

	/*
	 * Now, for each openowner with opens issued locally, move them
	 * over to state against the server.
	 */
	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
		lop = LIST_FIRST(&lowp->nfsow_open);
		if (lop != NULL) {
			if (LIST_NEXT(lop, nfso_list) != NULL)
				panic("nfsdlg mult opens");
			/*
			 * Look for the same openowner against the server.
			 */
			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
				if (!NFSBCMP(lowp->nfsow_owner,
				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
					newnfs_copycred(&dp->nfsdl_cred, cred);
					ret = nfscl_moveopen(vp, clp, nmp, lop,
					    owp, dp, cred, p);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER ||
					    ret == NFSERR_BADSESSION)
						return (ret);
					if (ret) {
						nfscl_freeopen(lop, 1, true);
						if (!error)
							error = ret;
					}
					break;
				}
			}

			/*
			 * If no openowner found, create one and get an open
			 * for it.
			 */
			if (owp == NULL) {
				nowp = malloc(
				    sizeof (struct nfsclowner), M_NFSCLOWNER,
				    M_WAITOK);
				nfscl_newopen(clp, NULL, &owp, &nowp, &op,
				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
				    dp->nfsdl_fhlen, NULL, NULL);
				newnfs_copycred(&dp->nfsdl_cred, cred);
				ret = nfscl_moveopen(vp, clp, nmp, lop,
				    owp, dp, cred, p);
				if (ret) {
					nfscl_freeopenowner(owp, 0);
					if (ret == NFSERR_STALECLIENTID ||
					    ret == NFSERR_STALEDONTRECOVER ||
					    ret == NFSERR_BADSESSION)
						return (ret);
					if (ret) {
						nfscl_freeopen(lop, 1, true);
						if (!error)
							error = ret;
					}
				}
			}
		}
	}

	/*
	 * Now, get byte range locks for any locks done locally.
	 */
	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
			newnfs_copycred(&dp->nfsdl_cred, cred);
			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
			if (ret == NFSERR_STALESTATEID ||
			    ret == NFSERR_STALEDONTRECOVER ||
			    ret == NFSERR_STALECLIENTID ||
			    ret == NFSERR_BADSESSION)
				return (ret);
			if (ret && !error)
				error = ret;
		}
	}
	return (error);
}

/*
 * Move a locally issued open over to an owner on the state list.
 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
 * returns with it unlocked.
 */
static int
nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclopen *op, *nop;
	struct nfscldeleg *ndp;
	struct nfsnode *np;
	int error = 0, newone;

	/*
	 * First, look for an appropriate open. If found, just increment the
	 * opencnt in it.
	 */
	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
		    op->nfso_fhlen == lop->nfso_fhlen &&
		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
			op->nfso_opencnt += lop->nfso_opencnt;
			nfscl_freeopen(lop, 1, true);
			return (0);
		}
	}

	/* No appropriate open, so we have to do one against the server. */
	np = VTONFS(vp);
	nop = malloc(sizeof (struct nfsclopen) +
	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	nop->nfso_hash.le_prev = NULL;
	newone = 0;
	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
	    lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
	ndp = dp;
	if (NFSHASNFSV4N(nmp))
		error = nfscl_tryopen(nmp, vp, lop->nfso_fh, lop->nfso_fhlen,
		    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
		    NULL, 0, &ndp, 0, 0, cred, p);
	else
		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
		    np->n_v4->n4_fhlen, lop->nfso_fh, lop->nfso_fhlen,
		    lop->nfso_mode, op, NFS4NODENAME(np->n_v4),
		    np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
	if (error) {
		if (newone)
			nfscl_freeopen(op, 0, true);
	} else {
		op->nfso_mode |= lop->nfso_mode;
		op->nfso_opencnt += lop->nfso_opencnt;
		nfscl_freeopen(lop, 1, true);
	}
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (ndp != NULL) {
		/*
		 * What should I do with the returned delegation, since the
		 * delegation is being recalled? For now, just printf and
		 * throw it away.
		 */
		printf("Moveopen returned deleg\n");
		free(ndp, M_NFSCLDELEG);
	}
	return (error);
}

/*
 * Recall all delegations on this client.
 */
static void
nfscl_totalrecall(struct nfsclclient *clp)
{
	struct nfscldeleg *dp;

	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
			dp->nfsdl_flags |= NFSCLDL_RECALL;
	}
}

/*
 * Relock byte ranges. Called for delegation recall and state expiry.
 */
static int
nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
    struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
    NFSPROC_T *p)
{
	struct nfscllockowner *nlp;
	struct nfsfh *nfhp;
	struct nfsnode *np;
	u_int64_t off, len;
	int error, newone, donelocally;

	if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp)) {
		np = VTONFS(vp);
		NFSLOCKNODE(np);
		np->n_flag |= NMIGHTBELOCKED;
		NFSUNLOCKNODE(np);
	}

	off = lop->nfslo_first;
	len = lop->nfslo_end - lop->nfslo_first;
	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
	    clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
	    lp->nfsl_openowner, &nlp, &newone, &donelocally);
	if (error || donelocally)
		return (error);
	nfhp = VTONFS(vp)->n_fhp;
	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
	    nfhp->nfh_len, nlp, newone, 0, off,
	    len, lop->nfslo_type, cred, p);
	if (error)
		nfscl_freelockowner(nlp, 0);
	return (error);
}

/*
 * Called to re-open a file. Basically get a vnode for the file handle
 * and then call nfsrpc_openrpc() to do the rest.
 */
static int
nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
    u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsnode *np;
	vnode_t vp;
	int error;

	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
	if (error)
		return (error);
	vp = NFSTOV(np);
	if (NFSHASNFSV4N(nmp))
		error = nfscl_tryopen(nmp, vp, fhp, fhlen, fhp, fhlen, mode, op,
		    NULL, 0, dpp, 0, 0, cred, p);
	else if (np->n_v4 != NULL)
		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
		    cred, p);
	else
		error = EINVAL;
	vrele(vp);
	return (error);
}
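
/*
 * The nfscl_try*() helpers below all share one retry discipline, shown
 * here as a sketch:
 *
 *	do {
 *		error = nfsrpc_xxxrpc(...);
 *		if (error == NFSERR_DELAY)
 *			(void) nfs_catnap(PZERO, error, "nfstryxx");
 *	} while (error == NFSERR_DELAY);
 *	if (error == EAUTH || error == EACCES) {
 *		newnfs_setroot(cred);
 *		... run the same loop once more with system credentials ...
 *	}
 *
 * NFSERR_DELAY is the server asking the client to back off and retry.
 * EAUTH/EACCES can happen when the passed in credentials no longer
 * work (e.g. the process has exited), so system credentials are tried.
 */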

/*
 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
 * fail.
 */
static int
nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
    u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
    int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
{
	int error;
	struct nfscldeleg *dp;

	dp = *ndpp;
	do {
		*ndpp = dp;	/* *ndpp needs to be set for retries. */
		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
		    0, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstryop");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			*ndpp = dp;	/* *ndpp needs to be set for retries. */
			error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
			    newfhlen, mode, op, name, namelen, ndpp, reclaim,
			    delegtype, cred, p, 1, 0);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstryop");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}

/*
 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
 * NFSERR_DELAY. Also, retry with system credentials, if the provided
 * cred doesn't work.
 */
static int
nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
    int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
    u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;
	int error;

	do {
		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
		    reclaim, off, len, type, cred, p, 0);
		if (!error && nd->nd_repstat == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
			    "nfstrylck");
	} while (!error && nd->nd_repstat == NFSERR_DELAY);
	if (!error)
		error = nd->nd_repstat;
	if (error == EAUTH || error == EACCES) {
		/* Try again using root credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
			    newone, reclaim, off, len, type, cred, p, 1);
			if (!error && nd->nd_repstat == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
				    "nfstrylck");
		} while (!error && nd->nd_repstat == NFSERR_DELAY);
		if (!error)
			error = nd->nd_repstat;
	}
	return (error);
}

/*
 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
 * credentials fail.
 */
int
nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
    struct nfsmount *nmp, NFSPROC_T *p)
{
	int error;

	do {
		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstrydp");
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstrydp");
		} while (error == NFSERR_DELAY);
	}
	return (error);
}

/*
 * Try a close against the server. Just call nfsrpc_closerpc(),
 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
 * credentials fail.
 */
int
nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
    struct nfsmount *nmp, NFSPROC_T *p, bool loop_on_delayed)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;
	int error;

	do {
		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
		if (loop_on_delayed && error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, "nfstrycl");
	} while (loop_on_delayed && error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials */
		newnfs_setroot(cred);
		do {
			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
			if (loop_on_delayed && error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, "nfstrycl");
		} while (loop_on_delayed && error == NFSERR_DELAY);
	}
	return (error);
}

/*
 * Decide if a delegation on a file permits close without flushing writes
 * to the server. This might be a big performance win in some environments.
 * (Not useful until the client does caching on local stable storage.)
 */
int
nfscl_mustflush(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp) || vp->v_type != VREG)
		return (1);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (1);
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (1);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags &
	    (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
	     NFSCLDL_WRITE &&
	    (dp->nfsdl_sizelimit >= np->n_size ||
	     !NFSHASSTRICT3530(nmp))) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();
	return (1);
}
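
/*
 * Caller sketch (illustrative only; the real call sites are in the
 * close/write-back paths elsewhere in the client):
 *
 *	if (nfscl_mustflush(vp) != 0)
 *		error = ncl_flush(vp, MNT_WAIT, p, 1, 0);
 *
 * i.e. the flush on close is only skipped when a write delegation is
 * held, no recall is in progress and the delegation's size limit covers
 * the file (or strict RFC 3530 size handling is disabled).
 */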

/*
 * See if a (write) delegation exists for this file.
 */
int
nfscl_nodeleg(vnode_t vp, int writedeleg)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp) || vp->v_type != VREG)
		return (1);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (1);
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (1);
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
	     NFSCLDL_WRITE)) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	NFSUNLOCKCLSTATE();
	return (1);
}

/*
 * Look for an associated delegation that should be DelegReturned.
 */
int
nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct mount *mp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(vp->v_mount);
	if (NFSHASPNFS(nmp))
		return (retcnt);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (retcnt);
	}
	NFSUNLOCKMNT(nmp);
	np = VTONFS(vp);
	mp = nmp->nm_mountp;
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on vp that has state, lock the client and
	 *   do a recall
	 * - return delegation with no state
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
				    "nfscld", hz);
				if (NFSCL_FORCEDISM(mp)) {
					dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
					NFSUNLOCKCLSTATE();
					return (0);
				}
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, mp);
					if (NFSCL_FORCEDISM(mp)) {
						dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
						if (igotlock)
							nfsv4_unlock(&clp->nfsc_lock, 0);
						NFSUNLOCKCLSTATE();
						return (0);
					}
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0, NULL);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*stp = dp->nfsdl_stateid;
			retcnt = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
		}
		if (igotlock)
			nfsv4_unlock(&clp->nfsc_lock, 0);
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Look for associated delegation(s) that should be DelegReturned.
 */
int
nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
    nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsmount *nmp;
	struct mount *mp;
	struct ucred *cred;
	struct nfsnode *np;
	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

	nmp = VFSTONFS(fvp->v_mount);
	*gotfdp = 0;
	*gottdp = 0;
	if (NFSHASPNFS(nmp))
		return (retcnt);
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return (retcnt);
	}
	NFSUNLOCKMNT(nmp);
	mp = nmp->nm_mountp;
	NFSLOCKCLSTATE();
	/*
	 * Loop around waiting for:
	 * - outstanding I/O operations on delegations to complete
	 * - for a delegation on fvp that has state, lock the client and
	 *   do a recall
	 * - return delegation(s) with no state.
	 */
	while (1) {
		clp = nfscl_findcl(nmp);
		if (clp == NULL) {
			NFSUNLOCKCLSTATE();
			return (retcnt);
		}
		np = VTONFS(fvp);
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		if (dp != NULL && *gotfdp == 0) {
			/*
			 * Wait for outstanding I/O ops to be done.
			 */
			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
				if (igotlock) {
					nfsv4_unlock(&clp->nfsc_lock, 0);
					igotlock = 0;
				}
				dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
				msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
				    "nfscld", hz);
				if (NFSCL_FORCEDISM(mp)) {
					dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
					NFSUNLOCKCLSTATE();
					*gotfdp = 0;
					*gottdp = 0;
					return (0);
				}
				continue;
			}
			needsrecall = 0;
			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
				if (!LIST_EMPTY(&owp->nfsow_open)) {
					needsrecall = 1;
					break;
				}
			}
			if (!needsrecall) {
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						needsrecall = 1;
						break;
					}
				}
			}
			if (needsrecall && !triedrecall) {
				dp->nfsdl_flags |= NFSCLDL_DELEGRET;
				islept = 0;
				while (!igotlock) {
					igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
					    &islept, NFSCLSTATEMUTEXPTR, mp);
					if (NFSCL_FORCEDISM(mp)) {
						dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
						if (igotlock)
							nfsv4_unlock(&clp->nfsc_lock, 0);
						NFSUNLOCKCLSTATE();
						*gotfdp = 0;
						*gottdp = 0;
						return (0);
					}
					if (islept)
						break;
				}
				if (islept)
					continue;
				NFSUNLOCKCLSTATE();
				cred = newnfs_getcred();
				newnfs_copycred(&dp->nfsdl_cred, cred);
				nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0, NULL);
				NFSFREECRED(cred);
				triedrecall = 1;
				NFSLOCKCLSTATE();
				nfsv4_unlock(&clp->nfsc_lock, 0);
				igotlock = 0;
				continue;
			}
			*fstp = dp->nfsdl_stateid;
			retcnt++;
			*gotfdp = 1;
			nfscl_cleandeleg(dp);
			nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
		}
		if (igotlock) {
			nfsv4_unlock(&clp->nfsc_lock, 0);
			igotlock = 0;
		}
		if (tvp != NULL) {
			np = VTONFS(tvp);
			dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
			    np->n_fhp->nfh_len);
			if (dp != NULL && *gottdp == 0) {
				/*
				 * Wait for outstanding I/O ops to be done.
				 */
				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
					dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
					msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
					    "nfscld", hz);
					if (NFSCL_FORCEDISM(mp)) {
						NFSUNLOCKCLSTATE();
						*gotfdp = 0;
						*gottdp = 0;
						return (0);
					}
					continue;
				}
				LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
					if (!LIST_EMPTY(&owp->nfsow_open)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
					if (!LIST_EMPTY(&lp->nfsl_lock)) {
						NFSUNLOCKCLSTATE();
						return (retcnt);
					}
				}
				*tstp = dp->nfsdl_stateid;
				retcnt++;
				*gottdp = 1;
				nfscl_cleandeleg(dp);
				nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
			}
		}
		NFSUNLOCKCLSTATE();
		return (retcnt);
	}
}

/*
 * Get a reference on the clientid associated with the mount point.
 * Return 1 on success, 0 otherwise.
 */
int
nfscl_getref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;
	int ret;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (0);
	}
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, nmp->nm_mountp);
	ret = 1;
	if (NFSCL_FORCEDISM(nmp->nm_mountp))
		ret = 0;
	NFSUNLOCKCLSTATE();
	return (ret);
}

/*
 * Release a reference on a clientid acquired with the above call.
 */
void
nfscl_relref(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}
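
/*
 * Typical bracket for the two functions above (a sketch; the error
 * value shown is only illustrative):
 *
 *	if (nfscl_getref(nmp) == 0)
 *		return (EIO);	(no clientid, or forced dismount)
 *	... work that needs the clientid to remain valid ...
 *	nfscl_relref(nmp);
 */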

/*
 * Save the size attribute in the delegation, since the nfsnode
 * is going away.
 */
void
nfscl_reclaimnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		dp->nfsdl_size = np->n_size;
	NFSUNLOCKCLSTATE();
}

/*
 * Get the saved size attribute from the delegation, since this is a
 * newly allocated nfsnode.
 */
void
nfscl_newnode(vnode_t vp)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
		np->n_size = dp->nfsdl_size;
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file, set the modtime
 * to the local clock time.
 */
void
nfscl_delegmodtime(struct vnode *vp, struct timespec *mtime)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return;
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
		if (mtime != NULL)
			dp->nfsdl_modtime = *mtime;
		else
			nanotime(&dp->nfsdl_modtime);
		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
	}
	NFSUNLOCKCLSTATE();
}

/*
 * If there is a valid write delegation for this file with a modtime set,
 * put that modtime in mtime.
 */
void
nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
{
	struct nfsclclient *clp;
	struct nfscldeleg *dp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	if (!NFSHASNFSV4(nmp))
		return;
	NFSLOCKMNT(nmp);
	if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
		NFSUNLOCKMNT(nmp);
		return;
	}
	NFSUNLOCKMNT(nmp);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return;
	}
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL &&
	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
		*mtime = dp->nfsdl_modtime;
	NFSUNLOCKCLSTATE();
}

static int
nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
{
	short *defaulterrp, *errp;

	if (!nd->nd_repstat)
		return (0);
	if (nd->nd_procnum == NFSPROC_NOOP)
		return (txdr_unsigned(nd->nd_repstat & 0xffff));
	if (nd->nd_repstat == EBADRPC)
		return (txdr_unsigned(NFSERR_BADXDR));
	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
	    nd->nd_repstat == NFSERR_OPILLEGAL)
		return (txdr_unsigned(nd->nd_repstat));
	if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
	    minorvers > NFSV4_MINORVERSION) {
		/* NFSv4.n error. */
		return (txdr_unsigned(nd->nd_repstat));
	}
	if (nd->nd_procnum < NFSV4OP_CBNOPS)
		errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
	else
		return (txdr_unsigned(nd->nd_repstat));
	while (*++errp)
		if (*errp == (short)nd->nd_repstat)
			return (txdr_unsigned(nd->nd_repstat));
	return (txdr_unsigned(*defaulterrp));
}
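
/*
 * Example of the mapping above (illustrative): a CB_GETATTR that fails
 * with an error not listed in its nfscl_cberrmap[] row is replaced by
 * the first (default) entry of that row, so the server always sees an
 * error that is legal for the operation, while errors such as
 * NFSERR_BADXDR or NFSERR_OPILLEGAL pass through unchanged.
 */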

/*
 * Called to find/add a layout to a client.
 * This function returns the layout with a refcnt (shared lock) upon
 * success (returns 0) or with no lock/refcnt on the layout when an
 * error is returned.
 * If a layout is passed in via lypp, it is locked (exclusively locked).
 */
int
nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    nfsv4stateid_t *stateidp, int layouttype, int retonclose,
    struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp, *tlyp;
	struct nfsclflayout *flp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	int layout_passed_in;

	mp = nmp->nm_mountp;
	layout_passed_in = 1;
	tlyp = NULL;
	lyp = *lypp;
	if (lyp == NULL) {
		layout_passed_in = 0;
		tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
		    M_WAITOK | M_ZERO);
	}

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (layout_passed_in != 0)
			nfsv4_unlock(&lyp->nfsly_lock, 0);
		NFSUNLOCKCLSTATE();
		if (tlyp != NULL)
			free(tlyp, M_NFSLAYOUT);
		return (EPERM);
	}
	if (lyp == NULL) {
		/*
		 * Although no lyp was passed in, another thread might have
		 * allocated one. If one is found, just increment its ref
		 * count and return it.
		 */
		lyp = nfscl_findlayout(clp, fhp, fhlen);
		if (lyp == NULL) {
			lyp = tlyp;
			tlyp = NULL;
			lyp->nfsly_stateid.seqid = stateidp->seqid;
			lyp->nfsly_stateid.other[0] = stateidp->other[0];
			lyp->nfsly_stateid.other[1] = stateidp->other[1];
			lyp->nfsly_stateid.other[2] = stateidp->other[2];
			lyp->nfsly_lastbyte = 0;
			LIST_INIT(&lyp->nfsly_flayread);
			LIST_INIT(&lyp->nfsly_flayrw);
			LIST_INIT(&lyp->nfsly_recall);
			lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
			lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
			lyp->nfsly_clp = clp;
			if (layouttype == NFSLAYOUT_FLEXFILE)
				lyp->nfsly_flags = NFSLY_FLEXFILE;
			else
				lyp->nfsly_flags = NFSLY_FILES;
			if (retonclose != 0)
				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
			lyp->nfsly_fhlen = fhlen;
			NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
			    nfsly_hash);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			clp->nfsc_layoutcnt++;
			nfsstatsv1.cllayouts++;
		} else {
			if (retonclose != 0)
				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
			if (stateidp->seqid > lyp->nfsly_stateid.seqid)
				lyp->nfsly_stateid.seqid = stateidp->seqid;
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
		}
		nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
		if (NFSCL_FORCEDISM(mp)) {
			NFSUNLOCKCLSTATE();
			if (tlyp != NULL)
				free(tlyp, M_NFSLAYOUT);
			return (EPERM);
		}
		*lypp = lyp;
	} else if (stateidp->seqid > lyp->nfsly_stateid.seqid)
		lyp->nfsly_stateid.seqid = stateidp->seqid;

	/* Merge the new list of File Layouts into the list. */
	flp = LIST_FIRST(fhlp);
	if (flp != NULL) {
		if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
			nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
		else
			nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
	}
	if (layout_passed_in != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 1);
	NFSUNLOCKCLSTATE();
	if (tlyp != NULL)
		free(tlyp, M_NFSLAYOUT);
	return (0);
}

/*
 * Search for a layout by MDS file handle.
 * If one is found, it is returned with a refcnt (shared lock) iff
 * *retflpp is set non-NULL and exclusively locked iff *retflpp is
 * set NULL.
 */
struct nfscllayout *
nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
    uint64_t off, uint32_t rwaccess, struct nfsclflayout **retflpp,
    int *recalledp)
{
	struct nfscllayout *lyp;
	mount_t mp;
	int error, igotlock;

	mp = clp->nfsc_nmp->nm_mountp;
	*recalledp = 0;
	*retflpp = NULL;
	NFSLOCKCLSTATE();
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			error = nfscl_findlayoutforio(lyp, off, rwaccess,
			    retflpp);
			if (error == 0)
				nfsv4_getref(&lyp->nfsly_lock, NULL,
				    NFSCLSTATEMUTEXPTR, mp);
			else {
				do {
					igotlock = nfsv4_lock(&lyp->nfsly_lock,
					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
				*retflpp = NULL;
			}
			if (NFSCL_FORCEDISM(mp)) {
				lyp = NULL;
				*recalledp = 1;
			}
		} else {
			lyp = NULL;
			*recalledp = 1;
		}
	}
	NFSUNLOCKCLSTATE();
	return (lyp);
}
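
/*
 * Locking contract illustration (a sketch of how a pNFS I/O path might
 * use nfscl_getlayout(); the call site details are assumptions):
 *
 *	lyp = nfscl_getlayout(clp, fh, fhlen, off, NFSV4OPEN_ACCESSWRITE,
 *	    &flp, &recalled);
 *	if (lyp != NULL && flp != NULL) {
 *		... do the DS I/O described by flp ...
 *		nfscl_rellayout(lyp, 0);	(drop the shared refcnt)
 *	} else if (lyp != NULL) {
 *		... no usable file layout; a LayoutGet is needed ...
 *		nfscl_rellayout(lyp, 1);	(drop the exclusive lock)
 *	}
 */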

/*
 * Search for a layout by MDS file handle. If one is found, mark it to be
 * recalled, if it is already marked "return on close".
 */
static void
nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
    int fhlen, struct nfsclrecalllayout **recallpp, struct nfscllayout **lypp)
{
	struct nfscllayout *lyp;
	uint32_t iomode;

	*lypp = NULL;
	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) ||
	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
		return;
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			iomode = 0;
			if (!LIST_EMPTY(&lyp->nfsly_flayread))
				iomode |= NFSLAYOUTIOMODE_READ;
			if (!LIST_EMPTY(&lyp->nfsly_flayrw))
				iomode |= NFSLAYOUTIOMODE_RW;
			nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
			    0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
			    *recallpp);
			NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
			*recallpp = NULL;
		}

		/* Now, wake up renew thread to do LayoutReturn. */
		wakeup(clp);
		*lypp = lyp;
	}
}

/*
 * Mark the layout to be recalled and with an error.
 * Also, disable the dsp from further use.
 */
void
nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
    struct nfscllayout *lyp, struct nfsclds *dsp)
{
	struct nfsclrecalllayout *recallp;
	uint32_t iomode;

	printf("DS being disabled, error=%d\n", stat);
	/* Set up the return of the layout. */
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	iomode = 0;
	NFSLOCKCLSTATE();
	if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
		    dp->nfsdi_deviceid, recallp);
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
	} else {
		NFSUNLOCKCLSTATE();
		free(recallp, M_NFSLAYRECALL);
	}

	/* And shut the TCP connection down. */
	nfscl_cancelreqs(dsp);
}

/*
 * Cancel all RPCs for this "dsp" by closing the connection.
 * Also, mark the session as defunct.
 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
 * cannot be shut down.
 */
void
nfscl_cancelreqs(struct nfsclds *dsp)
{
	struct __rpc_client *cl;
	static int non_event;

	NFSLOCKDS(dsp);
	if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
	    dsp->nfsclds_sockp != NULL &&
	    dsp->nfsclds_sockp->nr_client != NULL) {
		dsp->nfsclds_flags |= NFSCLDS_CLOSED;
		cl = dsp->nfsclds_sockp->nr_client;
		dsp->nfsclds_sess.nfsess_defunct = 1;
		NFSUNLOCKDS(dsp);
		CLNT_CLOSE(cl);
		/*
		 * This 1sec sleep is done to reduce the number of reconnect
		 * attempts made on the DS while it has failed.
		 */
		tsleep(&non_event, PVFS, "ndscls", hz);
		return;
	}
	NFSUNLOCKDS(dsp);
}

/*
 * Dereference a layout.
 */
void
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
{

	NFSLOCKCLSTATE();
	if (exclocked != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 0);
	else
		nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	if (dip == NULL)
		dip = nfscl_finddevinfo(clp, deviceid);
	if (dip != NULL)
		dip->nfsdi_refcnt++;
	NFSUNLOCKCLSTATE();
	return (dip);
}

/*
 * Dereference a devinfo structure.
 */
static void
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
{

	dip->nfsdi_refcnt--;
	if (dip->nfsdi_refcnt == 0)
		wakeup(&dip->nfsdi_refcnt);
}

/*
 * Dereference a devinfo structure.
 */
void
nfscl_reldevinfo(struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	nfscl_reldevinfo_locked(dip);
	NFSUNLOCKCLSTATE();
}

/*
 * Find a layout for this file handle. Return NULL upon failure.
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscllayout *lyp;

	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
		if (lyp->nfsly_fhlen == fhlen &&
		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
			break;
	return (lyp);
}

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
{
	struct nfscldevinfo *dip;

	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
		    == 0)
			break;
	return (dip);
}

/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
static void
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
{
	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

	flp = LIST_FIRST(fhlp);
	prevflp = NULL;
	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
			prevflp = flp;
			flp = LIST_NEXT(flp, nfsfl_list);
		}
		if (prevflp == NULL)
			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
		else
			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
		prevflp = nflp;
	}
}
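
/*
 * Worked example of the merge above, using offsets only (hypothetical
 * values): with the main list at {0, 8192} and a new list of
 * {4096, 16384}, the first pass walks past 0 and inserts 4096 before
 * 8192; the second pass continues from 8192, reaches the tail and
 * inserts 16384 after it, leaving {0, 4096, 8192, 16384}.  Since
 * prevflp is carried forward between passes, entries already walked
 * past are never rescanned.
 */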

/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
 */
int
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
    struct nfsclflayout *flp)
{
	struct nfsclclient *clp;
	struct nfscldevinfo *tdip;
	uint8_t *dev;

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (ENODEV);
	}
	if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
		dev = flp->nfsfl_dev;
	else
		dev = flp->nfsfl_ffm[ind].dev;
	tdip = nfscl_finddevinfo(clp, dev);
	if (tdip != NULL) {
		tdip->nfsdi_layoutrefs++;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = tdip;
		else
			flp->nfsfl_ffm[ind].devp = tdip;
		nfscl_reldevinfo_locked(tdip);
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (0);
	}
	if (dip != NULL) {
		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
		dip->nfsdi_layoutrefs = 1;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = dip;
		else
			flp->nfsfl_ffm[ind].devp = dip;
	}
	NFSUNLOCKCLSTATE();
	if (dip == NULL)
		return (ENODEV);
	return (0);
}
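
/*
 * Sketch of a plausible two-pass caller pattern for nfscl_adddevinfo()
 * (not from the original code; the nfsrpc_getdeviceinfo() arguments
 * shown are assumptions): first try to bind an existing devinfo, and
 * only fetch one from the server when that fails with ENODEV.  Either
 * call consumes "dip" when it is non-NULL.
 */
#if 0
	error = nfscl_adddevinfo(nmp, NULL, ind, flp);
	if (error == ENODEV) {
		error = nfsrpc_getdeviceinfo(nmp, dev, layouttype,
		    &notifybits, &dip, cred, p);
		if (error == 0)
			error = nfscl_adddevinfo(nmp, dip, ind, flp);
	}
#endif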

/*
 * Free up a layout structure and associated file layout structure(s).
 */
void
nfscl_freelayout(struct nfscllayout *layp)
{
	struct nfsclflayout *flp, *nflp;
	struct nfsclrecalllayout *rp, *nrp;

	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
		LIST_REMOVE(rp, nfsrecly_list);
		free(rp, M_NFSLAYRECALL);
	}
	layp->nfsly_clp->nfsc_layoutcnt--;
	nfsstatsv1.cllayouts--;
	free(layp, M_NFSLAYOUT);
}

/*
 * Free up a file layout structure.
 */
void
nfscl_freeflayout(struct nfsclflayout *flp)
{
	int i, j;

	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
		for (i = 0; i < flp->nfsfl_fhcnt; i++)
			free(flp->nfsfl_fh[i], M_NFSFH);
		if (flp->nfsfl_devp != NULL)
			flp->nfsfl_devp->nfsdi_layoutrefs--;
	}
	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
			if (flp->nfsfl_ffm[i].devp != NULL)
				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
		}
	free(flp, M_NFSFLAYOUT);
}

/*
 * Free up a file layout devinfo structure.
 */
void
nfscl_freedevinfo(struct nfscldevinfo *dip)
{

	free(dip, M_NFSDEVINFO);
}

/*
 * Mark any layouts that match as recalled.
 */
static int
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
    char *devid, struct nfsclrecalllayout *recallp)
{
	struct nfsclrecalllayout *rp, *orp;

	recallp->nfsrecly_recalltype = recalltype;
	recallp->nfsrecly_iomode = iomode;
	recallp->nfsrecly_stateseqid = stateseqid;
	recallp->nfsrecly_off = off;
	recallp->nfsrecly_len = len;
	recallp->nfsrecly_stat = stat;
	recallp->nfsrecly_op = op;
	if (devid != NULL)
		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
	/*
	 * Order the list so that file returns come first, followed by the
	 * fsid and "all" returns, each group in increasing stateseqid order.
	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
	 * (I'm not sure this is correct because I find RFC5661 confusing
	 * on this, but hopefully it will work ok.)
	 */
	orp = NULL;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		orp = rp;
		if ((recalltype == NFSLAYOUTRETURN_FILE &&
		    (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
		    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
		    (recalltype != NFSLAYOUTRETURN_FILE &&
		    rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
		    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
			break;
		}

		/*
		 * Put any error return on all the file returns that will
		 * precede this one.
		 */
		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
		    stat != 0 && rp->nfsrecly_stat == 0) {
			rp->nfsrecly_stat = stat;
			rp->nfsrecly_op = op;
			if (devid != NULL)
				NFSBCOPY(devid, rp->nfsrecly_devid,
				    NFSX_V4DEVICEID);
		}
	}
	if (rp == NULL) {
		if (orp == NULL)
			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
			    nfsrecly_list);
		else
			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
	}
	lyp->nfsly_flags |= NFSLY_RECALL;
	wakeup(lyp->nfsly_clp);
	return (0);
}
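
/*
 * Example of the resulting recall list ordering (hypothetical seqids):
 * FILE returns come first in increasing seqid order, followed by the
 * FSID/ALL returns in increasing seqid order, e.g.
 *	FILE(seqid=5), FILE(seqid=6), FSID(seqid=3), ALL(seqid=4)
 */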

/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
static int
nfscl_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		/* seqid2 has wrapped around. */
		return (0);
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		/* seqid1 has wrapped around. */
		return (1);
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}
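
/*
 * Worked examples for nfscl_seq() (values chosen for illustration):
 *	nfscl_seq(1, 2)          == 1	1 precedes 2
 *	nfscl_seq(0xffffffff, 1) == 1	1 is after the wrap, so
 *					0xffffffff precedes it
 *	nfscl_seq(1, 0xffffffff) == 0	by the same wraparound rule
 *	nfscl_seq(7, 7)          == 1	equal seqids count as "before"
 */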

/*
 * Do a layout return for each of the recalls.
 */
static void
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclrecalllayout *rp;
	nfsv4stateid_t stateid;
	int layouttype;

	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
	stateid.seqid = lyp->nfsly_stateid.seqid;
	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
		    lyp->nfsly_fhlen, 0, layouttype,
		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
		    rp->nfsrecly_off, rp->nfsrecly_len,
		    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
		    rp->nfsrecly_devid);
	}
}

/*
 * Do the layout commit for a file layout.
 */
static void
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclflayout *flp;
	uint64_t len;
	int error, layouttype;

	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
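	/*
	 * Walk the RW layout segments and commit any segment that starts
	 * at or below the last byte written; segments entirely beyond
	 * nfsly_lastbyte carried no writes and need no commit.
	 */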
	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
		if (layouttype == NFSLAYOUT_FLEXFILE &&
		    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
			NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
			/* If not supported, don't bother doing it. */
			NFSLOCKMNT(nmp);
			nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
			NFSUNLOCKMNT(nmp);
			break;
		} else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
			len = flp->nfsfl_end - flp->nfsfl_off;
			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
			    layouttype, cred, p);
			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
			if (error == NFSERR_NOTSUPP) {
				/* If not supported, don't bother doing it. */
				NFSLOCKMNT(nmp);
				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
				NFSUNLOCKMNT(nmp);
				break;
			}
		}
	}
}

/*
 * Commit all layouts for a file (vnode).
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
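	/*
	 * Re-test NFSLY_WRITTEN after each commit pass: the state mutex
	 * is dropped while the LayoutCommit RPC is in progress, so more
	 * writes may have set the flag again in the meantime.
	 */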
tryagain:
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Start the recall of a delegation. Called for CB_RECALL and REMOVE
 * when nlink == 0 after the REMOVE.
 */
void
nfscl_startdelegrecall(struct nfsclclient *clp, struct nfsfh *nfhp)
{
	struct nfscldeleg *dp;

	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0) {
		dp->nfsdl_flags |= NFSCLDL_RECALL;
		wakeup((caddr_t)clp);
	}
}

5970