/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <vm/vm_param.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>

#include <nfs/nfs_lock.h>

extern struct vop_vector newnfs_vnodeops;
MALLOC_DECLARE(M_NEWNFSREQ);

uma_zone_t newnfsnode_zone;

const char nfs_vnode_tag[] = "nfs";

static void nfs_freesillyrename(void *arg, __unused int pending);

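/*
 * Create the UMA zone from which nfsnode structures are allocated.
 */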
void
ncl_nhinit(void)
{

	newnfsnode_zone = uma_zcreate("NCLNODE", sizeof(struct nfsnode), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

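/*
 * Destroy the nfsnode UMA zone.
 */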
void
ncl_nhuninit(void)
{
	uma_zdestroy(newnfsnode_zone);
}

/*
 * ONLY USED FOR THE ROOT DIRECTORY.  nfscl_nget() does the rest.  If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	nfhp = malloc(sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	free(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error, no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE |
	    LK_CANRECURSE);

	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct.
	 */
	if (fhsize == NFSX_FHMAX + 1 || (fhsize == nmp->nm_fhsize &&
	    !bcmp(fhp, nmp->nm_fh, fhsize))) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	vp->v_vflag |= VV_VMSIZEVNLOCK;

	np->n_fhp = malloc(sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		free(np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		lockdestroy(&np->n_excl);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vn_set_state(vp, VSTATE_CONSTRUCTED);
	error = vfs_hash_insert(vp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}

/*
 * Do the vrele(sp->s_dvp) as a separate task in order to avoid a
 * deadlock because of a LOR when vrele() locks the directory vnode.
 */
static void
nfs_freesillyrename(void *arg, __unused int pending)
{
	struct sillyrename *sp;

	sp = arg;
	vrele(sp->s_dvp);
	free(sp, M_NEWNFSREQ);
}

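/*
 * Release any pending sillyrename for this vnode: flush its buffers,
 * remove the silly-renamed file from the server and defer the vrele()
 * of the directory vnode to a taskqueue task.  Called and returns with
 * the nfsnode mutex held.
 */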
static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
	struct nfsnode *np;
	struct sillyrename *sp;

	ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
	np = VTONFS(vp);
	NFSASSERTNODE(np);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL) {
		NFSUNLOCKNODE(np);
		(void) ncl_vinvalbuf(vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		NFSLOCKNODE(np);
	}
}

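/*
 * Flush dirty data and perform the delayed NFSv4 Close for regular files,
 * release any pending sillyrename and clear the nfsnode flags that are no
 * longer meaningful once the vnode is unused.
 */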
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np;
	struct thread *td;

	td = curthread;
	np = VTONFS(vp);
	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		NFSLOCKNODE(np);
		np->n_openstateid = NULL;
		NFSUNLOCKNODE(np);
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now.  Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		vnode_pager_clean_sync(vp);
		(void)ncl_flush(vp, MNT_WAIT, td, 1, 0);
		(void)nfsrpc_close(vp, 1, td);
	}

	NFSLOCKNODE(np);
	ncl_releasesillyrename(vp, td);

	/*
	 * NMODIFIED means that there might be dirty/stale buffers
	 * associated with the NFS vnode.
	 * NDSCOMMIT means that the file is on a pNFS server and commits
	 * should be done to the DS.
	 * None of the other flags are meaningful after the vnode is unused.
	 */
	np->n_flag &= (NMODIFIED | NDSCOMMIT);
	NFSUNLOCKNODE(np);
	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;
	struct thread *td;
	struct mount *mp;

	td = curthread;
	mp = vp->v_mount;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	NFSLOCKNODE(np);
	ncl_releasesillyrename(vp, td);

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		np->n_openstateid = NULL;
		NFSUNLOCKNODE(np);
		/*
		 * We can now safely close any remaining NFSv4 Opens for
		 * this file.  Most opens will have already been closed by
		 * ncl_inactive(), but there are cases where it is not
		 * called, so we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, td);
		/*
		 * It is unlikely that a delegation will still exist, but
		 * if one does, it must be returned before calling
		 * vfs_hash_remove(), since it cannot be recalled once the
		 * nfs node is no longer available.
		 */
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0) {
			MNT_IUNLOCK(mp);
			nfscl_delegreturnvp(vp, td);
		} else
			MNT_IUNLOCK(mp);
	} else
		NFSUNLOCKNODE(np);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			free(dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	free(np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		free(np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	lockdestroy(&np->n_excl);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Invalidate both the access and attribute caches for this vnode.
 */
void
ncl_invalcaches(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);
	int i;

	NFSLOCKNODE(np);
	for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
		np->n_accesscache[i].stamp = 0;
	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	np->n_attrstamp = 0;
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	NFSUNLOCKNODE(np);
}