xref: /freebsd/sys/fs/nfsclient/nfs_clnode.c (revision 1de7b4b805ddbf2429da511c053686ac4591ed89)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>

#include <nfs/nfs_lock.h>

extern struct vop_vector newnfs_vnodeops;
extern struct buf_ops buf_ops_newnfs;
MALLOC_DECLARE(M_NEWNFSREQ);

uma_zone_t newnfsnode_zone;

const char nfs_vnode_tag[] = "nfs";

static void	nfs_freesillyrename(void *arg, __unused int pending);

void
ncl_nhinit(void)
{

	newnfsnode_zone = uma_zcreate("NCLNODE", sizeof(struct nfsnode), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

void
ncl_nhuninit(void)
{
	uma_zdestroy(newnfsnode_zone);
}

/*
 * Look up a vnode/nfsnode by file handle and return a pointer to the
 * nfsnode via *npp.
 * ONLY USED FOR THE ROOT DIRECTORY; nfscl_nget() does the rest.  If this
 * function is ever used to get regular files, code must be added to fill
 * in the "struct nfsv4node".
 * Callers must check for mount points!!
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then destroy the
	 * mutex unconditionally; no special casing is needed for the loser
	 * or for a vfs_hash_insert() failure.
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE |
	    LK_CANRECURSE);

	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root?  If so, make sure the vnode flags
	 * are correct.
	 */
	if ((fhsize == nmp->nm_fhsize) &&
	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		lockdestroy(&np->n_excl);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
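
/*
 * A minimal usage sketch of ncl_nget(): roughly how a caller such as the
 * client's VFS_ROOT() implementation can obtain the root vnode.  The
 * example_root() name is hypothetical and the real caller (in
 * nfs_clvfsops.c) may differ in detail; only ncl_nget(), VFSTONFS() and
 * NFSTOV() from this client are assumed.
 *
 *	static int
 *	example_root(struct mount *mp, int flags, struct vnode **vpp)
 *	{
 *		struct nfsmount *nmp = VFSTONFS(mp);
 *		struct nfsnode *np;
 *		int error;
 *
 *		error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, flags);
 *		if (error != 0)
 *			return (error);
 *		*vpp = NFSTOV(np);
 *		return (0);
 *	}
 *
 * The vnode comes back locked according to the lkflags passed through to
 * vfs_hash_get()/vfs_hash_insert(), so the caller receives a locked,
 * referenced vnode in *vpp.
 */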

/*
 * Do the vrele(sp->s_dvp) as a separate task in order to avoid a deadlock
 * caused by a lock-order reversal (LOR) when vrele() locks the directory
 * vnode.
 */
static void
nfs_freesillyrename(void *arg, __unused int pending)
{
	struct sillyrename *sp;

	sp = arg;
	vrele(sp->s_dvp);
	free(sp, M_NEWNFSREQ);
}

static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
	struct nfsnode *np;
	struct sillyrename *sp;

	ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
	np = VTONFS(vp);
	mtx_assert(&np->n_mtx, MA_OWNED);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL) {
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
}
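
/*
 * The deferral above is the stock taskqueue(9) pattern: embed a struct task
 * in the object, point it at a handler, and enqueue it on a system taskqueue
 * so the work runs later in a context that holds none of the caller's locks.
 * A minimal sketch with hypothetical names (struct deferred, deferred_done(),
 * M_TEMP), assuming only the documented TASK_INIT()/taskqueue_enqueue() API:
 *
 *	struct deferred {
 *		struct task	d_task;
 *		struct vnode	*d_vp;
 *	};
 *
 *	static void
 *	deferred_done(void *arg, int pending __unused)
 *	{
 *		struct deferred *d = arg;
 *
 *		vrele(d->d_vp);
 *		free(d, M_TEMP);
 *	}
 *
 *	TASK_INIT(&d->d_task, 0, deferred_done, d);
 *	taskqueue_enqueue(taskqueue_thread, &d->d_task);
 */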

int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np;
	boolean_t retv;

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now. Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}

	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	ncl_releasesillyrename(vp, ap->a_td);

	/*
	 * NMODIFIED means that there might be dirty/stale buffers
	 * associated with the NFS vnode.
	 * NDSCOMMIT means that the file is on a pNFS server and commits
	 * should be done to the DS.
	 * None of the other flags are meaningful after the vnode is unused.
	 */
	np->n_flag &= (NMODIFIED | NDSCOMMIT);
	mtx_unlock(&np->n_mtx);
	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	mtx_lock(&np->n_mtx);
	ncl_releasesillyrename(vp, ap->a_td);
	mtx_unlock(&np->n_mtx);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * We can now safely close any remaining NFSv4 Opens for
		 * this file. Most opens will have already been closed by
		 * ncl_inactive(), but there are cases where it is not
		 * called, so we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and large file handle
	 * structures that might be associated with this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	lockdestroy(&np->n_excl);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Invalidate both the access and attribute caches for this vnode.
 */
void
ncl_invalcaches(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);
	int i;

	mtx_lock(&np->n_mtx);
	for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
		np->n_accesscache[i].stamp = 0;
	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	np->n_attrstamp = 0;
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	mtx_unlock(&np->n_mtx);
}