xref: /freebsd/sys/fs/tmpfs/tmpfs_subr.c (revision cc96f92a570e05636a20fdd15d4616b127bb9ecc)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c) 2005 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * Efficient memory file system supporting functions.
37  */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/fnv_hash.h>
45 #include <sys/lock.h>
46 #include <sys/limits.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/random.h>
52 #include <sys/refcount.h>
53 #include <sys/rwlock.h>
54 #include <sys/smr.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/vnode.h>
58 #include <sys/vmmeter.h>
59 
60 #include <vm/vm.h>
61 #include <vm/vm_param.h>
62 #include <vm/vm_object.h>
63 #include <vm/vm_page.h>
64 #include <vm/vm_pageout.h>
65 #include <vm/vm_pager.h>
66 #include <vm/vm_extern.h>
67 #include <vm/swap_pager.h>
68 
69 #include <fs/tmpfs/tmpfs.h>
70 #include <fs/tmpfs/tmpfs_fifoops.h>
71 #include <fs/tmpfs/tmpfs_vnops.h>
72 
73 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
74     "tmpfs file system");
75 
76 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
77 
78 MALLOC_DEFINE(M_TMPFSDIR, "tmpfs dir", "tmpfs dirent structure");
79 static uma_zone_t tmpfs_node_pool;
80 VFS_SMR_DECLARE;
81 
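/*
 * The zone constructor and destructor below run every time a node is
 * allocated from or released to tmpfs_node_pool, while the init and
 * fini callbacks run only when pool items are created or destroyed.
 * The node interlock and the randomized generation number therefore
 * survive reuse of a pool item; the constructor merely bumps the
 * generation.
 */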
82 static int
83 tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
84 {
85 	struct tmpfs_node *node;
86 
87 	node = mem;
88 	node->tn_gen++;
89 	node->tn_size = 0;
90 	node->tn_status = 0;
91 	node->tn_accessed = false;
92 	node->tn_flags = 0;
93 	node->tn_links = 0;
94 	node->tn_vnode = NULL;
95 	node->tn_vpstate = 0;
96 	return (0);
97 }
98 
99 static void
100 tmpfs_node_dtor(void *mem, int size, void *arg)
101 {
102 	struct tmpfs_node *node;
103 
104 	node = mem;
105 	node->tn_type = VNON;
106 }
107 
108 static int
109 tmpfs_node_init(void *mem, int size, int flags)
110 {
111 	struct tmpfs_node *node;
112 
113 	node = mem;
114 	node->tn_id = 0;
115 	mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
116 	node->tn_gen = arc4random();
117 	return (0);
118 }
119 
120 static void
121 tmpfs_node_fini(void *mem, int size)
122 {
123 	struct tmpfs_node *node;
124 
125 	node = mem;
126 	mtx_destroy(&node->tn_interlock);
127 }
128 
129 void
130 tmpfs_subr_init(void)
131 {
132 	tmpfs_node_pool = uma_zcreate("TMPFS node",
133 	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
134 	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
135 	VFS_SMR_ZONE_SET(tmpfs_node_pool);
136 }
137 
138 void
139 tmpfs_subr_uninit(void)
140 {
141 	uma_zdestroy(tmpfs_node_pool);
142 }
143 
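/*
 * Sysctl handler for vfs.tmpfs.memory_reserved.  The value is reported
 * and set in bytes, but stored internally as a page count, and may not
 * be lowered below TMPFS_PAGES_MINRESERVED.
 */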
144 static int
145 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
146 {
147 	int error;
148 	long pages, bytes;
149 
150 	pages = *(long *)arg1;
151 	bytes = pages * PAGE_SIZE;
152 
153 	error = sysctl_handle_long(oidp, &bytes, 0, req);
154 	if (error || !req->newptr)
155 		return (error);
156 
157 	pages = bytes / PAGE_SIZE;
158 	if (pages < TMPFS_PAGES_MINRESERVED)
159 		return (EINVAL);
160 
161 	*(long *)arg1 = pages;
162 	return (0);
163 }
164 
165 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved,
166     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &tmpfs_pages_reserved, 0,
167     sysctl_mem_reserved, "L",
168     "Amount of available memory and swap below which tmpfs growth stops");
169 
170 static __inline int tmpfs_dirtree_cmp(struct tmpfs_dirent *a,
171     struct tmpfs_dirent *b);
172 RB_PROTOTYPE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
173 
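/*
 * Returns the number of pages that tmpfs may still consume: the sum of
 * available swap and free memory, less the reserved amount, clamped at
 * zero.
 */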
174 size_t
175 tmpfs_mem_avail(void)
176 {
177 	size_t avail;
178 	long reserved;
179 
180 	avail = swap_pager_avail + vm_free_count();
181 	reserved = atomic_load_long(&tmpfs_pages_reserved);
182 	if (__predict_false(avail < reserved))
183 		return (0);
184 	return (avail - reserved);
185 }
186 
187 size_t
188 tmpfs_pages_used(struct tmpfs_mount *tmp)
189 {
190 	const size_t node_size = sizeof(struct tmpfs_node) +
191 	    sizeof(struct tmpfs_dirent);
192 	size_t meta_pages;
193 
194 	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
195 	    PAGE_SIZE);
196 	return (meta_pages + tmp->tm_pages_used);
197 }
198 
199 static size_t
200 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
201 {
202 	if (tmpfs_mem_avail() < req_pages)
203 		return (0);
204 
205 	if (tmp->tm_pages_max != ULONG_MAX &&
206 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
207 		return (0);
208 
209 	return (1);
210 }
211 
212 void
213 tmpfs_ref_node(struct tmpfs_node *node)
214 {
215 #ifdef INVARIANTS
216 	u_int old;
217 
218 	old =
219 #endif
220 	refcount_acquire(&node->tn_refcount);
221 #ifdef INVARIANTS
222 	KASSERT(old > 0, ("node %p zero refcount", node));
223 #endif
224 }
225 
226 /*
227  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
228  * its owner set to 'uid', its group set to 'gid' and its mode set to
229  * 'mode'.
230  *
231  * If the node type is set to 'VDIR', then the parent parameter must point
232  * to the parent directory of the node being created.  It may only be NULL
233  * while allocating the root node.
234  *
235  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
236  * specifies the device the node represents.
237  *
238  * If the node type is set to 'VLNK', then the parameter target specifies
239  * the file name of the target file for the symbolic link that is being
240  * created.
241  *
242  * Note that new nodes are allocated from the tmpfs node pool (a UMA
243  * zone), provided the mount still has free node slots and enough
244  * memory to create them.
245  *
246  * Returns zero on success or an appropriate error code on failure.
247  */
248 int
249 tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
250     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
251     const char *target, dev_t rdev, struct tmpfs_node **node)
252 {
253 	struct tmpfs_node *nnode;
254 	vm_object_t obj;
255 	char *symlink;
256 	char symlink_smr;
257 
258 	/* If the root directory of the 'tmp' file system is not yet
259 	 * allocated, this must be the request to do it. */
260 	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
261 
262 	MPASS(IFF(type == VLNK, target != NULL));
263 	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
264 
265 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
266 		return (ENOSPC);
267 	if (tmpfs_pages_check_avail(tmp, 1) == 0)
268 		return (ENOSPC);
269 
270 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
271 		/*
272 		 * When a new tmpfs node is created for a fully
273 		 * constructed mount point, there must be a parent
274 		 * node, whose vnode is locked exclusively.  As a
275 		 * consequence, if the unmount is executing in
276 		 * parallel, vflush() cannot reclaim the parent vnode.
277 		 * Due to this, the check for MNTK_UNMOUNT flag is not
278 		 * racy: if we did not see MNTK_UNMOUNT flag, then tmp
279 		 * cannot be destroyed until node construction is
280 		 * finished and the parent vnode unlocked.
281 		 *
282 		 * Tmpfs does not need to instantiate new nodes during
283 		 * unmount.
284 		 */
285 		return (EBUSY);
286 	}
287 	if ((mp->mnt_flag & MNT_RDONLY) != 0)
288 		return (EROFS);
289 
290 	nnode = uma_zalloc_smr(tmpfs_node_pool, M_WAITOK);
291 
292 	/* Generic initialization. */
293 	nnode->tn_type = type;
294 	vfs_timestamp(&nnode->tn_atime);
295 	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
296 	    nnode->tn_atime;
297 	nnode->tn_uid = uid;
298 	nnode->tn_gid = gid;
299 	nnode->tn_mode = mode;
300 	nnode->tn_id = alloc_unr64(&tmp->tm_ino_unr);
301 	nnode->tn_refcount = 1;
302 
303 	/* Type-specific initialization. */
304 	switch (nnode->tn_type) {
305 	case VBLK:
306 	case VCHR:
307 		nnode->tn_rdev = rdev;
308 		break;
309 
310 	case VDIR:
311 		RB_INIT(&nnode->tn_dir.tn_dirhead);
312 		LIST_INIT(&nnode->tn_dir.tn_dupindex);
313 		MPASS(parent != nnode);
314 		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
315 		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
316 		nnode->tn_dir.tn_readdir_lastn = 0;
317 		nnode->tn_dir.tn_readdir_lastp = NULL;
318 		nnode->tn_links++;
319 		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
320 		nnode->tn_dir.tn_parent->tn_links++;
321 		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
322 		break;
323 
324 	case VFIFO:
325 		/* FALLTHROUGH */
326 	case VSOCK:
327 		break;
328 
329 	case VLNK:
330 		MPASS(strlen(target) < MAXPATHLEN);
331 		nnode->tn_size = strlen(target);
332 
333 		symlink = NULL;
334 		if (!tmp->tm_nonc) {
335 			symlink = cache_symlink_alloc(nnode->tn_size + 1, M_WAITOK);
336 			symlink_smr = true;
337 		}
338 		if (symlink == NULL) {
339 			symlink = malloc(nnode->tn_size + 1, M_TMPFSNAME, M_WAITOK);
340 			symlink_smr = false;
341 		}
342 		memcpy(symlink, target, nnode->tn_size + 1);
343 
344 		/*
345 		 * Allow safe symlink resolving for lockless lookup.
346 		 * tmpfs_fplookup_symlink references this comment.
347 		 *
348 		 * 1. nnode is not yet visible to the world
349 		 * 2. both tn_link_target and tn_link_smr get populated
350 		 * 3. release fence publishes their content
351 		 * 4. tn_link_target content is immutable until node destruction,
352 		 *    where the pointer gets set to NULL
353 		 * 5. tn_link_smr is never changed once set
354 		 *
355 		 * As a result it is sufficient to issue load consume on the node
356 		 * pointer to also get the above content in a stable manner.
357 		 * Worst case tn_link_smr flag may be set to true despite being stale,
358 		 * while the target buffer is already cleared out.
359 		 *
360 		 * TODO: Since there is no load consume primitive provided
361 		 * right now, the load is performed with an acquire fence.
362 		 */
363 		atomic_store_ptr(&nnode->tn_link_target, symlink);
364 		atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
365 		atomic_thread_fence_rel();
366 		break;
367 
368 	case VREG:
369 		obj = nnode->tn_reg.tn_aobj =
370 		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
371 			NULL /* XXXKIB - tmpfs needs swap reservation */);
372 		VM_OBJECT_WLOCK(obj);
373 		/* OBJ_TMPFS is set together with the setting of vp->v_object */
374 		vm_object_set_flag(obj, OBJ_TMPFS_NODE);
375 		VM_OBJECT_WUNLOCK(obj);
376 		nnode->tn_reg.tn_tmp = tmp;
377 		break;
378 
379 	default:
380 		panic("tmpfs_alloc_node: type %p %d", nnode,
381 		    (int)nnode->tn_type);
382 	}
383 
384 	TMPFS_LOCK(tmp);
385 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
386 	nnode->tn_attached = true;
387 	tmp->tm_nodes_inuse++;
388 	tmp->tm_refcount++;
389 	TMPFS_UNLOCK(tmp);
390 
391 	*node = nnode;
392 	return (0);
393 }
394 
395 /*
396  * Destroys the node pointed to by 'node' from the file system 'tmp'.
397  * If the node is a directory, it must have no entries.
398  */
399 void
400 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
401 {
402 	if (refcount_release_if_not_last(&node->tn_refcount))
403 		return;
404 
405 	TMPFS_LOCK(tmp);
406 	TMPFS_NODE_LOCK(node);
407 	if (!tmpfs_free_node_locked(tmp, node, false)) {
408 		TMPFS_NODE_UNLOCK(node);
409 		TMPFS_UNLOCK(tmp);
410 	}
411 }
412 
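/*
 * Drops a reference on 'node'; called with the mount and node locks held.
 * When 'detach' is true, or when the last reference is dropped, the node
 * is removed from the mount's list of in-use nodes.  Returns true if this
 * call released the last reference and destroyed the node (the locks are
 * dropped in the process); otherwise returns false with both locks still
 * held.
 */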
413 bool
414 tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
415     bool detach)
416 {
417 	vm_object_t uobj;
418 	char *symlink;
419 	bool last;
420 
421 	TMPFS_MP_ASSERT_LOCKED(tmp);
422 	TMPFS_NODE_ASSERT_LOCKED(node);
423 
424 	last = refcount_release(&node->tn_refcount);
425 	if (node->tn_attached && (detach || last)) {
426 		MPASS(tmp->tm_nodes_inuse > 0);
427 		tmp->tm_nodes_inuse--;
428 		LIST_REMOVE(node, tn_entries);
429 		node->tn_attached = false;
430 	}
431 	if (!last)
432 		return (false);
433 
434 #ifdef INVARIANTS
435 	MPASS(node->tn_vnode == NULL);
436 	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
437 #endif
438 	TMPFS_NODE_UNLOCK(node);
439 	TMPFS_UNLOCK(tmp);
440 
441 	switch (node->tn_type) {
442 	case VBLK:
443 		/* FALLTHROUGH */
444 	case VCHR:
445 		/* FALLTHROUGH */
446 	case VDIR:
447 		/* FALLTHROUGH */
448 	case VFIFO:
449 		/* FALLTHROUGH */
450 	case VSOCK:
451 		break;
452 
453 	case VLNK:
454 		symlink = node->tn_link_target;
455 		atomic_store_ptr(&node->tn_link_target, NULL);
456 		if (atomic_load_char(&node->tn_link_smr)) {
457 			cache_symlink_free(symlink, node->tn_size + 1);
458 		} else {
459 			free(symlink, M_TMPFSNAME);
460 		}
461 		break;
462 
463 	case VREG:
464 		uobj = node->tn_reg.tn_aobj;
465 		if (uobj != NULL) {
466 			if (uobj->size != 0)
467 				atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
468 			KASSERT((uobj->flags & OBJ_TMPFS) == 0,
469 			    ("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
470 			vm_object_deallocate(uobj);
471 		}
472 		break;
473 
474 	default:
475 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
476 	}
477 
478 	uma_zfree_smr(tmpfs_node_pool, node);
479 	TMPFS_LOCK(tmp);
480 	tmpfs_free_tmp(tmp);
481 	return (true);
482 }
483 
484 static __inline uint32_t
485 tmpfs_dirent_hash(const char *name, u_int len)
486 {
487 	uint32_t hash;
488 
489 	hash = fnv_32_buf(name, len, FNV1_32_INIT + len) & TMPFS_DIRCOOKIE_MASK;
490 #ifdef TMPFS_DEBUG_DIRCOOKIE_DUP
491 	hash &= 0xf;
492 #endif
493 	if (hash < TMPFS_DIRCOOKIE_MIN)
494 		hash += TMPFS_DIRCOOKIE_MIN;
495 
496 	return (hash);
497 }
498 
499 static __inline off_t
500 tmpfs_dirent_cookie(struct tmpfs_dirent *de)
501 {
502 	if (de == NULL)
503 		return (TMPFS_DIRCOOKIE_EOF);
504 
505 	MPASS(de->td_cookie >= TMPFS_DIRCOOKIE_MIN);
506 
507 	return (de->td_cookie);
508 }
509 
510 static __inline boolean_t
511 tmpfs_dirent_dup(struct tmpfs_dirent *de)
512 {
513 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUP) != 0);
514 }
515 
516 static __inline boolean_t
517 tmpfs_dirent_duphead(struct tmpfs_dirent *de)
518 {
519 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUPHEAD) != 0);
520 }
521 
522 void
523 tmpfs_dirent_init(struct tmpfs_dirent *de, const char *name, u_int namelen)
524 {
525 	de->td_hash = de->td_cookie = tmpfs_dirent_hash(name, namelen);
526 	memcpy(de->ud.td_name, name, namelen);
527 	de->td_namelen = namelen;
528 }
529 
530 /*
531  * Allocates a new directory entry for the node 'node' with the name 'name'.
532  * The new directory entry is returned in *de.
533  *
534  * The link count of node is increased by one to reflect the new object
535  * referencing it.
536  *
537  * Returns zero on success or an appropriate error code on failure.
538  */
539 int
540 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
541     const char *name, u_int len, struct tmpfs_dirent **de)
542 {
543 	struct tmpfs_dirent *nde;
544 
545 	nde = malloc(sizeof(*nde), M_TMPFSDIR, M_WAITOK);
546 	nde->td_node = node;
547 	if (name != NULL) {
548 		nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
549 		tmpfs_dirent_init(nde, name, len);
550 	} else
551 		nde->td_namelen = 0;
552 	if (node != NULL)
553 		node->tn_links++;
554 
555 	*de = nde;
556 
557 	return (0);
558 }
559 
560 /*
561  * Frees a directory entry.  It is the caller's responsibility to destroy
562  * the node referenced by it if needed.
563  *
564  * The link count of the node is decreased by one to reflect the removal
565  * of an object that referenced it.  This only happens if the directory
566  * entry still points to a node; otherwise the node is not touched, as it
567  * may already have been released from the outside.
568  */
569 void
570 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
571 {
572 	struct tmpfs_node *node;
573 
574 	node = de->td_node;
575 	if (node != NULL) {
576 		MPASS(node->tn_links > 0);
577 		node->tn_links--;
578 	}
579 	if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
580 		free(de->ud.td_name, M_TMPFSNAME);
581 	free(de, M_TMPFSDIR);
582 }
583 
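/*
 * Breaks the association between the vnode 'vp' and its VM object: the
 * OBJ_TMPFS flag is cleared, the object's back pointer to the vnode is
 * removed and a negative v_writecount left behind by the object is
 * reset to zero.
 */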
584 void
585 tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
586 {
587 
588 	ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
589 	if (vp->v_type != VREG || obj == NULL)
590 		return;
591 
592 	VM_OBJECT_WLOCK(obj);
593 	VI_LOCK(vp);
594 	vm_object_clear_flag(obj, OBJ_TMPFS);
595 	obj->un_pager.swp.swp_tmpfs = NULL;
596 	if (vp->v_writecount < 0)
597 		vp->v_writecount = 0;
598 	VI_UNLOCK(vp);
599 	VM_OBJECT_WUNLOCK(obj);
600 }
601 
602 /*
603  * Need to clear v_object for insmntque failure.
604  */
605 static void
606 tmpfs_insmntque_dtr(struct vnode *vp, void *dtr_arg)
607 {
608 
609 	tmpfs_destroy_vobject(vp, vp->v_object);
610 	vp->v_object = NULL;
611 	vp->v_data = NULL;
612 	vp->v_op = &dead_vnodeops;
613 	vgone(vp);
614 	vput(vp);
615 }
616 
617 /*
618  * Allocates a new vnode for the node 'node' or returns a new reference to
619  * an existing one if the node already had a vnode referencing it.  The
620  * resulting locked vnode is returned in *vpp.
621  *
622  * Returns zero on success or an appropriate error code on failure.
623  */
624 int
625 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
626     struct vnode **vpp)
627 {
628 	struct vnode *vp;
629 	enum vgetstate vs;
630 	struct tmpfs_mount *tm;
631 	vm_object_t object;
632 	int error;
633 
634 	error = 0;
635 	tm = VFS_TO_TMPFS(mp);
636 	TMPFS_NODE_LOCK(node);
637 	tmpfs_ref_node(node);
638 loop:
639 	TMPFS_NODE_ASSERT_LOCKED(node);
640 	if ((vp = node->tn_vnode) != NULL) {
641 		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
642 		if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
643 		    (VN_IS_DOOMED(vp) &&
644 		     (lkflag & LK_NOWAIT) != 0)) {
645 			TMPFS_NODE_UNLOCK(node);
646 			error = ENOENT;
647 			vp = NULL;
648 			goto out;
649 		}
650 		if (VN_IS_DOOMED(vp)) {
651 			node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
652 			while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {
653 				msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
654 				    0, "tmpfsE", 0);
655 			}
656 			goto loop;
657 		}
658 		vs = vget_prep(vp);
659 		TMPFS_NODE_UNLOCK(node);
660 		error = vget_finish(vp, lkflag, vs);
661 		if (error == ENOENT) {
662 			TMPFS_NODE_LOCK(node);
663 			goto loop;
664 		}
665 		if (error != 0) {
666 			vp = NULL;
667 			goto out;
668 		}
669 
670 		/*
671 		 * Make sure the vnode is still there after
672 		 * getting the interlock to avoid racing a free.
673 		 */
674 		if (node->tn_vnode == NULL || node->tn_vnode != vp) {
675 			vput(vp);
676 			TMPFS_NODE_LOCK(node);
677 			goto loop;
678 		}
679 
680 		goto out;
681 	}
682 
683 	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
684 	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
685 		TMPFS_NODE_UNLOCK(node);
686 		error = ENOENT;
687 		vp = NULL;
688 		goto out;
689 	}
690 
691 	/*
692 	 * Otherwise allocate a new vnode.  Serialize the allocation for
693 	 * this node: only one thread may call getnewvnode(), which can block.
694 	 */
695 	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
696 		node->tn_vpstate |= TMPFS_VNODE_WANT;
697 		error = msleep((caddr_t) &node->tn_vpstate,
698 		    TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
699 		if (error != 0)
700 			goto out;
701 		goto loop;
702 	} else
703 		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
704 
705 	TMPFS_NODE_UNLOCK(node);
706 
707 	/* Get a new vnode and associate it with our node. */
708 	error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
709 	    &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
710 	if (error != 0)
711 		goto unlock;
712 	MPASS(vp != NULL);
713 
714 	/* lkflag is ignored, the lock is exclusive */
715 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
716 
717 	vp->v_data = node;
718 	vp->v_type = node->tn_type;
719 
720 	/* Type-specific initialization. */
721 	switch (node->tn_type) {
722 	case VBLK:
723 		/* FALLTHROUGH */
724 	case VCHR:
725 		/* FALLTHROUGH */
726 	case VLNK:
727 		/* FALLTHROUGH */
728 	case VSOCK:
729 		break;
730 	case VFIFO:
731 		vp->v_op = &tmpfs_fifoop_entries;
732 		break;
733 	case VREG:
734 		object = node->tn_reg.tn_aobj;
735 		VM_OBJECT_WLOCK(object);
736 		VI_LOCK(vp);
737 		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
738 		vp->v_object = object;
739 		object->un_pager.swp.swp_tmpfs = vp;
740 		vm_object_set_flag(object, OBJ_TMPFS);
741 		vn_irflag_set_locked(vp, VIRF_PGREAD);
742 		VI_UNLOCK(vp);
743 		VM_OBJECT_WUNLOCK(object);
744 		break;
745 	case VDIR:
746 		MPASS(node->tn_dir.tn_parent != NULL);
747 		if (node->tn_dir.tn_parent == node)
748 			vp->v_vflag |= VV_ROOT;
749 		break;
750 
751 	default:
752 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
753 	}
754 	if (vp->v_type != VFIFO)
755 		VN_LOCK_ASHARE(vp);
756 
757 	error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
758 	if (error != 0)
759 		vp = NULL;
760 
761 unlock:
762 	TMPFS_NODE_LOCK(node);
763 
764 	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
765 	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
766 	node->tn_vnode = vp;
767 
768 	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
769 		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
770 		TMPFS_NODE_UNLOCK(node);
771 		wakeup((caddr_t) &node->tn_vpstate);
772 	} else
773 		TMPFS_NODE_UNLOCK(node);
774 
775 out:
776 	if (error == 0) {
777 		*vpp = vp;
778 
779 #ifdef INVARIANTS
780 		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
781 		TMPFS_NODE_LOCK(node);
782 		MPASS(*vpp == node->tn_vnode);
783 		TMPFS_NODE_UNLOCK(node);
784 #endif
785 	}
786 	tmpfs_free_node(tm, node);
787 
788 	return (error);
789 }
790 
791 /*
792  * Destroys the association between the vnode vp and the node it
793  * references.
794  */
795 void
796 tmpfs_free_vp(struct vnode *vp)
797 {
798 	struct tmpfs_node *node;
799 
800 	node = VP_TO_TMPFS_NODE(vp);
801 
802 	TMPFS_NODE_ASSERT_LOCKED(node);
803 	node->tn_vnode = NULL;
804 	if ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0)
805 		wakeup(&node->tn_vnode);
806 	node->tn_vpstate &= ~TMPFS_VNODE_WRECLAIM;
807 	vp->v_data = NULL;
808 }
809 
810 /*
811  * Allocates a new file of type 'type' and adds it to the parent directory
812  * 'dvp'; this addition is done using the component name given in 'cnp'.
813  * The ownership of the new file is automatically assigned based on the
814  * credentials of the caller (through 'cnp'), the group is set based on
815  * the parent directory and the mode is determined from the 'vap' argument.
816  * If successful, *vpp holds a vnode to the newly created file and zero
817  * is returned.  Otherwise *vpp is NULL and the function returns an
818  * appropriate error code.
819  */
820 int
821 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
822     struct componentname *cnp, const char *target)
823 {
824 	int error;
825 	struct tmpfs_dirent *de;
826 	struct tmpfs_mount *tmp;
827 	struct tmpfs_node *dnode;
828 	struct tmpfs_node *node;
829 	struct tmpfs_node *parent;
830 
831 	ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
832 	MPASS(cnp->cn_flags & HASBUF);
833 
834 	tmp = VFS_TO_TMPFS(dvp->v_mount);
835 	dnode = VP_TO_TMPFS_DIR(dvp);
836 	*vpp = NULL;
837 
838 	/* If the entry we are creating is a directory, its parent will gain
839 	 * a new link, so ensure that the parent's link count cannot
840 	 * overflow. */
841 	if (vap->va_type == VDIR) {
842 		/* Ensure that we do not overflow the maximum number of links
843 		 * imposed by the system. */
844 		MPASS(dnode->tn_links <= TMPFS_LINK_MAX);
845 		if (dnode->tn_links == TMPFS_LINK_MAX) {
846 			return (EMLINK);
847 		}
848 
849 		parent = dnode;
850 		MPASS(parent != NULL);
851 	} else
852 		parent = NULL;
853 
854 	/* Allocate a node that represents the new file. */
855 	error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
856 	    cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
857 	    target, vap->va_rdev, &node);
858 	if (error != 0)
859 		return (error);
860 
861 	/* Allocate a directory entry that points to the new file. */
862 	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
863 	    &de);
864 	if (error != 0) {
865 		tmpfs_free_node(tmp, node);
866 		return (error);
867 	}
868 
869 	/* Allocate a vnode for the new file. */
870 	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
871 	if (error != 0) {
872 		tmpfs_free_dirent(tmp, de);
873 		tmpfs_free_node(tmp, node);
874 		return (error);
875 	}
876 
877 	/* Now that all required items are allocated, we can proceed to
878 	 * insert the new node into the directory, an operation that
879 	 * cannot fail. */
880 	if (cnp->cn_flags & ISWHITEOUT)
881 		tmpfs_dir_whiteout_remove(dvp, cnp);
882 	tmpfs_dir_attach(dvp, de);
883 	return (0);
884 }
885 
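/*
 * Directory cursor helpers.  tmpfs_dir_first() and tmpfs_dir_next()
 * walk the directory's RB-tree in hash order and transparently descend
 * into the duplicate-cookie lists hanging off "duphead" entries, so
 * that callers see every directory entry exactly once.
 */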
886 struct tmpfs_dirent *
887 tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
888 {
889 	struct tmpfs_dirent *de;
890 
891 	de = RB_MIN(tmpfs_dir, &dnode->tn_dir.tn_dirhead);
892 	dc->tdc_tree = de;
893 	if (de != NULL && tmpfs_dirent_duphead(de))
894 		de = LIST_FIRST(&de->ud.td_duphead);
895 	dc->tdc_current = de;
896 
897 	return (dc->tdc_current);
898 }
899 
900 struct tmpfs_dirent *
901 tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
902 {
903 	struct tmpfs_dirent *de;
904 
905 	MPASS(dc->tdc_tree != NULL);
906 	if (tmpfs_dirent_dup(dc->tdc_current)) {
907 		dc->tdc_current = LIST_NEXT(dc->tdc_current, uh.td_dup.entries);
908 		if (dc->tdc_current != NULL)
909 			return (dc->tdc_current);
910 	}
911 	dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
912 	    &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
913 	if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
914 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
915 		MPASS(dc->tdc_current != NULL);
916 	}
917 
918 	return (dc->tdc_current);
919 }
920 
921 /* Lookup directory entry in RB-Tree. Function may return duphead entry. */
922 static struct tmpfs_dirent *
923 tmpfs_dir_xlookup_hash(struct tmpfs_node *dnode, uint32_t hash)
924 {
925 	struct tmpfs_dirent *de, dekey;
926 
927 	dekey.td_hash = hash;
928 	de = RB_FIND(tmpfs_dir, &dnode->tn_dir.tn_dirhead, &dekey);
929 	return (de);
930 }
931 
932 /* Lookup directory entry by cookie, initialize directory cursor accordingly. */
933 static struct tmpfs_dirent *
934 tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
935     struct tmpfs_dir_cursor *dc)
936 {
937 	struct tmpfs_dir *dirhead = &node->tn_dir.tn_dirhead;
938 	struct tmpfs_dirent *de, dekey;
939 
940 	MPASS(cookie >= TMPFS_DIRCOOKIE_MIN);
941 
942 	if (cookie == node->tn_dir.tn_readdir_lastn &&
943 	    (de = node->tn_dir.tn_readdir_lastp) != NULL) {
944 		/* Protect against possible race, tn_readdir_last[pn]
945 		 * may be updated with only shared vnode lock held. */
946 		if (cookie == tmpfs_dirent_cookie(de))
947 			goto out;
948 	}
949 
950 	if ((cookie & TMPFS_DIRCOOKIE_DUP) != 0) {
951 		LIST_FOREACH(de, &node->tn_dir.tn_dupindex,
952 		    uh.td_dup.index_entries) {
953 			MPASS(tmpfs_dirent_dup(de));
954 			if (de->td_cookie == cookie)
955 				goto out;
956 			/* dupindex list is sorted. */
957 			if (de->td_cookie < cookie) {
958 				de = NULL;
959 				goto out;
960 			}
961 		}
962 		MPASS(de == NULL);
963 		goto out;
964 	}
965 
966 	if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
967 		de = NULL;
968 	} else {
969 		dekey.td_hash = cookie;
970 		/* Recover if direntry for cookie was removed */
971 		de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
972 	}
973 	dc->tdc_tree = de;
974 	dc->tdc_current = de;
975 	if (de != NULL && tmpfs_dirent_duphead(de)) {
976 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
977 		MPASS(dc->tdc_current != NULL);
978 	}
979 	return (dc->tdc_current);
980 
981 out:
982 	dc->tdc_tree = de;
983 	dc->tdc_current = de;
984 	if (de != NULL && tmpfs_dirent_dup(de))
985 		dc->tdc_tree = tmpfs_dir_xlookup_hash(node,
986 		    de->td_hash);
987 	return (dc->tdc_current);
988 }
989 
990 /*
991  * Looks for a directory entry in the directory represented by node.
992  * 'cnp' describes the name of the entry to look for.  Note that the .
993  * and .. components are not allowed as they do not physically exist
994  * within directories.
995  *
996  * Returns a pointer to the entry when found, otherwise NULL.
997  */
998 struct tmpfs_dirent *
999 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
1000     struct componentname *cnp)
1001 {
1002 	struct tmpfs_dir_duphead *duphead;
1003 	struct tmpfs_dirent *de;
1004 	uint32_t hash;
1005 
1006 	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
1007 	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
1008 	    cnp->cn_nameptr[1] == '.')));
1009 	TMPFS_VALIDATE_DIR(node);
1010 
1011 	hash = tmpfs_dirent_hash(cnp->cn_nameptr, cnp->cn_namelen);
1012 	de = tmpfs_dir_xlookup_hash(node, hash);
1013 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1014 		duphead = &de->ud.td_duphead;
1015 		LIST_FOREACH(de, duphead, uh.td_dup.entries) {
1016 			if (TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1017 			    cnp->cn_namelen))
1018 				break;
1019 		}
1020 	} else if (de != NULL) {
1021 		if (!TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1022 		    cnp->cn_namelen))
1023 			de = NULL;
1024 	}
1025 	if (de != NULL && f != NULL && de->td_node != f)
1026 		de = NULL;
1027 
1028 	return (de);
1029 }
1030 
1031 /*
1032  * Attach the duplicate-cookie directory entry 'nde' to 'dnode': insert it
1033  * into the dupindex list and allocate a new cookie value for it.
1034  */
1035 static void
1036 tmpfs_dir_attach_dup(struct tmpfs_node *dnode,
1037     struct tmpfs_dir_duphead *duphead, struct tmpfs_dirent *nde)
1038 {
1039 	struct tmpfs_dir_duphead *dupindex;
1040 	struct tmpfs_dirent *de, *pde;
1041 
1042 	dupindex = &dnode->tn_dir.tn_dupindex;
1043 	de = LIST_FIRST(dupindex);
1044 	if (de == NULL || de->td_cookie < TMPFS_DIRCOOKIE_DUP_MAX) {
1045 		if (de == NULL)
1046 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1047 		else
1048 			nde->td_cookie = de->td_cookie + 1;
1049 		MPASS(tmpfs_dirent_dup(nde));
1050 		LIST_INSERT_HEAD(dupindex, nde, uh.td_dup.index_entries);
1051 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1052 		return;
1053 	}
1054 
1055 	/*
1056 	 * Cookie numbers are near exhaustion. Scan dupindex list for unused
1057 	 * numbers. dupindex list is sorted in descending order. Keep it so
1058 	 * after inserting nde.
1059 	 */
1060 	while (1) {
1061 		pde = de;
1062 		de = LIST_NEXT(de, uh.td_dup.index_entries);
1063 		if (de == NULL && pde->td_cookie != TMPFS_DIRCOOKIE_DUP_MIN) {
1064 			/*
1065 			 * The last element of the index does not use the
1066 			 * minimal cookie value, so that value is free; take it.
1067 			 */
1068 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1069 			LIST_INSERT_AFTER(pde, nde, uh.td_dup.index_entries);
1070 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1071 			return;
1072 		} else if (de == NULL) {
1073 			/*
1074 			 * We are so lucky to have 2^30 hash duplicates in a
1075 			 * single directory :)  Return the largest possible
1076 			 * cookie value.  It should be fine except for possible
1077 			 * issues with VOP_READDIR restart.
1078 			 */
1079 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MAX;
1080 			LIST_INSERT_HEAD(dupindex, nde,
1081 			    uh.td_dup.index_entries);
1082 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1083 			return;
1084 		}
1085 		if (de->td_cookie + 1 == pde->td_cookie ||
1086 		    de->td_cookie >= TMPFS_DIRCOOKIE_DUP_MAX)
1087 			continue;	/* No hole or invalid cookie. */
1088 		nde->td_cookie = de->td_cookie + 1;
1089 		MPASS(tmpfs_dirent_dup(nde));
1090 		MPASS(pde->td_cookie > nde->td_cookie);
1091 		MPASS(nde->td_cookie > de->td_cookie);
1092 		LIST_INSERT_BEFORE(de, nde, uh.td_dup.index_entries);
1093 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1094 		return;
1095 	}
1096 }
1097 
1098 /*
1099  * Attaches the directory entry de to the directory represented by vp.
1100  * Note that this does not change the link count of the node pointed by
1101  * the directory entry, as this is done by tmpfs_alloc_dirent.
1102  */
1103 void
1104 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
1105 {
1106 	struct tmpfs_node *dnode;
1107 	struct tmpfs_dirent *xde, *nde;
1108 
1109 	ASSERT_VOP_ELOCKED(vp, __func__);
1110 	MPASS(de->td_namelen > 0);
1111 	MPASS(de->td_hash >= TMPFS_DIRCOOKIE_MIN);
1112 	MPASS(de->td_cookie == de->td_hash);
1113 
1114 	dnode = VP_TO_TMPFS_DIR(vp);
1115 	dnode->tn_dir.tn_readdir_lastn = 0;
1116 	dnode->tn_dir.tn_readdir_lastp = NULL;
1117 
1118 	MPASS(!tmpfs_dirent_dup(de));
1119 	xde = RB_INSERT(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1120 	if (xde != NULL && tmpfs_dirent_duphead(xde))
1121 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1122 	else if (xde != NULL) {
1123 		/*
1124 		 * Allocate new duphead. Swap xde with duphead to avoid
1125 		 * adding/removing elements with the same hash.
1126 		 */
1127 		MPASS(!tmpfs_dirent_dup(xde));
1128 		tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), NULL, NULL, 0,
1129 		    &nde);
1130 		/* *nde = *xde; XXX gcc 4.2.1 may generate invalid code. */
1131 		memcpy(nde, xde, sizeof(*xde));
1132 		xde->td_cookie |= TMPFS_DIRCOOKIE_DUPHEAD;
1133 		LIST_INIT(&xde->ud.td_duphead);
1134 		xde->td_namelen = 0;
1135 		xde->td_node = NULL;
1136 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, nde);
1137 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1138 	}
1139 	dnode->tn_size += sizeof(struct tmpfs_dirent);
1140 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1141 	dnode->tn_accessed = true;
1142 	tmpfs_update(vp);
1143 }
1144 
1145 /*
1146  * Detaches the directory entry de from the directory represented by vp.
1147  * Note that this does not change the link count of the node pointed by
1148  * the directory entry, as this is done by tmpfs_free_dirent.
1149  */
1150 void
1151 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
1152 {
1153 	struct tmpfs_mount *tmp;
1154 	struct tmpfs_dir *head;
1155 	struct tmpfs_node *dnode;
1156 	struct tmpfs_dirent *xde;
1157 
1158 	ASSERT_VOP_ELOCKED(vp, __func__);
1159 
1160 	dnode = VP_TO_TMPFS_DIR(vp);
1161 	head = &dnode->tn_dir.tn_dirhead;
1162 	dnode->tn_dir.tn_readdir_lastn = 0;
1163 	dnode->tn_dir.tn_readdir_lastp = NULL;
1164 
1165 	if (tmpfs_dirent_dup(de)) {
1166 		/* Remove duphead if de was last entry. */
1167 		if (LIST_NEXT(de, uh.td_dup.entries) == NULL) {
1168 			xde = tmpfs_dir_xlookup_hash(dnode, de->td_hash);
1169 			MPASS(tmpfs_dirent_duphead(xde));
1170 		} else
1171 			xde = NULL;
1172 		LIST_REMOVE(de, uh.td_dup.entries);
1173 		LIST_REMOVE(de, uh.td_dup.index_entries);
1174 		if (xde != NULL) {
1175 			if (LIST_EMPTY(&xde->ud.td_duphead)) {
1176 				RB_REMOVE(tmpfs_dir, head, xde);
1177 				tmp = VFS_TO_TMPFS(vp->v_mount);
1178 				MPASS(xde->td_node == NULL);
1179 				tmpfs_free_dirent(tmp, xde);
1180 			}
1181 		}
1182 		de->td_cookie = de->td_hash;
1183 	} else
1184 		RB_REMOVE(tmpfs_dir, head, de);
1185 
1186 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
1187 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1188 	dnode->tn_accessed = true;
1189 	tmpfs_update(vp);
1190 }
1191 
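/*
 * Frees all directory entries of 'dnode', including duplicate-cookie
 * chains, without touching the nodes they used to reference.
 */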
1192 void
1193 tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpfs_node *dnode)
1194 {
1195 	struct tmpfs_dirent *de, *dde, *nde;
1196 
1197 	RB_FOREACH_SAFE(de, tmpfs_dir, &dnode->tn_dir.tn_dirhead, nde) {
1198 		RB_REMOVE(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1199 		/* Node may already be destroyed. */
1200 		de->td_node = NULL;
1201 		if (tmpfs_dirent_duphead(de)) {
1202 			while ((dde = LIST_FIRST(&de->ud.td_duphead)) != NULL) {
1203 				LIST_REMOVE(dde, uh.td_dup.entries);
1204 				dde->td_node = NULL;
1205 				tmpfs_free_dirent(tmp, dde);
1206 			}
1207 		}
1208 		tmpfs_free_dirent(tmp, de);
1209 	}
1210 }
1211 
1212 /*
1213  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
1214  * directory and returns it in the uio space.  The function returns 0
1215  * on success, EJUSTRETURN if there was not enough space in the uio
1216  * structure to hold the directory entry, or an appropriate error code
1217  * if another error happens.
1218  */
1219 static int
1220 tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1221     struct uio *uio)
1222 {
1223 	int error;
1224 	struct dirent dent;
1225 
1226 	TMPFS_VALIDATE_DIR(node);
1227 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
1228 
1229 	dent.d_fileno = node->tn_id;
1230 	dent.d_off = TMPFS_DIRCOOKIE_DOTDOT;
1231 	dent.d_type = DT_DIR;
1232 	dent.d_namlen = 1;
1233 	dent.d_name[0] = '.';
1234 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1235 	dirent_terminate(&dent);
1236 
1237 	if (dent.d_reclen > uio->uio_resid)
1238 		error = EJUSTRETURN;
1239 	else
1240 		error = uiomove(&dent, dent.d_reclen, uio);
1241 
1242 	tmpfs_set_accessed(tm, node);
1243 
1244 	return (error);
1245 }
1246 
1247 /*
1248  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
1249  * directory and returns it in the uio space.  The function returns 0
1250  * on success, EJUSTRETURN if there was not enough space in the uio
1251  * structure to hold the directory entry, or an appropriate error code
1252  * if another error happens.
1253  */
1254 static int
1255 tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1256     struct uio *uio, off_t next)
1257 {
1258 	struct tmpfs_node *parent;
1259 	struct dirent dent;
1260 	int error;
1261 
1262 	TMPFS_VALIDATE_DIR(node);
1263 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
1264 
1265 	/*
1266 	 * Return ENOENT if the current node is already removed.
1267 	 */
1268 	TMPFS_ASSERT_LOCKED(node);
1269 	parent = node->tn_dir.tn_parent;
1270 	if (parent == NULL)
1271 		return (ENOENT);
1272 
1273 	TMPFS_NODE_LOCK(parent);
1274 	dent.d_fileno = parent->tn_id;
1275 	TMPFS_NODE_UNLOCK(parent);
1276 
1277 	dent.d_off = next;
1278 	dent.d_type = DT_DIR;
1279 	dent.d_namlen = 2;
1280 	dent.d_name[0] = '.';
1281 	dent.d_name[1] = '.';
1282 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1283 	dirent_terminate(&dent);
1284 
1285 	if (dent.d_reclen > uio->uio_resid)
1286 		error = EJUSTRETURN;
1287 	else
1288 		error = uiomove(&dent, dent.d_reclen, uio);
1289 
1290 	tmpfs_set_accessed(tm, node);
1291 
1292 	return (error);
1293 }
1294 
1295 /*
1296  * Helper function for tmpfs_readdir.  Returns as many directory entries
1297  * as can fit in the uio space.  The read starts at uio->uio_offset.
1298  * The function returns 0 on success, EJUSTRETURN if there was not enough
1299  * space in the uio structure to hold a directory entry, or an appropriate
1300  * error code if another error happens.
1301  */
1302 int
1303 tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
1304     struct uio *uio, int maxcookies, u_long *cookies, int *ncookies)
1305 {
1306 	struct tmpfs_dir_cursor dc;
1307 	struct tmpfs_dirent *de, *nde;
1308 	off_t off;
1309 	int error;
1310 
1311 	TMPFS_VALIDATE_DIR(node);
1312 
1313 	off = 0;
1314 
1315 	/*
1316 	 * Lookup the node from the current offset.  The starting offset of
1317 	 * 0 will lookup both '.' and '..', and then the first real entry,
1318 	 * or EOF if there are none.  Then find all entries for the dir that
1319 	 * fit into the buffer.  Once no more entries are found (de == NULL),
1320 	 * the offset is set to TMPFS_DIRCOOKIE_EOF, which will cause the next
1321 	 * call to return 0.
1322 	 */
1323 	switch (uio->uio_offset) {
1324 	case TMPFS_DIRCOOKIE_DOT:
1325 		error = tmpfs_dir_getdotdent(tm, node, uio);
1326 		if (error != 0)
1327 			return (error);
1328 		uio->uio_offset = off = TMPFS_DIRCOOKIE_DOTDOT;
1329 		if (cookies != NULL)
1330 			cookies[(*ncookies)++] = off;
1331 		/* FALLTHROUGH */
1332 	case TMPFS_DIRCOOKIE_DOTDOT:
1333 		de = tmpfs_dir_first(node, &dc);
1334 		off = tmpfs_dirent_cookie(de);
1335 		error = tmpfs_dir_getdotdotdent(tm, node, uio, off);
1336 		if (error != 0)
1337 			return (error);
1338 		uio->uio_offset = off;
1339 		if (cookies != NULL)
1340 			cookies[(*ncookies)++] = off;
1341 		/* EOF. */
1342 		if (de == NULL)
1343 			return (0);
1344 		break;
1345 	case TMPFS_DIRCOOKIE_EOF:
1346 		return (0);
1347 	default:
1348 		de = tmpfs_dir_lookup_cookie(node, uio->uio_offset, &dc);
1349 		if (de == NULL)
1350 			return (EINVAL);
1351 		if (cookies != NULL)
1352 			off = tmpfs_dirent_cookie(de);
1353 	}
1354 
1355 	/*
1356 	 * Read as many entries as possible; i.e., until we reach the end of the
1357 	 * directory or we exhaust uio space.
1358 	 */
1359 	do {
1360 		struct dirent d;
1361 
1362 		/*
1363 		 * Create a dirent structure representing the current tmpfs_node
1364 		 * and fill it.
1365 		 */
1366 		if (de->td_node == NULL) {
1367 			d.d_fileno = 1;
1368 			d.d_type = DT_WHT;
1369 		} else {
1370 			d.d_fileno = de->td_node->tn_id;
1371 			switch (de->td_node->tn_type) {
1372 			case VBLK:
1373 				d.d_type = DT_BLK;
1374 				break;
1375 
1376 			case VCHR:
1377 				d.d_type = DT_CHR;
1378 				break;
1379 
1380 			case VDIR:
1381 				d.d_type = DT_DIR;
1382 				break;
1383 
1384 			case VFIFO:
1385 				d.d_type = DT_FIFO;
1386 				break;
1387 
1388 			case VLNK:
1389 				d.d_type = DT_LNK;
1390 				break;
1391 
1392 			case VREG:
1393 				d.d_type = DT_REG;
1394 				break;
1395 
1396 			case VSOCK:
1397 				d.d_type = DT_SOCK;
1398 				break;
1399 
1400 			default:
1401 				panic("tmpfs_dir_getdents: type %p %d",
1402 				    de->td_node, (int)de->td_node->tn_type);
1403 			}
1404 		}
1405 		d.d_namlen = de->td_namelen;
1406 		MPASS(de->td_namelen < sizeof(d.d_name));
1407 		(void)memcpy(d.d_name, de->ud.td_name, de->td_namelen);
1408 		d.d_reclen = GENERIC_DIRSIZ(&d);
1409 
1410 		/*
1411 		 * Stop reading if the directory entry we are processing is bigger
1412 		 * than the amount of data that can be returned.
1413 		 */
1414 		if (d.d_reclen > uio->uio_resid) {
1415 			error = EJUSTRETURN;
1416 			break;
1417 		}
1418 
1419 		nde = tmpfs_dir_next(node, &dc);
1420 		d.d_off = tmpfs_dirent_cookie(nde);
1421 		dirent_terminate(&d);
1422 
1423 		/*
1424 		 * Copy the new dirent structure into the output buffer and
1425 		 * advance pointers.
1426 		 */
1427 		error = uiomove(&d, d.d_reclen, uio);
1428 		if (error == 0) {
1429 			de = nde;
1430 			if (cookies != NULL) {
1431 				off = tmpfs_dirent_cookie(de);
1432 				MPASS(*ncookies < maxcookies);
1433 				cookies[(*ncookies)++] = off;
1434 			}
1435 		}
1436 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
1437 
1438 	/* Skip setting off when using cookies as it is already done above. */
1439 	if (cookies == NULL)
1440 		off = tmpfs_dirent_cookie(de);
1441 
1442 	/* Update the offset and cache. */
1443 	uio->uio_offset = off;
1444 	node->tn_dir.tn_readdir_lastn = off;
1445 	node->tn_dir.tn_readdir_lastp = de;
1446 
1447 	tmpfs_set_accessed(tm, node);
1448 	return (error);
1449 }
1450 
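/*
 * Whiteout support.  A whiteout is a directory entry whose td_node is
 * NULL; tmpfs_dir_getdents() reports such entries with type DT_WHT, and
 * tmpfs_alloc_file() removes an existing whiteout when a real entry
 * with the same name is created.
 */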
1451 int
1452 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
1453 {
1454 	struct tmpfs_dirent *de;
1455 	int error;
1456 
1457 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
1458 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
1459 	if (error != 0)
1460 		return (error);
1461 	tmpfs_dir_attach(dvp, de);
1462 	return (0);
1463 }
1464 
1465 void
1466 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
1467 {
1468 	struct tmpfs_dirent *de;
1469 
1470 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1471 	MPASS(de != NULL && de->td_node == NULL);
1472 	tmpfs_dir_detach(dvp, de);
1473 	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de);
1474 }
1475 
1476 /*
1477  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
1478  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
1479  * 'newsize' must not be negative.
1480  *
1481  * Returns zero on success or an appropriate error code on failure.
1482  */
1483 int
1484 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
1485 {
1486 	struct tmpfs_mount *tmp;
1487 	struct tmpfs_node *node;
1488 	vm_object_t uobj;
1489 	vm_page_t m;
1490 	vm_pindex_t idx, newpages, oldpages;
1491 	off_t oldsize;
1492 	int base, rv;
1493 
1494 	MPASS(vp->v_type == VREG);
1495 	MPASS(newsize >= 0);
1496 
1497 	node = VP_TO_TMPFS_NODE(vp);
1498 	uobj = node->tn_reg.tn_aobj;
1499 	tmp = VFS_TO_TMPFS(vp->v_mount);
1500 
1501 	/*
1502 	 * Convert the old and new sizes to the number of pages needed to
1503 	 * store them.  It may happen that we do not need to do anything
1504 	 * because the last allocated page can accommodate the change on
1505 	 * its own.
1506 	 */
1507 	oldsize = node->tn_size;
1508 	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
1509 	MPASS(oldpages == uobj->size);
1510 	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
1511 
1512 	if (__predict_true(newpages == oldpages && newsize >= oldsize)) {
1513 		node->tn_size = newsize;
1514 		return (0);
1515 	}
1516 
1517 	if (newpages > oldpages &&
1518 	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
1519 		return (ENOSPC);
1520 
1521 	VM_OBJECT_WLOCK(uobj);
1522 	if (newsize < oldsize) {
1523 		/*
1524 		 * Zero the truncated part of the last page.
1525 		 */
1526 		base = newsize & PAGE_MASK;
1527 		if (base != 0) {
1528 			idx = OFF_TO_IDX(newsize);
1529 retry:
1530 			m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
1531 			if (m != NULL) {
1532 				MPASS(vm_page_all_valid(m));
1533 			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
1534 				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
1535 				    VM_ALLOC_WAITFAIL);
1536 				if (m == NULL)
1537 					goto retry;
1538 				vm_object_pip_add(uobj, 1);
1539 				VM_OBJECT_WUNLOCK(uobj);
1540 				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
1541 				    NULL);
1542 				VM_OBJECT_WLOCK(uobj);
1543 				vm_object_pip_wakeup(uobj);
1544 				if (rv == VM_PAGER_OK) {
1545 					/*
1546 					 * Since the page was not resident,
1547 					 * and therefore not recently
1548 					 * accessed, immediately enqueue it
1549 					 * for asynchronous laundering.  The
1550 					 * current operation is not regarded
1551 					 * as an access.
1552 					 */
1553 					vm_page_launder(m);
1554 				} else {
1555 					vm_page_free(m);
1556 					if (ignerr)
1557 						m = NULL;
1558 					else {
1559 						VM_OBJECT_WUNLOCK(uobj);
1560 						return (EIO);
1561 					}
1562 				}
1563 			}
1564 			if (m != NULL) {
1565 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
1566 				vm_page_set_dirty(m);
1567 				vm_page_xunbusy(m);
1568 			}
1569 		}
1570 
1571 		/*
1572 		 * Release any swap space and free any whole pages.
1573 		 */
1574 		if (newpages < oldpages)
1575 			vm_object_page_remove(uobj, newpages, 0, 0);
1576 	}
1577 	uobj->size = newpages;
1578 	VM_OBJECT_WUNLOCK(uobj);
1579 
1580 	atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);
1581 
1582 	node->tn_size = newsize;
1583 	return (0);
1584 }
1585 
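/*
 * If the vnode's VM object has been dirtied since it was last noted
 * (its generation count differs from the recorded clean generation),
 * mark the node as modified and changed so that a later tmpfs_itimes()
 * call updates the timestamps.
 */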
1586 void
1587 tmpfs_check_mtime(struct vnode *vp)
1588 {
1589 	struct tmpfs_node *node;
1590 	struct vm_object *obj;
1591 
1592 	ASSERT_VOP_ELOCKED(vp, "check_mtime");
1593 	if (vp->v_type != VREG)
1594 		return;
1595 	obj = vp->v_object;
1596 	KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
1597 	    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
1598 	/* unlocked read */
1599 	if (obj->generation != obj->cleangeneration) {
1600 		VM_OBJECT_WLOCK(obj);
1601 		if (obj->generation != obj->cleangeneration) {
1602 			obj->cleangeneration = obj->generation;
1603 			node = VP_TO_TMPFS_NODE(vp);
1604 			node->tn_status |= TMPFS_NODE_MODIFIED |
1605 			    TMPFS_NODE_CHANGED;
1606 		}
1607 		VM_OBJECT_WUNLOCK(obj);
1608 	}
1609 }
1610 
1611 /*
1612  * Change flags of the given vnode.
1613  * Caller should execute tmpfs_update on vp after a successful execution.
1614  * The vnode must be locked on entry and remain locked on exit.
1615  */
1616 int
1617 tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
1618     struct thread *p)
1619 {
1620 	int error;
1621 	struct tmpfs_node *node;
1622 
1623 	ASSERT_VOP_ELOCKED(vp, "chflags");
1624 
1625 	node = VP_TO_TMPFS_NODE(vp);
1626 
1627 	if ((flags & ~(SF_APPEND | SF_ARCHIVED | SF_IMMUTABLE | SF_NOUNLINK |
1628 	    UF_APPEND | UF_ARCHIVE | UF_HIDDEN | UF_IMMUTABLE | UF_NODUMP |
1629 	    UF_NOUNLINK | UF_OFFLINE | UF_OPAQUE | UF_READONLY | UF_REPARSE |
1630 	    UF_SPARSE | UF_SYSTEM)) != 0)
1631 		return (EOPNOTSUPP);
1632 
1633 	/* Disallow this operation if the file system is mounted read-only. */
1634 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1635 		return (EROFS);
1636 
1637 	/*
1638 	 * Callers may only modify the file flags on objects they
1639 	 * have VADMIN rights for.
1640 	 */
1641 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1642 		return (error);
1643 	/*
1644 	 * Unprivileged processes are not permitted to unset system
1645 	 * flags, or modify flags if any system flags are set.
1646 	 */
1647 	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS)) {
1648 		if (node->tn_flags &
1649 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
1650 			error = securelevel_gt(cred, 0);
1651 			if (error)
1652 				return (error);
1653 		}
1654 	} else {
1655 		if (node->tn_flags &
1656 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
1657 		    ((flags ^ node->tn_flags) & SF_SETTABLE))
1658 			return (EPERM);
1659 	}
1660 	node->tn_flags = flags;
1661 	node->tn_status |= TMPFS_NODE_CHANGED;
1662 
1663 	ASSERT_VOP_ELOCKED(vp, "chflags2");
1664 
1665 	return (0);
1666 }
1667 
1668 /*
1669  * Change access mode on the given vnode.
1670  * Caller should execute tmpfs_update on vp after a successful execution.
1671  * The vnode must be locked on entry and remain locked on exit.
1672  */
1673 int
1674 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
1675 {
1676 	int error;
1677 	struct tmpfs_node *node;
1678 	mode_t newmode;
1679 
1680 	ASSERT_VOP_ELOCKED(vp, "chmod");
1681 	ASSERT_VOP_IN_SEQC(vp);
1682 
1683 	node = VP_TO_TMPFS_NODE(vp);
1684 
1685 	/* Disallow this operation if the file system is mounted read-only. */
1686 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1687 		return EROFS;
1688 
1689 	/* Immutable or append-only files cannot be modified, either. */
1690 	if (node->tn_flags & (IMMUTABLE | APPEND))
1691 		return EPERM;
1692 
1693 	/*
1694 	 * To modify the permissions on a file, the caller must possess VADMIN
1695 	 * for that file.
1696 	 */
1697 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1698 		return (error);
1699 
1700 	/*
1701 	 * Privileged processes may set the sticky bit on non-directories,
1702 	 * as well as set the setgid bit on a file with a group that the
1703 	 * process is not a member of.
1704 	 */
1705 	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
1706 		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE))
1707 			return (EFTYPE);
1708 	}
1709 	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
1710 		error = priv_check_cred(cred, PRIV_VFS_SETGID);
1711 		if (error)
1712 			return (error);
1713 	}
1714 
1715 	newmode = node->tn_mode & ~ALLPERMS;
1716 	newmode |= mode & ALLPERMS;
1717 	atomic_store_short(&node->tn_mode, newmode);
1718 
1719 	node->tn_status |= TMPFS_NODE_CHANGED;
1720 
1721 	ASSERT_VOP_ELOCKED(vp, "chmod2");
1722 
1723 	return (0);
1724 }
1725 
1726 /*
1727  * Change ownership of the given vnode.  At least one of uid or gid must
1728  * be different than VNOVAL.  If one is set to that value, the attribute
1729  * is unchanged.
1730  * Caller should execute tmpfs_update on vp after a successful execution.
1731  * The vnode must be locked on entry and remain locked on exit.
1732  */
1733 int
1734 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
1735     struct thread *p)
1736 {
1737 	int error;
1738 	struct tmpfs_node *node;
1739 	uid_t ouid;
1740 	gid_t ogid;
1741 	mode_t newmode;
1742 
1743 	ASSERT_VOP_ELOCKED(vp, "chown");
1744 	ASSERT_VOP_IN_SEQC(vp);
1745 
1746 	node = VP_TO_TMPFS_NODE(vp);
1747 
1748 	/* Assign default values if they are unknown. */
1749 	MPASS(uid != VNOVAL || gid != VNOVAL);
1750 	if (uid == VNOVAL)
1751 		uid = node->tn_uid;
1752 	if (gid == VNOVAL)
1753 		gid = node->tn_gid;
1754 	MPASS(uid != VNOVAL && gid != VNOVAL);
1755 
1756 	/* Disallow this operation if the file system is mounted read-only. */
1757 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1758 		return (EROFS);
1759 
1760 	/* Immutable or append-only files cannot be modified, either. */
1761 	if (node->tn_flags & (IMMUTABLE | APPEND))
1762 		return (EPERM);
1763 
1764 	/*
1765 	 * To modify the ownership of a file, the caller must possess VADMIN
1766 	 * for that file.
1767 	 */
1768 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1769 		return (error);
1770 
1771 	/*
1772 	 * To change the owner of a file, or change the group of a file to a
1773 	 * group of which we are not a member, the caller must have
1774 	 * privilege.
1775 	 */
1776 	if ((uid != node->tn_uid ||
1777 	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
1778 	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN)))
1779 		return (error);
1780 
1781 	ogid = node->tn_gid;
1782 	ouid = node->tn_uid;
1783 
1784 	node->tn_uid = uid;
1785 	node->tn_gid = gid;
1786 
1787 	node->tn_status |= TMPFS_NODE_CHANGED;
1788 
1789 	if ((node->tn_mode & (S_ISUID | S_ISGID)) && (ouid != uid || ogid != gid)) {
1790 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
1791 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
1792 			atomic_store_short(&node->tn_mode, newmode);
1793 		}
1794 	}
1795 
1796 	ASSERT_VOP_ELOCKED(vp, "chown2");
1797 
1798 	return (0);
1799 }
1800 
1801 /*
1802  * Change size of the given vnode.
1803  * Caller should execute tmpfs_update on vp after a successful execution.
1804  * The vnode must be locked on entry and remain locked on exit.
1805  */
1806 int
1807 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
1808     struct thread *p)
1809 {
1810 	int error;
1811 	struct tmpfs_node *node;
1812 
1813 	ASSERT_VOP_ELOCKED(vp, "chsize");
1814 
1815 	node = VP_TO_TMPFS_NODE(vp);
1816 
1817 	/* Decide whether this is a valid operation based on the file type. */
1818 	error = 0;
1819 	switch (vp->v_type) {
1820 	case VDIR:
1821 		return (EISDIR);
1822 
1823 	case VREG:
1824 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
1825 			return (EROFS);
1826 		break;
1827 
1828 	case VBLK:
1829 		/* FALLTHROUGH */
1830 	case VCHR:
1831 		/* FALLTHROUGH */
1832 	case VFIFO:
1833 		/*
1834 		 * Allow modifications of special files even if the file
1835 		 * system is mounted read-only (we are not modifying the
1836 		 * files themselves, but the objects they represent).
1837 		 */
1838 		return (0);
1839 
1840 	default:
1841 		/* Anything else is unsupported. */
1842 		return (EOPNOTSUPP);
1843 	}
1844 
1845 	/* Immutable or append-only files cannot be modified, either. */
1846 	if (node->tn_flags & (IMMUTABLE | APPEND))
1847 		return (EPERM);
1848 
1849 	error = tmpfs_truncate(vp, size);
1850 	/*
1851 	 * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1852 	 * for us, as well as update tn_status; no need to do that here.
1853 	 */
1854 
1855 	ASSERT_VOP_ELOCKED(vp, "chsize2");
1856 
1857 	return (error);
1858 }
1859 
1860 /*
1861  * Change access and modification times of the given vnode.
1862  * Caller should execute tmpfs_update on vp after a successful execution.
1863  * The vnode must be locked on entry and remain locked on exit.
1864  */
1865 int
1866 tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
1867     struct ucred *cred, struct thread *l)
1868 {
1869 	int error;
1870 	struct tmpfs_node *node;
1871 
1872 	ASSERT_VOP_ELOCKED(vp, "chtimes");
1873 
1874 	node = VP_TO_TMPFS_NODE(vp);
1875 
1876 	/* Disallow this operation if the file system is mounted read-only. */
1877 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1878 		return (EROFS);
1879 
1880 	/* Immutable or append-only files cannot be modified, either. */
1881 	if (node->tn_flags & (IMMUTABLE | APPEND))
1882 		return (EPERM);
1883 
1884 	error = vn_utimes_perm(vp, vap, cred, l);
1885 	if (error != 0)
1886 		return (error);
1887 
1888 	if (vap->va_atime.tv_sec != VNOVAL)
1889 		node->tn_accessed = true;
1890 
1891 	if (vap->va_mtime.tv_sec != VNOVAL)
1892 		node->tn_status |= TMPFS_NODE_MODIFIED;
1893 
1894 	if (vap->va_birthtime.tv_sec != VNOVAL)
1895 		node->tn_status |= TMPFS_NODE_MODIFIED;
1896 
1897 	tmpfs_itimes(vp, &vap->va_atime, &vap->va_mtime);
1898 
1899 	if (vap->va_birthtime.tv_sec != VNOVAL)
1900 		node->tn_birthtime = vap->va_birthtime;
1901 	ASSERT_VOP_ELOCKED(vp, "chtimes2");
1902 
1903 	return (0);
1904 }
1905 
1906 void
1907 tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
1908 {
1909 
1910 	if ((node->tn_status & status) == status || tm->tm_ronly)
1911 		return;
1912 	TMPFS_NODE_LOCK(node);
1913 	node->tn_status |= status;
1914 	TMPFS_NODE_UNLOCK(node);
1915 }
1916 
1917 void
1918 tmpfs_set_accessed(struct tmpfs_mount *tm, struct tmpfs_node *node)
1919 {
1920 	if (node->tn_accessed || tm->tm_ronly)
1921 		return;
1922 	atomic_store_8(&node->tn_accessed, true);
1923 }
1924 
1925 /* Sync timestamps */
1926 void
1927 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
1928     const struct timespec *mod)
1929 {
1930 	struct tmpfs_node *node;
1931 	struct timespec now;
1932 
1933 	ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
1934 	node = VP_TO_TMPFS_NODE(vp);
1935 
1936 	if (!node->tn_accessed &&
1937 	    (node->tn_status & (TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED)) == 0)
1938 		return;
1939 
1940 	vfs_timestamp(&now);
1941 	TMPFS_NODE_LOCK(node);
1942 	if (node->tn_accessed) {
1943 		if (acc == NULL)
1944 			 acc = &now;
1945 		node->tn_atime = *acc;
1946 	}
1947 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
1948 		if (mod == NULL)
1949 			mod = &now;
1950 		node->tn_mtime = *mod;
1951 	}
1952 	if (node->tn_status & TMPFS_NODE_CHANGED)
1953 		node->tn_ctime = now;
1954 	node->tn_status &= ~(TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
1955 	node->tn_accessed = false;
1956 	TMPFS_NODE_UNLOCK(node);
1957 
1958 	/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
1959 	random_harvest_queue(node, sizeof(*node), RANDOM_FS_ATIME);
1960 }
1961 
1962 int
1963 tmpfs_truncate(struct vnode *vp, off_t length)
1964 {
1965 	int error;
1966 	struct tmpfs_node *node;
1967 
1968 	node = VP_TO_TMPFS_NODE(vp);
1969 
1970 	if (length < 0) {
1971 		error = EINVAL;
1972 		goto out;
1973 	}
1974 
1975 	if (node->tn_size == length) {
1976 		error = 0;
1977 		goto out;
1978 	}
1979 
1980 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
1981 		return (EFBIG);
1982 
1983 	error = tmpfs_reg_resize(vp, length, FALSE);
1984 	if (error == 0)
1985 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1986 
1987 out:
1988 	tmpfs_update(vp);
1989 
1990 	return (error);
1991 }
1992 
1993 static __inline int
1994 tmpfs_dirtree_cmp(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
1995 {
1996 	if (a->td_hash > b->td_hash)
1997 		return (1);
1998 	else if (a->td_hash < b->td_hash)
1999 		return (-1);
2000 	return (0);
2001 }
2002 
2003 RB_GENERATE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
2004