xref: /freebsd/sys/fs/tmpfs/tmpfs_subr.c (revision 16fa3dcba027d13dcda9ee78e6057e3e5a79f80c)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c) 2005 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * Efficient memory file system supporting functions.
37  */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/fnv_hash.h>
45 #include <sys/lock.h>
46 #include <sys/limits.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/random.h>
52 #include <sys/refcount.h>
53 #include <sys/rwlock.h>
54 #include <sys/smr.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/user.h>
58 #include <sys/vnode.h>
59 #include <sys/vmmeter.h>
60 
61 #include <vm/vm.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm_object.h>
64 #include <vm/vm_page.h>
65 #include <vm/vm_pageout.h>
66 #include <vm/vm_pager.h>
67 #include <vm/vm_extern.h>
68 #include <vm/swap_pager.h>
69 
70 #include <fs/tmpfs/tmpfs.h>
71 #include <fs/tmpfs/tmpfs_fifoops.h>
72 #include <fs/tmpfs/tmpfs_vnops.h>
73 
74 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
75     "tmpfs file system");
76 
77 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
78 
79 MALLOC_DEFINE(M_TMPFSDIR, "tmpfs dir", "tmpfs dirent structure");
80 static uma_zone_t tmpfs_node_pool;
81 VFS_SMR_DECLARE;
82 
83 int tmpfs_pager_type = -1;
84 
85 static vm_object_t
86 tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
87     vm_ooffset_t offset, struct ucred *cred)
88 {
89 	vm_object_t object;
90 
91 	MPASS(handle == NULL);
92 	MPASS(offset == 0);
93 	object = vm_object_allocate_dyn(tmpfs_pager_type, size,
94 	    OBJ_COLORED | OBJ_SWAP);
95 	if (!swap_pager_init_object(object, NULL, NULL, size, 0)) {
96 		vm_object_deallocate(object);
97 		object = NULL;
98 	}
99 	return (object);
100 }
101 
102 /*
103  * Make sure tmpfs vnodes with writable mappings can be found on the lazy list.
104  *
105  * This allows for periodic mtime updates while only scanning vnodes which are
106  * plausibly dirty, see tmpfs_update_mtime_lazy.
107  */
108 static void
109 tmpfs_pager_writecount_recalc(vm_object_t object, vm_offset_t old,
110     vm_offset_t new)
111 {
112 	struct vnode *vp;
113 
114 	VM_OBJECT_ASSERT_WLOCKED(object);
115 
116 	vp = object->un_pager.swp.swp_tmpfs;
117 
118 	/*
119 	 * Forced unmount?
120 	 */
121 	if (vp == NULL) {
122 		KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
123 		    ("object %p with OBJ_TMPFS_VREF but without vnode", object));
124 		VM_OBJECT_WUNLOCK(object);
125 		return;
126 	}
127 
128 	if (old == 0) {
129 		VNASSERT((object->flags & OBJ_TMPFS_VREF) == 0, vp,
130 		    ("object without writable mappings has a reference"));
131 		VNPASS(vp->v_usecount > 0, vp);
132 	} else {
133 		VNASSERT((object->flags & OBJ_TMPFS_VREF) != 0, vp,
134 		    ("object with writable mappings does not have a reference"));
135 	}
136 
137 	if (old == new) {
138 		VM_OBJECT_WUNLOCK(object);
139 		return;
140 	}
141 
142 	if (new == 0) {
143 		vm_object_clear_flag(object, OBJ_TMPFS_VREF);
144 		VM_OBJECT_WUNLOCK(object);
145 		vrele(vp);
146 	} else {
147 		if ((object->flags & OBJ_TMPFS_VREF) == 0) {
148 			vref(vp);
149 			vlazy(vp);
150 			vm_object_set_flag(object, OBJ_TMPFS_VREF);
151 		}
152 		VM_OBJECT_WUNLOCK(object);
153 	}
154 }
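/*
 * Editor's note summarizing the invariant maintained above: OBJ_TMPFS_VREF
 * is set on the object iff writemappings > 0, and while it is set the vnode
 * holds both a use reference (vref()) and lazy-list membership (vlazy()),
 * so the periodic mtime scan can find it.  The reference is dropped only
 * when the last writable mapping goes away.
 */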
155 
156 static void
157 tmpfs_pager_update_writecount(vm_object_t object, vm_offset_t start,
158     vm_offset_t end)
159 {
160 	vm_offset_t new, old;
161 
162 	VM_OBJECT_WLOCK(object);
163 	KASSERT((object->flags & OBJ_ANON) == 0,
164 	    ("%s: object %p with OBJ_ANON", __func__, object));
165 	old = object->un_pager.swp.writemappings;
166 	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
167 	new = object->un_pager.swp.writemappings;
168 	tmpfs_pager_writecount_recalc(object, old, new);
169 	VM_OBJECT_ASSERT_UNLOCKED(object);
170 }
171 
172 static void
173 tmpfs_pager_release_writecount(vm_object_t object, vm_offset_t start,
174     vm_offset_t end)
175 {
176 	vm_offset_t new, old;
177 
178 	VM_OBJECT_WLOCK(object);
179 	KASSERT((object->flags & OBJ_ANON) == 0,
180 	    ("%s: object %p with OBJ_ANON", __func__, object));
181 	old = object->un_pager.swp.writemappings;
182 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
183 	new = object->un_pager.swp.writemappings;
184 	tmpfs_pager_writecount_recalc(object, old, new);
185 	VM_OBJECT_ASSERT_UNLOCKED(object);
186 }
187 
188 static void
189 tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
190 {
191 	struct vnode *vp;
192 
193 	/*
194 	 * A reclaimed tmpfs VREG node keeps the tmpfs_pager_type object
195 	 * type, but loses the OBJ_TMPFS flag.  In this case there is no
196 	 * v_writecount to adjust.
197 	 */
198 	if (vp_heldp != NULL)
199 		VM_OBJECT_RLOCK(object);
200 	else
201 		VM_OBJECT_ASSERT_LOCKED(object);
202 	if ((object->flags & OBJ_TMPFS) != 0) {
203 		vp = object->un_pager.swp.swp_tmpfs;
204 		if (vp != NULL) {
205 			*vpp = vp;
206 			if (vp_heldp != NULL) {
207 				vhold(vp);
208 				*vp_heldp = true;
209 			}
210 		}
211 	}
212 	if (vp_heldp != NULL)
213 		VM_OBJECT_RUNLOCK(object);
214 }
215 
216 struct pagerops tmpfs_pager_ops = {
217 	.pgo_kvme_type = KVME_TYPE_VNODE,
218 	.pgo_alloc = tmpfs_pager_alloc,
219 	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
220 	.pgo_update_writecount = tmpfs_pager_update_writecount,
221 	.pgo_release_writecount = tmpfs_pager_release_writecount,
222 	.pgo_mightbedirty = vm_object_mightbedirty_,
223 	.pgo_getvp = tmpfs_pager_getvp,
224 };
225 
226 static int
227 tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
228 {
229 	struct tmpfs_node *node;
230 
231 	node = mem;
232 	node->tn_gen++;
233 	node->tn_size = 0;
234 	node->tn_status = 0;
235 	node->tn_accessed = false;
236 	node->tn_flags = 0;
237 	node->tn_links = 0;
238 	node->tn_vnode = NULL;
239 	node->tn_vpstate = 0;
240 	return (0);
241 }
242 
243 static void
244 tmpfs_node_dtor(void *mem, int size, void *arg)
245 {
246 	struct tmpfs_node *node;
247 
248 	node = mem;
249 	node->tn_type = VNON;
250 }
251 
252 static int
253 tmpfs_node_init(void *mem, int size, int flags)
254 {
255 	struct tmpfs_node *node;
256 
257 	node = mem;
258 	node->tn_id = 0;
259 	mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
260 	node->tn_gen = arc4random();
261 	return (0);
262 }
263 
264 static void
265 tmpfs_node_fini(void *mem, int size)
266 {
267 	struct tmpfs_node *node;
268 
269 	node = mem;
270 	mtx_destroy(&node->tn_interlock);
271 }
272 
273 int
274 tmpfs_subr_init(void)
275 {
276 	tmpfs_pager_type = vm_pager_alloc_dyn_type(&tmpfs_pager_ops,
277 	    OBJT_SWAP);
278 	if (tmpfs_pager_type == -1)
279 		return (EINVAL);
280 	tmpfs_node_pool = uma_zcreate("TMPFS node",
281 	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
282 	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
283 	VFS_SMR_ZONE_SET(tmpfs_node_pool);
284 	return (0);
285 }
286 
287 void
288 tmpfs_subr_uninit(void)
289 {
290 	if (tmpfs_pager_type != -1)
291 		vm_pager_free_dyn_type(tmpfs_pager_type);
292 	tmpfs_pager_type = -1;
293 	uma_zdestroy(tmpfs_node_pool);
294 }
295 
296 static int
297 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
298 {
299 	int error;
300 	long pages, bytes;
301 
302 	pages = *(long *)arg1;
303 	bytes = pages * PAGE_SIZE;
304 
305 	error = sysctl_handle_long(oidp, &bytes, 0, req);
306 	if (error || !req->newptr)
307 		return (error);
308 
309 	pages = bytes / PAGE_SIZE;
310 	if (pages < TMPFS_PAGES_MINRESERVED)
311 		return (EINVAL);
312 
313 	*(long *)arg1 = pages;
314 	return (0);
315 }
316 
317 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved,
318     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &tmpfs_pages_reserved, 0,
319     sysctl_mem_reserved, "L",
320     "Amount of available memory and swap below which tmpfs growth stops");
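/*
 * Example usage (editor's note): the knob is exported in bytes but stored
 * in pages, so with 4 KB pages
 *	sysctl vfs.tmpfs.memory_reserved=67108864
 * reserves 16384 pages (64 MB); values below TMPFS_PAGES_MINRESERVED pages
 * are rejected with EINVAL by sysctl_mem_reserved() above.
 */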
321 
322 static __inline int tmpfs_dirtree_cmp(struct tmpfs_dirent *a,
323     struct tmpfs_dirent *b);
324 RB_PROTOTYPE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
325 
326 size_t
327 tmpfs_mem_avail(void)
328 {
329 	size_t avail;
330 	long reserved;
331 
332 	avail = swap_pager_avail + vm_free_count();
333 	reserved = atomic_load_long(&tmpfs_pages_reserved);
334 	if (__predict_false(avail < reserved))
335 		return (0);
336 	return (avail - reserved);
337 }
338 
339 size_t
340 tmpfs_pages_used(struct tmpfs_mount *tmp)
341 {
342 	const size_t node_size = sizeof(struct tmpfs_node) +
343 	    sizeof(struct tmpfs_dirent);
344 	size_t meta_pages;
345 
346 	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
347 	    PAGE_SIZE);
348 	return (meta_pages + tmp->tm_pages_used);
349 }
350 
351 static size_t
352 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
353 {
354 	if (tmpfs_mem_avail() < req_pages)
355 		return (0);
356 
357 	if (tmp->tm_pages_max != ULONG_MAX &&
358 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
359 		return (0);
360 
361 	return (1);
362 }
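/*
 * Illustrative caller pattern (editor's sketch, mirroring tmpfs_alloc_node()
 * and tmpfs_reg_resize() below): check both the global reserve and the
 * per-mount page cap before committing to growth.
 */
#if 0
	if (newpages > oldpages &&
	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
		return (ENOSPC);
#endif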
363 
364 void
365 tmpfs_ref_node(struct tmpfs_node *node)
366 {
367 #ifdef INVARIANTS
368 	u_int old;
369 
370 	old =
371 #endif
372 	refcount_acquire(&node->tn_refcount);
373 #ifdef INVARIANTS
374 	KASSERT(old > 0, ("node %p zero refcount", node));
375 #endif
376 }
377 
378 /*
379  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
380  * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
381  * using the credentials of the process 'p'.
382  *
383  * If the node type is set to 'VDIR', then the parent parameter must point
384  * to the parent directory of the node being created.  It may only be NULL
385  * while allocating the root node.
386  *
387  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
388  * specifies the device the node represents.
389  *
390  * If the node type is set to 'VLNK', then the parameter target specifies
391  * the file name of the target file for the symbolic link that is being
392  * created.
393  * Note that new nodes are allocated from the tmpfs_node_pool UMA zone,
394  * provided there is enough space left to create them (see
395  * tmpfs_pages_check_avail()).
396  * space to create them.
397  *
398  * Returns zero on success or an appropriate error code on failure.
399  */
400 int
401 tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
402     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
403     const char *target, dev_t rdev, struct tmpfs_node **node)
404 {
405 	struct tmpfs_node *nnode;
406 	vm_object_t obj;
407 	char *symlink;
408 	char symlink_smr;
409 
410 	/* If the root directory of the 'tmp' file system is not yet
411 	 * allocated, this must be the request to do it. */
412 	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
413 
414 	MPASS(IFF(type == VLNK, target != NULL));
415 	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
416 
417 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
418 		return (ENOSPC);
419 	if (tmpfs_pages_check_avail(tmp, 1) == 0)
420 		return (ENOSPC);
421 
422 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
423 		/*
424 		 * When a new tmpfs node is created for fully
425 		 * constructed mount point, there must be a parent
426 		 * node, whose vnode is locked exclusively.  As a
427 		 * consequence, if the unmount is executing in
428 		 * parallel, vflush() cannot reclaim the parent vnode.
429 		 * Due to this, the check for MNTK_UNMOUNT flag is not
430 		 * racy: if we did not see MNTK_UNMOUNT flag, then tmp
431 		 * cannot be destroyed until node construction is
432 		 * finished and the parent vnode unlocked.
433 		 *
434 		 * Tmpfs does not need to instantiate new nodes during
435 		 * unmount.
436 		 */
437 		return (EBUSY);
438 	}
439 	if ((mp->mnt_flag & MNT_RDONLY) != 0)
440 		return (EROFS);
441 
442 	nnode = uma_zalloc_smr(tmpfs_node_pool, M_WAITOK);
443 
444 	/* Generic initialization. */
445 	nnode->tn_type = type;
446 	vfs_timestamp(&nnode->tn_atime);
447 	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
448 	    nnode->tn_atime;
449 	nnode->tn_uid = uid;
450 	nnode->tn_gid = gid;
451 	nnode->tn_mode = mode;
452 	nnode->tn_id = alloc_unr64(&tmp->tm_ino_unr);
453 	nnode->tn_refcount = 1;
454 
455 	/* Type-specific initialization. */
456 	switch (nnode->tn_type) {
457 	case VBLK:
458 	case VCHR:
459 		nnode->tn_rdev = rdev;
460 		break;
461 
462 	case VDIR:
463 		RB_INIT(&nnode->tn_dir.tn_dirhead);
464 		LIST_INIT(&nnode->tn_dir.tn_dupindex);
465 		MPASS(parent != nnode);
466 		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
467 		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
468 		nnode->tn_dir.tn_readdir_lastn = 0;
469 		nnode->tn_dir.tn_readdir_lastp = NULL;
470 		nnode->tn_links++;
471 		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
472 		nnode->tn_dir.tn_parent->tn_links++;
473 		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
474 		break;
475 
476 	case VFIFO:
477 		/* FALLTHROUGH */
478 	case VSOCK:
479 		break;
480 
481 	case VLNK:
482 		MPASS(strlen(target) < MAXPATHLEN);
483 		nnode->tn_size = strlen(target);
484 
485 		symlink = NULL;
486 		if (!tmp->tm_nonc) {
487 			symlink = cache_symlink_alloc(nnode->tn_size + 1, M_WAITOK);
488 			symlink_smr = true;
489 		}
490 		if (symlink == NULL) {
491 			symlink = malloc(nnode->tn_size + 1, M_TMPFSNAME, M_WAITOK);
492 			symlink_smr = false;
493 		}
494 		memcpy(symlink, target, nnode->tn_size + 1);
495 
496 		/*
497 		 * Allow safe symlink resolving for lockless lookup.
498 		 * tmpfs_fplookup_symlink references this comment.
499 		 *
500 		 * 1. nnode is not yet visible to the world
501 		 * 2. both tn_link_target and tn_link_smr get populated
502 		 * 3. release fence publishes their content
503 		 * 4. tn_link_target content is immutable until node destruction,
504 		 *    where the pointer gets set to NULL
505 		 * 5. tn_link_smr is never changed once set
506 		 *
507 		 * As a result it is sufficient to issue load consume on the node
508 		 * pointer to also get the above content in a stable manner.
509 		 * Worst case tn_link_smr flag may be set to true despite being stale,
510 		 * while the target buffer is already cleared out.
511 		 */
512 		atomic_store_ptr(&nnode->tn_link_target, symlink);
513 		atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
514 		atomic_thread_fence_rel();
515 		break;
516 
517 	case VREG:
518 		obj = nnode->tn_reg.tn_aobj =
519 		    vm_pager_allocate(tmpfs_pager_type, NULL, 0,
520 			VM_PROT_DEFAULT, 0,
521 			NULL /* XXXKIB - tmpfs needs swap reservation */);
522 		/* OBJ_TMPFS is set together with the setting of vp->v_object */
523 		nnode->tn_reg.tn_tmp = tmp;
524 		break;
525 
526 	default:
527 		panic("tmpfs_alloc_node: type %p %d", nnode,
528 		    (int)nnode->tn_type);
529 	}
530 
531 	TMPFS_LOCK(tmp);
532 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
533 	nnode->tn_attached = true;
534 	tmp->tm_nodes_inuse++;
535 	tmp->tm_refcount++;
536 	TMPFS_UNLOCK(tmp);
537 
538 	*node = nnode;
539 	return (0);
540 }
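/*
 * Editor's sketch (not part of the upstream file) of the consume side of
 * the symlink publication protocol documented in the VLNK case above; the
 * real consumer is tmpfs_fplookup_symlink() in tmpfs_vnops.c, and the
 * atomic_load_consume_ptr() primitive is an assumption here.
 */
#if 0
	symlink = (char *)atomic_load_consume_ptr(&node->tn_link_target);
	if (symlink == NULL)
		return (EAGAIN);	/* node is being destroyed */
	/* Consume ordering makes the flag stable once the pointer is seen. */
	smr = atomic_load_char((char *)&node->tn_link_smr);
#endif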
541 
542 /*
543  * Destroys the node pointed to by node from the file system 'tmp'.
544  * If the node references a directory, no entries are allowed.
545  */
546 void
547 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
548 {
549 	if (refcount_release_if_not_last(&node->tn_refcount))
550 		return;
551 
552 	TMPFS_LOCK(tmp);
553 	TMPFS_NODE_LOCK(node);
554 	if (!tmpfs_free_node_locked(tmp, node, false)) {
555 		TMPFS_NODE_UNLOCK(node);
556 		TMPFS_UNLOCK(tmp);
557 	}
558 }
559 
560 bool
561 tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
562     bool detach)
563 {
564 	vm_object_t uobj;
565 	char *symlink;
566 	bool last;
567 
568 	TMPFS_MP_ASSERT_LOCKED(tmp);
569 	TMPFS_NODE_ASSERT_LOCKED(node);
570 
571 	last = refcount_release(&node->tn_refcount);
572 	if (node->tn_attached && (detach || last)) {
573 		MPASS(tmp->tm_nodes_inuse > 0);
574 		tmp->tm_nodes_inuse--;
575 		LIST_REMOVE(node, tn_entries);
576 		node->tn_attached = false;
577 	}
578 	if (!last)
579 		return (false);
580 
581 #ifdef INVARIANTS
582 	MPASS(node->tn_vnode == NULL);
583 	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
584 #endif
585 	TMPFS_NODE_UNLOCK(node);
586 	TMPFS_UNLOCK(tmp);
587 
588 	switch (node->tn_type) {
589 	case VBLK:
590 		/* FALLTHROUGH */
591 	case VCHR:
592 		/* FALLTHROUGH */
593 	case VDIR:
594 		/* FALLTHROUGH */
595 	case VFIFO:
596 		/* FALLTHROUGH */
597 	case VSOCK:
598 		break;
599 
600 	case VLNK:
601 		symlink = node->tn_link_target;
602 		atomic_store_ptr(&node->tn_link_target, NULL);
603 		if (atomic_load_char(&node->tn_link_smr)) {
604 			cache_symlink_free(symlink, node->tn_size + 1);
605 		} else {
606 			free(symlink, M_TMPFSNAME);
607 		}
608 		break;
609 
610 	case VREG:
611 		uobj = node->tn_reg.tn_aobj;
612 		if (uobj != NULL) {
613 			if (uobj->size != 0)
614 				atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
615 			KASSERT((uobj->flags & OBJ_TMPFS) == 0,
616 			    ("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
617 			vm_object_deallocate(uobj);
618 		}
619 		break;
620 
621 	default:
622 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
623 	}
624 
625 	uma_zfree_smr(tmpfs_node_pool, node);
626 	TMPFS_LOCK(tmp);
627 	tmpfs_free_tmp(tmp);
628 	return (true);
629 }
630 
631 static __inline uint32_t
632 tmpfs_dirent_hash(const char *name, u_int len)
633 {
634 	uint32_t hash;
635 
636 	hash = fnv_32_buf(name, len, FNV1_32_INIT + len) & TMPFS_DIRCOOKIE_MASK;
637 #ifdef TMPFS_DEBUG_DIRCOOKIE_DUP
638 	hash &= 0xf;
639 #endif
640 	if (hash < TMPFS_DIRCOOKIE_MIN)
641 		hash += TMPFS_DIRCOOKIE_MIN;
642 
643 	return (hash);
644 }
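/*
 * Editor's note on the cookie namespace: hashes are confined to
 * TMPFS_DIRCOOKIE_MASK and bumped to at least TMPFS_DIRCOOKIE_MIN so that
 * they cannot collide with the fixed '.', '..' and EOF cookies consumed by
 * tmpfs_dir_getdents(); the TMPFS_DIRCOOKIE_DUP and TMPFS_DIRCOOKIE_DUPHEAD
 * bits mark hash-collision entries and their list heads (see below).
 */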
645 
646 static __inline off_t
647 tmpfs_dirent_cookie(struct tmpfs_dirent *de)
648 {
649 	if (de == NULL)
650 		return (TMPFS_DIRCOOKIE_EOF);
651 
652 	MPASS(de->td_cookie >= TMPFS_DIRCOOKIE_MIN);
653 
654 	return (de->td_cookie);
655 }
656 
657 static __inline boolean_t
658 tmpfs_dirent_dup(struct tmpfs_dirent *de)
659 {
660 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUP) != 0);
661 }
662 
663 static __inline boolean_t
664 tmpfs_dirent_duphead(struct tmpfs_dirent *de)
665 {
666 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUPHEAD) != 0);
667 }
668 
669 void
670 tmpfs_dirent_init(struct tmpfs_dirent *de, const char *name, u_int namelen)
671 {
672 	de->td_hash = de->td_cookie = tmpfs_dirent_hash(name, namelen);
673 	memcpy(de->ud.td_name, name, namelen);
674 	de->td_namelen = namelen;
675 }
676 
677  * Allocates a new directory entry for the node 'node' with the name 'name'.
678  * Allocates a new directory entry for the node node with a name of name.
679  * The new directory entry is returned in *de.
680  *
681  * The link count of node is increased by one to reflect the new object
682  * referencing it.
683  *
684  * Returns zero on success or an appropriate error code on failure.
685  */
686 int
687 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
688     const char *name, u_int len, struct tmpfs_dirent **de)
689 {
690 	struct tmpfs_dirent *nde;
691 
692 	nde = malloc(sizeof(*nde), M_TMPFSDIR, M_WAITOK);
693 	nde->td_node = node;
694 	if (name != NULL) {
695 		nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
696 		tmpfs_dirent_init(nde, name, len);
697 	} else
698 		nde->td_namelen = 0;
699 	if (node != NULL)
700 		node->tn_links++;
701 
702 	*de = nde;
703 
704 	return (0);
705 }
706 
707 /*
708  * Frees a directory entry.  It is the caller's responsibility to destroy
709  * the node referenced by it if needed.
710  *
711  * The link count of node is decreased by one to reflect the removal of an
712  * object that referenced it.  This only happens if the directory entry
713  * still points at a node; otherwise the node is not accessed, as it may
714  * already have been released from the outside.
715  */
716 void
717 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
718 {
719 	struct tmpfs_node *node;
720 
721 	node = de->td_node;
722 	if (node != NULL) {
723 		MPASS(node->tn_links > 0);
724 		node->tn_links--;
725 	}
726 	if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
727 		free(de->ud.td_name, M_TMPFSNAME);
728 	free(de, M_TMPFSDIR);
729 }
730 
731 void
732 tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
733 {
734 	bool want_vrele;
735 
736 	ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
737 	if (vp->v_type != VREG || obj == NULL)
738 		return;
739 
740 	VM_OBJECT_WLOCK(obj);
741 	VI_LOCK(vp);
742 	/*
743 	 * May be going through forced unmount.
744 	 */
745 	want_vrele = false;
746 	if ((obj->flags & OBJ_TMPFS_VREF) != 0) {
747 		vm_object_clear_flag(obj, OBJ_TMPFS_VREF);
748 		want_vrele = true;
749 	}
750 
751 	vm_object_clear_flag(obj, OBJ_TMPFS);
752 	obj->un_pager.swp.swp_tmpfs = NULL;
753 	if (vp->v_writecount < 0)
754 		vp->v_writecount = 0;
755 	VI_UNLOCK(vp);
756 	VM_OBJECT_WUNLOCK(obj);
757 	if (want_vrele) {
758 		vrele(vp);
759 	}
760 }
761 
762 /*
763  * Need to clear v_object for insmntque failure.
764  */
765 static void
766 tmpfs_insmntque_dtr(struct vnode *vp, void *dtr_arg)
767 {
768 
769 	tmpfs_destroy_vobject(vp, vp->v_object);
770 	vp->v_object = NULL;
771 	vp->v_data = NULL;
772 	vp->v_op = &dead_vnodeops;
773 	vgone(vp);
774 	vput(vp);
775 }
776 
777 /*
778  * Allocates a new vnode for the node 'node', or returns a new reference to
779  * an existing one if the node already had a vnode referencing it.  The
780  * resulting locked vnode is returned in *vpp.
781  *
782  * Returns zero on success or an appropriate error code on failure.
783  */
784 int
785 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
786     struct vnode **vpp)
787 {
788 	struct vnode *vp;
789 	enum vgetstate vs;
790 	struct tmpfs_mount *tm;
791 	vm_object_t object;
792 	int error;
793 
794 	error = 0;
795 	tm = VFS_TO_TMPFS(mp);
796 	TMPFS_NODE_LOCK(node);
797 	tmpfs_ref_node(node);
798 loop:
799 	TMPFS_NODE_ASSERT_LOCKED(node);
800 	if ((vp = node->tn_vnode) != NULL) {
801 		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
802 		if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
803 		    (VN_IS_DOOMED(vp) &&
804 		     (lkflag & LK_NOWAIT) != 0)) {
805 			TMPFS_NODE_UNLOCK(node);
806 			error = ENOENT;
807 			vp = NULL;
808 			goto out;
809 		}
810 		if (VN_IS_DOOMED(vp)) {
811 			node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
812 			while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {
813 				msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
814 				    0, "tmpfsE", 0);
815 			}
816 			goto loop;
817 		}
818 		vs = vget_prep(vp);
819 		TMPFS_NODE_UNLOCK(node);
820 		error = vget_finish(vp, lkflag, vs);
821 		if (error == ENOENT) {
822 			TMPFS_NODE_LOCK(node);
823 			goto loop;
824 		}
825 		if (error != 0) {
826 			vp = NULL;
827 			goto out;
828 		}
829 
830 		/*
831 		 * Make sure the vnode is still there after
832 		 * getting the interlock to avoid racing a free.
833 		 */
834 		if (node->tn_vnode == NULL || node->tn_vnode != vp) {
835 			vput(vp);
836 			TMPFS_NODE_LOCK(node);
837 			goto loop;
838 		}
839 
840 		goto out;
841 	}
842 
843 	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
844 	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
845 		TMPFS_NODE_UNLOCK(node);
846 		error = ENOENT;
847 		vp = NULL;
848 		goto out;
849 	}
850 
851 	/*
852 	 * Otherwise mark the node TMPFS_VNODE_ALLOCATING and call
853 	 * getnewvnode(), which can block; racing callers sleep until done.
854 	 */
855 	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
856 		node->tn_vpstate |= TMPFS_VNODE_WANT;
857 		error = msleep((caddr_t) &node->tn_vpstate,
858 		    TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
859 		if (error != 0)
860 			goto out;
861 		goto loop;
862 	} else
863 		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
864 
865 	TMPFS_NODE_UNLOCK(node);
866 
867 	/* Get a new vnode and associate it with our node. */
868 	error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
869 	    &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
870 	if (error != 0)
871 		goto unlock;
872 	MPASS(vp != NULL);
873 
874 	/* lkflag is ignored, the lock is exclusive */
875 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
876 
877 	vp->v_data = node;
878 	vp->v_type = node->tn_type;
879 
880 	/* Type-specific initialization. */
881 	switch (node->tn_type) {
882 	case VBLK:
883 		/* FALLTHROUGH */
884 	case VCHR:
885 		/* FALLTHROUGH */
886 	case VLNK:
887 		/* FALLTHROUGH */
888 	case VSOCK:
889 		break;
890 	case VFIFO:
891 		vp->v_op = &tmpfs_fifoop_entries;
892 		break;
893 	case VREG:
894 		object = node->tn_reg.tn_aobj;
895 		VM_OBJECT_WLOCK(object);
896 		KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
897 		    ("%s: object %p with OBJ_TMPFS_VREF but without vnode",
898 		    __func__, object));
899 		KASSERT(object->un_pager.swp.writemappings == 0,
900 		    ("%s: object %p has writemappings",
901 		    __func__, object));
902 		VI_LOCK(vp);
903 		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
904 		vp->v_object = object;
905 		object->un_pager.swp.swp_tmpfs = vp;
906 		vm_object_set_flag(object, OBJ_TMPFS);
907 		vn_irflag_set_locked(vp, VIRF_PGREAD);
908 		VI_UNLOCK(vp);
909 		VM_OBJECT_WUNLOCK(object);
910 		break;
911 	case VDIR:
912 		MPASS(node->tn_dir.tn_parent != NULL);
913 		if (node->tn_dir.tn_parent == node)
914 			vp->v_vflag |= VV_ROOT;
915 		break;
916 
917 	default:
918 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
919 	}
920 	if (vp->v_type != VFIFO)
921 		VN_LOCK_ASHARE(vp);
922 
923 	error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
924 	if (error != 0)
925 		vp = NULL;
926 
927 unlock:
928 	TMPFS_NODE_LOCK(node);
929 
930 	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
931 	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
932 	node->tn_vnode = vp;
933 
934 	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
935 		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
936 		TMPFS_NODE_UNLOCK(node);
937 		wakeup((caddr_t) &node->tn_vpstate);
938 	} else
939 		TMPFS_NODE_UNLOCK(node);
940 
941 out:
942 	if (error == 0) {
943 		*vpp = vp;
944 
945 #ifdef INVARIANTS
946 		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
947 		TMPFS_NODE_LOCK(node);
948 		MPASS(*vpp == node->tn_vnode);
949 		TMPFS_NODE_UNLOCK(node);
950 #endif
951 	}
952 	tmpfs_free_node(tm, node);
953 
954 	return (error);
955 }
956 
957 /*
958  * Destroys the association between the vnode vp and the node it
959  * references.
960  */
961 void
962 tmpfs_free_vp(struct vnode *vp)
963 {
964 	struct tmpfs_node *node;
965 
966 	node = VP_TO_TMPFS_NODE(vp);
967 
968 	TMPFS_NODE_ASSERT_LOCKED(node);
969 	node->tn_vnode = NULL;
970 	if ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0)
971 		wakeup(&node->tn_vnode);
972 	node->tn_vpstate &= ~TMPFS_VNODE_WRECLAIM;
973 	vp->v_data = NULL;
974 }
975 
976 /*
977  * Allocates a new file of type 'type' and adds it to the parent directory
978  * 'dvp'; this addition is done using the component name given in 'cnp'.
979  * The ownership of the new file is automatically assigned based on the
980  * credentials of the caller (through 'cnp'), the group is set based on
981  * the parent directory and the mode is determined from the 'vap' argument.
982  * If successful, *vpp holds a vnode to the newly created file and zero
983  * is returned.  Otherwise *vpp is NULL and the function returns an
984  * appropriate error code.
985  */
986 int
987 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
988     struct componentname *cnp, const char *target)
989 {
990 	int error;
991 	struct tmpfs_dirent *de;
992 	struct tmpfs_mount *tmp;
993 	struct tmpfs_node *dnode;
994 	struct tmpfs_node *node;
995 	struct tmpfs_node *parent;
996 
997 	ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
998 	MPASS(cnp->cn_flags & HASBUF);
999 
1000 	tmp = VFS_TO_TMPFS(dvp->v_mount);
1001 	dnode = VP_TO_TMPFS_DIR(dvp);
1002 	*vpp = NULL;
1003 
1004 	/* If the entry we are creating is a directory, we must check that
1005 	 * its parent's link count cannot overflow, because the parent will
1006 	 * gain a new link for it. */
1007 	if (vap->va_type == VDIR) {
1008 		/* Ensure that we do not overflow the maximum number of links
1009 		 * imposed by the system. */
1010 		MPASS(dnode->tn_links <= TMPFS_LINK_MAX);
1011 		if (dnode->tn_links == TMPFS_LINK_MAX) {
1012 			return (EMLINK);
1013 		}
1014 
1015 		parent = dnode;
1016 		MPASS(parent != NULL);
1017 	} else
1018 		parent = NULL;
1019 
1020 	/* Allocate a node that represents the new file. */
1021 	error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
1022 	    cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
1023 	    target, vap->va_rdev, &node);
1024 	if (error != 0)
1025 		return (error);
1026 
1027 	/* Allocate a directory entry that points to the new file. */
1028 	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
1029 	    &de);
1030 	if (error != 0) {
1031 		tmpfs_free_node(tmp, node);
1032 		return (error);
1033 	}
1034 
1035 	/* Allocate a vnode for the new file. */
1036 	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
1037 	if (error != 0) {
1038 		tmpfs_free_dirent(tmp, de);
1039 		tmpfs_free_node(tmp, node);
1040 		return (error);
1041 	}
1042 
1043 	/* Now that all required items are allocated, we can proceed to
1044 	 * insert the new node into the directory, an operation that
1045 	 * cannot fail. */
1046 	if (cnp->cn_flags & ISWHITEOUT)
1047 		tmpfs_dir_whiteout_remove(dvp, cnp);
1048 	tmpfs_dir_attach(dvp, de);
1049 	return (0);
1050 }
1051 
1052 struct tmpfs_dirent *
1053 tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
1054 {
1055 	struct tmpfs_dirent *de;
1056 
1057 	de = RB_MIN(tmpfs_dir, &dnode->tn_dir.tn_dirhead);
1058 	dc->tdc_tree = de;
1059 	if (de != NULL && tmpfs_dirent_duphead(de))
1060 		de = LIST_FIRST(&de->ud.td_duphead);
1061 	dc->tdc_current = de;
1062 
1063 	return (dc->tdc_current);
1064 }
1065 
1066 struct tmpfs_dirent *
1067 tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
1068 {
1069 	struct tmpfs_dirent *de;
1070 
1071 	MPASS(dc->tdc_tree != NULL);
1072 	if (tmpfs_dirent_dup(dc->tdc_current)) {
1073 		dc->tdc_current = LIST_NEXT(dc->tdc_current, uh.td_dup.entries);
1074 		if (dc->tdc_current != NULL)
1075 			return (dc->tdc_current);
1076 	}
1077 	dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
1078 	    &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
1079 	if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
1080 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
1081 		MPASS(dc->tdc_current != NULL);
1082 	}
1083 
1084 	return (dc->tdc_current);
1085 }
1086 
1087 /* Lookup directory entry in RB-Tree. Function may return duphead entry. */
1088 static struct tmpfs_dirent *
1089 tmpfs_dir_xlookup_hash(struct tmpfs_node *dnode, uint32_t hash)
1090 {
1091 	struct tmpfs_dirent *de, dekey;
1092 
1093 	dekey.td_hash = hash;
1094 	de = RB_FIND(tmpfs_dir, &dnode->tn_dir.tn_dirhead, &dekey);
1095 	return (de);
1096 }
1097 
1098 /* Lookup directory entry by cookie, initialize directory cursor accordingly. */
1099 static struct tmpfs_dirent *
1100 tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
1101     struct tmpfs_dir_cursor *dc)
1102 {
1103 	struct tmpfs_dir *dirhead = &node->tn_dir.tn_dirhead;
1104 	struct tmpfs_dirent *de, dekey;
1105 
1106 	MPASS(cookie >= TMPFS_DIRCOOKIE_MIN);
1107 
1108 	if (cookie == node->tn_dir.tn_readdir_lastn &&
1109 	    (de = node->tn_dir.tn_readdir_lastp) != NULL) {
1110 		/* Protect against possible race, tn_readdir_last[pn]
1111 		 * may be updated with only shared vnode lock held. */
1112 		if (cookie == tmpfs_dirent_cookie(de))
1113 			goto out;
1114 	}
1115 
1116 	if ((cookie & TMPFS_DIRCOOKIE_DUP) != 0) {
1117 		LIST_FOREACH(de, &node->tn_dir.tn_dupindex,
1118 		    uh.td_dup.index_entries) {
1119 			MPASS(tmpfs_dirent_dup(de));
1120 			if (de->td_cookie == cookie)
1121 				goto out;
1122 			/* dupindex list is sorted. */
1123 			if (de->td_cookie < cookie) {
1124 				de = NULL;
1125 				goto out;
1126 			}
1127 		}
1128 		MPASS(de == NULL);
1129 		goto out;
1130 	}
1131 
1132 	if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
1133 		de = NULL;
1134 	} else {
1135 		dekey.td_hash = cookie;
1136 		/* Recover if direntry for cookie was removed */
1137 		de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
1138 	}
1139 	dc->tdc_tree = de;
1140 	dc->tdc_current = de;
1141 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1142 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
1143 		MPASS(dc->tdc_current != NULL);
1144 	}
1145 	return (dc->tdc_current);
1146 
1147 out:
1148 	dc->tdc_tree = de;
1149 	dc->tdc_current = de;
1150 	if (de != NULL && tmpfs_dirent_dup(de))
1151 		dc->tdc_tree = tmpfs_dir_xlookup_hash(node,
1152 		    de->td_hash);
1153 	return (dc->tdc_current);
1154 }
1155 
1156 /*
1157  * Looks for a directory entry in the directory represented by node.
1158  * 'cnp' describes the name of the entry to look for.  Note that the .
1159  * and .. components are not allowed as they do not physically exist
1160  * within directories.
1161  *
1162  * Returns a pointer to the entry when found, otherwise NULL.
1163  */
1164 struct tmpfs_dirent *
1165 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
1166     struct componentname *cnp)
1167 {
1168 	struct tmpfs_dir_duphead *duphead;
1169 	struct tmpfs_dirent *de;
1170 	uint32_t hash;
1171 
1172 	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
1173 	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
1174 	    cnp->cn_nameptr[1] == '.')));
1175 	TMPFS_VALIDATE_DIR(node);
1176 
1177 	hash = tmpfs_dirent_hash(cnp->cn_nameptr, cnp->cn_namelen);
1178 	de = tmpfs_dir_xlookup_hash(node, hash);
1179 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1180 		duphead = &de->ud.td_duphead;
1181 		LIST_FOREACH(de, duphead, uh.td_dup.entries) {
1182 			if (TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1183 			    cnp->cn_namelen))
1184 				break;
1185 		}
1186 	} else if (de != NULL) {
1187 		if (!TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1188 		    cnp->cn_namelen))
1189 			de = NULL;
1190 	}
1191 	if (de != NULL && f != NULL && de->td_node != f)
1192 		de = NULL;
1193 
1194 	return (de);
1195 }
1196 
1197 /*
1198  * Attach duplicate-cookie directory entry nde to dnode and insert to dupindex
1199  * list, allocate new cookie value.
1200  */
1201 static void
1202 tmpfs_dir_attach_dup(struct tmpfs_node *dnode,
1203     struct tmpfs_dir_duphead *duphead, struct tmpfs_dirent *nde)
1204 {
1205 	struct tmpfs_dir_duphead *dupindex;
1206 	struct tmpfs_dirent *de, *pde;
1207 
1208 	dupindex = &dnode->tn_dir.tn_dupindex;
1209 	de = LIST_FIRST(dupindex);
1210 	if (de == NULL || de->td_cookie < TMPFS_DIRCOOKIE_DUP_MAX) {
1211 		if (de == NULL)
1212 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1213 		else
1214 			nde->td_cookie = de->td_cookie + 1;
1215 		MPASS(tmpfs_dirent_dup(nde));
1216 		LIST_INSERT_HEAD(dupindex, nde, uh.td_dup.index_entries);
1217 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1218 		return;
1219 	}
1220 
1221 	/*
1222 	 * Cookie numbers are near exhaustion. Scan dupindex list for unused
1223 	 * numbers. dupindex list is sorted in descending order. Keep it so
1224 	 * after inserting nde.
1225 	 */
1226 	while (1) {
1227 		pde = de;
1228 		de = LIST_NEXT(de, uh.td_dup.index_entries);
1229 		if (de == NULL && pde->td_cookie != TMPFS_DIRCOOKIE_DUP_MIN) {
1230 			/*
1231 			 * Last element of the index doesn't have minimal cookie
1232 			 * value, use it.
1233 			 */
1234 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1235 			LIST_INSERT_AFTER(pde, nde, uh.td_dup.index_entries);
1236 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1237 			return;
1238 		} else if (de == NULL) {
1239 			/*
1240 			 * We are so lucky to have 2^30 hash duplicates in a single
1241 			 * directory :) Return largest possible cookie value.
1242 			 * It should be fine except possible issues with
1243 			 * VOP_READDIR restart.
1244 			 */
1245 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MAX;
1246 			LIST_INSERT_HEAD(dupindex, nde,
1247 			    uh.td_dup.index_entries);
1248 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1249 			return;
1250 		}
1251 		if (de->td_cookie + 1 == pde->td_cookie ||
1252 		    de->td_cookie >= TMPFS_DIRCOOKIE_DUP_MAX)
1253 			continue;	/* No hole or invalid cookie. */
1254 		nde->td_cookie = de->td_cookie + 1;
1255 		MPASS(tmpfs_dirent_dup(nde));
1256 		MPASS(pde->td_cookie > nde->td_cookie);
1257 		MPASS(nde->td_cookie > de->td_cookie);
1258 		LIST_INSERT_BEFORE(de, nde, uh.td_dup.index_entries);
1259 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1260 		return;
1261 	}
1262 }
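/*
 * Worked example (editor's note): once the head cookie has reached
 * TMPFS_DIRCOOKIE_DUP_MAX, with dupindex = { DUP_MAX, DUP_MIN+1, DUP_MIN }
 * the scan above stops at the (pde = DUP_MAX, de = DUP_MIN+1) pair, picks
 * the hole cookie DUP_MIN+2 and inserts nde before de, keeping the list
 * sorted in descending order.
 */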
1263 
1264 /*
1265  * Attaches the directory entry de to the directory represented by vp.
1266  * Note that this does not change the link count of the node pointed by
1267  * the directory entry, as this is done by tmpfs_alloc_dirent.
1268  */
1269 void
1270 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
1271 {
1272 	struct tmpfs_node *dnode;
1273 	struct tmpfs_dirent *xde, *nde;
1274 
1275 	ASSERT_VOP_ELOCKED(vp, __func__);
1276 	MPASS(de->td_namelen > 0);
1277 	MPASS(de->td_hash >= TMPFS_DIRCOOKIE_MIN);
1278 	MPASS(de->td_cookie == de->td_hash);
1279 
1280 	dnode = VP_TO_TMPFS_DIR(vp);
1281 	dnode->tn_dir.tn_readdir_lastn = 0;
1282 	dnode->tn_dir.tn_readdir_lastp = NULL;
1283 
1284 	MPASS(!tmpfs_dirent_dup(de));
1285 	xde = RB_INSERT(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1286 	if (xde != NULL && tmpfs_dirent_duphead(xde))
1287 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1288 	else if (xde != NULL) {
1289 		/*
1290 		 * Allocate new duphead. Swap xde with duphead to avoid
1291 		 * adding/removing elements with the same hash.
1292 		 */
1293 		MPASS(!tmpfs_dirent_dup(xde));
1294 		tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), NULL, NULL, 0,
1295 		    &nde);
1296 		/* *nde = *xde; XXX gcc 4.2.1 may generate invalid code. */
1297 		memcpy(nde, xde, sizeof(*xde));
1298 		xde->td_cookie |= TMPFS_DIRCOOKIE_DUPHEAD;
1299 		LIST_INIT(&xde->ud.td_duphead);
1300 		xde->td_namelen = 0;
1301 		xde->td_node = NULL;
1302 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, nde);
1303 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1304 	}
1305 	dnode->tn_size += sizeof(struct tmpfs_dirent);
1306 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1307 	dnode->tn_accessed = true;
1308 	tmpfs_update(vp);
1309 }
1310 
1311 /*
1312  * Detaches the directory entry de from the directory represented by vp.
1313  * Note that this does not change the link count of the node pointed by
1314  * the directory entry, as this is done by tmpfs_free_dirent.
1315  */
1316 void
1317 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
1318 {
1319 	struct tmpfs_mount *tmp;
1320 	struct tmpfs_dir *head;
1321 	struct tmpfs_node *dnode;
1322 	struct tmpfs_dirent *xde;
1323 
1324 	ASSERT_VOP_ELOCKED(vp, __func__);
1325 
1326 	dnode = VP_TO_TMPFS_DIR(vp);
1327 	head = &dnode->tn_dir.tn_dirhead;
1328 	dnode->tn_dir.tn_readdir_lastn = 0;
1329 	dnode->tn_dir.tn_readdir_lastp = NULL;
1330 
1331 	if (tmpfs_dirent_dup(de)) {
1332 		/* Remove duphead if de was last entry. */
1333 		if (LIST_NEXT(de, uh.td_dup.entries) == NULL) {
1334 			xde = tmpfs_dir_xlookup_hash(dnode, de->td_hash);
1335 			MPASS(tmpfs_dirent_duphead(xde));
1336 		} else
1337 			xde = NULL;
1338 		LIST_REMOVE(de, uh.td_dup.entries);
1339 		LIST_REMOVE(de, uh.td_dup.index_entries);
1340 		if (xde != NULL) {
1341 			if (LIST_EMPTY(&xde->ud.td_duphead)) {
1342 				RB_REMOVE(tmpfs_dir, head, xde);
1343 				tmp = VFS_TO_TMPFS(vp->v_mount);
1344 				MPASS(xde->td_node == NULL);
1345 				tmpfs_free_dirent(tmp, xde);
1346 			}
1347 		}
1348 		de->td_cookie = de->td_hash;
1349 	} else
1350 		RB_REMOVE(tmpfs_dir, head, de);
1351 
1352 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
1353 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1354 	dnode->tn_accessed = true;
1355 	tmpfs_update(vp);
1356 }
1357 
1358 void
1359 tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpfs_node *dnode)
1360 {
1361 	struct tmpfs_dirent *de, *dde, *nde;
1362 
1363 	RB_FOREACH_SAFE(de, tmpfs_dir, &dnode->tn_dir.tn_dirhead, nde) {
1364 		RB_REMOVE(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1365 		/* Node may already be destroyed. */
1366 		de->td_node = NULL;
1367 		if (tmpfs_dirent_duphead(de)) {
1368 			while ((dde = LIST_FIRST(&de->ud.td_duphead)) != NULL) {
1369 				LIST_REMOVE(dde, uh.td_dup.entries);
1370 				dde->td_node = NULL;
1371 				tmpfs_free_dirent(tmp, dde);
1372 			}
1373 		}
1374 		tmpfs_free_dirent(tmp, de);
1375 	}
1376 }
1377 
1378 /*
1379  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
1380  * directory and returns it in the uio space.  The function returns 0
1381  * on success, EJUSTRETURN if there was not enough space in the uio
1382  * structure to hold the directory entry, or an appropriate error code
1383  * if another error happens.
1384  */
1385 static int
1386 tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1387     struct uio *uio)
1388 {
1389 	int error;
1390 	struct dirent dent;
1391 
1392 	TMPFS_VALIDATE_DIR(node);
1393 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
1394 
1395 	dent.d_fileno = node->tn_id;
1396 	dent.d_off = TMPFS_DIRCOOKIE_DOTDOT;
1397 	dent.d_type = DT_DIR;
1398 	dent.d_namlen = 1;
1399 	dent.d_name[0] = '.';
1400 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1401 	dirent_terminate(&dent);
1402 
1403 	if (dent.d_reclen > uio->uio_resid)
1404 		error = EJUSTRETURN;
1405 	else
1406 		error = uiomove(&dent, dent.d_reclen, uio);
1407 
1408 	tmpfs_set_accessed(tm, node);
1409 
1410 	return (error);
1411 }
1412 
1413 /*
1414  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
1415  * directory and returns it in the uio space.  The function returns 0
1416  * on success, EJUSTRETURN if there was not enough space in the uio
1417  * structure to hold the directory entry, or an appropriate error code
1418  * if another error happens.
1419  */
1420 static int
1421 tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1422     struct uio *uio, off_t next)
1423 {
1424 	struct tmpfs_node *parent;
1425 	struct dirent dent;
1426 	int error;
1427 
1428 	TMPFS_VALIDATE_DIR(node);
1429 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
1430 
1431 	/*
1432 	 * Return ENOENT if the current node is already removed.
1433 	 */
1434 	TMPFS_ASSERT_LOCKED(node);
1435 	parent = node->tn_dir.tn_parent;
1436 	if (parent == NULL)
1437 		return (ENOENT);
1438 
1439 	TMPFS_NODE_LOCK(parent);
1440 	dent.d_fileno = parent->tn_id;
1441 	TMPFS_NODE_UNLOCK(parent);
1442 
1443 	dent.d_off = next;
1444 	dent.d_type = DT_DIR;
1445 	dent.d_namlen = 2;
1446 	dent.d_name[0] = '.';
1447 	dent.d_name[1] = '.';
1448 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1449 	dirent_terminate(&dent);
1450 
1451 	if (dent.d_reclen > uio->uio_resid)
1452 		error = EJUSTRETURN;
1453 	else
1454 		error = uiomove(&dent, dent.d_reclen, uio);
1455 
1456 	tmpfs_set_accessed(tm, node);
1457 
1458 	return (error);
1459 }
1460 
1461 /*
1462  * Helper function for tmpfs_readdir.  Returns as many directory entries
1463  * as fit in the uio space.  The read starts at uio->uio_offset.
1464  * The function returns 0 on success, EJUSTRETURN if there was not enough
1465  * space in the uio structure to hold the directory entry, or an
1466  * appropriate error code if another error happens.
1467  */
1468 int
1469 tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
1470     struct uio *uio, int maxcookies, u_long *cookies, int *ncookies)
1471 {
1472 	struct tmpfs_dir_cursor dc;
1473 	struct tmpfs_dirent *de, *nde;
1474 	off_t off;
1475 	int error;
1476 
1477 	TMPFS_VALIDATE_DIR(node);
1478 
1479 	off = 0;
1480 
1481 	/*
1482 	 * Lookup the node from the current offset.  The starting offset of
1483 	 * 0 will lookup both '.' and '..', and then the first real entry,
1484 	 * or EOF if there are none.  Then find all entries for the dir that
1485 	 * fit into the buffer.  Once no more entries are found (de == NULL),
1486 	 * the offset is set to TMPFS_DIRCOOKIE_EOF, which will cause the next
1487 	 * call to return 0.
1488 	 */
1489 	switch (uio->uio_offset) {
1490 	case TMPFS_DIRCOOKIE_DOT:
1491 		error = tmpfs_dir_getdotdent(tm, node, uio);
1492 		if (error != 0)
1493 			return (error);
1494 		uio->uio_offset = off = TMPFS_DIRCOOKIE_DOTDOT;
1495 		if (cookies != NULL)
1496 			cookies[(*ncookies)++] = off;
1497 		/* FALLTHROUGH */
1498 	case TMPFS_DIRCOOKIE_DOTDOT:
1499 		de = tmpfs_dir_first(node, &dc);
1500 		off = tmpfs_dirent_cookie(de);
1501 		error = tmpfs_dir_getdotdotdent(tm, node, uio, off);
1502 		if (error != 0)
1503 			return (error);
1504 		uio->uio_offset = off;
1505 		if (cookies != NULL)
1506 			cookies[(*ncookies)++] = off;
1507 		/* EOF. */
1508 		if (de == NULL)
1509 			return (0);
1510 		break;
1511 	case TMPFS_DIRCOOKIE_EOF:
1512 		return (0);
1513 	default:
1514 		de = tmpfs_dir_lookup_cookie(node, uio->uio_offset, &dc);
1515 		if (de == NULL)
1516 			return (EINVAL);
1517 		if (cookies != NULL)
1518 			off = tmpfs_dirent_cookie(de);
1519 	}
1520 
1521 	/*
1522 	 * Read as many entries as possible; i.e., until we reach the end of the
1523 	 * directory or we exhaust uio space.
1524 	 */
1525 	do {
1526 		struct dirent d;
1527 
1528 		/*
1529 		 * Create a dirent structure representing the current tmpfs_node
1530 		 * and fill it.
1531 		 */
1532 		if (de->td_node == NULL) {
1533 			d.d_fileno = 1;
1534 			d.d_type = DT_WHT;
1535 		} else {
1536 			d.d_fileno = de->td_node->tn_id;
1537 			switch (de->td_node->tn_type) {
1538 			case VBLK:
1539 				d.d_type = DT_BLK;
1540 				break;
1541 
1542 			case VCHR:
1543 				d.d_type = DT_CHR;
1544 				break;
1545 
1546 			case VDIR:
1547 				d.d_type = DT_DIR;
1548 				break;
1549 
1550 			case VFIFO:
1551 				d.d_type = DT_FIFO;
1552 				break;
1553 
1554 			case VLNK:
1555 				d.d_type = DT_LNK;
1556 				break;
1557 
1558 			case VREG:
1559 				d.d_type = DT_REG;
1560 				break;
1561 
1562 			case VSOCK:
1563 				d.d_type = DT_SOCK;
1564 				break;
1565 
1566 			default:
1567 				panic("tmpfs_dir_getdents: type %p %d",
1568 				    de->td_node, (int)de->td_node->tn_type);
1569 			}
1570 		}
1571 		d.d_namlen = de->td_namelen;
1572 		MPASS(de->td_namelen < sizeof(d.d_name));
1573 		(void)memcpy(d.d_name, de->ud.td_name, de->td_namelen);
1574 		d.d_reclen = GENERIC_DIRSIZ(&d);
1575 
1576 		/*
1577 		 * Stop reading if the directory entry we are treating is bigger
1578 		 * than the amount of data that can be returned.
1579 		 */
1580 		if (d.d_reclen > uio->uio_resid) {
1581 			error = EJUSTRETURN;
1582 			break;
1583 		}
1584 
1585 		nde = tmpfs_dir_next(node, &dc);
1586 		d.d_off = tmpfs_dirent_cookie(nde);
1587 		dirent_terminate(&d);
1588 
1589 		/*
1590 		 * Copy the new dirent structure into the output buffer and
1591 		 * advance pointers.
1592 		 */
1593 		error = uiomove(&d, d.d_reclen, uio);
1594 		if (error == 0) {
1595 			de = nde;
1596 			if (cookies != NULL) {
1597 				off = tmpfs_dirent_cookie(de);
1598 				MPASS(*ncookies < maxcookies);
1599 				cookies[(*ncookies)++] = off;
1600 			}
1601 		}
1602 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
1603 
1604 	/* Skip setting off when using cookies as it is already done above. */
1605 	if (cookies == NULL)
1606 		off = tmpfs_dirent_cookie(de);
1607 
1608 	/* Update the offset and cache. */
1609 	uio->uio_offset = off;
1610 	node->tn_dir.tn_readdir_lastn = off;
1611 	node->tn_dir.tn_readdir_lastp = de;
1612 
1613 	tmpfs_set_accessed(tm, node);
1614 	return (error);
1615 }
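/*
 * Editor's note: d_off of each returned entry is the cookie of the next
 * one, so a reader can resume an interrupted scan by seeking uio_offset
 * (or handing the cookie back via the cookies array, as NFS does) to the
 * last d_off it saw; once that cookie is TMPFS_DIRCOOKIE_EOF the next
 * call returns 0 immediately.
 */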
1616 
1617 int
1618 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
1619 {
1620 	struct tmpfs_dirent *de;
1621 	int error;
1622 
1623 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
1624 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
1625 	if (error != 0)
1626 		return (error);
1627 	tmpfs_dir_attach(dvp, de);
1628 	return (0);
1629 }
1630 
1631 void
1632 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
1633 {
1634 	struct tmpfs_dirent *de;
1635 
1636 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1637 	MPASS(de != NULL && de->td_node == NULL);
1638 	tmpfs_dir_detach(dvp, de);
1639 	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de);
1640 }
1641 
1642 /*
1643  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
1644  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
1645  * 'newsize' must be positive.
1646  *
1647  * Returns zero on success or an appropriate error code on failure.
1648  */
1649 int
1650 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
1651 {
1652 	struct tmpfs_mount *tmp;
1653 	struct tmpfs_node *node;
1654 	vm_object_t uobj;
1655 	vm_page_t m;
1656 	vm_pindex_t idx, newpages, oldpages;
1657 	off_t oldsize;
1658 	int base, rv;
1659 
1660 	MPASS(vp->v_type == VREG);
1661 	MPASS(newsize >= 0);
1662 
1663 	node = VP_TO_TMPFS_NODE(vp);
1664 	uobj = node->tn_reg.tn_aobj;
1665 	tmp = VFS_TO_TMPFS(vp->v_mount);
1666 
1667 	/*
1668 	 * Convert the old and new sizes to the number of pages needed to
1669 	 * store them.  It may happen that we do not need to do anything
1670 	 * because the last allocated page can accommodate the change on
1671 	 * its own.
1672 	 */
1673 	oldsize = node->tn_size;
1674 	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
1675 	MPASS(oldpages == uobj->size);
1676 	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
1677 
1678 	if (__predict_true(newpages == oldpages && newsize >= oldsize)) {
1679 		node->tn_size = newsize;
1680 		return (0);
1681 	}
1682 
1683 	if (newpages > oldpages &&
1684 	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
1685 		return (ENOSPC);
1686 
1687 	VM_OBJECT_WLOCK(uobj);
1688 	if (newsize < oldsize) {
1689 		/*
1690 		 * Zero the truncated part of the last page.
1691 		 */
1692 		base = newsize & PAGE_MASK;
1693 		if (base != 0) {
1694 			idx = OFF_TO_IDX(newsize);
1695 retry:
1696 			m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
1697 			if (m != NULL) {
1698 				MPASS(vm_page_all_valid(m));
1699 			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
1700 				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
1701 				    VM_ALLOC_WAITFAIL);
1702 				if (m == NULL)
1703 					goto retry;
1704 				vm_object_pip_add(uobj, 1);
1705 				VM_OBJECT_WUNLOCK(uobj);
1706 				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
1707 				    NULL);
1708 				VM_OBJECT_WLOCK(uobj);
1709 				vm_object_pip_wakeup(uobj);
1710 				if (rv == VM_PAGER_OK) {
1711 					/*
1712 					 * Since the page was not resident,
1713 					 * and therefore not recently
1714 					 * accessed, immediately enqueue it
1715 					 * for asynchronous laundering.  The
1716 					 * current operation is not regarded
1717 					 * as an access.
1718 					 */
1719 					vm_page_launder(m);
1720 				} else {
1721 					vm_page_free(m);
1722 					if (ignerr)
1723 						m = NULL;
1724 					else {
1725 						VM_OBJECT_WUNLOCK(uobj);
1726 						return (EIO);
1727 					}
1728 				}
1729 			}
1730 			if (m != NULL) {
1731 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
1732 				vm_page_set_dirty(m);
1733 				vm_page_xunbusy(m);
1734 			}
1735 		}
1736 
1737 		/*
1738 		 * Release any swap space and free any whole pages.
1739 		 */
1740 		if (newpages < oldpages)
1741 			vm_object_page_remove(uobj, newpages, 0, 0);
1742 	}
1743 	uobj->size = newpages;
1744 	VM_OBJECT_WUNLOCK(uobj);
1745 
1746 	atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);
1747 
1748 	node->tn_size = newsize;
1749 	return (0);
1750 }
1751 
1752 void
1753 tmpfs_check_mtime(struct vnode *vp)
1754 {
1755 	struct tmpfs_node *node;
1756 	struct vm_object *obj;
1757 
1758 	ASSERT_VOP_ELOCKED(vp, "check_mtime");
1759 	if (vp->v_type != VREG)
1760 		return;
1761 	obj = vp->v_object;
1762 	KASSERT(obj->type == tmpfs_pager_type &&
1763 	    (obj->flags & (OBJ_SWAP | OBJ_TMPFS)) ==
1764 	    (OBJ_SWAP | OBJ_TMPFS), ("non-tmpfs obj"));
1765 	/* unlocked read */
1766 	if (obj->generation != obj->cleangeneration) {
1767 		VM_OBJECT_WLOCK(obj);
1768 		if (obj->generation != obj->cleangeneration) {
1769 			obj->cleangeneration = obj->generation;
1770 			node = VP_TO_TMPFS_NODE(vp);
1771 			node->tn_status |= TMPFS_NODE_MODIFIED |
1772 			    TMPFS_NODE_CHANGED;
1773 		}
1774 		VM_OBJECT_WUNLOCK(obj);
1775 	}
1776 }
1777 
1778 /*
1779  * Change flags of the given vnode.
1780  * Caller should execute tmpfs_update on vp after a successful execution.
1781  * The vnode must be locked on entry and remain locked on exit.
1782  */
1783 int
1784 tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
1785     struct thread *p)
1786 {
1787 	int error;
1788 	struct tmpfs_node *node;
1789 
1790 	ASSERT_VOP_ELOCKED(vp, "chflags");
1791 
1792 	node = VP_TO_TMPFS_NODE(vp);
1793 
1794 	if ((flags & ~(SF_APPEND | SF_ARCHIVED | SF_IMMUTABLE | SF_NOUNLINK |
1795 	    UF_APPEND | UF_ARCHIVE | UF_HIDDEN | UF_IMMUTABLE | UF_NODUMP |
1796 	    UF_NOUNLINK | UF_OFFLINE | UF_OPAQUE | UF_READONLY | UF_REPARSE |
1797 	    UF_SPARSE | UF_SYSTEM)) != 0)
1798 		return (EOPNOTSUPP);
1799 
1800 	/* Disallow this operation if the file system is mounted read-only. */
1801 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1802 		return (EROFS);
1803 
1804 	/*
1805 	 * Callers may only modify the file flags on objects they
1806 	 * have VADMIN rights for.
1807 	 */
1808 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1809 		return (error);
1810 	/*
1811 	 * Unprivileged processes are not permitted to unset system
1812 	 * flags, or modify flags if any system flags are set.
1813 	 */
1814 	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS)) {
1815 		if (node->tn_flags &
1816 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
1817 			error = securelevel_gt(cred, 0);
1818 			if (error)
1819 				return (error);
1820 		}
1821 	} else {
1822 		if (node->tn_flags &
1823 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
1824 		    ((flags ^ node->tn_flags) & SF_SETTABLE))
1825 			return (EPERM);
1826 	}
1827 	node->tn_flags = flags;
1828 	node->tn_status |= TMPFS_NODE_CHANGED;
1829 
1830 	ASSERT_VOP_ELOCKED(vp, "chflags2");
1831 
1832 	return (0);
1833 }
1834 
1835 /*
1836  * Change access mode on the given vnode.
1837  * Caller should execute tmpfs_update on vp after a successful execution.
1838  * The vnode must be locked on entry and remain locked on exit.
1839  */
1840 int
1841 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
1842 {
1843 	int error;
1844 	struct tmpfs_node *node;
1845 	mode_t newmode;
1846 
1847 	ASSERT_VOP_ELOCKED(vp, "chmod");
1848 	ASSERT_VOP_IN_SEQC(vp);
1849 
1850 	node = VP_TO_TMPFS_NODE(vp);
1851 
1852 	/* Disallow this operation if the file system is mounted read-only. */
1853 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1854 		return EROFS;
1855 
1856 	/* Immutable or append-only files cannot be modified, either. */
1857 	if (node->tn_flags & (IMMUTABLE | APPEND))
1858 		return (EPERM);
1859 
1860 	/*
1861 	 * To modify the permissions on a file, must possess VADMIN
1862 	 * for that file.
1863 	 */
1864 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1865 		return (error);
1866 
1867 	/*
1868 	 * Privileged processes may set the sticky bit on non-directories,
1869 	 * as well as set the setgid bit on a file with a group that the
1870 	 * process is not a member of.
1871 	 */
1872 	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
1873 		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE))
1874 			return (EFTYPE);
1875 	}
1876 	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
1877 		error = priv_check_cred(cred, PRIV_VFS_SETGID);
1878 		if (error)
1879 			return (error);
1880 	}
1881 
1882 	newmode = node->tn_mode & ~ALLPERMS;
1883 	newmode |= mode & ALLPERMS;
1884 	atomic_store_short(&node->tn_mode, newmode);
1885 
1886 	node->tn_status |= TMPFS_NODE_CHANGED;
1887 
1888 	ASSERT_VOP_ELOCKED(vp, "chmod2");
1889 
1890 	return (0);
1891 }
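/*
 * Example (illustrative): with tn_mode == 0755, a chmod(2) to 02644
 * stores 02644; every bit inside ALLPERMS (rwx plus setuid, setgid
 * and sticky) is replaced, while any bits above it are preserved.
 * The atomic store pairs with the vnode sequence counter asserted
 * above, so lockless lookups never observe a torn mode.
 */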
1892 
1893 /*
1894  * Change ownership of the given vnode.  At least one of uid or gid must
1895  * be different from VNOVAL.  If one is set to that value, the attribute
1896  * is unchanged.
1897  * Caller should execute tmpfs_update on vp after a successful execution.
1898  * The vnode must be locked on entry and remain locked on exit.
1899  */
1900 int
1901 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
1902     struct thread *p)
1903 {
1904 	int error;
1905 	struct tmpfs_node *node;
1906 	uid_t ouid;
1907 	gid_t ogid;
1908 	mode_t newmode;
1909 
1910 	ASSERT_VOP_ELOCKED(vp, "chown");
1911 	ASSERT_VOP_IN_SEQC(vp);
1912 
1913 	node = VP_TO_TMPFS_NODE(vp);
1914 
1915 	/* Assign default values if they are unknown. */
1916 	MPASS(uid != VNOVAL || gid != VNOVAL);
1917 	if (uid == VNOVAL)
1918 		uid = node->tn_uid;
1919 	if (gid == VNOVAL)
1920 		gid = node->tn_gid;
1921 	MPASS(uid != VNOVAL && gid != VNOVAL);
1922 
1923 	/* Disallow this operation if the file system is mounted read-only. */
1924 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1925 		return (EROFS);
1926 
1927 	/* Immutable or append-only files cannot be modified, either. */
1928 	if (node->tn_flags & (IMMUTABLE | APPEND))
1929 		return (EPERM);
1930 
1931 	/*
1932 	 * To modify the ownership of a file, must possess VADMIN for that
1933 	 * file.
1934 	 */
1935 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1936 		return (error);
1937 
1938 	/*
1939 	 * To change the owner of a file, or change the group of a file to a
1940 	 * group of which we are not a member, the caller must have
1941 	 * privilege.
1942 	 */
1943 	if ((uid != node->tn_uid ||
1944 	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
1945 	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN)))
1946 		return (error);
1947 
1948 	ogid = node->tn_gid;
1949 	ouid = node->tn_uid;
1950 
1951 	node->tn_uid = uid;
1952 	node->tn_gid = gid;
1953 
1954 	node->tn_status |= TMPFS_NODE_CHANGED;
1955 
1956 	if ((node->tn_mode & (S_ISUID | S_ISGID)) && (ouid != uid || ogid != gid)) {
1957 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
1958 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
1959 			atomic_store_short(&node->tn_mode, newmode);
1960 		}
1961 	}
1962 
1963 	ASSERT_VOP_ELOCKED(vp, "chown2");
1964 
1965 	return (0);
1966 }
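/*
 * Example (illustrative): the owner of a 02755 file can change its
 * group to another group they belong to without PRIV_VFS_CHOWN.
 * Because ownership changed and the caller lacks
 * PRIV_VFS_RETAINSUGID, the setgid bit is stripped and the mode
 * becomes 0755.
 */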
1967 
1968 /*
1969  * Change size of the given vnode.
1970  * Caller should execute tmpfs_update on vp after a successful execution.
1971  * The vnode must be locked on entry and remain locked on exit.
1972  */
1973 int
1974 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
1975     struct thread *p)
1976 {
1977 	int error;
1978 	struct tmpfs_node *node;
1979 
1980 	ASSERT_VOP_ELOCKED(vp, "chsize");
1981 
1982 	node = VP_TO_TMPFS_NODE(vp);
1983 
1984 	/* Decide whether this is a valid operation based on the file type. */
1985 	error = 0;
1986 	switch (vp->v_type) {
1987 	case VDIR:
1988 		return (EISDIR);
1989 
1990 	case VREG:
1991 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
1992 			return (EROFS);
1993 		break;
1994 
1995 	case VBLK:
1996 		/* FALLTHROUGH */
1997 	case VCHR:
1998 		/* FALLTHROUGH */
1999 	case VFIFO:
2000 		/*
2001 		 * Allow modifications of special files even if the file
2002 		 * system is mounted read-only (we are not modifying the
2003 		 * files themselves, but the objects they represent).
2004 		 */
2005 		return (0);
2006 
2007 	default:
2008 		/* Anything else is unsupported. */
2009 		return (EOPNOTSUPP);
2010 	}
2011 
2012 	/* Immutable or append-only files cannot be modified, either. */
2013 	if (node->tn_flags & (IMMUTABLE | APPEND))
2014 		return (EPERM);
2015 
2016 	error = tmpfs_truncate(vp, size);
2017 	/*
2018 	 * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
2019 	 * for us, as well as update tn_status; no need to do that here.
2020 	 */
2021 
2022 	ASSERT_VOP_ELOCKED(vp, "chsize2");
2023 
2024 	return (error);
2025 }
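/*
 * In syscall terms the checks above mean: truncate(2) on a directory
 * fails with EISDIR, on a fifo or device node it succeeds without
 * doing anything, and only a regular file on a writeable mount
 * reaches tmpfs_truncate().
 */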
2026 
2027 /*
2028  * Change access and modification times of the given vnode.
2029  * Caller should execute tmpfs_update on vp after a successful execution.
2030  * The vnode must be locked on entry and remain locked on exit.
2031  */
2032 int
2033 tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
2034     struct ucred *cred, struct thread *l)
2035 {
2036 	int error;
2037 	struct tmpfs_node *node;
2038 
2039 	ASSERT_VOP_ELOCKED(vp, "chtimes");
2040 
2041 	node = VP_TO_TMPFS_NODE(vp);
2042 
2043 	/* Disallow this operation if the file system is mounted read-only. */
2044 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
2045 		return (EROFS);
2046 
2047 	/* Immutable or append-only files cannot be modified, either. */
2048 	if (node->tn_flags & (IMMUTABLE | APPEND))
2049 		return (EPERM);
2050 
2051 	error = vn_utimes_perm(vp, vap, cred, l);
2052 	if (error != 0)
2053 		return (error);
2054 
2055 	if (vap->va_atime.tv_sec != VNOVAL)
2056 		node->tn_accessed = true;
2057 
2058 	if (vap->va_mtime.tv_sec != VNOVAL)
2059 		node->tn_status |= TMPFS_NODE_MODIFIED;
2060 
2061 	if (vap->va_birthtime.tv_sec != VNOVAL)
2062 		node->tn_status |= TMPFS_NODE_MODIFIED;
2063 
2064 	tmpfs_itimes(vp, &vap->va_atime, &vap->va_mtime);
2065 
2066 	if (vap->va_birthtime.tv_sec != VNOVAL)
2067 		node->tn_birthtime = vap->va_birthtime;
2068 	ASSERT_VOP_ELOCKED(vp, "chtimes2");
2069 
2070 	return (0);
2071 }
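/*
 * Example (illustrative): utimensat(2) with UTIME_OMIT for the access
 * time reaches this point with va_atime.tv_sec == VNOVAL, so only the
 * modification (and, if supplied, birth) time is updated.
 */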
2072 
2073 void
2074 tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
2075 {
2076 
2077 	if ((node->tn_status & status) == status || tm->tm_ronly)
2078 		return;
2079 	TMPFS_NODE_LOCK(node);
2080 	node->tn_status |= status;
2081 	TMPFS_NODE_UNLOCK(node);
2082 }
2083 
2084 void
2085 tmpfs_set_accessed(struct tmpfs_mount *tm, struct tmpfs_node *node)
2086 {
2087 	if (node->tn_accessed || tm->tm_ronly)
2088 		return;
2089 	atomic_store_8(&node->tn_accessed, true);
2090 }
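/*
 * Both helpers above are shaped for the hot path: the unlocked
 * pre-check lets a node whose bits are already set (or any node on a
 * read-only mount) return after a plain load, with no lock traffic.
 */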
2091 
2092 /* Sync timestamps */
2093 void
2094 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
2095     const struct timespec *mod)
2096 {
2097 	struct tmpfs_node *node;
2098 	struct timespec now;
2099 
2100 	ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
2101 	node = VP_TO_TMPFS_NODE(vp);
2102 
2103 	if (!node->tn_accessed &&
2104 	    (node->tn_status & (TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED)) == 0)
2105 		return;
2106 
2107 	vfs_timestamp(&now);
2108 	TMPFS_NODE_LOCK(node);
2109 	if (node->tn_accessed) {
2110 		if (acc == NULL)
2111 			acc = &now;
2112 		node->tn_atime = *acc;
2113 	}
2114 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
2115 		if (mod == NULL)
2116 			mod = &now;
2117 		node->tn_mtime = *mod;
2118 	}
2119 	if (node->tn_status & TMPFS_NODE_CHANGED)
2120 		node->tn_ctime = now;
2121 	node->tn_status &= ~(TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
2122 	node->tn_accessed = false;
2123 	TMPFS_NODE_UNLOCK(node);
2124 
2125 	/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
2126 	random_harvest_queue(node, sizeof(*node), RANDOM_FS_ATIME);
2127 }
2128 
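/*
 * Set the size of the regular file backing vp to length, growing or
 * shrinking the backing object as needed.  Expected to be called with
 * the vnode locked; timestamps are flushed via tmpfs_update() on the
 * way out.
 */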
2129 int
2130 tmpfs_truncate(struct vnode *vp, off_t length)
2131 {
2132 	int error;
2133 	struct tmpfs_node *node;
2134 
2135 	node = VP_TO_TMPFS_NODE(vp);
2136 
2137 	if (length < 0) {
2138 		error = EINVAL;
2139 		goto out;
2140 	}
2141 
2142 	if (node->tn_size == length) {
2143 		error = 0;
2144 		goto out;
2145 	}
2146 
2147 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
2148 		return (EFBIG);
2149 
2150 	error = tmpfs_reg_resize(vp, length, FALSE);
2151 	if (error == 0)
2152 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
2153 
2154 out:
2155 	tmpfs_update(vp);
2156 
2157 	return (error);
2158 }
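/*
 * Note the one asymmetry above: the EFBIG check returns before the
 * "out" label, so it is the only failure mode that skips the
 * tmpfs_update() call.
 */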
2159 
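/*
 * Ordering predicate for the per-directory red-black tree.  Entries
 * are keyed on td_hash, an FNV hash of the entry name computed when
 * the dirent is created; names that collide compare equal here and
 * are kept on a duplicate list instead of as extra tree nodes.
 */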
2160 static __inline int
2161 tmpfs_dirtree_cmp(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
2162 {
2163 	if (a->td_hash > b->td_hash)
2164 		return (1);
2165 	else if (a->td_hash < b->td_hash)
2166 		return (-1);
2167 	return (0);
2168 }
2169 
2170 RB_GENERATE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
2171