xref: /freebsd/sys/fs/tmpfs/tmpfs_subr.c (revision bc42155199b5b0b479311e05b07aee7f6f9c5172)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c) 2005 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * Efficient memory file system supporting functions.
37  */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/fnv_hash.h>
45 #include <sys/lock.h>
46 #include <sys/limits.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/random.h>
52 #include <sys/refcount.h>
53 #include <sys/rwlock.h>
54 #include <sys/smr.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/user.h>
58 #include <sys/vnode.h>
59 #include <sys/vmmeter.h>
60 
61 #include <vm/vm.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm_object.h>
64 #include <vm/vm_page.h>
65 #include <vm/vm_pageout.h>
66 #include <vm/vm_pager.h>
67 #include <vm/vm_extern.h>
68 #include <vm/swap_pager.h>
69 
70 #include <fs/tmpfs/tmpfs.h>
71 #include <fs/tmpfs/tmpfs_fifoops.h>
72 #include <fs/tmpfs/tmpfs_vnops.h>
73 
74 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
75     "tmpfs file system");
76 
77 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
78 
79 MALLOC_DEFINE(M_TMPFSDIR, "tmpfs dir", "tmpfs dirent structure");
80 static uma_zone_t tmpfs_node_pool;
81 VFS_SMR_DECLARE;
82 
83 int tmpfs_pager_type = -1;
84 
85 static vm_object_t
86 tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
87     vm_ooffset_t offset, struct ucred *cred)
88 {
89 	vm_object_t object;
90 
91 	MPASS(handle == NULL);
92 	MPASS(offset == 0);
93 	object = vm_object_allocate_dyn(tmpfs_pager_type, size,
94 	    OBJ_COLORED | OBJ_SWAP);
95 	if (!swap_pager_init_object(object, NULL, NULL, size, 0)) {
96 		vm_object_deallocate(object);
97 		object = NULL;
98 	}
99 	return (object);
100 }
101 
102 /*
103  * Make sure tmpfs vnodes with writable mappings can be found on the lazy list.
104  *
105  * This allows for periodic mtime updates while only scanning vnodes that are
106  * plausibly dirty; see tmpfs_update_mtime_lazy().
107  */
108 static void
109 tmpfs_pager_writecount_recalc(vm_object_t object, vm_offset_t old,
110     vm_offset_t new)
111 {
112 	struct vnode *vp;
113 
114 	VM_OBJECT_ASSERT_WLOCKED(object);
115 
116 	vp = VM_TO_TMPFS_VP(object);
117 
118 	/*
119 	 * Forced unmount?
120 	 */
121 	if (vp == NULL) {
122 		KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
123 		    ("object %p with OBJ_TMPFS_VREF but without vnode",
124 		    object));
125 		VM_OBJECT_WUNLOCK(object);
126 		return;
127 	}
128 
129 	if (old == 0) {
130 		VNASSERT((object->flags & OBJ_TMPFS_VREF) == 0, vp,
131 		    ("object without writable mappings has a reference"));
132 		VNPASS(vp->v_usecount > 0, vp);
133 	} else {
134 		VNASSERT((object->flags & OBJ_TMPFS_VREF) != 0, vp,
135 		    ("object with writable mappings does not "
136 		    "have a reference"));
137 	}
138 
139 	if (old == new) {
140 		VM_OBJECT_WUNLOCK(object);
141 		return;
142 	}
143 
144 	if (new == 0) {
145 		vm_object_clear_flag(object, OBJ_TMPFS_VREF);
146 		VM_OBJECT_WUNLOCK(object);
147 		vrele(vp);
148 	} else {
149 		if ((object->flags & OBJ_TMPFS_VREF) == 0) {
150 			vref(vp);
151 			vlazy(vp);
152 			vm_object_set_flag(object, OBJ_TMPFS_VREF);
153 		}
154 		VM_OBJECT_WUNLOCK(object);
155 	}
156 }
157 
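/*
 * For illustration: the net effect of the transitions above is that the
 * first writable mapping of a tmpfs file (writemappings going 0 -> N) takes
 * a vnode reference and puts the vnode on the lazy list, while the last
 * unmapping (N -> 0) drops that reference again, so the mtime scanner only
 * visits vnodes that may have been written through a mapping.
 */
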
158 static void
159 tmpfs_pager_update_writecount(vm_object_t object, vm_offset_t start,
160     vm_offset_t end)
161 {
162 	vm_offset_t new, old;
163 
164 	VM_OBJECT_WLOCK(object);
165 	KASSERT((object->flags & OBJ_ANON) == 0,
166 	    ("%s: object %p with OBJ_ANON", __func__, object));
167 	old = object->un_pager.swp.writemappings;
168 	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
169 	new = object->un_pager.swp.writemappings;
170 	tmpfs_pager_writecount_recalc(object, old, new);
171 	VM_OBJECT_ASSERT_UNLOCKED(object);
172 }
173 
174 static void
175 tmpfs_pager_release_writecount(vm_object_t object, vm_offset_t start,
176     vm_offset_t end)
177 {
178 	vm_offset_t new, old;
179 
180 	VM_OBJECT_WLOCK(object);
181 	KASSERT((object->flags & OBJ_ANON) == 0,
182 	    ("%s: object %p with OBJ_ANON", __func__, object));
183 	old = object->un_pager.swp.writemappings;
184 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
185 	new = object->un_pager.swp.writemappings;
186 	tmpfs_pager_writecount_recalc(object, old, new);
187 	VM_OBJECT_ASSERT_UNLOCKED(object);
188 }
189 
190 static void
191 tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
192 {
193 	struct vnode *vp;
194 
195 	/*
196 	 * A tmpfs VREG node whose vnode was reclaimed keeps an object of
197 	 * tmpfs_pager_type.  In that case there is no v_writecount to adjust.
198 	 */
199 	if (vp_heldp != NULL)
200 		VM_OBJECT_RLOCK(object);
201 	else
202 		VM_OBJECT_ASSERT_LOCKED(object);
203 	if ((object->flags & OBJ_TMPFS) != 0) {
204 		vp = VM_TO_TMPFS_VP(object);
205 		if (vp != NULL) {
206 			*vpp = vp;
207 			if (vp_heldp != NULL) {
208 				vhold(vp);
209 				*vp_heldp = true;
210 			}
211 		}
212 	}
213 	if (vp_heldp != NULL)
214 		VM_OBJECT_RUNLOCK(object);
215 }
216 
217 static void
218 tmpfs_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
219 {
220 	struct tmpfs_node *node;
221 	struct tmpfs_mount *tm;
222 	vm_size_t c;
223 
224 	swap_pager_freespace(obj, start, size, &c);
225 	if ((obj->flags & OBJ_TMPFS) == 0 || c == 0)
226 		return;
227 
228 	node = obj->un_pager.swp.swp_priv;
229 	MPASS(node->tn_type == VREG);
230 	tm = node->tn_reg.tn_tmp;
231 
232 	KASSERT(tm->tm_pages_used >= c,
233 	    ("tmpfs tm %p pages %jd free %jd", tm,
234 	    (uintmax_t)tm->tm_pages_used, (uintmax_t)c));
235 	atomic_add_long(&tm->tm_pages_used, -c);
236 	KASSERT(node->tn_reg.tn_pages >= c,
237 	    ("tmpfs node %p pages %jd free %jd", node,
238 	    (uintmax_t)node->tn_reg.tn_pages, (uintmax_t)c));
239 	node->tn_reg.tn_pages -= c;
240 }
241 
242 static void
243 tmpfs_page_inserted(vm_object_t obj, vm_page_t m)
244 {
245 	struct tmpfs_node *node;
246 	struct tmpfs_mount *tm;
247 
248 	if ((obj->flags & OBJ_TMPFS) == 0)
249 		return;
250 
251 	node = obj->un_pager.swp.swp_priv;
252 	MPASS(node->tn_type == VREG);
253 	tm = node->tn_reg.tn_tmp;
254 
255 	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
256 		atomic_add_long(&tm->tm_pages_used, 1);
257 		node->tn_reg.tn_pages += 1;
258 	}
259 }
260 
261 static void
262 tmpfs_page_removed(vm_object_t obj, vm_page_t m)
263 {
264 	struct tmpfs_node *node;
265 	struct tmpfs_mount *tm;
266 
267 	if ((obj->flags & OBJ_TMPFS) == 0)
268 		return;
269 
270 	node = obj->un_pager.swp.swp_priv;
271 	MPASS(node->tn_type == VREG);
272 	tm = node->tn_reg.tn_tmp;
273 
274 	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
275 		KASSERT(tm->tm_pages_used >= 1,
276 		    ("tmpfs tm %p pages %jd free 1", tm,
277 		    (uintmax_t)tm->tm_pages_used));
278 		atomic_add_long(&tm->tm_pages_used, -1);
279 		KASSERT(node->tn_reg.tn_pages >= 1,
280 		    ("tmpfs node %p pages %jd free 1", node,
281 		    (uintmax_t)node->tn_reg.tn_pages));
282 		node->tn_reg.tn_pages -= 1;
283 	}
284 }
285 
286 static boolean_t
287 tmpfs_can_alloc_page(vm_object_t obj, vm_pindex_t pindex)
288 {
289 	struct tmpfs_mount *tm;
290 
291 	tm = VM_TO_TMPFS_MP(obj);
292 	if (tm == NULL || vm_pager_has_page(obj, pindex, NULL, NULL) ||
293 	    tm->tm_pages_max == 0)
294 		return (true);
295 	return (tm->tm_pages_max > atomic_load_long(&tm->tm_pages_used));
296 }
297 
298 struct pagerops tmpfs_pager_ops = {
299 	.pgo_kvme_type = KVME_TYPE_VNODE,
300 	.pgo_alloc = tmpfs_pager_alloc,
301 	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
302 	.pgo_update_writecount = tmpfs_pager_update_writecount,
303 	.pgo_release_writecount = tmpfs_pager_release_writecount,
304 	.pgo_mightbedirty = vm_object_mightbedirty_,
305 	.pgo_getvp = tmpfs_pager_getvp,
306 	.pgo_freespace = tmpfs_pager_freespace,
307 	.pgo_page_inserted = tmpfs_page_inserted,
308 	.pgo_page_removed = tmpfs_page_removed,
309 	.pgo_can_alloc_page = tmpfs_can_alloc_page,
310 };
311 
312 static int
313 tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
314 {
315 	struct tmpfs_node *node;
316 
317 	node = mem;
318 	node->tn_gen++;
319 	node->tn_size = 0;
320 	node->tn_status = 0;
321 	node->tn_accessed = false;
322 	node->tn_flags = 0;
323 	node->tn_links = 0;
324 	node->tn_vnode = NULL;
325 	node->tn_vpstate = 0;
326 	return (0);
327 }
328 
329 static void
330 tmpfs_node_dtor(void *mem, int size, void *arg)
331 {
332 	struct tmpfs_node *node;
333 
334 	node = mem;
335 	node->tn_type = VNON;
336 }
337 
338 static int
339 tmpfs_node_init(void *mem, int size, int flags)
340 {
341 	struct tmpfs_node *node;
342 
343 	node = mem;
344 	node->tn_id = 0;
345 	mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
346 	node->tn_gen = arc4random();
347 	return (0);
348 }
349 
350 static void
351 tmpfs_node_fini(void *mem, int size)
352 {
353 	struct tmpfs_node *node;
354 
355 	node = mem;
356 	mtx_destroy(&node->tn_interlock);
357 }
358 
359 int
360 tmpfs_subr_init(void)
361 {
362 	tmpfs_pager_type = vm_pager_alloc_dyn_type(&tmpfs_pager_ops,
363 	    OBJT_SWAP);
364 	if (tmpfs_pager_type == -1)
365 		return (EINVAL);
366 	tmpfs_node_pool = uma_zcreate("TMPFS node",
367 	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
368 	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
369 	VFS_SMR_ZONE_SET(tmpfs_node_pool);
370 	return (0);
371 }
372 
373 void
374 tmpfs_subr_uninit(void)
375 {
376 	if (tmpfs_pager_type != -1)
377 		vm_pager_free_dyn_type(tmpfs_pager_type);
378 	tmpfs_pager_type = -1;
379 	uma_zdestroy(tmpfs_node_pool);
380 }
381 
382 static int
383 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
384 {
385 	int error;
386 	long pages, bytes;
387 
388 	pages = *(long *)arg1;
389 	bytes = pages * PAGE_SIZE;
390 
391 	error = sysctl_handle_long(oidp, &bytes, 0, req);
392 	if (error || !req->newptr)
393 		return (error);
394 
395 	pages = bytes / PAGE_SIZE;
396 	if (pages < TMPFS_PAGES_MINRESERVED)
397 		return (EINVAL);
398 
399 	*(long *)arg1 = pages;
400 	return (0);
401 }
402 
403 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved,
404     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &tmpfs_pages_reserved, 0,
405     sysctl_mem_reserved, "L",
406     "Amount of available memory and swap below which tmpfs growth stops");
407 
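/*
 * For illustration, the handler above exports the page count as a byte
 * count.  Assuming a 4 KB page size, setting the knob to 16 MB reserves
 * 4096 pages:
 *
 *	# sysctl vfs.tmpfs.memory_reserved=16777216
 *
 * Values below TMPFS_PAGES_MINRESERVED pages are rejected with EINVAL.
 */
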
408 static __inline int tmpfs_dirtree_cmp(struct tmpfs_dirent *a,
409     struct tmpfs_dirent *b);
410 RB_PROTOTYPE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
411 
412 size_t
413 tmpfs_mem_avail(void)
414 {
415 	size_t avail;
416 	long reserved;
417 
418 	avail = swap_pager_avail + vm_free_count();
419 	reserved = atomic_load_long(&tmpfs_pages_reserved);
420 	if (__predict_false(avail < reserved))
421 		return (0);
422 	return (avail - reserved);
423 }
424 
425 size_t
426 tmpfs_pages_used(struct tmpfs_mount *tmp)
427 {
428 	const size_t node_size = sizeof(struct tmpfs_node) +
429 	    sizeof(struct tmpfs_dirent);
430 	size_t meta_pages;
431 
432 	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
433 	    PAGE_SIZE);
434 	return (meta_pages + tmp->tm_pages_used);
435 }
436 
437 static bool
438 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
439 {
440 	if (tmpfs_mem_avail() < req_pages)
441 		return (false);
442 
443 	if (tmp->tm_pages_max != ULONG_MAX &&
444 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
445 		return (false);
446 
447 	return (true);
448 }
449 
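/*
 * As a rough example of the accounting above: a mount with 1000 nodes in
 * use on a system with 4 KB pages, assuming sizeof(struct tmpfs_node) +
 * sizeof(struct tmpfs_dirent) is about 300 bytes, charges
 * howmany(1000 * 300, 4096) = 74 pages of metadata on top of
 * tm_pages_used.  tmpfs_pages_check_avail() then refuses a request of
 * req_pages if either the global reserve (tmpfs_mem_avail()) or the
 * per-mount tm_pages_max limit would be exceeded.
 */
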
450 static int
451 tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
452     int end, boolean_t ignerr)
453 {
454 	vm_page_t m;
455 	int rv, error;
456 
457 	VM_OBJECT_ASSERT_WLOCKED(object);
458 	KASSERT(base >= 0, ("%s: base %d", __func__, base));
459 	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
460 	    end));
461 	error = 0;
462 
463 retry:
464 	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
465 	if (m != NULL) {
466 		MPASS(vm_page_all_valid(m));
467 	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
468 		m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
469 		    VM_ALLOC_WAITFAIL);
470 		if (m == NULL)
471 			goto retry;
472 		vm_object_pip_add(object, 1);
473 		VM_OBJECT_WUNLOCK(object);
474 		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
475 		VM_OBJECT_WLOCK(object);
476 		vm_object_pip_wakeup(object);
477 		if (rv == VM_PAGER_OK) {
478 			/*
479 			 * Since the page was not resident, and therefore not
480 			 * recently accessed, immediately enqueue it for
481 			 * asynchronous laundering.  The current operation is
482 			 * not regarded as an access.
483 			 */
484 			vm_page_launder(m);
485 		} else {
486 			vm_page_free(m);
487 			m = NULL;
488 			if (!ignerr)
489 				error = EIO;
490 		}
491 	}
492 	if (m != NULL) {
493 		pmap_zero_page_area(m, base, end - base);
494 		vm_page_set_dirty(m);
495 		vm_page_xunbusy(m);
496 	}
497 
498 	return (error);
499 }
500 
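/*
 * The helper above zeroes the byte range [base, end) of the page at index
 * idx, bringing the page in from swap first if it is not resident.  It is
 * used by tmpfs_reg_resize() and tmpfs_reg_punch_hole() below to clear the
 * partial pages at a new end of file or at the edges of a punched hole.
 */
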
501 void
502 tmpfs_ref_node(struct tmpfs_node *node)
503 {
504 #ifdef INVARIANTS
505 	u_int old;
506 
507 	old =
508 #endif
509 	refcount_acquire(&node->tn_refcount);
510 #ifdef INVARIANTS
511 	KASSERT(old > 0, ("node %p zero refcount", node));
512 #endif
513 }
514 
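/*
 * Node reference counts pair tmpfs_ref_node() with tmpfs_free_node(): for
 * example, tmpfs_alloc_vp() below takes a reference while it may sleep
 * waiting for the vnode and drops it with tmpfs_free_node() before
 * returning.  The final release tears the node down via
 * tmpfs_free_node_locked().
 */
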
515 /*
516  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
517  * its owner set to 'uid', its group to 'gid' and its mode set to
518  * 'mode'.
519  *
520  * If the node type is set to 'VDIR', then the parent parameter must point
521  * to the parent directory of the node being created.  It may only be NULL
522  * while allocating the root node.
523  *
524  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
525  * specifies the device the node represents.
526  *
527  * If the node type is set to 'VLNK', then the parameter target specifies
528  * the file name of the target file for the symbolic link that is being
529  * created.
530  *
531  * Note that new nodes are allocated from the tmpfs node UMA zone, and only
532  * as long as the mount's node and memory limits leave enough room to
533  * create them.
534  *
535  * Returns zero on success or an appropriate error code on failure.
536  */
537 int
538 tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
539     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
540     const char *target, dev_t rdev, struct tmpfs_node **node)
541 {
542 	struct tmpfs_node *nnode;
543 	char *symlink;
544 	char symlink_smr;
545 
546 	/* If the root directory of the 'tmp' file system is not yet
547 	 * allocated, this must be the request to do it. */
548 	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
549 
550 	MPASS(IFF(type == VLNK, target != NULL));
551 	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
552 
553 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
554 		return (ENOSPC);
555 	if (!tmpfs_pages_check_avail(tmp, 1))
556 		return (ENOSPC);
557 
558 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
559 		/*
560 		 * When a new tmpfs node is created for fully
561 		 * constructed mount point, there must be a parent
562 		 * node, whose vnode is locked exclusively.  As a
563 		 * consequence, if the unmount is executing in
564 		 * parallel, vflush() cannot reclaim the parent vnode.
565 		 * Due to this, the check for MNTK_UNMOUNT flag is not
566 		 * racy: if we did not see MNTK_UNMOUNT flag, then tmp
567 		 * cannot be destroyed until node construction is
568 		 * finished and the parent vnode unlocked.
569 		 *
570 		 * Tmpfs does not need to instantiate new nodes during
571 		 * unmount.
572 		 */
573 		return (EBUSY);
574 	}
575 	if ((mp->mnt_kern_flag & MNT_RDONLY) != 0)
576 		return (EROFS);
577 
578 	nnode = uma_zalloc_smr(tmpfs_node_pool, M_WAITOK);
579 
580 	/* Generic initialization. */
581 	nnode->tn_type = type;
582 	vfs_timestamp(&nnode->tn_atime);
583 	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
584 	    nnode->tn_atime;
585 	nnode->tn_uid = uid;
586 	nnode->tn_gid = gid;
587 	nnode->tn_mode = mode;
588 	nnode->tn_id = alloc_unr64(&tmp->tm_ino_unr);
589 	nnode->tn_refcount = 1;
590 
591 	/* Type-specific initialization. */
592 	switch (nnode->tn_type) {
593 	case VBLK:
594 	case VCHR:
595 		nnode->tn_rdev = rdev;
596 		break;
597 
598 	case VDIR:
599 		RB_INIT(&nnode->tn_dir.tn_dirhead);
600 		LIST_INIT(&nnode->tn_dir.tn_dupindex);
601 		MPASS(parent != nnode);
602 		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
603 		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
604 		nnode->tn_dir.tn_readdir_lastn = 0;
605 		nnode->tn_dir.tn_readdir_lastp = NULL;
606 		nnode->tn_links++;
607 		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
608 		nnode->tn_dir.tn_parent->tn_links++;
609 		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
610 		break;
611 
612 	case VFIFO:
613 		/* FALLTHROUGH */
614 	case VSOCK:
615 		break;
616 
617 	case VLNK:
618 		MPASS(strlen(target) < MAXPATHLEN);
619 		nnode->tn_size = strlen(target);
620 
621 		symlink = NULL;
622 		if (!tmp->tm_nonc) {
623 			symlink = cache_symlink_alloc(nnode->tn_size + 1,
624 			    M_WAITOK);
625 			symlink_smr = true;
626 		}
627 		if (symlink == NULL) {
628 			symlink = malloc(nnode->tn_size + 1, M_TMPFSNAME,
629 			    M_WAITOK);
630 			symlink_smr = false;
631 		}
632 		memcpy(symlink, target, nnode->tn_size + 1);
633 
634 		/*
635 		 * Allow safe symlink resolving for lockless lookup.
636 		 * tmpfs_fplookup_symlink references this comment.
637 		 *
638 		 * 1. nnode is not yet visible to the world
639 		 * 2. both tn_link_target and tn_link_smr get populated
640 		 * 3. release fence publishes their content
641 		 * 4. tn_link_target content is immutable until node
642 		 *    destruction, where the pointer gets set to NULL
643 		 * 5. tn_link_smr is never changed once set
644 		 *
645 		 * As a result it is sufficient to issue load consume
646 		 * on the node pointer to also get the above content
647 		 * in a stable manner.  Worst case tn_link_smr flag
648 		 * may be set to true despite being stale, while the
649 		 * target buffer is already cleared out.
650 		 */
651 		atomic_store_ptr(&nnode->tn_link_target, symlink);
652 		atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
653 		atomic_thread_fence_rel();
654 		break;
655 
656 	case VREG:
657 		nnode->tn_reg.tn_aobj =
658 		    vm_pager_allocate(tmpfs_pager_type, NULL, 0,
659 		    VM_PROT_DEFAULT, 0,
660 		    NULL /* XXXKIB - tmpfs needs swap reservation */);
661 		nnode->tn_reg.tn_aobj->un_pager.swp.swp_priv = nnode;
662 		vm_object_set_flag(nnode->tn_reg.tn_aobj, OBJ_TMPFS);
663 		nnode->tn_reg.tn_tmp = tmp;
664 		nnode->tn_reg.tn_pages = 0;
665 		break;
666 
667 	default:
668 		panic("tmpfs_alloc_node: type %p %d", nnode,
669 		    (int)nnode->tn_type);
670 	}
671 
672 	TMPFS_LOCK(tmp);
673 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
674 	nnode->tn_attached = true;
675 	tmp->tm_nodes_inuse++;
676 	tmp->tm_refcount++;
677 	TMPFS_UNLOCK(tmp);
678 
679 	*node = nnode;
680 	return (0);
681 }
682 
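/*
 * A sketch of typical use (see tmpfs_alloc_file() further below for the
 * real thing): a caller pairs tmpfs_alloc_node() with tmpfs_alloc_dirent(),
 * tmpfs_alloc_vp() and tmpfs_dir_attach().  For a regular file this is
 * roughly:
 *
 *	error = tmpfs_alloc_node(mp, tmp, VREG, uid, gid, mode,
 *	    NULL, NULL, VNOVAL, &node);
 *	if (error == 0)
 *		error = tmpfs_alloc_dirent(tmp, node, name, namelen, &de);
 *	if (error == 0)
 *		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, &vp);
 *	if (error == 0)
 *		tmpfs_dir_attach(dvp, de);
 *
 * with the intermediate allocations released via tmpfs_free_dirent() and
 * tmpfs_free_node() on failure.
 */
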
683 /*
684  * Destroys the node pointed to by node from the file system 'tmp'.
685  * If the node references a directory, it must contain no entries.
686  */
687 void
688 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
689 {
690 	if (refcount_release_if_not_last(&node->tn_refcount))
691 		return;
692 
693 	TMPFS_LOCK(tmp);
694 	TMPFS_NODE_LOCK(node);
695 	if (!tmpfs_free_node_locked(tmp, node, false)) {
696 		TMPFS_NODE_UNLOCK(node);
697 		TMPFS_UNLOCK(tmp);
698 	}
699 }
700 
701 bool
702 tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
703     bool detach)
704 {
705 	vm_object_t uobj;
706 	char *symlink;
707 	bool last;
708 
709 	TMPFS_MP_ASSERT_LOCKED(tmp);
710 	TMPFS_NODE_ASSERT_LOCKED(node);
711 
712 	last = refcount_release(&node->tn_refcount);
713 	if (node->tn_attached && (detach || last)) {
714 		MPASS(tmp->tm_nodes_inuse > 0);
715 		tmp->tm_nodes_inuse--;
716 		LIST_REMOVE(node, tn_entries);
717 		node->tn_attached = false;
718 	}
719 	if (!last)
720 		return (false);
721 
722 	TMPFS_NODE_UNLOCK(node);
723 
724 #ifdef INVARIANTS
725 	MPASS(node->tn_vnode == NULL);
726 	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
727 
728 	/*
729 	 * Make sure this is a node type we can deal with. Everything
730 	 * is explicitly enumerated without the 'default' clause so
731 	 * the compiler can throw an error in case a new type is
732 	 * added.
733 	 */
734 	switch (node->tn_type) {
735 	case VBLK:
736 	case VCHR:
737 	case VDIR:
738 	case VFIFO:
739 	case VSOCK:
740 	case VLNK:
741 	case VREG:
742 		break;
743 	case VNON:
744 	case VBAD:
745 	case VMARKER:
746 		panic("%s: bad type %d for node %p", __func__,
747 		    (int)node->tn_type, node);
748 	}
749 #endif
750 
751 	switch (node->tn_type) {
752 	case VREG:
753 		uobj = node->tn_reg.tn_aobj;
754 		node->tn_reg.tn_aobj = NULL;
755 		if (uobj != NULL) {
756 			VM_OBJECT_WLOCK(uobj);
757 			KASSERT((uobj->flags & OBJ_TMPFS) != 0,
758 			    ("tmpfs node %p uobj %p not tmpfs", node, uobj));
759 			vm_object_clear_flag(uobj, OBJ_TMPFS);
760 			KASSERT(tmp->tm_pages_used >= node->tn_reg.tn_pages,
761 			    ("tmpfs tmp %p node %p pages %jd free %jd", tmp,
762 			    node, (uintmax_t)tmp->tm_pages_used,
763 			    (uintmax_t)node->tn_reg.tn_pages));
764 			atomic_add_long(&tmp->tm_pages_used,
765 			    -node->tn_reg.tn_pages);
766 			VM_OBJECT_WUNLOCK(uobj);
767 		}
768 		tmpfs_free_tmp(tmp);
769 
770 		/*
771 		 * vm_object_deallocate() must not be called while
772 		 * owning tm_allnode_lock, because deallocate might
773 		 * sleep.  Call it after tmpfs_free_tmp() does the
774 		 * unlock.
775 		 */
776 		if (uobj != NULL)
777 			vm_object_deallocate(uobj);
778 
779 		break;
780 	case VLNK:
781 		tmpfs_free_tmp(tmp);
782 
783 		symlink = node->tn_link_target;
784 		atomic_store_ptr(&node->tn_link_target, NULL);
785 		if (atomic_load_char(&node->tn_link_smr)) {
786 			cache_symlink_free(symlink, node->tn_size + 1);
787 		} else {
788 			free(symlink, M_TMPFSNAME);
789 		}
790 		break;
791 	default:
792 		tmpfs_free_tmp(tmp);
793 		break;
794 	}
795 
796 	uma_zfree_smr(tmpfs_node_pool, node);
797 	return (true);
798 }
799 
800 static __inline uint32_t
801 tmpfs_dirent_hash(const char *name, u_int len)
802 {
803 	uint32_t hash;
804 
805 	hash = fnv_32_buf(name, len, FNV1_32_INIT + len) & TMPFS_DIRCOOKIE_MASK;
806 #ifdef TMPFS_DEBUG_DIRCOOKIE_DUP
807 	hash &= 0xf;
808 #endif
809 	if (hash < TMPFS_DIRCOOKIE_MIN)
810 		hash += TMPFS_DIRCOOKIE_MIN;
811 
812 	return (hash);
813 }
814 
815 static __inline off_t
816 tmpfs_dirent_cookie(struct tmpfs_dirent *de)
817 {
818 	if (de == NULL)
819 		return (TMPFS_DIRCOOKIE_EOF);
820 
821 	MPASS(de->td_cookie >= TMPFS_DIRCOOKIE_MIN);
822 
823 	return (de->td_cookie);
824 }
825 
826 static __inline boolean_t
827 tmpfs_dirent_dup(struct tmpfs_dirent *de)
828 {
829 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUP) != 0);
830 }
831 
832 static __inline boolean_t
833 tmpfs_dirent_duphead(struct tmpfs_dirent *de)
834 {
835 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUPHEAD) != 0);
836 }
837 
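/*
 * Directory cookies are derived from the FNV hash of the entry name
 * (fnv_32_buf()), masked with TMPFS_DIRCOOKIE_MASK and kept at or above
 * TMPFS_DIRCOOKIE_MIN so they do not collide with the reserved '.', '..'
 * and end-of-directory cookies.  Entries whose names hash to the same value
 * are chained under a synthetic "duphead" entry and get per-directory
 * cookies carrying the TMPFS_DIRCOOKIE_DUP bit; see tmpfs_dir_attach_dup()
 * below.
 */
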
838 void
839 tmpfs_dirent_init(struct tmpfs_dirent *de, const char *name, u_int namelen)
840 {
841 	de->td_hash = de->td_cookie = tmpfs_dirent_hash(name, namelen);
842 	memcpy(de->ud.td_name, name, namelen);
843 	de->td_namelen = namelen;
844 }
845 
846 /*
847  * Allocates a new directory entry for the node 'node' with the name 'name'.
848  * The new directory entry is returned in *de.
849  *
850  * The link count of node is increased by one to reflect the new object
851  * referencing it.
852  *
853  * Returns zero on success or an appropriate error code on failure.
854  */
855 int
856 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
857     const char *name, u_int len, struct tmpfs_dirent **de)
858 {
859 	struct tmpfs_dirent *nde;
860 
861 	nde = malloc(sizeof(*nde), M_TMPFSDIR, M_WAITOK);
862 	nde->td_node = node;
863 	if (name != NULL) {
864 		nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
865 		tmpfs_dirent_init(nde, name, len);
866 	} else
867 		nde->td_namelen = 0;
868 	if (node != NULL)
869 		node->tn_links++;
870 
871 	*de = nde;
872 
873 	return (0);
874 }
875 
876 /*
877  * Frees a directory entry.  It is the caller's responsibility to destroy
878  * the node referenced by it if needed.
879  *
880  * The link count of the node is decreased by one to reflect the removal of
881  * an object that referenced it.  This only happens if the directory entry
882  * still points to a node; otherwise the function will not access the node,
883  * as it may already have been released from the outside.
884  */
885 void
886 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
887 {
888 	struct tmpfs_node *node;
889 
890 	node = de->td_node;
891 	if (node != NULL) {
892 		MPASS(node->tn_links > 0);
893 		node->tn_links--;
894 	}
895 	if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
896 		free(de->ud.td_name, M_TMPFSNAME);
897 	free(de, M_TMPFSDIR);
898 }
899 
900 void
901 tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
902 {
903 	bool want_vrele;
904 
905 	ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
906 	if (vp->v_type != VREG || obj == NULL)
907 		return;
908 
909 	VM_OBJECT_WLOCK(obj);
910 	VI_LOCK(vp);
911 	/*
912 	 * May be going through forced unmount.
913 	 */
914 	want_vrele = false;
915 	if ((obj->flags & OBJ_TMPFS_VREF) != 0) {
916 		vm_object_clear_flag(obj, OBJ_TMPFS_VREF);
917 		want_vrele = true;
918 	}
919 
920 	if (vp->v_writecount < 0)
921 		vp->v_writecount = 0;
922 	VI_UNLOCK(vp);
923 	VM_OBJECT_WUNLOCK(obj);
924 	if (want_vrele) {
925 		vrele(vp);
926 	}
927 }
928 
929 /*
930  * Allocates a new vnode for the given node, or returns a new reference to
931  * an existing one if the node already had a vnode referencing it.  The
932  * resulting locked vnode is returned in *vpp.
933  *
934  * Returns zero on success or an appropriate error code on failure.
935  */
936 int
937 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
938     struct vnode **vpp)
939 {
940 	struct vnode *vp;
941 	enum vgetstate vs;
942 	struct tmpfs_mount *tm;
943 	vm_object_t object;
944 	int error;
945 
946 	error = 0;
947 	tm = VFS_TO_TMPFS(mp);
948 	TMPFS_NODE_LOCK(node);
949 	tmpfs_ref_node(node);
950 loop:
951 	TMPFS_NODE_ASSERT_LOCKED(node);
952 	if ((vp = node->tn_vnode) != NULL) {
953 		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
954 		if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
955 		    (VN_IS_DOOMED(vp) &&
956 		     (lkflag & LK_NOWAIT) != 0)) {
957 			TMPFS_NODE_UNLOCK(node);
958 			error = ENOENT;
959 			vp = NULL;
960 			goto out;
961 		}
962 		if (VN_IS_DOOMED(vp)) {
963 			node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
964 			while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {
965 				msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
966 				    0, "tmpfsE", 0);
967 			}
968 			goto loop;
969 		}
970 		vs = vget_prep(vp);
971 		TMPFS_NODE_UNLOCK(node);
972 		error = vget_finish(vp, lkflag, vs);
973 		if (error == ENOENT) {
974 			TMPFS_NODE_LOCK(node);
975 			goto loop;
976 		}
977 		if (error != 0) {
978 			vp = NULL;
979 			goto out;
980 		}
981 
982 		/*
983 		 * Make sure the vnode is still there after
984 		 * getting the interlock to avoid racing a free.
985 		 */
986 		if (node->tn_vnode != vp) {
987 			vput(vp);
988 			TMPFS_NODE_LOCK(node);
989 			goto loop;
990 		}
991 
992 		goto out;
993 	}
994 
995 	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
996 	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
997 		TMPFS_NODE_UNLOCK(node);
998 		error = ENOENT;
999 		vp = NULL;
1000 		goto out;
1001 	}
1002 
1003 	/*
1004 	 * Otherwise, mark the node as being allocated so that concurrent
1005 	 * callers wait for us, since getnewvnode() can block.
1006 	 */
1007 	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
1008 		node->tn_vpstate |= TMPFS_VNODE_WANT;
1009 		error = msleep((caddr_t) &node->tn_vpstate,
1010 		    TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
1011 		if (error != 0)
1012 			goto out;
1013 		goto loop;
1014 	} else
1015 		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
1016 
1017 	TMPFS_NODE_UNLOCK(node);
1018 
1019 	/* Get a new vnode and associate it with our node. */
1020 	error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
1021 	    &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
1022 	if (error != 0)
1023 		goto unlock;
1024 	MPASS(vp != NULL);
1025 
1026 	/* lkflag is ignored, the lock is exclusive */
1027 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1028 
1029 	vp->v_data = node;
1030 	vp->v_type = node->tn_type;
1031 
1032 	/* Type-specific initialization. */
1033 	switch (node->tn_type) {
1034 	case VBLK:
1035 		/* FALLTHROUGH */
1036 	case VCHR:
1037 		/* FALLTHROUGH */
1038 	case VLNK:
1039 		/* FALLTHROUGH */
1040 	case VSOCK:
1041 		break;
1042 	case VFIFO:
1043 		vp->v_op = &tmpfs_fifoop_entries;
1044 		break;
1045 	case VREG:
1046 		object = node->tn_reg.tn_aobj;
1047 		VM_OBJECT_WLOCK(object);
1048 		KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
1049 		    ("%s: object %p with OBJ_TMPFS_VREF but without vnode",
1050 		    __func__, object));
1051 		KASSERT(object->un_pager.swp.writemappings == 0,
1052 		    ("%s: object %p has writemappings",
1053 		    __func__, object));
1054 		VI_LOCK(vp);
1055 		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
1056 		vp->v_object = object;
1057 		vn_irflag_set_locked(vp, VIRF_PGREAD | VIRF_TEXT_REF);
1058 		VI_UNLOCK(vp);
1059 		VM_OBJECT_WUNLOCK(object);
1060 		break;
1061 	case VDIR:
1062 		MPASS(node->tn_dir.tn_parent != NULL);
1063 		if (node->tn_dir.tn_parent == node)
1064 			vp->v_vflag |= VV_ROOT;
1065 		break;
1066 
1067 	default:
1068 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
1069 	}
1070 	if (vp->v_type != VFIFO)
1071 		VN_LOCK_ASHARE(vp);
1072 
1073 	error = insmntque1(vp, mp);
1074 	if (error != 0) {
1075 		/* Need to clear v_object for insmntque failure. */
1076 		tmpfs_destroy_vobject(vp, vp->v_object);
1077 		vp->v_object = NULL;
1078 		vp->v_data = NULL;
1079 		vp->v_op = &dead_vnodeops;
1080 		vgone(vp);
1081 		vput(vp);
1082 		vp = NULL;
1083 	} else {
1084 		vn_set_state(vp, VSTATE_CONSTRUCTED);
1085 	}
1086 
1087 unlock:
1088 	TMPFS_NODE_LOCK(node);
1089 
1090 	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
1091 	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
1092 	node->tn_vnode = vp;
1093 
1094 	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
1095 		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
1096 		TMPFS_NODE_UNLOCK(node);
1097 		wakeup((caddr_t) &node->tn_vpstate);
1098 	} else
1099 		TMPFS_NODE_UNLOCK(node);
1100 
1101 out:
1102 	if (error == 0) {
1103 		*vpp = vp;
1104 
1105 #ifdef INVARIANTS
1106 		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
1107 		TMPFS_NODE_LOCK(node);
1108 		MPASS(*vpp == node->tn_vnode);
1109 		TMPFS_NODE_UNLOCK(node);
1110 #endif
1111 	}
1112 	tmpfs_free_node(tm, node);
1113 
1114 	return (error);
1115 }
1116 
1117 /*
1118  * Destroys the association between the vnode vp and the node it
1119  * references.
1120  */
1121 void
1122 tmpfs_free_vp(struct vnode *vp)
1123 {
1124 	struct tmpfs_node *node;
1125 
1126 	node = VP_TO_TMPFS_NODE(vp);
1127 
1128 	TMPFS_NODE_ASSERT_LOCKED(node);
1129 	node->tn_vnode = NULL;
1130 	if ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0)
1131 		wakeup(&node->tn_vnode);
1132 	node->tn_vpstate &= ~TMPFS_VNODE_WRECLAIM;
1133 	vp->v_data = NULL;
1134 }
1135 
1136 /*
1137  * Allocates a new file of type 'type' and adds it to the parent directory
1138  * 'dvp'; this addition is done using the component name given in 'cnp'.
1139  * The ownership of the new file is automatically assigned based on the
1140  * credentials of the caller (through 'cnp'), the group is set based on
1141  * the parent directory and the mode is determined from the 'vap' argument.
1142  * If successful, *vpp holds a vnode to the newly created file and zero
1143  * is returned.  Otherwise *vpp is NULL and the function returns an
1144  * appropriate error code.
1145  */
1146 int
1147 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
1148     struct componentname *cnp, const char *target)
1149 {
1150 	int error;
1151 	struct tmpfs_dirent *de;
1152 	struct tmpfs_mount *tmp;
1153 	struct tmpfs_node *dnode;
1154 	struct tmpfs_node *node;
1155 	struct tmpfs_node *parent;
1156 
1157 	ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
1158 
1159 	tmp = VFS_TO_TMPFS(dvp->v_mount);
1160 	dnode = VP_TO_TMPFS_DIR(dvp);
1161 	*vpp = NULL;
1162 
1163 	/* If the entry we are creating is a directory, the parent will gain
1164 	 * a new link, so make sure that its number of links cannot
1165 	 * overflow. */
1166 	if (vap->va_type == VDIR) {
1167 		/* Ensure that we do not overflow the maximum number of links
1168 		 * imposed by the system. */
1169 		MPASS(dnode->tn_links <= TMPFS_LINK_MAX);
1170 		if (dnode->tn_links == TMPFS_LINK_MAX) {
1171 			return (EMLINK);
1172 		}
1173 
1174 		parent = dnode;
1175 		MPASS(parent != NULL);
1176 	} else
1177 		parent = NULL;
1178 
1179 	/* Allocate a node that represents the new file. */
1180 	error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
1181 	    cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
1182 	    target, vap->va_rdev, &node);
1183 	if (error != 0)
1184 		return (error);
1185 
1186 	/* Allocate a directory entry that points to the new file. */
1187 	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
1188 	    &de);
1189 	if (error != 0) {
1190 		tmpfs_free_node(tmp, node);
1191 		return (error);
1192 	}
1193 
1194 	/* Allocate a vnode for the new file. */
1195 	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
1196 	if (error != 0) {
1197 		tmpfs_free_dirent(tmp, de);
1198 		tmpfs_free_node(tmp, node);
1199 		return (error);
1200 	}
1201 
1202 	/* Now that all required items are allocated, we can proceed to
1203 	 * insert the new node into the directory, an operation that
1204 	 * cannot fail. */
1205 	if (cnp->cn_flags & ISWHITEOUT)
1206 		tmpfs_dir_whiteout_remove(dvp, cnp);
1207 	tmpfs_dir_attach(dvp, de);
1208 	return (0);
1209 }
1210 
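/*
 * tmpfs_alloc_file() is the common backend for the VOP_CREATE, VOP_MKDIR,
 * VOP_MKNOD and VOP_SYMLINK handlers in tmpfs_vnops.c; a create handler,
 * for example, reduces to roughly
 *
 *	return (tmpfs_alloc_file(ap->a_dvp, ap->a_vpp, ap->a_vap,
 *	    ap->a_cnp, NULL));
 *
 * with 'target' only supplied for symlinks.
 */
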
1211 struct tmpfs_dirent *
1212 tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
1213 {
1214 	struct tmpfs_dirent *de;
1215 
1216 	de = RB_MIN(tmpfs_dir, &dnode->tn_dir.tn_dirhead);
1217 	dc->tdc_tree = de;
1218 	if (de != NULL && tmpfs_dirent_duphead(de))
1219 		de = LIST_FIRST(&de->ud.td_duphead);
1220 	dc->tdc_current = de;
1221 
1222 	return (dc->tdc_current);
1223 }
1224 
1225 struct tmpfs_dirent *
1226 tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
1227 {
1228 	struct tmpfs_dirent *de;
1229 
1230 	MPASS(dc->tdc_tree != NULL);
1231 	if (tmpfs_dirent_dup(dc->tdc_current)) {
1232 		dc->tdc_current = LIST_NEXT(dc->tdc_current, uh.td_dup.entries);
1233 		if (dc->tdc_current != NULL)
1234 			return (dc->tdc_current);
1235 	}
1236 	dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
1237 	    &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
1238 	if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
1239 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
1240 		MPASS(dc->tdc_current != NULL);
1241 	}
1242 
1243 	return (dc->tdc_current);
1244 }
1245 
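/*
 * tmpfs_dir_first() and tmpfs_dir_next() implement a cursor that walks the
 * directory RB-tree and transparently descends into duplicate-hash chains.
 * A full scan of a directory node (with its vnode lock held) looks roughly
 * like:
 *
 *	struct tmpfs_dir_cursor dc;
 *	struct tmpfs_dirent *de;
 *
 *	for (de = tmpfs_dir_first(dnode, &dc); de != NULL;
 *	    de = tmpfs_dir_next(dnode, &dc)) {
 *		... process de->ud.td_name and de->td_node ...
 *	}
 *
 * tmpfs_dir_getdents() below uses the same cursor, restarting it from a
 * caller-supplied cookie via tmpfs_dir_lookup_cookie().
 */
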
1246 /* Look up directory entry in RB-Tree.  Function may return duphead entry. */
1247 static struct tmpfs_dirent *
1248 tmpfs_dir_xlookup_hash(struct tmpfs_node *dnode, uint32_t hash)
1249 {
1250 	struct tmpfs_dirent *de, dekey;
1251 
1252 	dekey.td_hash = hash;
1253 	de = RB_FIND(tmpfs_dir, &dnode->tn_dir.tn_dirhead, &dekey);
1254 	return (de);
1255 }
1256 
1257 /* Look up directory entry by cookie, initialize directory cursor accordingly. */
1258 static struct tmpfs_dirent *
1259 tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
1260     struct tmpfs_dir_cursor *dc)
1261 {
1262 	struct tmpfs_dir *dirhead = &node->tn_dir.tn_dirhead;
1263 	struct tmpfs_dirent *de, dekey;
1264 
1265 	MPASS(cookie >= TMPFS_DIRCOOKIE_MIN);
1266 
1267 	if (cookie == node->tn_dir.tn_readdir_lastn &&
1268 	    (de = node->tn_dir.tn_readdir_lastp) != NULL) {
1269 		/* Protect against a possible race: tn_readdir_last[pn]
1270 		 * may be updated with only shared vnode lock held. */
1271 		if (cookie == tmpfs_dirent_cookie(de))
1272 			goto out;
1273 	}
1274 
1275 	if ((cookie & TMPFS_DIRCOOKIE_DUP) != 0) {
1276 		LIST_FOREACH(de, &node->tn_dir.tn_dupindex,
1277 		    uh.td_dup.index_entries) {
1278 			MPASS(tmpfs_dirent_dup(de));
1279 			if (de->td_cookie == cookie)
1280 				goto out;
1281 			/* dupindex list is sorted. */
1282 			if (de->td_cookie < cookie) {
1283 				de = NULL;
1284 				goto out;
1285 			}
1286 		}
1287 		MPASS(de == NULL);
1288 		goto out;
1289 	}
1290 
1291 	if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
1292 		de = NULL;
1293 	} else {
1294 		dekey.td_hash = cookie;
1295 		/* Recover if direntry for cookie was removed */
1296 		de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
1297 	}
1298 	dc->tdc_tree = de;
1299 	dc->tdc_current = de;
1300 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1301 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
1302 		MPASS(dc->tdc_current != NULL);
1303 	}
1304 	return (dc->tdc_current);
1305 
1306 out:
1307 	dc->tdc_tree = de;
1308 	dc->tdc_current = de;
1309 	if (de != NULL && tmpfs_dirent_dup(de))
1310 		dc->tdc_tree = tmpfs_dir_xlookup_hash(node,
1311 		    de->td_hash);
1312 	return (dc->tdc_current);
1313 }
1314 
1315 /*
1316  * Looks for a directory entry in the directory represented by node.
1317  * 'cnp' describes the name of the entry to look for.  Note that the .
1318  * and .. components are not allowed as they do not physically exist
1319  * within directories.
1320  *
1321  * Returns a pointer to the entry when found, otherwise NULL.
1322  */
1323 struct tmpfs_dirent *
1324 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
1325     struct componentname *cnp)
1326 {
1327 	struct tmpfs_dir_duphead *duphead;
1328 	struct tmpfs_dirent *de;
1329 	uint32_t hash;
1330 
1331 	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
1332 	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
1333 	    cnp->cn_nameptr[1] == '.')));
1334 	TMPFS_VALIDATE_DIR(node);
1335 
1336 	hash = tmpfs_dirent_hash(cnp->cn_nameptr, cnp->cn_namelen);
1337 	de = tmpfs_dir_xlookup_hash(node, hash);
1338 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1339 		duphead = &de->ud.td_duphead;
1340 		LIST_FOREACH(de, duphead, uh.td_dup.entries) {
1341 			if (TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1342 			    cnp->cn_namelen))
1343 				break;
1344 		}
1345 	} else if (de != NULL) {
1346 		if (!TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1347 		    cnp->cn_namelen))
1348 			de = NULL;
1349 	}
1350 	if (de != NULL && f != NULL && de->td_node != f)
1351 		de = NULL;
1352 
1353 	return (de);
1354 }
1355 
1356 /*
1357  * Attach duplicate-cookie directory entry nde to dnode and insert to dupindex
1358  * list, allocate new cookie value.
1359  */
1360 static void
1361 tmpfs_dir_attach_dup(struct tmpfs_node *dnode,
1362     struct tmpfs_dir_duphead *duphead, struct tmpfs_dirent *nde)
1363 {
1364 	struct tmpfs_dir_duphead *dupindex;
1365 	struct tmpfs_dirent *de, *pde;
1366 
1367 	dupindex = &dnode->tn_dir.tn_dupindex;
1368 	de = LIST_FIRST(dupindex);
1369 	if (de == NULL || de->td_cookie < TMPFS_DIRCOOKIE_DUP_MAX) {
1370 		if (de == NULL)
1371 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1372 		else
1373 			nde->td_cookie = de->td_cookie + 1;
1374 		MPASS(tmpfs_dirent_dup(nde));
1375 		LIST_INSERT_HEAD(dupindex, nde, uh.td_dup.index_entries);
1376 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1377 		return;
1378 	}
1379 
1380 	/*
1381 	 * Cookie numbers are near exhaustion.  Scan the dupindex list for unused
1382 	 * numbers.  The dupindex list is sorted in descending order; keep it
1383 	 * that way after inserting nde.
1384 	 */
1385 	while (1) {
1386 		pde = de;
1387 		de = LIST_NEXT(de, uh.td_dup.index_entries);
1388 		if (de == NULL && pde->td_cookie != TMPFS_DIRCOOKIE_DUP_MIN) {
1389 			/*
1390 			 * Last element of the index doesn't have minimal cookie
1391 			 * value, use it.
1392 			 */
1393 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1394 			LIST_INSERT_AFTER(pde, nde, uh.td_dup.index_entries);
1395 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1396 			return;
1397 		} else if (de == NULL) {
1398 			/*
1399 			 * We are so lucky to have 2^30 hash duplicates in a single
1400 			 * directory :) Return the largest possible cookie value.
1401 			 * It should be fine except for possible issues with
1402 			 * VOP_READDIR restart.
1403 			 */
1404 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MAX;
1405 			LIST_INSERT_HEAD(dupindex, nde,
1406 			    uh.td_dup.index_entries);
1407 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1408 			return;
1409 		}
1410 		if (de->td_cookie + 1 == pde->td_cookie ||
1411 		    de->td_cookie >= TMPFS_DIRCOOKIE_DUP_MAX)
1412 			continue;	/* No hole or invalid cookie. */
1413 		nde->td_cookie = de->td_cookie + 1;
1414 		MPASS(tmpfs_dirent_dup(nde));
1415 		MPASS(pde->td_cookie > nde->td_cookie);
1416 		MPASS(nde->td_cookie > de->td_cookie);
1417 		LIST_INSERT_BEFORE(de, nde, uh.td_dup.index_entries);
1418 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1419 		return;
1420 	}
1421 }
1422 
1423 /*
1424  * Attaches the directory entry de to the directory represented by vp.
1425  * Note that this does not change the link count of the node pointed to by
1426  * the directory entry, as this is done by tmpfs_alloc_dirent.
1427  */
1428 void
1429 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
1430 {
1431 	struct tmpfs_node *dnode;
1432 	struct tmpfs_dirent *xde, *nde;
1433 
1434 	ASSERT_VOP_ELOCKED(vp, __func__);
1435 	MPASS(de->td_namelen > 0);
1436 	MPASS(de->td_hash >= TMPFS_DIRCOOKIE_MIN);
1437 	MPASS(de->td_cookie == de->td_hash);
1438 
1439 	dnode = VP_TO_TMPFS_DIR(vp);
1440 	dnode->tn_dir.tn_readdir_lastn = 0;
1441 	dnode->tn_dir.tn_readdir_lastp = NULL;
1442 
1443 	MPASS(!tmpfs_dirent_dup(de));
1444 	xde = RB_INSERT(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1445 	if (xde != NULL && tmpfs_dirent_duphead(xde))
1446 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1447 	else if (xde != NULL) {
1448 		/*
1449 		 * Allocate new duphead. Swap xde with duphead to avoid
1450 		 * adding/removing elements with the same hash.
1451 		 */
1452 		MPASS(!tmpfs_dirent_dup(xde));
1453 		tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), NULL, NULL, 0,
1454 		    &nde);
1455 		/* *nde = *xde; XXX gcc 4.2.1 may generate invalid code. */
1456 		memcpy(nde, xde, sizeof(*xde));
1457 		xde->td_cookie |= TMPFS_DIRCOOKIE_DUPHEAD;
1458 		LIST_INIT(&xde->ud.td_duphead);
1459 		xde->td_namelen = 0;
1460 		xde->td_node = NULL;
1461 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, nde);
1462 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1463 	}
1464 	dnode->tn_size += sizeof(struct tmpfs_dirent);
1465 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1466 	dnode->tn_accessed = true;
1467 	tmpfs_update(vp);
1468 }
1469 
1470 /*
1471  * Detaches the directory entry de from the directory represented by vp.
1472  * Note that this does not change the link count of the node pointed to by
1473  * the directory entry, as this is done by tmpfs_free_dirent.
1474  */
1475 void
1476 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
1477 {
1478 	struct tmpfs_mount *tmp;
1479 	struct tmpfs_dir *head;
1480 	struct tmpfs_node *dnode;
1481 	struct tmpfs_dirent *xde;
1482 
1483 	ASSERT_VOP_ELOCKED(vp, __func__);
1484 
1485 	dnode = VP_TO_TMPFS_DIR(vp);
1486 	head = &dnode->tn_dir.tn_dirhead;
1487 	dnode->tn_dir.tn_readdir_lastn = 0;
1488 	dnode->tn_dir.tn_readdir_lastp = NULL;
1489 
1490 	if (tmpfs_dirent_dup(de)) {
1491 		/* Remove duphead if de was last entry. */
1492 		if (LIST_NEXT(de, uh.td_dup.entries) == NULL) {
1493 			xde = tmpfs_dir_xlookup_hash(dnode, de->td_hash);
1494 			MPASS(tmpfs_dirent_duphead(xde));
1495 		} else
1496 			xde = NULL;
1497 		LIST_REMOVE(de, uh.td_dup.entries);
1498 		LIST_REMOVE(de, uh.td_dup.index_entries);
1499 		if (xde != NULL) {
1500 			if (LIST_EMPTY(&xde->ud.td_duphead)) {
1501 				RB_REMOVE(tmpfs_dir, head, xde);
1502 				tmp = VFS_TO_TMPFS(vp->v_mount);
1503 				MPASS(xde->td_node == NULL);
1504 				tmpfs_free_dirent(tmp, xde);
1505 			}
1506 		}
1507 		de->td_cookie = de->td_hash;
1508 	} else
1509 		RB_REMOVE(tmpfs_dir, head, de);
1510 
1511 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
1512 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1513 	dnode->tn_accessed = true;
1514 	tmpfs_update(vp);
1515 }
1516 
1517 void
1518 tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpfs_node *dnode)
1519 {
1520 	struct tmpfs_dirent *de, *dde, *nde;
1521 
1522 	RB_FOREACH_SAFE(de, tmpfs_dir, &dnode->tn_dir.tn_dirhead, nde) {
1523 		RB_REMOVE(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1524 		/* Node may already be destroyed. */
1525 		de->td_node = NULL;
1526 		if (tmpfs_dirent_duphead(de)) {
1527 			while ((dde = LIST_FIRST(&de->ud.td_duphead)) != NULL) {
1528 				LIST_REMOVE(dde, uh.td_dup.entries);
1529 				dde->td_node = NULL;
1530 				tmpfs_free_dirent(tmp, dde);
1531 			}
1532 		}
1533 		tmpfs_free_dirent(tmp, de);
1534 	}
1535 }
1536 
1537 /*
1538  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
1539  * directory and returns it in the uio space.  The function returns 0
1540  * on success, EJUSTRETURN if there was not enough space in the uio structure
1541  * to hold the directory entry, or an appropriate error code if another
1542  * error happens.
1543  */
1544 static int
1545 tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1546     struct uio *uio)
1547 {
1548 	int error;
1549 	struct dirent dent;
1550 
1551 	TMPFS_VALIDATE_DIR(node);
1552 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
1553 
1554 	dent.d_fileno = node->tn_id;
1555 	dent.d_off = TMPFS_DIRCOOKIE_DOTDOT;
1556 	dent.d_type = DT_DIR;
1557 	dent.d_namlen = 1;
1558 	dent.d_name[0] = '.';
1559 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1560 	dirent_terminate(&dent);
1561 
1562 	if (dent.d_reclen > uio->uio_resid)
1563 		error = EJUSTRETURN;
1564 	else
1565 		error = uiomove(&dent, dent.d_reclen, uio);
1566 
1567 	tmpfs_set_accessed(tm, node);
1568 
1569 	return (error);
1570 }
1571 
1572 /*
1573  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
1574  * directory and returns it in the uio space.  The function returns 0
1575  * on success, EJUSTRETURN if there was not enough space in the uio structure
1576  * to hold the directory entry, or an appropriate error code if another
1577  * error happens.
1578  */
1579 static int
1580 tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1581     struct uio *uio, off_t next)
1582 {
1583 	struct tmpfs_node *parent;
1584 	struct dirent dent;
1585 	int error;
1586 
1587 	TMPFS_VALIDATE_DIR(node);
1588 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
1589 
1590 	/*
1591 	 * Return ENOENT if the current node is already removed.
1592 	 */
1593 	TMPFS_ASSERT_LOCKED(node);
1594 	parent = node->tn_dir.tn_parent;
1595 	if (parent == NULL)
1596 		return (ENOENT);
1597 
1598 	dent.d_fileno = parent->tn_id;
1599 	dent.d_off = next;
1600 	dent.d_type = DT_DIR;
1601 	dent.d_namlen = 2;
1602 	dent.d_name[0] = '.';
1603 	dent.d_name[1] = '.';
1604 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1605 	dirent_terminate(&dent);
1606 
1607 	if (dent.d_reclen > uio->uio_resid)
1608 		error = EJUSTRETURN;
1609 	else
1610 		error = uiomove(&dent, dent.d_reclen, uio);
1611 
1612 	tmpfs_set_accessed(tm, node);
1613 
1614 	return (error);
1615 }
1616 
1617 /*
1618  * Helper function for tmpfs_readdir.  Returns as many directory entries
1619  * as can fit in the uio space.  The read starts at uio->uio_offset.
1620  * The function returns 0 on success, EJUSTRETURN if there was not enough
1621  * space in the uio structure to hold a directory entry, or an appropriate
1622  * error code if another error happens.
1623  */
1624 int
1625 tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
1626     struct uio *uio, int maxcookies, uint64_t *cookies, int *ncookies)
1627 {
1628 	struct tmpfs_dir_cursor dc;
1629 	struct tmpfs_dirent *de, *nde;
1630 	off_t off;
1631 	int error;
1632 
1633 	TMPFS_VALIDATE_DIR(node);
1634 
1635 	off = 0;
1636 
1637 	/*
1638 	 * Look up the node from the current offset.  The starting offset of
1639 	 * 0 will look up both '.' and '..', and then the first real entry,
1640 	 * or EOF if there are none.  Then find all entries for the dir that
1641 	 * fit into the buffer.  Once no more entries are found (de == NULL),
1642 	 * the offset is set to TMPFS_DIRCOOKIE_EOF, which will cause the next
1643 	 * call to return 0.
1644 	 */
1645 	switch (uio->uio_offset) {
1646 	case TMPFS_DIRCOOKIE_DOT:
1647 		error = tmpfs_dir_getdotdent(tm, node, uio);
1648 		if (error != 0)
1649 			return (error);
1650 		uio->uio_offset = off = TMPFS_DIRCOOKIE_DOTDOT;
1651 		if (cookies != NULL)
1652 			cookies[(*ncookies)++] = off;
1653 		/* FALLTHROUGH */
1654 	case TMPFS_DIRCOOKIE_DOTDOT:
1655 		de = tmpfs_dir_first(node, &dc);
1656 		off = tmpfs_dirent_cookie(de);
1657 		error = tmpfs_dir_getdotdotdent(tm, node, uio, off);
1658 		if (error != 0)
1659 			return (error);
1660 		uio->uio_offset = off;
1661 		if (cookies != NULL)
1662 			cookies[(*ncookies)++] = off;
1663 		/* EOF. */
1664 		if (de == NULL)
1665 			return (0);
1666 		break;
1667 	case TMPFS_DIRCOOKIE_EOF:
1668 		return (0);
1669 	default:
1670 		de = tmpfs_dir_lookup_cookie(node, uio->uio_offset, &dc);
1671 		if (de == NULL)
1672 			return (EINVAL);
1673 		if (cookies != NULL)
1674 			off = tmpfs_dirent_cookie(de);
1675 	}
1676 
1677 	/*
1678 	 * Read as many entries as possible; i.e., until we reach the end of the
1679 	 * directory or we exhaust uio space.
1680 	 */
1681 	do {
1682 		struct dirent d;
1683 
1684 		/*
1685 		 * Create a dirent structure representing the current tmpfs_node
1686 		 * and fill it.
1687 		 */
1688 		if (de->td_node == NULL) {
1689 			d.d_fileno = 1;
1690 			d.d_type = DT_WHT;
1691 		} else {
1692 			d.d_fileno = de->td_node->tn_id;
1693 			switch (de->td_node->tn_type) {
1694 			case VBLK:
1695 				d.d_type = DT_BLK;
1696 				break;
1697 
1698 			case VCHR:
1699 				d.d_type = DT_CHR;
1700 				break;
1701 
1702 			case VDIR:
1703 				d.d_type = DT_DIR;
1704 				break;
1705 
1706 			case VFIFO:
1707 				d.d_type = DT_FIFO;
1708 				break;
1709 
1710 			case VLNK:
1711 				d.d_type = DT_LNK;
1712 				break;
1713 
1714 			case VREG:
1715 				d.d_type = DT_REG;
1716 				break;
1717 
1718 			case VSOCK:
1719 				d.d_type = DT_SOCK;
1720 				break;
1721 
1722 			default:
1723 				panic("tmpfs_dir_getdents: type %p %d",
1724 				    de->td_node, (int)de->td_node->tn_type);
1725 			}
1726 		}
1727 		d.d_namlen = de->td_namelen;
1728 		MPASS(de->td_namelen < sizeof(d.d_name));
1729 		(void)memcpy(d.d_name, de->ud.td_name, de->td_namelen);
1730 		d.d_reclen = GENERIC_DIRSIZ(&d);
1731 
1732 		/*
1733 		 * Stop reading if the directory entry we are processing is bigger
1734 		 * than the amount of data that can be returned.
1735 		 */
1736 		if (d.d_reclen > uio->uio_resid) {
1737 			error = EJUSTRETURN;
1738 			break;
1739 		}
1740 
1741 		nde = tmpfs_dir_next(node, &dc);
1742 		d.d_off = tmpfs_dirent_cookie(nde);
1743 		dirent_terminate(&d);
1744 
1745 		/*
1746 		 * Copy the new dirent structure into the output buffer and
1747 		 * advance pointers.
1748 		 */
1749 		error = uiomove(&d, d.d_reclen, uio);
1750 		if (error == 0) {
1751 			de = nde;
1752 			if (cookies != NULL) {
1753 				off = tmpfs_dirent_cookie(de);
1754 				MPASS(*ncookies < maxcookies);
1755 				cookies[(*ncookies)++] = off;
1756 			}
1757 		}
1758 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
1759 
1760 	/* Skip setting off when using cookies as it is already done above. */
1761 	if (cookies == NULL)
1762 		off = tmpfs_dirent_cookie(de);
1763 
1764 	/* Update the offset and cache. */
1765 	uio->uio_offset = off;
1766 	node->tn_dir.tn_readdir_lastn = off;
1767 	node->tn_dir.tn_readdir_lastp = de;
1768 
1769 	tmpfs_set_accessed(tm, node);
1770 	return (error);
1771 }
1772 
1773 int
1774 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
1775 {
1776 	struct tmpfs_dirent *de;
1777 	int error;
1778 
1779 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
1780 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
1781 	if (error != 0)
1782 		return (error);
1783 	tmpfs_dir_attach(dvp, de);
1784 	return (0);
1785 }
1786 
1787 void
1788 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
1789 {
1790 	struct tmpfs_dirent *de;
1791 
1792 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1793 	MPASS(de != NULL && de->td_node == NULL);
1794 	tmpfs_dir_detach(dvp, de);
1795 	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de);
1796 }
1797 
1798 /*
1799  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
1800  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
1801  * 'newsize' must be positive.
1802  * 'newsize' must be non-negative.
1803  * Returns zero on success or an appropriate error code on failure.
1804  */
1805 int
1806 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
1807 {
1808 	struct tmpfs_node *node;
1809 	vm_object_t uobj;
1810 	vm_pindex_t idx, newpages, oldpages;
1811 	off_t oldsize;
1812 	int base, error;
1813 
1814 	MPASS(vp->v_type == VREG);
1815 	MPASS(newsize >= 0);
1816 
1817 	node = VP_TO_TMPFS_NODE(vp);
1818 	uobj = node->tn_reg.tn_aobj;
1819 
1820 	/*
1821 	 * Convert the old and new sizes to the number of pages needed to
1822 	 * store them.  It may happen that we do not need to do anything
1823 	 * because the last allocated page can accommodate the change on
1824 	 * its own.
1825 	 */
1826 	oldsize = node->tn_size;
1827 	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
1828 	MPASS(oldpages == uobj->size);
1829 	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
1830 
1831 	if (__predict_true(newpages == oldpages && newsize >= oldsize)) {
1832 		node->tn_size = newsize;
1833 		return (0);
1834 	}
1835 
1836 	VM_OBJECT_WLOCK(uobj);
1837 	if (newsize < oldsize) {
1838 		/*
1839 		 * Zero the truncated part of the last page.
1840 		 */
1841 		base = newsize & PAGE_MASK;
1842 		if (base != 0) {
1843 			idx = OFF_TO_IDX(newsize);
1844 			error = tmpfs_partial_page_invalidate(uobj, idx, base,
1845 			    PAGE_SIZE, ignerr);
1846 			if (error != 0) {
1847 				VM_OBJECT_WUNLOCK(uobj);
1848 				return (error);
1849 			}
1850 		}
1851 
1852 		/*
1853 		 * Release any swap space and free any whole pages.
1854 		 */
1855 		if (newpages < oldpages)
1856 			vm_object_page_remove(uobj, newpages, 0, 0);
1857 	}
1858 	uobj->size = newpages;
1859 	VM_OBJECT_WUNLOCK(uobj);
1860 
1861 	node->tn_size = newsize;
1862 	return (0);
1863 }
1864 
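/*
 * A minimal userland sketch of the page accounting above.  A 4 KiB page is
 * assumed purely for the example; the kernel uses PAGE_MASK and OFF_TO_IDX().
 * Growing from 5000 to 6000 bytes stays within the same two pages, so only
 * tn_size would change and the fast path returns early.
 */
#include <stdio.h>

#define	EX_PAGE_SHIFT	12
#define	EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)
#define	EX_PAGE_MASK	(EX_PAGE_SIZE - 1)
#define	EX_OFF_TO_IDX(x)	((unsigned long)(x) >> EX_PAGE_SHIFT)

int
main(void)
{
	unsigned long oldsize = 5000, newsize = 6000;
	unsigned long oldpages, newpages;

	/* Round each byte size up to a whole number of pages. */
	oldpages = EX_OFF_TO_IDX(oldsize + EX_PAGE_MASK);
	newpages = EX_OFF_TO_IDX(newsize + EX_PAGE_MASK);

	printf("oldpages=%lu newpages=%lu fastpath=%d\n", oldpages, newpages,
	    newpages == oldpages && newsize >= oldsize);
	return (0);
}
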
1865 /*
1866  * Punch hole in the aobj associated with the regular file pointed to by 'vp'.
1867  * Requests completely beyond the end-of-file are converted to a no-op.
1868  *
1869  * Returns 0 on success or error code from tmpfs_partial_page_invalidate() on
1870  * failure.
1871  */
1872 int
1873 tmpfs_reg_punch_hole(struct vnode *vp, off_t *offset, off_t *length)
1874 {
1875 	struct tmpfs_node *node;
1876 	vm_object_t object;
1877 	vm_pindex_t pistart, pi, piend;
1878 	int startofs, endofs, end;
1879 	off_t off, len;
1880 	int error;
1881 
1882 	KASSERT(*length <= OFF_MAX - *offset, ("%s: offset + length overflows",
1883 	    __func__));
1884 	node = VP_TO_TMPFS_NODE(vp);
1885 	KASSERT(node->tn_type == VREG, ("%s: node is not regular file",
1886 	    __func__));
1887 	object = node->tn_reg.tn_aobj;
1888 	off = *offset;
1889 	len = omin(node->tn_size - off, *length);
1890 	startofs = off & PAGE_MASK;
1891 	endofs = (off + len) & PAGE_MASK;
1892 	pistart = OFF_TO_IDX(off);
1893 	piend = OFF_TO_IDX(off + len);
1894 	pi = OFF_TO_IDX((vm_ooffset_t)off + PAGE_MASK);
1895 	error = 0;
1896 
1897 	/* Handle the case when the offset is at or beyond the file size. */
1898 	if (len <= 0) {
1899 		*length = 0;
1900 		return (0);
1901 	}
1902 
1903 	VM_OBJECT_WLOCK(object);
1904 
1905 	/*
1906 	 * If there is a partial page at the beginning of the hole-punching
1907 	 * request, fill the partial page with zeroes.
1908 	 */
1909 	if (startofs != 0) {
1910 		end = pistart != piend ? PAGE_SIZE : endofs;
1911 		error = tmpfs_partial_page_invalidate(object, pistart, startofs,
1912 		    end, FALSE);
1913 		if (error != 0)
1914 			goto out;
1915 		off += end - startofs;
1916 		len -= end - startofs;
1917 	}
1918 
1919 	/*
1920 	 * Toss away the full pages in the affected area.
1921 	 */
1922 	if (pi < piend) {
1923 		vm_object_page_remove(object, pi, piend, 0);
1924 		off += IDX_TO_OFF(piend - pi);
1925 		len -= IDX_TO_OFF(piend - pi);
1926 	}
1927 
1928 	/*
1929 	 * If there is a partial page at the end of the hole-punching request,
1930 	 * fill the partial page with zeroes.
1931 	 */
1932 	if (endofs != 0 && pistart != piend) {
1933 		error = tmpfs_partial_page_invalidate(object, piend, 0, endofs,
1934 		    FALSE);
1935 		if (error != 0)
1936 			goto out;
1937 		off += endofs;
1938 		len -= endofs;
1939 	}
1940 
1941 out:
1942 	VM_OBJECT_WUNLOCK(object);
1943 	*offset = off;
1944 	*length = len;
1945 	return (error);
1946 }
1947 
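/*
 * A minimal userland sketch of the arithmetic above for one hypothetical
 * request (offset 3000, length 10000) with an assumed 4 KiB page: page 0
 * keeps its first 3000 bytes and has the rest zeroed, pages 1 and 2 are
 * removed outright, and page 3 has its first 712 bytes zeroed.
 */
#include <stdio.h>

#define	EX_PGSZ		4096L
#define	EX_PGMASK	(EX_PGSZ - 1)

int
main(void)
{
	long off = 3000, len = 10000;
	long startofs, endofs, pistart, piend, pi;

	startofs = off & EX_PGMASK;		/* 3000 */
	endofs = (off + len) & EX_PGMASK;	/* 712 */
	pistart = off / EX_PGSZ;		/* 0 */
	piend = (off + len) / EX_PGSZ;		/* 3 */
	pi = (off + EX_PGMASK) / EX_PGSZ;	/* 1 */

	printf("zero bytes [%ld, %ld) of page %ld\n", startofs, EX_PGSZ,
	    pistart);
	printf("remove whole pages [%ld, %ld)\n", pi, piend);
	printf("zero bytes [0, %ld) of page %ld\n", endofs, piend);
	return (0);
}
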
1948 void
1949 tmpfs_check_mtime(struct vnode *vp)
1950 {
1951 	struct tmpfs_node *node;
1952 	struct vm_object *obj;
1953 
1954 	ASSERT_VOP_ELOCKED(vp, "check_mtime");
1955 	if (vp->v_type != VREG)
1956 		return;
1957 	obj = vp->v_object;
1958 	KASSERT(obj->type == tmpfs_pager_type &&
1959 	    (obj->flags & (OBJ_SWAP | OBJ_TMPFS)) ==
1960 	    (OBJ_SWAP | OBJ_TMPFS), ("non-tmpfs obj"));
1961 	/* unlocked read */
1962 	if (obj->generation != obj->cleangeneration) {
1963 		VM_OBJECT_WLOCK(obj);
1964 		if (obj->generation != obj->cleangeneration) {
1965 			obj->cleangeneration = obj->generation;
1966 			node = VP_TO_TMPFS_NODE(vp);
1967 			node->tn_status |= TMPFS_NODE_MODIFIED |
1968 			    TMPFS_NODE_CHANGED;
1969 		}
1970 		VM_OBJECT_WUNLOCK(obj);
1971 	}
1972 }
1973 
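/*
 * A minimal userland sketch, with hypothetical names, of the double-checked
 * pattern above: an unlocked comparison of two counters filters the common
 * "nothing dirtied" case, and only when they differ is the lock taken and
 * the test repeated before recording the modification.
 */
#include <pthread.h>
#include <stdbool.h>

struct ex_obj {
	pthread_mutex_t	lock;
	unsigned	generation;		/* bumped on every write */
	unsigned	cleangeneration;	/* generation last accounted for */
	bool		modified;
};

static void
ex_check_dirty(struct ex_obj *o)
{

	/* Unlocked read: a stale value only costs us taking the lock. */
	if (o->generation == o->cleangeneration)
		return;
	pthread_mutex_lock(&o->lock);
	if (o->generation != o->cleangeneration) {
		o->cleangeneration = o->generation;
		o->modified = true;	/* analogous to TMPFS_NODE_MODIFIED */
	}
	pthread_mutex_unlock(&o->lock);
}

int
main(void)
{
	struct ex_obj o = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.generation = 1,
	};

	ex_check_dirty(&o);
	return (o.modified ? 0 : 1);
}
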
1974 /*
1975  * Change flags of the given vnode.
1976  * Caller should execute tmpfs_update on vp after a successful execution.
1977  * The vnode must be locked on entry and remain locked on exit.
1978  */
1979 int
1980 tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
1981     struct thread *td)
1982 {
1983 	int error;
1984 	struct tmpfs_node *node;
1985 
1986 	ASSERT_VOP_ELOCKED(vp, "chflags");
1987 
1988 	node = VP_TO_TMPFS_NODE(vp);
1989 
1990 	if ((flags & ~(SF_APPEND | SF_ARCHIVED | SF_IMMUTABLE | SF_NOUNLINK |
1991 	    UF_APPEND | UF_ARCHIVE | UF_HIDDEN | UF_IMMUTABLE | UF_NODUMP |
1992 	    UF_NOUNLINK | UF_OFFLINE | UF_OPAQUE | UF_READONLY | UF_REPARSE |
1993 	    UF_SPARSE | UF_SYSTEM)) != 0)
1994 		return (EOPNOTSUPP);
1995 
1996 	/* Disallow this operation if the file system is mounted read-only. */
1997 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1998 		return (EROFS);
1999 
2000 	/*
2001 	 * Callers may only modify the file flags on objects they
2002 	 * have VADMIN rights for.
2003 	 */
2004 	if ((error = VOP_ACCESS(vp, VADMIN, cred, td)))
2005 		return (error);
2006 	/*
2007 	 * Unprivileged processes are not permitted to unset system
2008 	 * flags, or modify flags if any system flags are set.
2009 	 */
2010 	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS)) {
2011 		if (node->tn_flags &
2012 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
2013 			error = securelevel_gt(cred, 0);
2014 			if (error)
2015 				return (error);
2016 		}
2017 	} else {
2018 		if (node->tn_flags &
2019 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
2020 		    ((flags ^ node->tn_flags) & SF_SETTABLE))
2021 			return (EPERM);
2022 	}
2023 	node->tn_flags = flags;
2024 	node->tn_status |= TMPFS_NODE_CHANGED;
2025 
2026 	ASSERT_VOP_ELOCKED(vp, "chflags2");
2027 
2028 	return (0);
2029 }
2030 
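/*
 * A minimal userland sketch of the policy above, using chflags(2) on a
 * hypothetical tmpfs file.  Setting the user flag UF_NODUMP is allowed for
 * the owner; a flag outside the supported set yields EOPNOTSUPP, and an
 * unprivileged attempt to set the system flag SF_IMMUTABLE yields EPERM.
 */
#include <sys/stat.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/tmpfs-file";	/* assumed to already exist */

	if (chflags(path, UF_NODUMP) == -1)
		err(1, "chflags UF_NODUMP");
	printf("UF_NODUMP set\n");

	/* Expected to fail with EPERM when run without privilege. */
	if (chflags(path, UF_NODUMP | SF_IMMUTABLE) == -1)
		warn("chflags SF_IMMUTABLE");
	return (0);
}
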
2031 /*
2032  * Change access mode on the given vnode.
2033  * Caller should execute tmpfs_update on vp after a successful execution.
2034  * The vnode must be locked on entry and remain locked on exit.
2035  */
2036 int
2037 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred,
2038     struct thread *td)
2039 {
2040 	int error;
2041 	struct tmpfs_node *node;
2042 	mode_t newmode;
2043 
2044 	ASSERT_VOP_ELOCKED(vp, "chmod");
2045 	ASSERT_VOP_IN_SEQC(vp);
2046 
2047 	node = VP_TO_TMPFS_NODE(vp);
2048 
2049 	/* Disallow this operation if the file system is mounted read-only. */
2050 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
2051 		return (EROFS);
2052 
2053 	/* Immutable or append-only files cannot be modified, either. */
2054 	if (node->tn_flags & (IMMUTABLE | APPEND))
2055 		return (EPERM);
2056 
2057 	/*
2058 	 * To modify the permissions on a file, must possess VADMIN
2059 	 * for that file.
2060 	 */
2061 	if ((error = VOP_ACCESS(vp, VADMIN, cred, td)))
2062 		return (error);
2063 
2064 	/*
2065 	 * Privileged processes may set the sticky bit on non-directories,
2066 	 * as well as set the setgid bit on a file with a group that the
2067 	 * process is not a member of.
2068 	 */
2069 	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
2070 		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE))
2071 			return (EFTYPE);
2072 	}
2073 	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
2074 		error = priv_check_cred(cred, PRIV_VFS_SETGID);
2075 		if (error)
2076 			return (error);
2077 	}
2078 
2079 	newmode = node->tn_mode & ~ALLPERMS;
2080 	newmode |= mode & ALLPERMS;
2081 	atomic_store_short(&node->tn_mode, newmode);
2082 
2083 	node->tn_status |= TMPFS_NODE_CHANGED;
2084 
2085 	ASSERT_VOP_ELOCKED(vp, "chmod2");
2086 
2087 	return (0);
2088 }
2089 
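/*
 * A minimal userland sketch of the sticky-bit check above: an unprivileged
 * chmod(2) that sets S_ISTXT on a regular file is refused with EFTYPE, while
 * the same bit on a directory is fine.  The paths are hypothetical.
 */
#include <sys/stat.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{

	/* Expected to fail with EFTYPE for an unprivileged caller. */
	if (chmod("/tmp/tmpfs-file", S_IRUSR | S_IWUSR | S_ISTXT) == -1)
		warn("chmod S_ISTXT on a regular file");

	/* The sticky bit on a directory is allowed (the classic /tmp setup). */
	if (chmod("/tmp/tmpfs-dir", S_IRWXU | S_ISTXT) == -1)
		err(1, "chmod S_ISTXT on a directory");
	printf("directory sticky bit set\n");
	return (0);
}
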
2090 /*
2091  * Change ownership of the given vnode.  At least one of uid or gid must
2092  * be different than VNOVAL.  If one is set to that value, the attribute
2093  * is unchanged.
2094  * Caller should execute tmpfs_update on vp after a successful execution.
2095  * The vnode must be locked on entry and remain locked on exit.
2096  */
2097 int
2098 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
2099     struct thread *td)
2100 {
2101 	int error;
2102 	struct tmpfs_node *node;
2103 	uid_t ouid;
2104 	gid_t ogid;
2105 	mode_t newmode;
2106 
2107 	ASSERT_VOP_ELOCKED(vp, "chown");
2108 	ASSERT_VOP_IN_SEQC(vp);
2109 
2110 	node = VP_TO_TMPFS_NODE(vp);
2111 
2112 	/* Assign default values if they are unknown. */
2113 	MPASS(uid != VNOVAL || gid != VNOVAL);
2114 	if (uid == VNOVAL)
2115 		uid = node->tn_uid;
2116 	if (gid == VNOVAL)
2117 		gid = node->tn_gid;
2118 	MPASS(uid != VNOVAL && gid != VNOVAL);
2119 
2120 	/* Disallow this operation if the file system is mounted read-only. */
2121 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
2122 		return (EROFS);
2123 
2124 	/* Immutable or append-only files cannot be modified, either. */
2125 	if (node->tn_flags & (IMMUTABLE | APPEND))
2126 		return (EPERM);
2127 
2128 	/*
2129 	 * To modify the ownership of a file, the caller must possess VADMIN
2130 	 * for that file.
2131 	 */
2132 	if ((error = VOP_ACCESS(vp, VADMIN, cred, td)))
2133 		return (error);
2134 
2135 	/*
2136 	 * To change the owner of a file, or change the group of a file to a
2137 	 * group of which we are not a member, the caller must have
2138 	 * privilege.
2139 	 */
2140 	if ((uid != node->tn_uid ||
2141 	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
2142 	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN)))
2143 		return (error);
2144 
2145 	ogid = node->tn_gid;
2146 	ouid = node->tn_uid;
2147 
2148 	node->tn_uid = uid;
2149 	node->tn_gid = gid;
2150 
2151 	node->tn_status |= TMPFS_NODE_CHANGED;
2152 
2153 	if ((node->tn_mode & (S_ISUID | S_ISGID)) != 0 &&
2154 	    (ouid != uid || ogid != gid)) {
2155 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
2156 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
2157 			atomic_store_short(&node->tn_mode, newmode);
2158 		}
2159 	}
2160 
2161 	ASSERT_VOP_ELOCKED(vp, "chown2");
2162 
2163 	return (0);
2164 }
2165 
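/*
 * A minimal userland sketch of the setuid/setgid clearing above: when an
 * unprivileged owner gives a setgid file to another group it belongs to,
 * the kernel drops S_ISUID/S_ISGID because the caller lacks
 * PRIV_VFS_RETAINSUGID.  The path and group id below are hypothetical.
 */
#include <sys/stat.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/tmpfs-file";	/* assumed setgid beforehand */
	struct stat sb;
	gid_t newgid = 20;			/* a group we are a member of */

	/* (uid_t)-1 leaves the owner untouched, i.e. VNOVAL in the kernel. */
	if (chown(path, (uid_t)-1, newgid) == -1)
		err(1, "chown");
	if (stat(path, &sb) == -1)
		err(1, "stat");
	printf("setgid bit: %s\n",
	    (sb.st_mode & S_ISGID) != 0 ? "still set" : "cleared");
	return (0);
}
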
2166 /*
2167  * Change size of the given vnode.
2168  * Caller should execute tmpfs_update on vp after a successful execution.
2169  * The vnode must be locked on entry and remain locked on exit.
2170  */
2171 int
2172 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
2173     struct thread *td)
2174 {
2175 	int error;
2176 	struct tmpfs_node *node;
2177 
2178 	ASSERT_VOP_ELOCKED(vp, "chsize");
2179 
2180 	node = VP_TO_TMPFS_NODE(vp);
2181 
2182 	/* Decide whether this is a valid operation based on the file type. */
2183 	error = 0;
2184 	switch (vp->v_type) {
2185 	case VDIR:
2186 		return (EISDIR);
2187 
2188 	case VREG:
2189 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
2190 			return (EROFS);
2191 		break;
2192 
2193 	case VBLK:
2194 		/* FALLTHROUGH */
2195 	case VCHR:
2196 		/* FALLTHROUGH */
2197 	case VFIFO:
2198 		/*
2199 		 * Allow modifications of special files even if in the file
2200 		 * system is mounted read-only (we are not modifying the
2201 		 * files themselves, but the objects they represent).
2202 		 */
2203 		return (0);
2204 
2205 	default:
2206 		/* Anything else is unsupported. */
2207 		return (EOPNOTSUPP);
2208 	}
2209 
2210 	/* Immutable or append-only files cannot be modified, either. */
2211 	if (node->tn_flags & (IMMUTABLE | APPEND))
2212 		return (EPERM);
2213 
2214 	error = vn_rlimit_trunc(size, td);
2215 	if (error != 0)
2216 		return (error);
2217 
2218 	error = tmpfs_truncate(vp, size);
2219 	/*
2220 	 * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
2221 	 * for us, as will update tn_status; no need to do that here.
2222 	 */
2223 
2224 	ASSERT_VOP_ELOCKED(vp, "chsize2");
2225 
2226 	return (error);
2227 }
2228 
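/*
 * A minimal userland sketch: tmpfs_chsize() is reached through the
 * truncate(2) family.  Truncating a directory is refused with EISDIR, and a
 * successful grow simply extends tn_size; tmpfs allocates the pages lazily,
 * so the file stays sparse.  The paths are hypothetical and the regular file
 * is assumed to already exist on a tmpfs mount.
 */
#include <sys/stat.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct stat sb;

	/* Expected to fail with EISDIR. */
	if (truncate("/tmp", 0) == -1)
		warn("truncate on a directory");

	/* Grow a regular file to 1 MiB without writing any data. */
	if (truncate("/tmp/tmpfs-file", 1024 * 1024) == -1)
		err(1, "truncate");
	if (stat("/tmp/tmpfs-file", &sb) == -1)
		err(1, "stat");
	printf("size=%jd blocks=%jd\n", (intmax_t)sb.st_size,
	    (intmax_t)sb.st_blocks);
	return (0);
}
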
2229 /*
2230  * Change access and modification times of the given vnode.
2231  * Caller should execute tmpfs_update on vp after a successful execution.
2232  * The vnode must be locked on entry and remain locked on exit.
2233  */
2234 int
2235 tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
2236     struct ucred *cred, struct thread *td)
2237 {
2238 	int error;
2239 	struct tmpfs_node *node;
2240 
2241 	ASSERT_VOP_ELOCKED(vp, "chtimes");
2242 
2243 	node = VP_TO_TMPFS_NODE(vp);
2244 
2245 	/* Disallow this operation if the file system is mounted read-only. */
2246 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
2247 		return (EROFS);
2248 
2249 	/* Immutable or append-only files cannot be modified, either. */
2250 	if (node->tn_flags & (IMMUTABLE | APPEND))
2251 		return (EPERM);
2252 
2253 	error = vn_utimes_perm(vp, vap, cred, td);
2254 	if (error != 0)
2255 		return (error);
2256 
2257 	if (vap->va_atime.tv_sec != VNOVAL)
2258 		node->tn_accessed = true;
2259 	if (vap->va_mtime.tv_sec != VNOVAL)
2260 		node->tn_status |= TMPFS_NODE_MODIFIED;
2261 	if (vap->va_birthtime.tv_sec != VNOVAL)
2262 		node->tn_status |= TMPFS_NODE_MODIFIED;
2263 	tmpfs_itimes(vp, &vap->va_atime, &vap->va_mtime);
2264 	if (vap->va_birthtime.tv_sec != VNOVAL)
2265 		node->tn_birthtime = vap->va_birthtime;
2266 	ASSERT_VOP_ELOCKED(vp, "chtimes2");
2267 
2268 	return (0);
2269 }
2270 
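/*
 * A minimal userland sketch: tmpfs_chtimes() is normally driven through
 * utimensat(2)/futimens(2).  A UTIME_OMIT field reaches the kernel as VNOVAL
 * and leaves that timestamp alone, so the call below bumps only the
 * modification time.  The path is hypothetical.
 */
#include <sys/stat.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec ts[2];

	ts[0].tv_sec = 0;
	ts[0].tv_nsec = UTIME_OMIT;	/* leave the access time untouched */
	ts[1].tv_sec = 0;
	ts[1].tv_nsec = UTIME_NOW;	/* set the modification time to "now" */

	if (utimensat(AT_FDCWD, "/tmp/tmpfs-file", ts, 0) == -1)
		err(1, "utimensat");
	printf("mtime updated, atime untouched\n");
	return (0);
}
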
2271 void
2272 tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
2273 {
2274 
2275 	if ((node->tn_status & status) == status || tm->tm_ronly)
2276 		return;
2277 	TMPFS_NODE_LOCK(node);
2278 	node->tn_status |= status;
2279 	TMPFS_NODE_UNLOCK(node);
2280 }
2281 
2282 void
2283 tmpfs_set_accessed(struct tmpfs_mount *tm, struct tmpfs_node *node)
2284 {
2285 	if (node->tn_accessed || tm->tm_ronly)
2286 		return;
2287 	atomic_store_8(&node->tn_accessed, true);
2288 }
2289 
2290 /* Sync timestamps */
2291 void
2292 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
2293     const struct timespec *mod)
2294 {
2295 	struct tmpfs_node *node;
2296 	struct timespec now;
2297 
2298 	ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
2299 	node = VP_TO_TMPFS_NODE(vp);
2300 
2301 	if (!node->tn_accessed &&
2302 	    (node->tn_status & (TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED)) == 0)
2303 		return;
2304 
2305 	vfs_timestamp(&now);
2306 	TMPFS_NODE_LOCK(node);
2307 	if (node->tn_accessed) {
2308 		if (acc == NULL)
2309 			acc = &now;
2310 		node->tn_atime = *acc;
2311 	}
2312 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
2313 		if (mod == NULL)
2314 			mod = &now;
2315 		node->tn_mtime = *mod;
2316 	}
2317 	if (node->tn_status & TMPFS_NODE_CHANGED)
2318 		node->tn_ctime = now;
2319 	node->tn_status &= ~(TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
2320 	node->tn_accessed = false;
2321 	TMPFS_NODE_UNLOCK(node);
2322 
2323 	/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
2324 	random_harvest_queue(node, sizeof(*node), RANDOM_FS_ATIME);
2325 }
2326 
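/*
 * A minimal userland sketch, with hypothetical names, of the deferred
 * timestamp scheme above: the I/O paths only raise cheap dirty flags, and
 * the wall clock is read once, later, when the flags are folded into the
 * cached timestamps.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct ex_node {
	bool		accessed;
	bool		modified;
	struct timespec	atime;
	struct timespec	mtime;
};

static void
ex_itimes(struct ex_node *n)
{
	struct timespec now;

	if (!n->accessed && !n->modified)
		return;			/* nothing pending */
	clock_gettime(CLOCK_REALTIME, &now);
	if (n->accessed)
		n->atime = now;
	if (n->modified)
		n->mtime = now;
	n->accessed = n->modified = false;
}

int
main(void)
{
	struct ex_node n = { .accessed = true };

	ex_itimes(&n);
	printf("atime=%jd\n", (intmax_t)n.atime.tv_sec);
	return (0);
}
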
2327 int
2328 tmpfs_truncate(struct vnode *vp, off_t length)
2329 {
2330 	struct tmpfs_node *node;
2331 	int error;
2332 
2333 	if (length < 0)
2334 		return (EINVAL);
2335 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
2336 		return (EFBIG);
2337 
2338 	node = VP_TO_TMPFS_NODE(vp);
2339 	error = node->tn_size == length ? 0 : tmpfs_reg_resize(vp, length,
2340 	    FALSE);
2341 	if (error == 0)
2342 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
2343 	tmpfs_update(vp);
2344 
2345 	return (error);
2346 }
2347 
2348 static __inline int
2349 tmpfs_dirtree_cmp(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
2350 {
2351 	if (a->td_hash > b->td_hash)
2352 		return (1);
2353 	else if (a->td_hash < b->td_hash)
2354 		return (-1);
2355 	return (0);
2356 }
2357 
2358 RB_GENERATE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
2359
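/*
 * A minimal userland sketch of the same idea: a red-black tree from
 * <sys/tree.h> keyed by a 32-bit name hash, with the same three-way
 * comparator shape as tmpfs_dirtree_cmp() above.  Unlike tmpfs, this sketch
 * does not chain entries whose hashes collide; FreeBSD's <sys/tree.h> is
 * assumed to be available.
 */
#include <sys/tree.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ex_dirent {
	RB_ENTRY(ex_dirent) entry;
	uint32_t	hash;
	char		name[32];
};

static int
ex_dirtree_cmp(struct ex_dirent *a, struct ex_dirent *b)
{
	if (a->hash > b->hash)
		return (1);
	else if (a->hash < b->hash)
		return (-1);
	return (0);
}

RB_HEAD(ex_dirtree, ex_dirent);
RB_GENERATE_STATIC(ex_dirtree, ex_dirent, entry, ex_dirtree_cmp);

/* 32-bit FNV-1a, standing in for the kernel's fnv_32_buf(). */
static uint32_t
ex_hash(const char *s)
{
	uint32_t h = 2166136261U;

	for (; *s != '\0'; s++) {
		h ^= (unsigned char)*s;
		h *= 16777619U;
	}
	return (h);
}

int
main(void)
{
	struct ex_dirtree head = RB_INITIALIZER(&head);
	const char *names[] = { "passwd", "group", "motd" };
	struct ex_dirent *d, key;
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		d = calloc(1, sizeof(*d));
		if (d == NULL)
			abort();
		snprintf(d->name, sizeof(d->name), "%s", names[i]);
		d->hash = ex_hash(d->name);
		RB_INSERT(ex_dirtree, &head, d);
	}

	key.hash = ex_hash("group");
	d = RB_FIND(ex_dirtree, &head, &key);
	printf("found: %s\n", d != NULL ? d->name : "(none)");
	return (0);
}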