xref: /freebsd/sys/fs/tmpfs/tmpfs_subr.c (revision 2a60dec0919c20a03c2830a9466a86f9f44fbaf0)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c) 2005 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * Efficient memory file system supporting functions.
37  */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/fnv_hash.h>
45 #include <sys/lock.h>
46 #include <sys/limits.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/random.h>
52 #include <sys/refcount.h>
53 #include <sys/rwlock.h>
54 #include <sys/smr.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/user.h>
58 #include <sys/vnode.h>
59 #include <sys/vmmeter.h>
60 
61 #include <vm/vm.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm_object.h>
64 #include <vm/vm_page.h>
65 #include <vm/vm_pageout.h>
66 #include <vm/vm_pager.h>
67 #include <vm/vm_extern.h>
68 #include <vm/swap_pager.h>
69 
70 #include <fs/tmpfs/tmpfs.h>
71 #include <fs/tmpfs/tmpfs_fifoops.h>
72 #include <fs/tmpfs/tmpfs_vnops.h>
73 
74 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
75     "tmpfs file system");
76 
77 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
78 
79 MALLOC_DEFINE(M_TMPFSDIR, "tmpfs dir", "tmpfs dirent structure");
80 static uma_zone_t tmpfs_node_pool;
81 VFS_SMR_DECLARE;
82 
83 int tmpfs_pager_type = -1;
84 
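/*
 * Allocate the backing VM object for a tmpfs regular file.  The object
 * uses the dynamically registered tmpfs pager type and is swap backed;
 * the tmpfs vnode is associated with it later, when the vnode is
 * instantiated.
 */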
85 static vm_object_t
86 tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
87     vm_ooffset_t offset, struct ucred *cred)
88 {
89 	vm_object_t object;
90 
91 	MPASS(handle == NULL);
92 	MPASS(offset == 0);
93 	object = vm_object_allocate_dyn(tmpfs_pager_type, size,
94 	    OBJ_COLORED | OBJ_SWAP);
95 	if (!swap_pager_init_object(object, NULL, NULL, size, 0)) {
96 		vm_object_deallocate(object);
97 		object = NULL;
98 	}
99 	return (object);
100 }
101 
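/*
 * Resolve the vnode associated with a tmpfs-backed VM object.  If
 * requested by the caller, the vnode is also held so that it cannot be
 * recycled while the caller uses it.
 */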
102 static void
103 tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
104 {
105 	struct vnode *vp;
106 
107 	/*
108 	 * A tmpfs VREG node which was reclaimed keeps the tmpfs_pager_type
109 	 * object type, but not the OBJ_TMPFS flag.  In this case there is
110 	 * no v_writecount to adjust.

111 	 */
112 	if (vp_heldp != NULL)
113 		VM_OBJECT_RLOCK(object);
114 	else
115 		VM_OBJECT_ASSERT_LOCKED(object);
116 	if ((object->flags & OBJ_TMPFS) != 0) {
117 		vp = object->un_pager.swp.swp_tmpfs;
118 		if (vp != NULL) {
119 			*vpp = vp;
120 			if (vp_heldp != NULL) {
121 				vhold(vp);
122 				*vp_heldp = true;
123 			}
124 		}
125 	}
126 	if (vp_heldp != NULL)
127 		VM_OBJECT_RUNLOCK(object);
128 }
129 
130 struct pagerops tmpfs_pager_ops = {
131 	.pgo_kvme_type = KVME_TYPE_VNODE,
132 	.pgo_alloc = tmpfs_pager_alloc,
133 	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
134 	.pgo_mightbedirty = vm_object_mightbedirty_,
135 	.pgo_getvp = tmpfs_pager_getvp,
136 };
137 
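/*
 * UMA callbacks for the tmpfs node zone.  The constructor and
 * destructor run on every allocation and free, while init and fini run
 * only when items enter or leave the zone, so the node interlock and
 * the generation counter survive reuse of a node.
 */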
138 static int
139 tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
140 {
141 	struct tmpfs_node *node;
142 
143 	node = mem;
144 	node->tn_gen++;
145 	node->tn_size = 0;
146 	node->tn_status = 0;
147 	node->tn_accessed = false;
148 	node->tn_flags = 0;
149 	node->tn_links = 0;
150 	node->tn_vnode = NULL;
151 	node->tn_vpstate = 0;
152 	return (0);
153 }
154 
155 static void
156 tmpfs_node_dtor(void *mem, int size, void *arg)
157 {
158 	struct tmpfs_node *node;
159 
160 	node = mem;
161 	node->tn_type = VNON;
162 }
163 
164 static int
165 tmpfs_node_init(void *mem, int size, int flags)
166 {
167 	struct tmpfs_node *node;
168 
169 	node = mem;
170 	node->tn_id = 0;
171 	mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
172 	node->tn_gen = arc4random();
173 	return (0);
174 }
175 
176 static void
177 tmpfs_node_fini(void *mem, int size)
178 {
179 	struct tmpfs_node *node;
180 
181 	node = mem;
182 	mtx_destroy(&node->tn_interlock);
183 }
184 
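/*
 * Register the dynamic tmpfs pager type and create the SMR-enabled UMA
 * zone used for tmpfs nodes.
 */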
185 int
186 tmpfs_subr_init(void)
187 {
188 	tmpfs_pager_type = vm_pager_alloc_dyn_type(&tmpfs_pager_ops,
189 	    OBJT_SWAP);
190 	if (tmpfs_pager_type == -1)
191 		return (EINVAL);
192 	tmpfs_node_pool = uma_zcreate("TMPFS node",
193 	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
194 	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
195 	VFS_SMR_ZONE_SET(tmpfs_node_pool);
196 	return (0);
197 }
198 
199 void
200 tmpfs_subr_uninit(void)
201 {
202 	if (tmpfs_pager_type != -1)
203 		vm_pager_free_dyn_type(tmpfs_pager_type);
204 	tmpfs_pager_type = -1;
205 	uma_zdestroy(tmpfs_node_pool);
206 }
207 
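/*
 * Handler for the vfs.tmpfs.memory_reserved sysctl.  The reservation is
 * stored internally in pages but exposed to userland in bytes, e.g.
 * (hypothetical value):
 *
 *	sysctl vfs.tmpfs.memory_reserved=16777216
 */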
208 static int
209 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
210 {
211 	int error;
212 	long pages, bytes;
213 
214 	pages = *(long *)arg1;
215 	bytes = pages * PAGE_SIZE;
216 
217 	error = sysctl_handle_long(oidp, &bytes, 0, req);
218 	if (error || !req->newptr)
219 		return (error);
220 
221 	pages = bytes / PAGE_SIZE;
222 	if (pages < TMPFS_PAGES_MINRESERVED)
223 		return (EINVAL);
224 
225 	*(long *)arg1 = pages;
226 	return (0);
227 }
228 
229 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved,
230     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &tmpfs_pages_reserved, 0,
231     sysctl_mem_reserved, "L",
232     "Amount of available memory and swap below which tmpfs growth stops");
233 
234 static __inline int tmpfs_dirtree_cmp(struct tmpfs_dirent *a,
235     struct tmpfs_dirent *b);
236 RB_PROTOTYPE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
237 
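/*
 * Return an estimate of the number of pages of memory plus swap that
 * tmpfs may still consume, after subtracting the configured
 * reservation.
 */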
238 size_t
239 tmpfs_mem_avail(void)
240 {
241 	size_t avail;
242 	long reserved;
243 
244 	avail = swap_pager_avail + vm_free_count();
245 	reserved = atomic_load_long(&tmpfs_pages_reserved);
246 	if (__predict_false(avail < reserved))
247 		return (0);
248 	return (avail - reserved);
249 }
250 
251 size_t
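/*
 * Return the number of pages charged to the given mount, including an
 * estimate of the metadata overhead of its nodes and directory entries.
 */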
252 tmpfs_pages_used(struct tmpfs_mount *tmp)
253 {
254 	const size_t node_size = sizeof(struct tmpfs_node) +
255 	    sizeof(struct tmpfs_dirent);
256 	size_t meta_pages;
257 
258 	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
259 	    PAGE_SIZE);
260 	return (meta_pages + tmp->tm_pages_used);
261 }
262 
263 static size_t
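/*
 * Return non-zero if the mount may grow by 'req_pages' additional pages
 * without exceeding either the global reservation or the per-mount size
 * limit.
 */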
264 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
265 {
266 	if (tmpfs_mem_avail() < req_pages)
267 		return (0);
268 
269 	if (tmp->tm_pages_max != ULONG_MAX &&
270 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
271 			return (0);
272 
273 	return (1);
274 }
275 
276 void
277 tmpfs_ref_node(struct tmpfs_node *node)
278 {
279 #ifdef INVARIANTS
280 	u_int old;
281 
282 	old =
283 #endif
284 	refcount_acquire(&node->tn_refcount);
285 #ifdef INVARIANTS
286 	KASSERT(old > 0, ("node %p zero refcount", node));
287 #endif
288 }
289 
290 /*
291  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
292  * its owner set to 'uid', its group to 'gid' and its mode set to
293  * 'mode'.
294  *
295  * If the node type is set to 'VDIR', then the parent parameter must point
296  * to the parent directory of the node being created.  It may only be NULL
297  * while allocating the root node.
298  *
299  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
300  * specifies the device the node represents.
301  *
302  * If the node type is set to 'VLNK', then the parameter target specifies
303  * the file name of the target file for the symbolic link that is being
304  * created.
305  *
306  * Note that new nodes are allocated from the tmpfs node pool (a UMA
307  * zone) as long as there is enough space and the mount's node limit
308  * has not been reached.
309  *
310  * Returns zero on success or an appropriate error code on failure.
311  */
312 int
313 tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
314     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
315     const char *target, dev_t rdev, struct tmpfs_node **node)
316 {
317 	struct tmpfs_node *nnode;
318 	vm_object_t obj;
319 	char *symlink;
320 	char symlink_smr;
321 
322 	/* If the root directory of the 'tmp' file system is not yet
323 	 * allocated, this must be the request to do it. */
324 	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
325 
326 	MPASS(IFF(type == VLNK, target != NULL));
327 	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
328 
329 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
330 		return (ENOSPC);
331 	if (tmpfs_pages_check_avail(tmp, 1) == 0)
332 		return (ENOSPC);
333 
334 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
335 		/*
336 		 * When a new tmpfs node is created for a fully
337 		 * constructed mount point, there must be a parent
338 		 * node, whose vnode is locked exclusively.  As a
339 		 * consequence, if the unmount is executing in
340 		 * parallel, vflush() cannot reclaim the parent vnode.
341 		 * Due to this, the check for the MNTK_UNMOUNT flag is
342 		 * not racy: if we did not see MNTK_UNMOUNT set, then
343 		 * tmp cannot be destroyed until node construction is
344 		 * finished and the parent vnode unlocked.
345 		 *
346 		 * Tmpfs does not need to instantiate new nodes during
347 		 * unmount.
348 		 */
349 		return (EBUSY);
350 	}
351 	if ((mp->mnt_flag & MNT_RDONLY) != 0)
352 		return (EROFS);
353 
354 	nnode = uma_zalloc_smr(tmpfs_node_pool, M_WAITOK);
355 
356 	/* Generic initialization. */
357 	nnode->tn_type = type;
358 	vfs_timestamp(&nnode->tn_atime);
359 	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
360 	    nnode->tn_atime;
361 	nnode->tn_uid = uid;
362 	nnode->tn_gid = gid;
363 	nnode->tn_mode = mode;
364 	nnode->tn_id = alloc_unr64(&tmp->tm_ino_unr);
365 	nnode->tn_refcount = 1;
366 
367 	/* Type-specific initialization. */
368 	switch (nnode->tn_type) {
369 	case VBLK:
370 	case VCHR:
371 		nnode->tn_rdev = rdev;
372 		break;
373 
374 	case VDIR:
375 		RB_INIT(&nnode->tn_dir.tn_dirhead);
376 		LIST_INIT(&nnode->tn_dir.tn_dupindex);
377 		MPASS(parent != nnode);
378 		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
379 		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
380 		nnode->tn_dir.tn_readdir_lastn = 0;
381 		nnode->tn_dir.tn_readdir_lastp = NULL;
382 		nnode->tn_links++;
383 		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
384 		nnode->tn_dir.tn_parent->tn_links++;
385 		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
386 		break;
387 
388 	case VFIFO:
389 		/* FALLTHROUGH */
390 	case VSOCK:
391 		break;
392 
393 	case VLNK:
394 		MPASS(strlen(target) < MAXPATHLEN);
395 		nnode->tn_size = strlen(target);
396 
397 		symlink = NULL;
398 		if (!tmp->tm_nonc) {
399 			symlink = cache_symlink_alloc(nnode->tn_size + 1, M_WAITOK);
400 			symlink_smr = true;
401 		}
402 		if (symlink == NULL) {
403 			symlink = malloc(nnode->tn_size + 1, M_TMPFSNAME, M_WAITOK);
404 			symlink_smr = false;
405 		}
406 		memcpy(symlink, target, nnode->tn_size + 1);
407 
408 		/*
409 		 * Allow safe symlink resolving for lockless lookup.
410 		 * tmpfs_fplookup_symlink references this comment.
411 		 *
412 		 * 1. nnode is not yet visible to the world
413 		 * 2. both tn_link_target and tn_link_smr get populated
414 		 * 3. release fence publishes their content
415 		 * 4. tn_link_target content is immutable until node destruction,
416 		 *    where the pointer gets set to NULL
417 		 * 5. tn_link_smr is never changed once set
418 		 *
419 		 * As a result it is sufficient to issue load consume on the node
420 		 * pointer to also get the above content in a stable manner.
421 		 * Worst case tn_link_smr flag may be set to true despite being stale,
422 		 * while the target buffer is already cleared out.
423 		 */
424 		atomic_store_ptr(&nnode->tn_link_target, symlink);
425 		atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
426 		atomic_thread_fence_rel();
427 		break;
428 
429 	case VREG:
430 		obj = nnode->tn_reg.tn_aobj =
431 		    vm_pager_allocate(tmpfs_pager_type, NULL, 0,
432 			VM_PROT_DEFAULT, 0,
433 			NULL /* XXXKIB - tmpfs needs swap reservation */);
434 		/* OBJ_TMPFS is set together with the setting of vp->v_object */
435 		nnode->tn_reg.tn_tmp = tmp;
436 		break;
437 
438 	default:
439 		panic("tmpfs_alloc_node: type %p %d", nnode,
440 		    (int)nnode->tn_type);
441 	}
442 
443 	TMPFS_LOCK(tmp);
444 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
445 	nnode->tn_attached = true;
446 	tmp->tm_nodes_inuse++;
447 	tmp->tm_refcount++;
448 	TMPFS_UNLOCK(tmp);
449 
450 	*node = nnode;
451 	return (0);
452 }
453 
454 /*
455  * Destroys the node pointed to by node from the file system 'tmp'.
456  * If the node references a directory, no entries are allowed.
457  */
458 void
459 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
460 {
461 	if (refcount_release_if_not_last(&node->tn_refcount))
462 		return;
463 
464 	TMPFS_LOCK(tmp);
465 	TMPFS_NODE_LOCK(node);
466 	if (!tmpfs_free_node_locked(tmp, node, false)) {
467 		TMPFS_NODE_UNLOCK(node);
468 		TMPFS_UNLOCK(tmp);
469 	}
470 }
471 
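/*
 * Drop a reference on 'node' with the mount and node locks held.
 * Returns false if other references remain, leaving both locks held for
 * the caller to release.  Returns true if this was the last reference,
 * in which case the node is destroyed and both locks are dropped.
 */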
472 bool
473 tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
474     bool detach)
475 {
476 	vm_object_t uobj;
477 	char *symlink;
478 	bool last;
479 
480 	TMPFS_MP_ASSERT_LOCKED(tmp);
481 	TMPFS_NODE_ASSERT_LOCKED(node);
482 
483 	last = refcount_release(&node->tn_refcount);
484 	if (node->tn_attached && (detach || last)) {
485 		MPASS(tmp->tm_nodes_inuse > 0);
486 		tmp->tm_nodes_inuse--;
487 		LIST_REMOVE(node, tn_entries);
488 		node->tn_attached = false;
489 	}
490 	if (!last)
491 		return (false);
492 
493 #ifdef INVARIANTS
494 	MPASS(node->tn_vnode == NULL);
495 	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
496 #endif
497 	TMPFS_NODE_UNLOCK(node);
498 	TMPFS_UNLOCK(tmp);
499 
500 	switch (node->tn_type) {
501 	case VBLK:
502 		/* FALLTHROUGH */
503 	case VCHR:
504 		/* FALLTHROUGH */
505 	case VDIR:
506 		/* FALLTHROUGH */
507 	case VFIFO:
508 		/* FALLTHROUGH */
509 	case VSOCK:
510 		break;
511 
512 	case VLNK:
513 		symlink = node->tn_link_target;
514 		atomic_store_ptr(&node->tn_link_target, NULL);
515 		if (atomic_load_char(&node->tn_link_smr)) {
516 			cache_symlink_free(symlink, node->tn_size + 1);
517 		} else {
518 			free(symlink, M_TMPFSNAME);
519 		}
520 		break;
521 
522 	case VREG:
523 		uobj = node->tn_reg.tn_aobj;
524 		if (uobj != NULL) {
525 			if (uobj->size != 0)
526 				atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
527 			KASSERT((uobj->flags & OBJ_TMPFS) == 0,
528 			    ("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
529 			vm_object_deallocate(uobj);
530 		}
531 		break;
532 
533 	default:
534 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
535 	}
536 
537 	uma_zfree_smr(tmpfs_node_pool, node);
538 	TMPFS_LOCK(tmp);
539 	tmpfs_free_tmp(tmp);
540 	return (true);
541 }
542 
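/*
 * Directory entry cookies are derived from a hash of the entry name and
 * are clamped to be at least TMPFS_DIRCOOKIE_MIN, so that they never
 * collide with the reserved cookies used for '.', '..' and EOF.
 */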
543 static __inline uint32_t
544 tmpfs_dirent_hash(const char *name, u_int len)
545 {
546 	uint32_t hash;
547 
548 	hash = fnv_32_buf(name, len, FNV1_32_INIT + len) & TMPFS_DIRCOOKIE_MASK;
549 #ifdef TMPFS_DEBUG_DIRCOOKIE_DUP
550 	hash &= 0xf;
551 #endif
552 	if (hash < TMPFS_DIRCOOKIE_MIN)
553 		hash += TMPFS_DIRCOOKIE_MIN;
554 
555 	return (hash);
556 }
557 
558 static __inline off_t
559 tmpfs_dirent_cookie(struct tmpfs_dirent *de)
560 {
561 	if (de == NULL)
562 		return (TMPFS_DIRCOOKIE_EOF);
563 
564 	MPASS(de->td_cookie >= TMPFS_DIRCOOKIE_MIN);
565 
566 	return (de->td_cookie);
567 }
568 
569 static __inline boolean_t
570 tmpfs_dirent_dup(struct tmpfs_dirent *de)
571 {
572 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUP) != 0);
573 }
574 
575 static __inline boolean_t
576 tmpfs_dirent_duphead(struct tmpfs_dirent *de)
577 {
578 	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUPHEAD) != 0);
579 }
580 
581 void
582 tmpfs_dirent_init(struct tmpfs_dirent *de, const char *name, u_int namelen)
583 {
584 	de->td_hash = de->td_cookie = tmpfs_dirent_hash(name, namelen);
585 	memcpy(de->ud.td_name, name, namelen);
586 	de->td_namelen = namelen;
587 }
588 
589 /*
590  * Allocates a new directory entry for the node 'node' with the name 'name'.
591  * The new directory entry is returned in *de.
592  *
593  * The link count of node is increased by one to reflect the new object
594  * referencing it.
595  *
596  * Returns zero on success or an appropriate error code on failure.
597  */
598 int
599 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
600     const char *name, u_int len, struct tmpfs_dirent **de)
601 {
602 	struct tmpfs_dirent *nde;
603 
604 	nde = malloc(sizeof(*nde), M_TMPFSDIR, M_WAITOK);
605 	nde->td_node = node;
606 	if (name != NULL) {
607 		nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
608 		tmpfs_dirent_init(nde, name, len);
609 	} else
610 		nde->td_namelen = 0;
611 	if (node != NULL)
612 		node->tn_links++;
613 
614 	*de = nde;
615 
616 	return (0);
617 }
618 
619 /*
620  * Frees a directory entry.  It is the caller's responsibility to destroy
621  * the node referenced by it if needed.
622  *
623  * The link count of the node is decreased by one to reflect the removal
624  * of an object that referenced it.  This only happens if the directory
625  * entry still points to a node; otherwise the function does not access
626  * the node, as it may already have been released from the outside.
627  */
628 void
629 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
630 {
631 	struct tmpfs_node *node;
632 
633 	node = de->td_node;
634 	if (node != NULL) {
635 		MPASS(node->tn_links > 0);
636 		node->tn_links--;
637 	}
638 	if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
639 		free(de->ud.td_name, M_TMPFSNAME);
640 	free(de, M_TMPFSDIR);
641 }
642 
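/*
 * Break the association between a tmpfs vnode being reclaimed and its
 * VM object: clear OBJ_TMPFS, drop the object's vnode back pointer and
 * clamp a negative v_writecount back to zero.
 */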
643 void
644 tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
645 {
646 
647 	ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
648 	if (vp->v_type != VREG || obj == NULL)
649 		return;
650 
651 	VM_OBJECT_WLOCK(obj);
652 	VI_LOCK(vp);
653 	vm_object_clear_flag(obj, OBJ_TMPFS);
654 	obj->un_pager.swp.swp_tmpfs = NULL;
655 	if (vp->v_writecount < 0)
656 		vp->v_writecount = 0;
657 	VI_UNLOCK(vp);
658 	VM_OBJECT_WUNLOCK(obj);
659 }
660 
661 /*
662  * Need to clear v_object for insmntque failure.
663  */
664 static void
665 tmpfs_insmntque_dtr(struct vnode *vp, void *dtr_arg)
666 {
667 
668 	tmpfs_destroy_vobject(vp, vp->v_object);
669 	vp->v_object = NULL;
670 	vp->v_data = NULL;
671 	vp->v_op = &dead_vnodeops;
672 	vgone(vp);
673 	vput(vp);
674 }
675 
676 /*
677  * Allocates a new vnode for the node 'node' or returns a new reference to
678  * an existing one if the node already had a vnode referencing it.  The
679  * resulting locked vnode is returned in *vpp.
680  *
681  * Returns zero on success or an appropriate error code on failure.
682  */
683 int
684 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
685     struct vnode **vpp)
686 {
687 	struct vnode *vp;
688 	enum vgetstate vs;
689 	struct tmpfs_mount *tm;
690 	vm_object_t object;
691 	int error;
692 
693 	error = 0;
694 	tm = VFS_TO_TMPFS(mp);
695 	TMPFS_NODE_LOCK(node);
696 	tmpfs_ref_node(node);
697 loop:
698 	TMPFS_NODE_ASSERT_LOCKED(node);
699 	if ((vp = node->tn_vnode) != NULL) {
700 		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
701 		if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
702 		    (VN_IS_DOOMED(vp) &&
703 		     (lkflag & LK_NOWAIT) != 0)) {
704 			TMPFS_NODE_UNLOCK(node);
705 			error = ENOENT;
706 			vp = NULL;
707 			goto out;
708 		}
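		/*
		 * The vnode is being reclaimed; wait for tmpfs_free_vp()
		 * to disassociate it from the node, then retry.
		 */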
709 		if (VN_IS_DOOMED(vp)) {
710 			node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
711 			while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {
712 				msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
713 				    0, "tmpfsE", 0);
714 			}
715 			goto loop;
716 		}
717 		vs = vget_prep(vp);
718 		TMPFS_NODE_UNLOCK(node);
719 		error = vget_finish(vp, lkflag, vs);
720 		if (error == ENOENT) {
721 			TMPFS_NODE_LOCK(node);
722 			goto loop;
723 		}
724 		if (error != 0) {
725 			vp = NULL;
726 			goto out;
727 		}
728 
729 		/*
730 		 * Make sure the vnode is still there after
731 		 * getting the interlock to avoid racing a free.
732 		 */
733 		if (node->tn_vnode == NULL || node->tn_vnode != vp) {
734 			vput(vp);
735 			TMPFS_NODE_LOCK(node);
736 			goto loop;
737 		}
738 
739 		goto out;
740 	}
741 
742 	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
743 	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
744 		TMPFS_NODE_UNLOCK(node);
745 		error = ENOENT;
746 		vp = NULL;
747 		goto out;
748 	}
749 
750 	/*
751 	 * Otherwise serialize vnode allocation for this node, since
752 	 * getnewvnode() can block.
753 	 */
754 	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
755 		node->tn_vpstate |= TMPFS_VNODE_WANT;
756 		error = msleep((caddr_t) &node->tn_vpstate,
757 		    TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
758 		if (error != 0)
759 			goto out;
760 		goto loop;
761 	} else
762 		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
763 
764 	TMPFS_NODE_UNLOCK(node);
765 
766 	/* Get a new vnode and associate it with our node. */
767 	error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
768 	    &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
769 	if (error != 0)
770 		goto unlock;
771 	MPASS(vp != NULL);
772 
773 	/* lkflag is ignored, the lock is exclusive */
774 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
775 
776 	vp->v_data = node;
777 	vp->v_type = node->tn_type;
778 
779 	/* Type-specific initialization. */
780 	switch (node->tn_type) {
781 	case VBLK:
782 		/* FALLTHROUGH */
783 	case VCHR:
784 		/* FALLTHROUGH */
785 	case VLNK:
786 		/* FALLTHROUGH */
787 	case VSOCK:
788 		break;
789 	case VFIFO:
790 		vp->v_op = &tmpfs_fifoop_entries;
791 		break;
792 	case VREG:
793 		object = node->tn_reg.tn_aobj;
794 		VM_OBJECT_WLOCK(object);
795 		VI_LOCK(vp);
796 		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
797 		vp->v_object = object;
798 		object->un_pager.swp.swp_tmpfs = vp;
799 		vm_object_set_flag(object, OBJ_TMPFS);
800 		vn_irflag_set_locked(vp, VIRF_PGREAD);
801 		VI_UNLOCK(vp);
802 		VM_OBJECT_WUNLOCK(object);
803 		break;
804 	case VDIR:
805 		MPASS(node->tn_dir.tn_parent != NULL);
806 		if (node->tn_dir.tn_parent == node)
807 			vp->v_vflag |= VV_ROOT;
808 		break;
809 
810 	default:
811 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
812 	}
813 	if (vp->v_type != VFIFO)
814 		VN_LOCK_ASHARE(vp);
815 
816 	error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
817 	if (error != 0)
818 		vp = NULL;
819 
820 unlock:
821 	TMPFS_NODE_LOCK(node);
822 
823 	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
824 	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
825 	node->tn_vnode = vp;
826 
827 	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
828 		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
829 		TMPFS_NODE_UNLOCK(node);
830 		wakeup((caddr_t) &node->tn_vpstate);
831 	} else
832 		TMPFS_NODE_UNLOCK(node);
833 
834 out:
835 	if (error == 0) {
836 		*vpp = vp;
837 
838 #ifdef INVARIANTS
839 		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
840 		TMPFS_NODE_LOCK(node);
841 		MPASS(*vpp == node->tn_vnode);
842 		TMPFS_NODE_UNLOCK(node);
843 #endif
844 	}
845 	tmpfs_free_node(tm, node);
846 
847 	return (error);
848 }
849 
850 /*
851  * Destroys the association between the vnode vp and the node it
852  * references.
853  */
854 void
855 tmpfs_free_vp(struct vnode *vp)
856 {
857 	struct tmpfs_node *node;
858 
859 	node = VP_TO_TMPFS_NODE(vp);
860 
861 	TMPFS_NODE_ASSERT_LOCKED(node);
862 	node->tn_vnode = NULL;
863 	if ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0)
864 		wakeup(&node->tn_vnode);
865 	node->tn_vpstate &= ~TMPFS_VNODE_WRECLAIM;
866 	vp->v_data = NULL;
867 }
868 
869 /*
870  * Allocates a new file of type 'type' and adds it to the parent directory
871  * 'dvp'; this addition is done using the component name given in 'cnp'.
872  * The ownership of the new file is automatically assigned based on the
873  * credentials of the caller (through 'cnp'), the group is set based on
874  * the parent directory and the mode is determined from the 'vap' argument.
875  * If successful, *vpp holds a vnode to the newly created file and zero
876  * is returned.  Otherwise *vpp is NULL and the function returns an
877  * appropriate error code.
878  */
879 int
880 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
881     struct componentname *cnp, const char *target)
882 {
883 	int error;
884 	struct tmpfs_dirent *de;
885 	struct tmpfs_mount *tmp;
886 	struct tmpfs_node *dnode;
887 	struct tmpfs_node *node;
888 	struct tmpfs_node *parent;
889 
890 	ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
891 	MPASS(cnp->cn_flags & HASBUF);
892 
893 	tmp = VFS_TO_TMPFS(dvp->v_mount);
894 	dnode = VP_TO_TMPFS_DIR(dvp);
895 	*vpp = NULL;
896 
897 	/* If the entry we are creating is a directory, we must not overflow
898 	 * the link count of its parent, because the parent will gain a new
899 	 * link. */
900 	if (vap->va_type == VDIR) {
901 		/* Ensure that we do not overflow the maximum number of links
902 		 * imposed by the system. */
903 		MPASS(dnode->tn_links <= TMPFS_LINK_MAX);
904 		if (dnode->tn_links == TMPFS_LINK_MAX) {
905 			return (EMLINK);
906 		}
907 
908 		parent = dnode;
909 		MPASS(parent != NULL);
910 	} else
911 		parent = NULL;
912 
913 	/* Allocate a node that represents the new file. */
914 	error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
915 	    cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
916 	    target, vap->va_rdev, &node);
917 	if (error != 0)
918 		return (error);
919 
920 	/* Allocate a directory entry that points to the new file. */
921 	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
922 	    &de);
923 	if (error != 0) {
924 		tmpfs_free_node(tmp, node);
925 		return (error);
926 	}
927 
928 	/* Allocate a vnode for the new file. */
929 	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
930 	if (error != 0) {
931 		tmpfs_free_dirent(tmp, de);
932 		tmpfs_free_node(tmp, node);
933 		return (error);
934 	}
935 
936 	/* Now that all required items are allocated, we can proceed to
937 	 * insert the new node into the directory, an operation that
938 	 * cannot fail. */
939 	if (cnp->cn_flags & ISWHITEOUT)
940 		tmpfs_dir_whiteout_remove(dvp, cnp);
941 	tmpfs_dir_attach(dvp, de);
942 	return (0);
943 }
944 
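/*
 * Directory traversal cursor helpers: tmpfs_dir_first() positions the
 * cursor at the first entry of the directory and tmpfs_dir_next()
 * advances it, descending into duplicate-cookie lists whenever a
 * duphead entry is encountered.
 */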
945 struct tmpfs_dirent *
946 tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
947 {
948 	struct tmpfs_dirent *de;
949 
950 	de = RB_MIN(tmpfs_dir, &dnode->tn_dir.tn_dirhead);
951 	dc->tdc_tree = de;
952 	if (de != NULL && tmpfs_dirent_duphead(de))
953 		de = LIST_FIRST(&de->ud.td_duphead);
954 	dc->tdc_current = de;
955 
956 	return (dc->tdc_current);
957 }
958 
959 struct tmpfs_dirent *
960 tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
961 {
962 	struct tmpfs_dirent *de;
963 
964 	MPASS(dc->tdc_tree != NULL);
965 	if (tmpfs_dirent_dup(dc->tdc_current)) {
966 		dc->tdc_current = LIST_NEXT(dc->tdc_current, uh.td_dup.entries);
967 		if (dc->tdc_current != NULL)
968 			return (dc->tdc_current);
969 	}
970 	dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
971 	    &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
972 	if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
973 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
974 		MPASS(dc->tdc_current != NULL);
975 	}
976 
977 	return (dc->tdc_current);
978 }
979 
980 /* Look up a directory entry in the RB tree.  May return a duphead entry. */
981 static struct tmpfs_dirent *
982 tmpfs_dir_xlookup_hash(struct tmpfs_node *dnode, uint32_t hash)
983 {
984 	struct tmpfs_dirent *de, dekey;
985 
986 	dekey.td_hash = hash;
987 	de = RB_FIND(tmpfs_dir, &dnode->tn_dir.tn_dirhead, &dekey);
988 	return (de);
989 }
990 
991 /* Lookup directory entry by cookie, initialize directory cursor accordingly. */
992 static struct tmpfs_dirent *
993 tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
994     struct tmpfs_dir_cursor *dc)
995 {
996 	struct tmpfs_dir *dirhead = &node->tn_dir.tn_dirhead;
997 	struct tmpfs_dirent *de, dekey;
998 
999 	MPASS(cookie >= TMPFS_DIRCOOKIE_MIN);
1000 
1001 	if (cookie == node->tn_dir.tn_readdir_lastn &&
1002 	    (de = node->tn_dir.tn_readdir_lastp) != NULL) {
1003 		/* Protect against a possible race: tn_readdir_last[pn]
1004 		 * may be updated with only the shared vnode lock held. */
1005 		if (cookie == tmpfs_dirent_cookie(de))
1006 			goto out;
1007 	}
1008 
1009 	if ((cookie & TMPFS_DIRCOOKIE_DUP) != 0) {
1010 		LIST_FOREACH(de, &node->tn_dir.tn_dupindex,
1011 		    uh.td_dup.index_entries) {
1012 			MPASS(tmpfs_dirent_dup(de));
1013 			if (de->td_cookie == cookie)
1014 				goto out;
1015 			/* dupindex list is sorted. */
1016 			if (de->td_cookie < cookie) {
1017 				de = NULL;
1018 				goto out;
1019 			}
1020 		}
1021 		MPASS(de == NULL);
1022 		goto out;
1023 	}
1024 
1025 	if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
1026 		de = NULL;
1027 	} else {
1028 		dekey.td_hash = cookie;
1029 		/* Recover if direntry for cookie was removed */
1030 		de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
1031 	}
1032 	dc->tdc_tree = de;
1033 	dc->tdc_current = de;
1034 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1035 		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
1036 		MPASS(dc->tdc_current != NULL);
1037 	}
1038 	return (dc->tdc_current);
1039 
1040 out:
1041 	dc->tdc_tree = de;
1042 	dc->tdc_current = de;
1043 	if (de != NULL && tmpfs_dirent_dup(de))
1044 		dc->tdc_tree = tmpfs_dir_xlookup_hash(node,
1045 		    de->td_hash);
1046 	return (dc->tdc_current);
1047 }
1048 
1049 /*
1050  * Looks for a directory entry in the directory represented by node.
1051  * 'cnp' describes the name of the entry to look for.  Note that the .
1052  * and .. components are not allowed as they do not physically exist
1053  * within directories.
1054  *
1055  * Returns a pointer to the entry when found, otherwise NULL.
1056  */
1057 struct tmpfs_dirent *
1058 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
1059     struct componentname *cnp)
1060 {
1061 	struct tmpfs_dir_duphead *duphead;
1062 	struct tmpfs_dirent *de;
1063 	uint32_t hash;
1064 
1065 	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
1066 	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
1067 	    cnp->cn_nameptr[1] == '.')));
1068 	TMPFS_VALIDATE_DIR(node);
1069 
1070 	hash = tmpfs_dirent_hash(cnp->cn_nameptr, cnp->cn_namelen);
1071 	de = tmpfs_dir_xlookup_hash(node, hash);
1072 	if (de != NULL && tmpfs_dirent_duphead(de)) {
1073 		duphead = &de->ud.td_duphead;
1074 		LIST_FOREACH(de, duphead, uh.td_dup.entries) {
1075 			if (TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1076 			    cnp->cn_namelen))
1077 				break;
1078 		}
1079 	} else if (de != NULL) {
1080 		if (!TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
1081 		    cnp->cn_namelen))
1082 			de = NULL;
1083 	}
1084 	if (de != NULL && f != NULL && de->td_node != f)
1085 		de = NULL;
1086 
1087 	return (de);
1088 }
1089 
1090 /*
1091  * Attach the duplicate-cookie directory entry nde to dnode and insert it
1092  * into the dupindex list, allocating a new cookie value.
1093  */
1094 static void
1095 tmpfs_dir_attach_dup(struct tmpfs_node *dnode,
1096     struct tmpfs_dir_duphead *duphead, struct tmpfs_dirent *nde)
1097 {
1098 	struct tmpfs_dir_duphead *dupindex;
1099 	struct tmpfs_dirent *de, *pde;
1100 
1101 	dupindex = &dnode->tn_dir.tn_dupindex;
1102 	de = LIST_FIRST(dupindex);
1103 	if (de == NULL || de->td_cookie < TMPFS_DIRCOOKIE_DUP_MAX) {
1104 		if (de == NULL)
1105 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1106 		else
1107 			nde->td_cookie = de->td_cookie + 1;
1108 		MPASS(tmpfs_dirent_dup(nde));
1109 		LIST_INSERT_HEAD(dupindex, nde, uh.td_dup.index_entries);
1110 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1111 		return;
1112 	}
1113 
1114 	/*
1115 	 * Cookie numbers are near exhaustion.  Scan the dupindex list for an
1116 	 * unused number.  The dupindex list is sorted in descending order;
1117 	 * keep it that way after inserting nde.
1118 	 */
1119 	while (1) {
1120 		pde = de;
1121 		de = LIST_NEXT(de, uh.td_dup.index_entries);
1122 		if (de == NULL && pde->td_cookie != TMPFS_DIRCOOKIE_DUP_MIN) {
1123 			/*
1124 			 * The last element of the index does not have the
1125 			 * minimal cookie value; use that value.
1126 			 */
1127 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
1128 			LIST_INSERT_AFTER(pde, nde, uh.td_dup.index_entries);
1129 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1130 			return;
1131 		} else if (de == NULL) {
1132 			/*
1133 			 * We are so lucky to have 2^30 hash duplicates in a
1134 			 * single directory :) Return the largest possible
1135 			 * cookie value.  It should be fine except for possible
1136 			 * issues with VOP_READDIR restarts.
1137 			 */
1138 			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MAX;
1139 			LIST_INSERT_HEAD(dupindex, nde,
1140 			    uh.td_dup.index_entries);
1141 			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1142 			return;
1143 		}
1144 		if (de->td_cookie + 1 == pde->td_cookie ||
1145 		    de->td_cookie >= TMPFS_DIRCOOKIE_DUP_MAX)
1146 			continue;	/* No hole or invalid cookie. */
1147 		nde->td_cookie = de->td_cookie + 1;
1148 		MPASS(tmpfs_dirent_dup(nde));
1149 		MPASS(pde->td_cookie > nde->td_cookie);
1150 		MPASS(nde->td_cookie > de->td_cookie);
1151 		LIST_INSERT_BEFORE(de, nde, uh.td_dup.index_entries);
1152 		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
1153 		return;
1154 	}
1155 }
1156 
1157 /*
1158  * Attaches the directory entry de to the directory represented by vp.
1159  * Note that this does not change the link count of the node pointed to by
1160  * the directory entry, as this is done by tmpfs_alloc_dirent.
1161  */
1162 void
1163 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
1164 {
1165 	struct tmpfs_node *dnode;
1166 	struct tmpfs_dirent *xde, *nde;
1167 
1168 	ASSERT_VOP_ELOCKED(vp, __func__);
1169 	MPASS(de->td_namelen > 0);
1170 	MPASS(de->td_hash >= TMPFS_DIRCOOKIE_MIN);
1171 	MPASS(de->td_cookie == de->td_hash);
1172 
1173 	dnode = VP_TO_TMPFS_DIR(vp);
1174 	dnode->tn_dir.tn_readdir_lastn = 0;
1175 	dnode->tn_dir.tn_readdir_lastp = NULL;
1176 
1177 	MPASS(!tmpfs_dirent_dup(de));
1178 	xde = RB_INSERT(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1179 	if (xde != NULL && tmpfs_dirent_duphead(xde))
1180 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1181 	else if (xde != NULL) {
1182 		/*
1183 		 * Allocate a new duphead.  Swap xde with the duphead to avoid
1184 		 * adding/removing elements with the same hash.
1185 		 */
1186 		MPASS(!tmpfs_dirent_dup(xde));
1187 		tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), NULL, NULL, 0,
1188 		    &nde);
1189 		/* *nde = *xde; XXX gcc 4.2.1 may generate invalid code. */
1190 		memcpy(nde, xde, sizeof(*xde));
1191 		xde->td_cookie |= TMPFS_DIRCOOKIE_DUPHEAD;
1192 		LIST_INIT(&xde->ud.td_duphead);
1193 		xde->td_namelen = 0;
1194 		xde->td_node = NULL;
1195 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, nde);
1196 		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
1197 	}
1198 	dnode->tn_size += sizeof(struct tmpfs_dirent);
1199 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1200 	dnode->tn_accessed = true;
1201 	tmpfs_update(vp);
1202 }
1203 
1204 /*
1205  * Detaches the directory entry de from the directory represented by vp.
1206  * Note that this does not change the link count of the node pointed to by
1207  * the directory entry, as this is done by tmpfs_free_dirent.
1208  */
1209 void
1210 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
1211 {
1212 	struct tmpfs_mount *tmp;
1213 	struct tmpfs_dir *head;
1214 	struct tmpfs_node *dnode;
1215 	struct tmpfs_dirent *xde;
1216 
1217 	ASSERT_VOP_ELOCKED(vp, __func__);
1218 
1219 	dnode = VP_TO_TMPFS_DIR(vp);
1220 	head = &dnode->tn_dir.tn_dirhead;
1221 	dnode->tn_dir.tn_readdir_lastn = 0;
1222 	dnode->tn_dir.tn_readdir_lastp = NULL;
1223 
1224 	if (tmpfs_dirent_dup(de)) {
1225 		/* Remove duphead if de was last entry. */
1226 		if (LIST_NEXT(de, uh.td_dup.entries) == NULL) {
1227 			xde = tmpfs_dir_xlookup_hash(dnode, de->td_hash);
1228 			MPASS(tmpfs_dirent_duphead(xde));
1229 		} else
1230 			xde = NULL;
1231 		LIST_REMOVE(de, uh.td_dup.entries);
1232 		LIST_REMOVE(de, uh.td_dup.index_entries);
1233 		if (xde != NULL) {
1234 			if (LIST_EMPTY(&xde->ud.td_duphead)) {
1235 				RB_REMOVE(tmpfs_dir, head, xde);
1236 				tmp = VFS_TO_TMPFS(vp->v_mount);
1237 				MPASS(xde->td_node == NULL);
1238 				tmpfs_free_dirent(tmp, xde);
1239 			}
1240 		}
1241 		de->td_cookie = de->td_hash;
1242 	} else
1243 		RB_REMOVE(tmpfs_dir, head, de);
1244 
1245 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
1246 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1247 	dnode->tn_accessed = true;
1248 	tmpfs_update(vp);
1249 }
1250 
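/*
 * Remove and free all directory entries of the given directory node.
 * The entries' td_node pointers are cleared first, since the referenced
 * nodes may already have been destroyed.
 */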
1251 void
1252 tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpfs_node *dnode)
1253 {
1254 	struct tmpfs_dirent *de, *dde, *nde;
1255 
1256 	RB_FOREACH_SAFE(de, tmpfs_dir, &dnode->tn_dir.tn_dirhead, nde) {
1257 		RB_REMOVE(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
1258 		/* Node may already be destroyed. */
1259 		de->td_node = NULL;
1260 		if (tmpfs_dirent_duphead(de)) {
1261 			while ((dde = LIST_FIRST(&de->ud.td_duphead)) != NULL) {
1262 				LIST_REMOVE(dde, uh.td_dup.entries);
1263 				dde->td_node = NULL;
1264 				tmpfs_free_dirent(tmp, dde);
1265 			}
1266 		}
1267 		tmpfs_free_dirent(tmp, de);
1268 	}
1269 }
1270 
1271 /*
1272  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
1273  * directory and returns it in the uio space.  The function returns 0
1274  * on success, EJUSTRETURN if there was not enough space in the uio
1275  * structure to hold the directory entry, or an appropriate error code
1276  * if another error happens.
1277  */
1278 static int
1279 tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1280     struct uio *uio)
1281 {
1282 	int error;
1283 	struct dirent dent;
1284 
1285 	TMPFS_VALIDATE_DIR(node);
1286 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
1287 
1288 	dent.d_fileno = node->tn_id;
1289 	dent.d_off = TMPFS_DIRCOOKIE_DOTDOT;
1290 	dent.d_type = DT_DIR;
1291 	dent.d_namlen = 1;
1292 	dent.d_name[0] = '.';
1293 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1294 	dirent_terminate(&dent);
1295 
1296 	if (dent.d_reclen > uio->uio_resid)
1297 		error = EJUSTRETURN;
1298 	else
1299 		error = uiomove(&dent, dent.d_reclen, uio);
1300 
1301 	tmpfs_set_accessed(tm, node);
1302 
1303 	return (error);
1304 }
1305 
1306 /*
1307  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
1308  * directory and returns it in the uio space.  The function returns 0
1309  * on success, EJUSTRETURN if there was not enough space in the uio
1310  * structure to hold the directory entry, or an appropriate error code
1311  * if another error happens.
1312  */
1313 static int
1314 tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
1315     struct uio *uio, off_t next)
1316 {
1317 	struct tmpfs_node *parent;
1318 	struct dirent dent;
1319 	int error;
1320 
1321 	TMPFS_VALIDATE_DIR(node);
1322 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
1323 
1324 	/*
1325 	 * Return ENOENT if the current node is already removed.
1326 	 */
1327 	TMPFS_ASSERT_LOCKED(node);
1328 	parent = node->tn_dir.tn_parent;
1329 	if (parent == NULL)
1330 		return (ENOENT);
1331 
1332 	TMPFS_NODE_LOCK(parent);
1333 	dent.d_fileno = parent->tn_id;
1334 	TMPFS_NODE_UNLOCK(parent);
1335 
1336 	dent.d_off = next;
1337 	dent.d_type = DT_DIR;
1338 	dent.d_namlen = 2;
1339 	dent.d_name[0] = '.';
1340 	dent.d_name[1] = '.';
1341 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
1342 	dirent_terminate(&dent);
1343 
1344 	if (dent.d_reclen > uio->uio_resid)
1345 		error = EJUSTRETURN;
1346 	else
1347 		error = uiomove(&dent, dent.d_reclen, uio);
1348 
1349 	tmpfs_set_accessed(tm, node);
1350 
1351 	return (error);
1352 }
1353 
1354 /*
1355  * Helper function for tmpfs_readdir.  Returns as many directory entries
1356  * as can fit in the uio space.  The read starts at uio->uio_offset.
1357  * The function returns 0 on success, EJUSTRETURN if there was not enough
1358  * space in the uio structure to hold the directory entries, or an
1359  * appropriate error code if another error happens.
1360  */
1361 int
1362 tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
1363     struct uio *uio, int maxcookies, u_long *cookies, int *ncookies)
1364 {
1365 	struct tmpfs_dir_cursor dc;
1366 	struct tmpfs_dirent *de, *nde;
1367 	off_t off;
1368 	int error;
1369 
1370 	TMPFS_VALIDATE_DIR(node);
1371 
1372 	off = 0;
1373 
1374 	/*
1375 	 * Lookup the node from the current offset.  The starting offset of
1376 	 * 0 will lookup both '.' and '..', and then the first real entry,
1377 	 * or EOF if there are none.  Then find all entries for the dir that
1378 	 * fit into the buffer.  Once no more entries are found (de == NULL),
1379 	 * the offset is set to TMPFS_DIRCOOKIE_EOF, which will cause the next
1380 	 * call to return 0.
1381 	 */
1382 	switch (uio->uio_offset) {
1383 	case TMPFS_DIRCOOKIE_DOT:
1384 		error = tmpfs_dir_getdotdent(tm, node, uio);
1385 		if (error != 0)
1386 			return (error);
1387 		uio->uio_offset = off = TMPFS_DIRCOOKIE_DOTDOT;
1388 		if (cookies != NULL)
1389 			cookies[(*ncookies)++] = off;
1390 		/* FALLTHROUGH */
1391 	case TMPFS_DIRCOOKIE_DOTDOT:
1392 		de = tmpfs_dir_first(node, &dc);
1393 		off = tmpfs_dirent_cookie(de);
1394 		error = tmpfs_dir_getdotdotdent(tm, node, uio, off);
1395 		if (error != 0)
1396 			return (error);
1397 		uio->uio_offset = off;
1398 		if (cookies != NULL)
1399 			cookies[(*ncookies)++] = off;
1400 		/* EOF. */
1401 		if (de == NULL)
1402 			return (0);
1403 		break;
1404 	case TMPFS_DIRCOOKIE_EOF:
1405 		return (0);
1406 	default:
1407 		de = tmpfs_dir_lookup_cookie(node, uio->uio_offset, &dc);
1408 		if (de == NULL)
1409 			return (EINVAL);
1410 		if (cookies != NULL)
1411 			off = tmpfs_dirent_cookie(de);
1412 	}
1413 
1414 	/*
1415 	 * Read as many entries as possible; i.e., until we reach the end of the
1416 	 * directory or we exhaust uio space.
1417 	 */
1418 	do {
1419 		struct dirent d;
1420 
1421 		/*
1422 		 * Create a dirent structure representing the current tmpfs_node
1423 		 * and fill it.
1424 		 */
1425 		if (de->td_node == NULL) {
1426 			d.d_fileno = 1;
1427 			d.d_type = DT_WHT;
1428 		} else {
1429 			d.d_fileno = de->td_node->tn_id;
1430 			switch (de->td_node->tn_type) {
1431 			case VBLK:
1432 				d.d_type = DT_BLK;
1433 				break;
1434 
1435 			case VCHR:
1436 				d.d_type = DT_CHR;
1437 				break;
1438 
1439 			case VDIR:
1440 				d.d_type = DT_DIR;
1441 				break;
1442 
1443 			case VFIFO:
1444 				d.d_type = DT_FIFO;
1445 				break;
1446 
1447 			case VLNK:
1448 				d.d_type = DT_LNK;
1449 				break;
1450 
1451 			case VREG:
1452 				d.d_type = DT_REG;
1453 				break;
1454 
1455 			case VSOCK:
1456 				d.d_type = DT_SOCK;
1457 				break;
1458 
1459 			default:
1460 				panic("tmpfs_dir_getdents: type %p %d",
1461 				    de->td_node, (int)de->td_node->tn_type);
1462 			}
1463 		}
1464 		d.d_namlen = de->td_namelen;
1465 		MPASS(de->td_namelen < sizeof(d.d_name));
1466 		(void)memcpy(d.d_name, de->ud.td_name, de->td_namelen);
1467 		d.d_reclen = GENERIC_DIRSIZ(&d);
1468 
1469 		/*
1470 		 * Stop reading if the directory entry we are processing is bigger
1471 		 * than the amount of data that can be returned.
1472 		 */
1473 		if (d.d_reclen > uio->uio_resid) {
1474 			error = EJUSTRETURN;
1475 			break;
1476 		}
1477 
1478 		nde = tmpfs_dir_next(node, &dc);
1479 		d.d_off = tmpfs_dirent_cookie(nde);
1480 		dirent_terminate(&d);
1481 
1482 		/*
1483 		 * Copy the new dirent structure into the output buffer and
1484 		 * advance pointers.
1485 		 */
1486 		error = uiomove(&d, d.d_reclen, uio);
1487 		if (error == 0) {
1488 			de = nde;
1489 			if (cookies != NULL) {
1490 				off = tmpfs_dirent_cookie(de);
1491 				MPASS(*ncookies < maxcookies);
1492 				cookies[(*ncookies)++] = off;
1493 			}
1494 		}
1495 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
1496 
1497 	/* Skip setting off when using cookies as it is already done above. */
1498 	if (cookies == NULL)
1499 		off = tmpfs_dirent_cookie(de);
1500 
1501 	/* Update the offset and cache. */
1502 	uio->uio_offset = off;
1503 	node->tn_dir.tn_readdir_lastn = off;
1504 	node->tn_dir.tn_readdir_lastp = de;
1505 
1506 	tmpfs_set_accessed(tm, node);
1507 	return (error);
1508 }
1509 
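/*
 * Whiteout entries are directory entries whose td_node is NULL.  They
 * record the name of a removed entry, which is mainly of use to union
 * mounts layered above tmpfs.
 */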
1510 int
1511 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
1512 {
1513 	struct tmpfs_dirent *de;
1514 	int error;
1515 
1516 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
1517 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
1518 	if (error != 0)
1519 		return (error);
1520 	tmpfs_dir_attach(dvp, de);
1521 	return (0);
1522 }
1523 
1524 void
1525 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
1526 {
1527 	struct tmpfs_dirent *de;
1528 
1529 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1530 	MPASS(de != NULL && de->td_node == NULL);
1531 	tmpfs_dir_detach(dvp, de);
1532 	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de);
1533 }
1534 
1535 /*
1536  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
1537  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
1538  * 'newsize' must not be negative.
1539  *
1540  * Returns zero on success or an appropriate error code on failure.
1541  */
1542 int
1543 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
1544 {
1545 	struct tmpfs_mount *tmp;
1546 	struct tmpfs_node *node;
1547 	vm_object_t uobj;
1548 	vm_page_t m;
1549 	vm_pindex_t idx, newpages, oldpages;
1550 	off_t oldsize;
1551 	int base, rv;
1552 
1553 	MPASS(vp->v_type == VREG);
1554 	MPASS(newsize >= 0);
1555 
1556 	node = VP_TO_TMPFS_NODE(vp);
1557 	uobj = node->tn_reg.tn_aobj;
1558 	tmp = VFS_TO_TMPFS(vp->v_mount);
1559 
1560 	/*
1561 	 * Convert the old and new sizes to the number of pages needed to
1562 	 * store them.  It may happen that we do not need to do anything
1563 	 * because the last allocated page can accommodate the change on
1564 	 * its own.
1565 	 */
1566 	oldsize = node->tn_size;
1567 	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
1568 	MPASS(oldpages == uobj->size);
1569 	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
1570 
1571 	if (__predict_true(newpages == oldpages && newsize >= oldsize)) {
1572 		node->tn_size = newsize;
1573 		return (0);
1574 	}
1575 
1576 	if (newpages > oldpages &&
1577 	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
1578 		return (ENOSPC);
1579 
1580 	VM_OBJECT_WLOCK(uobj);
1581 	if (newsize < oldsize) {
1582 		/*
1583 		 * Zero the truncated part of the last page.
1584 		 */
1585 		base = newsize & PAGE_MASK;
1586 		if (base != 0) {
1587 			idx = OFF_TO_IDX(newsize);
1588 retry:
1589 			m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
1590 			if (m != NULL) {
1591 				MPASS(vm_page_all_valid(m));
1592 			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
1593 				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
1594 				    VM_ALLOC_WAITFAIL);
1595 				if (m == NULL)
1596 					goto retry;
1597 				vm_object_pip_add(uobj, 1);
1598 				VM_OBJECT_WUNLOCK(uobj);
1599 				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
1600 				    NULL);
1601 				VM_OBJECT_WLOCK(uobj);
1602 				vm_object_pip_wakeup(uobj);
1603 				if (rv == VM_PAGER_OK) {
1604 					/*
1605 					 * Since the page was not resident,
1606 					 * and therefore not recently
1607 					 * accessed, immediately enqueue it
1608 					 * for asynchronous laundering.  The
1609 					 * current operation is not regarded
1610 					 * as an access.
1611 					 */
1612 					vm_page_launder(m);
1613 				} else {
1614 					vm_page_free(m);
1615 					if (ignerr)
1616 						m = NULL;
1617 					else {
1618 						VM_OBJECT_WUNLOCK(uobj);
1619 						return (EIO);
1620 					}
1621 				}
1622 			}
1623 			if (m != NULL) {
1624 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
1625 				vm_page_set_dirty(m);
1626 				vm_page_xunbusy(m);
1627 			}
1628 		}
1629 
1630 		/*
1631 		 * Release any swap space and free any whole pages.
1632 		 */
1633 		if (newpages < oldpages)
1634 			vm_object_page_remove(uobj, newpages, 0, 0);
1635 	}
1636 	uobj->size = newpages;
1637 	VM_OBJECT_WUNLOCK(uobj);
1638 
1639 	atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);
1640 
1641 	node->tn_size = newsize;
1642 	return (0);
1643 }
1644 
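/*
 * If the file was written through a mapping, the VM object generation
 * count differs from the recorded clean generation; mark the node
 * modified and changed so that the timestamps are updated.
 */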
1645 void
1646 tmpfs_check_mtime(struct vnode *vp)
1647 {
1648 	struct tmpfs_node *node;
1649 	struct vm_object *obj;
1650 
1651 	ASSERT_VOP_ELOCKED(vp, "check_mtime");
1652 	if (vp->v_type != VREG)
1653 		return;
1654 	obj = vp->v_object;
1655 	KASSERT(obj->type == tmpfs_pager_type &&
1656 	    (obj->flags & (OBJ_SWAP | OBJ_TMPFS)) ==
1657 	    (OBJ_SWAP | OBJ_TMPFS), ("non-tmpfs obj"));
1658 	/* unlocked read */
1659 	if (obj->generation != obj->cleangeneration) {
1660 		VM_OBJECT_WLOCK(obj);
1661 		if (obj->generation != obj->cleangeneration) {
1662 			obj->cleangeneration = obj->generation;
1663 			node = VP_TO_TMPFS_NODE(vp);
1664 			node->tn_status |= TMPFS_NODE_MODIFIED |
1665 			    TMPFS_NODE_CHANGED;
1666 		}
1667 		VM_OBJECT_WUNLOCK(obj);
1668 	}
1669 }
1670 
1671 /*
1672  * Change flags of the given vnode.
1673  * Caller should execute tmpfs_update on vp after a successful execution.
1674  * The vnode must be locked on entry and remain locked on exit.
1675  */
1676 int
1677 tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
1678     struct thread *p)
1679 {
1680 	int error;
1681 	struct tmpfs_node *node;
1682 
1683 	ASSERT_VOP_ELOCKED(vp, "chflags");
1684 
1685 	node = VP_TO_TMPFS_NODE(vp);
1686 
1687 	if ((flags & ~(SF_APPEND | SF_ARCHIVED | SF_IMMUTABLE | SF_NOUNLINK |
1688 	    UF_APPEND | UF_ARCHIVE | UF_HIDDEN | UF_IMMUTABLE | UF_NODUMP |
1689 	    UF_NOUNLINK | UF_OFFLINE | UF_OPAQUE | UF_READONLY | UF_REPARSE |
1690 	    UF_SPARSE | UF_SYSTEM)) != 0)
1691 		return (EOPNOTSUPP);
1692 
1693 	/* Disallow this operation if the file system is mounted read-only. */
1694 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1695 		return (EROFS);
1696 
1697 	/*
1698 	 * Callers may only modify the file flags on objects they
1699 	 * have VADMIN rights for.
1700 	 */
1701 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1702 		return (error);
1703 	/*
1704 	 * Unprivileged processes are not permitted to unset system
1705 	 * flags, or modify flags if any system flags are set.
1706 	 */
1707 	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS)) {
1708 		if (node->tn_flags &
1709 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
1710 			error = securelevel_gt(cred, 0);
1711 			if (error)
1712 				return (error);
1713 		}
1714 	} else {
1715 		if (node->tn_flags &
1716 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
1717 		    ((flags ^ node->tn_flags) & SF_SETTABLE))
1718 			return (EPERM);
1719 	}
1720 	node->tn_flags = flags;
1721 	node->tn_status |= TMPFS_NODE_CHANGED;
1722 
1723 	ASSERT_VOP_ELOCKED(vp, "chflags2");
1724 
1725 	return (0);
1726 }
1727 
1728 /*
1729  * Change access mode on the given vnode.
1730  * Caller should execute tmpfs_update on vp after a successful execution.
1731  * The vnode must be locked on entry and remain locked on exit.
1732  */
1733 int
1734 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
1735 {
1736 	int error;
1737 	struct tmpfs_node *node;
1738 	mode_t newmode;
1739 
1740 	ASSERT_VOP_ELOCKED(vp, "chmod");
1741 	ASSERT_VOP_IN_SEQC(vp);
1742 
1743 	node = VP_TO_TMPFS_NODE(vp);
1744 
1745 	/* Disallow this operation if the file system is mounted read-only. */
1746 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1747 		return (EROFS);
1748 
1749 	/* Immutable or append-only files cannot be modified, either. */
1750 	if (node->tn_flags & (IMMUTABLE | APPEND))
1751 		return (EPERM);
1752 
1753 	/*
1754 	 * To modify the permissions on a file, must possess VADMIN
1755 	 * for that file.
1756 	 */
1757 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1758 		return (error);
1759 
1760 	/*
1761 	 * Privileged processes may set the sticky bit on non-directories,
1762 	 * as well as set the setgid bit on a file with a group that the
1763 	 * process is not a member of.
1764 	 */
1765 	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
1766 		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE))
1767 			return (EFTYPE);
1768 	}
1769 	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
1770 		error = priv_check_cred(cred, PRIV_VFS_SETGID);
1771 		if (error)
1772 			return (error);
1773 	}
1774 
1775 	newmode = node->tn_mode & ~ALLPERMS;
1776 	newmode |= mode & ALLPERMS;
1777 	atomic_store_short(&node->tn_mode, newmode);
1778 
1779 	node->tn_status |= TMPFS_NODE_CHANGED;
1780 
1781 	ASSERT_VOP_ELOCKED(vp, "chmod2");
1782 
1783 	return (0);
1784 }
1785 
1786 /*
1787  * Change ownership of the given vnode.  At least one of uid or gid must
1788  * be different from VNOVAL.  If one is set to that value, the attribute
1789  * is unchanged.
1790  * Caller should execute tmpfs_update on vp after a successful execution.
1791  * The vnode must be locked on entry and remain locked on exit.
1792  */
1793 int
1794 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
1795     struct thread *p)
1796 {
1797 	int error;
1798 	struct tmpfs_node *node;
1799 	uid_t ouid;
1800 	gid_t ogid;
1801 	mode_t newmode;
1802 
1803 	ASSERT_VOP_ELOCKED(vp, "chown");
1804 	ASSERT_VOP_IN_SEQC(vp);
1805 
1806 	node = VP_TO_TMPFS_NODE(vp);
1807 
1808 	/* Assign default values if they are unknown. */
1809 	MPASS(uid != VNOVAL || gid != VNOVAL);
1810 	if (uid == VNOVAL)
1811 		uid = node->tn_uid;
1812 	if (gid == VNOVAL)
1813 		gid = node->tn_gid;
1814 	MPASS(uid != VNOVAL && gid != VNOVAL);
1815 
1816 	/* Disallow this operation if the file system is mounted read-only. */
1817 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1818 		return (EROFS);
1819 
1820 	/* Immutable or append-only files cannot be modified, either. */
1821 	if (node->tn_flags & (IMMUTABLE | APPEND))
1822 		return (EPERM);
1823 
1824 	/*
1825 	 * To modify the ownership of a file, must possess VADMIN for that
1826 	 * file.
1827 	 */
1828 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1829 		return (error);
1830 
1831 	/*
1832 	 * To change the owner of a file, or change the group of a file to a
1833 	 * group of which we are not a member, the caller must have
1834 	 * privilege.
1835 	 */
1836 	if ((uid != node->tn_uid ||
1837 	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
1838 	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN)))
1839 		return (error);
1840 
1841 	ogid = node->tn_gid;
1842 	ouid = node->tn_uid;
1843 
1844 	node->tn_uid = uid;
1845 	node->tn_gid = gid;
1846 
1847 	node->tn_status |= TMPFS_NODE_CHANGED;
1848 
1849 	if ((node->tn_mode & (S_ISUID | S_ISGID)) && (ouid != uid || ogid != gid)) {
1850 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
1851 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
1852 			atomic_store_short(&node->tn_mode, newmode);
1853 		}
1854 	}
1855 
1856 	ASSERT_VOP_ELOCKED(vp, "chown2");
1857 
1858 	return (0);
1859 }
1860 
1861 /*
1862  * Change size of the given vnode.
1863  * Caller should execute tmpfs_update on vp after a successful execution.
1864  * The vnode must be locked on entry and remain locked on exit.
1865  */
1866 int
1867 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
1868     struct thread *p)
1869 {
1870 	int error;
1871 	struct tmpfs_node *node;
1872 
1873 	ASSERT_VOP_ELOCKED(vp, "chsize");
1874 
1875 	node = VP_TO_TMPFS_NODE(vp);
1876 
1877 	/* Decide whether this is a valid operation based on the file type. */
1878 	error = 0;
1879 	switch (vp->v_type) {
1880 	case VDIR:
1881 		return (EISDIR);
1882 
1883 	case VREG:
1884 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
1885 			return (EROFS);
1886 		break;
1887 
1888 	case VBLK:
1889 		/* FALLTHROUGH */
1890 	case VCHR:
1891 		/* FALLTHROUGH */
1892 	case VFIFO:
1893 		/*
1894 		 * Allow modifications of special files even if the file
1895 		 * system is mounted read-only (we are not modifying the
1896 		 * files themselves, but the objects they represent).
1897 		 */
1898 		return (0);
1899 
1900 	default:
1901 		/* Anything else is unsupported. */
1902 		return (EOPNOTSUPP);
1903 	}
1904 
1905 	/* Immutable or append-only files cannot be modified, either. */
1906 	if (node->tn_flags & (IMMUTABLE | APPEND))
1907 		return (EPERM);
1908 
1909 	error = tmpfs_truncate(vp, size);
1910 	/*
1911 	 * tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1912 	 * for us, as well as update tn_status; no need to do that here.
1913 	 */
1914 
1915 	ASSERT_VOP_ELOCKED(vp, "chsize2");
1916 
1917 	return (error);
1918 }
1919 
1920 /*
1921  * Change access and modification times of the given vnode.
1922  * Caller should execute tmpfs_update on vp after a successful execution.
1923  * The vnode must be locked on entry and remain locked on exit.
1924  */
1925 int
1926 tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
1927     struct ucred *cred, struct thread *l)
1928 {
1929 	int error;
1930 	struct tmpfs_node *node;
1931 
1932 	ASSERT_VOP_ELOCKED(vp, "chtimes");
1933 
1934 	node = VP_TO_TMPFS_NODE(vp);
1935 
1936 	/* Disallow this operation if the file system is mounted read-only. */
1937 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1938 		return (EROFS);
1939 
1940 	/* Immutable or append-only files cannot be modified, either. */
1941 	if (node->tn_flags & (IMMUTABLE | APPEND))
1942 		return (EPERM);
1943 
1944 	error = vn_utimes_perm(vp, vap, cred, l);
1945 	if (error != 0)
1946 		return (error);
1947 
1948 	if (vap->va_atime.tv_sec != VNOVAL)
1949 		node->tn_accessed = true;
1950 
1951 	if (vap->va_mtime.tv_sec != VNOVAL)
1952 		node->tn_status |= TMPFS_NODE_MODIFIED;
1953 
1954 	if (vap->va_birthtime.tv_sec != VNOVAL)
1955 		node->tn_status |= TMPFS_NODE_MODIFIED;
1956 
1957 	tmpfs_itimes(vp, &vap->va_atime, &vap->va_mtime);
1958 
1959 	if (vap->va_birthtime.tv_sec != VNOVAL)
1960 		node->tn_birthtime = vap->va_birthtime;
1961 	ASSERT_VOP_ELOCKED(vp, "chtimes2");
1962 
1963 	return (0);
1964 }
1965 
1966 void
1967 tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
1968 {
1969 
1970 	if ((node->tn_status & status) == status || tm->tm_ronly)
1971 		return;
1972 	TMPFS_NODE_LOCK(node);
1973 	node->tn_status |= status;
1974 	TMPFS_NODE_UNLOCK(node);
1975 }
1976 
1977 void
1978 tmpfs_set_accessed(struct tmpfs_mount *tm, struct tmpfs_node *node)
1979 {
1980 	if (node->tn_accessed || tm->tm_ronly)
1981 		return;
1982 	atomic_store_8(&node->tn_accessed, true);
1983 }
1984 
1985 /* Sync timestamps */
1986 void
1987 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
1988     const struct timespec *mod)
1989 {
1990 	struct tmpfs_node *node;
1991 	struct timespec now;
1992 
1993 	ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
1994 	node = VP_TO_TMPFS_NODE(vp);
1995 
1996 	if (!node->tn_accessed &&
1997 	    (node->tn_status & (TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED)) == 0)
1998 		return;
1999 
2000 	vfs_timestamp(&now);
2001 	TMPFS_NODE_LOCK(node);
2002 	if (node->tn_accessed) {
2003 		if (acc == NULL)
2004 			 acc = &now;
2005 		node->tn_atime = *acc;
2006 	}
2007 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
2008 		if (mod == NULL)
2009 			mod = &now;
2010 		node->tn_mtime = *mod;
2011 	}
2012 	if (node->tn_status & TMPFS_NODE_CHANGED)
2013 		node->tn_ctime = now;
2014 	node->tn_status &= ~(TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
2015 	node->tn_accessed = false;
2016 	TMPFS_NODE_UNLOCK(node);
2017 
2018 	/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
2019 	random_harvest_queue(node, sizeof(*node), RANDOM_FS_ATIME);
2020 }
2021 
2022 int
2023 tmpfs_truncate(struct vnode *vp, off_t length)
2024 {
2025 	int error;
2026 	struct tmpfs_node *node;
2027 
2028 	node = VP_TO_TMPFS_NODE(vp);
2029 
2030 	if (length < 0) {
2031 		error = EINVAL;
2032 		goto out;
2033 	}
2034 
2035 	if (node->tn_size == length) {
2036 		error = 0;
2037 		goto out;
2038 	}
2039 
2040 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
2041 		return (EFBIG);
2042 
2043 	error = tmpfs_reg_resize(vp, length, FALSE);
2044 	if (error == 0)
2045 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
2046 
2047 out:
2048 	tmpfs_update(vp);
2049 
2050 	return (error);
2051 }
2052 
2053 static __inline int
2054 tmpfs_dirtree_cmp(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
2055 {
2056 	if (a->td_hash > b->td_hash)
2057 		return (1);
2058 	else if (a->td_hash < b->td_hash)
2059 		return (-1);
2060 	return (0);
2061 }
2062 
2063 RB_GENERATE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);
2064