xref: /freebsd/sys/fs/tmpfs/tmpfs_subr.c (revision 6486b015fc84e96725fef22b0e3363351399ae83)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9  * 2005 program.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Efficient memory file system supporting functions.
35  */
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/namei.h>
41 #include <sys/priv.h>
42 #include <sys/proc.h>
43 #include <sys/stat.h>
44 #include <sys/systm.h>
45 #include <sys/sysctl.h>
46 #include <sys/vnode.h>
47 #include <sys/vmmeter.h>
48 
49 #include <vm/vm.h>
50 #include <vm/vm_object.h>
51 #include <vm/vm_page.h>
52 #include <vm/vm_pageout.h>
53 #include <vm/vm_pager.h>
54 #include <vm/vm_extern.h>
55 
56 #include <fs/tmpfs/tmpfs.h>
57 #include <fs/tmpfs/tmpfs_fifoops.h>
58 #include <fs/tmpfs/tmpfs_vnops.h>
59 
60 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmpfs file system");
61 
62 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
63 
64 static int
65 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
66 {
67 	int error;
68 	long pages, bytes;
69 
70 	pages = *(long *)arg1;
71 	bytes = pages * PAGE_SIZE;
72 
73 	error = sysctl_handle_long(oidp, &bytes, 0, req);
74 	if (error || !req->newptr)
75 		return (error);
76 
77 	pages = bytes / PAGE_SIZE;
78 	if (pages < TMPFS_PAGES_MINRESERVED)
79 		return (EINVAL);
80 
81 	*(long *)arg1 = pages;
82 	return (0);
83 }
84 
85 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved, CTLTYPE_LONG|CTLFLAG_RW,
86     &tmpfs_pages_reserved, 0, sysctl_mem_reserved, "L",
87     "Amount of available memory and swap below which tmpfs growth stops");
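
/*
 * Illustrative note (not part of the original source): the handler above
 * exports the reservation in bytes but stores it in pages, so a userland
 * write is effectively rounded down to whole pages.  With 4 KB pages, for
 * example, writing 5000000 to vfs.tmpfs.memory_reserved stores
 * 5000000 / 4096 = 1220 pages; any value that converts to fewer than
 * TMPFS_PAGES_MINRESERVED pages is rejected with EINVAL.
 */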
88 
89 size_t
90 tmpfs_mem_avail(void)
91 {
92 	vm_ooffset_t avail;
93 
94 	avail = swap_pager_avail + cnt.v_free_count + cnt.v_cache_count -
95 	    tmpfs_pages_reserved;
96 	if (__predict_false(avail < 0))
97 		avail = 0;
98 	return (avail);
99 }
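
/*
 * Note (added for clarity): "available" here is the sum of free swap space,
 * free pages and cached pages, minus the reservation configured through
 * vfs.tmpfs.memory_reserved above; it is clamped so it is never reported as
 * negative.
 */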
100 
101 size_t
102 tmpfs_pages_used(struct tmpfs_mount *tmp)
103 {
104 	const size_t node_size = sizeof(struct tmpfs_node) +
105 	    sizeof(struct tmpfs_dirent);
106 	size_t meta_pages;
107 
108 	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
109 	    PAGE_SIZE);
110 	return (meta_pages + tmp->tm_pages_used);
111 }
112 
113 static size_t
114 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
115 {
116 	if (tmpfs_mem_avail() < req_pages)
117 		return (0);
118 
119 	if (tmp->tm_pages_max != SIZE_MAX &&
120 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
121 		return (0);
122 
123 	return (1);
124 }
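
/*
 * Usage sketch (illustrative, not part of the original code): callers in
 * this file ask for headroom before allocating anything, e.g.
 *
 *	if (tmpfs_pages_check_avail(tmp, 1) == 0)
 *		return (ENOSPC);
 *
 * A zero return means that either global memory/swap pressure (as seen by
 * tmpfs_mem_avail()) or the per-mount tm_pages_max limit would be exceeded
 * by the requested number of pages.
 */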
125 
126 /* --------------------------------------------------------------------- */
127 
128 /*
129  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
130  * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
131  * using the credentials of the process 'p'.
132  *
133  * If the node type is set to 'VDIR', then the parent parameter must point
134  * to the parent directory of the node being created.  It may only be NULL
135  * while allocating the root node.
136  *
137  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
138  * specifies the device the node represents.
139  *
140  * If the node type is set to 'VLNK', then the parameter target specifies
141  * the file name of the target file for the symbolic link that is being
142  * created.
143  *
144  * Note that new nodes are allocated from the mount point's node pool as
145  * long as there is room for them: both the node count limit and the
146  * memory/swap reservation are checked before the allocation is made.
147  *
148  * Returns zero on success or an appropriate error code on failure.
149  */
150 int
151 tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
152     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
153     char *target, dev_t rdev, struct tmpfs_node **node)
154 {
155 	struct tmpfs_node *nnode;
156 
157 	/* If the root directory of the 'tmp' file system is not yet
158 	 * allocated, this must be the request to do it. */
159 	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
160 
161 	MPASS(IFF(type == VLNK, target != NULL));
162 	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
163 
164 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
165 		return (ENOSPC);
166 	if (tmpfs_pages_check_avail(tmp, 1) == 0)
167 		return (ENOSPC);
168 
169 	nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
170 	    M_WAITOK);
171 
172 	/* Generic initialization. */
173 	nnode->tn_type = type;
174 	vfs_timestamp(&nnode->tn_atime);
175 	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
176 	    nnode->tn_atime;
177 	nnode->tn_uid = uid;
178 	nnode->tn_gid = gid;
179 	nnode->tn_mode = mode;
180 	nnode->tn_id = alloc_unr(tmp->tm_ino_unr);
181 
182 	/* Type-specific initialization. */
183 	switch (nnode->tn_type) {
184 	case VBLK:
185 	case VCHR:
186 		nnode->tn_rdev = rdev;
187 		break;
188 
189 	case VDIR:
190 		TAILQ_INIT(&nnode->tn_dir.tn_dirhead);
191 		MPASS(parent != nnode);
192 		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
193 		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
194 		nnode->tn_dir.tn_readdir_lastn = 0;
195 		nnode->tn_dir.tn_readdir_lastp = NULL;
196 		nnode->tn_links++;
197 		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
198 		nnode->tn_dir.tn_parent->tn_links++;
199 		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
200 		break;
201 
202 	case VFIFO:
203 		/* FALLTHROUGH */
204 	case VSOCK:
205 		break;
206 
207 	case VLNK:
208 		MPASS(strlen(target) < MAXPATHLEN);
209 		nnode->tn_size = strlen(target);
210 		nnode->tn_link = malloc(nnode->tn_size, M_TMPFSNAME,
211 		    M_WAITOK);
212 		memcpy(nnode->tn_link, target, nnode->tn_size);
213 		break;
214 
215 	case VREG:
216 		nnode->tn_reg.tn_aobj =
217 		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
218 			NULL /* XXXKIB - tmpfs needs swap reservation */);
219 		break;
220 
221 	default:
222 		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
223 	}
224 
225 	TMPFS_LOCK(tmp);
226 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
227 	tmp->tm_nodes_inuse++;
228 	TMPFS_UNLOCK(tmp);
229 
230 	*node = nnode;
231 	return 0;
232 }
233 
234 /* --------------------------------------------------------------------- */
235 
236 /*
237  * Destroys the node pointed to by 'node' within the file system 'tmp'.
238  * If the node does not belong to the given mount point, the results are
239  * unpredictable.
240  *
241  * If the node references a directory, it must have no entries, because
242  * removing them would require a recursive algorithm, something forbidden
243  * in kernel space.  Furthermore, there is no need to provide such
244  * functionality (recursive removal) because the only primitives offered
245  * to the user are the removal of empty directories and the deletion of
246  * individual files.
247  *
248  * Note that the node is not freed directly; it is returned to the mount
249  * point's node pool, where its memory remains available for future
250  * allocations, and its node identifier is released back to the unit
251  * number allocator.
252  */
253 void
254 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
255 {
256 	vm_object_t uobj;
257 
258 #ifdef INVARIANTS
259 	TMPFS_NODE_LOCK(node);
260 	MPASS(node->tn_vnode == NULL);
261 	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
262 	TMPFS_NODE_UNLOCK(node);
263 #endif
264 
265 	TMPFS_LOCK(tmp);
266 	LIST_REMOVE(node, tn_entries);
267 	tmp->tm_nodes_inuse--;
268 	TMPFS_UNLOCK(tmp);
269 
270 	switch (node->tn_type) {
271 	case VNON:
272 		/* Do not do anything.  VNON is provided so that the
273 		 * allocation routine can clean up after itself easily,
274 		 * without duplicating code. */
275 		/* FALLTHROUGH */
276 	case VBLK:
277 		/* FALLTHROUGH */
278 	case VCHR:
279 		/* FALLTHROUGH */
280 	case VDIR:
281 		/* FALLTHROUGH */
282 	case VFIFO:
283 		/* FALLTHROUGH */
284 	case VSOCK:
285 		break;
286 
287 	case VLNK:
288 		free(node->tn_link, M_TMPFSNAME);
289 		break;
290 
291 	case VREG:
292 		uobj = node->tn_reg.tn_aobj;
293 		if (uobj != NULL) {
294 			TMPFS_LOCK(tmp);
295 			tmp->tm_pages_used -= uobj->size;
296 			TMPFS_UNLOCK(tmp);
297 			vm_object_deallocate(uobj);
298 		}
299 		break;
300 
301 	default:
302 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
303 	}
304 
305 	free_unr(tmp->tm_ino_unr, node->tn_id);
306 	uma_zfree(tmp->tm_node_pool, node);
307 }
308 
309 /* --------------------------------------------------------------------- */
310 
311 /*
312  * Allocates a new directory entry for the node 'node' with the name 'name'.
313  * The new directory entry is returned in *de.
314  *
315  * The link count of 'node' is increased by one to reflect the new object
316  * referencing it (this is skipped for whiteout entries, where node is NULL).
317  *
318  * Returns zero on success or an appropriate error code on failure.
319  */
320 int
321 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
322     const char *name, uint16_t len, struct tmpfs_dirent **de)
323 {
324 	struct tmpfs_dirent *nde;
325 
326 	nde = (struct tmpfs_dirent *)uma_zalloc(tmp->tm_dirent_pool,
327 	    M_WAITOK);
328 	nde->td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
329 	nde->td_namelen = len;
330 	memcpy(nde->td_name, name, len);
331 
332 	nde->td_node = node;
333 	if (node != NULL)
334 		node->tn_links++;
335 
336 	*de = nde;
337 
338 	return 0;
339 }
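
/*
 * Note (added for clarity): a directory entry whose td_node is NULL
 * represents a whiteout.  tmpfs_dir_whiteout_add() below creates such
 * entries by passing a NULL node, and tmpfs_dir_getdents() reports them
 * to userland as DT_WHT.
 */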
340 
341 /* --------------------------------------------------------------------- */
342 
343 /*
344  * Frees a directory entry.  It is the caller's responsibility to destroy
345  * the node referenced by it if needed.
346  *
347  * The link count of node is decreased by one to reflect the removal of an
348  * object that referenced it.  This only happens if 'node_exists' is true;
349  * otherwise the function will not access the node referred to by the
350  * directory entry, as it may already have been released from the outside.
351  */
352 void
353 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de,
354     boolean_t node_exists)
355 {
356 	if (node_exists) {
357 		struct tmpfs_node *node;
358 
359 		node = de->td_node;
360 		if (node != NULL) {
361 			MPASS(node->tn_links > 0);
362 			node->tn_links--;
363 		}
364 	}
365 
366 	free(de->td_name, M_TMPFSNAME);
367 	uma_zfree(tmp->tm_dirent_pool, de);
368 }
369 
370 /* --------------------------------------------------------------------- */
371 
372 /*
373  * Allocates a new vnode for the node 'node' or returns a new reference to
374  * an existing one if the node already had a vnode referencing it.  The
375  * resulting locked vnode is returned in *vpp.
376  *
377  * Returns zero on success or an appropriate error code on failure.
378  */
379 int
380 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
381     struct vnode **vpp)
382 {
383 	int error = 0;
384 	struct vnode *vp;
385 
386 loop:
387 	TMPFS_NODE_LOCK(node);
388 	if ((vp = node->tn_vnode) != NULL) {
389 		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
390 		VI_LOCK(vp);
391 		TMPFS_NODE_UNLOCK(node);
392 		error = vget(vp, lkflag | LK_INTERLOCK, curthread);
393 		if (error != 0) {
394 			vp = NULL;
395 			goto out;
396 		}
397 
398 		/*
399 		 * Make sure the vnode is still there after
400 		 * getting the interlock to avoid racing a free.
401 		 */
402 		if (node->tn_vnode == NULL || node->tn_vnode != vp) {
403 			vput(vp);
404 			goto loop;
405 		}
406 
407 		goto out;
408 	}
409 
410 	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
411 	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
412 		TMPFS_NODE_UNLOCK(node);
413 		error = ENOENT;
414 		vp = NULL;
415 		goto out;
416 	}
417 
418 	/*
419 	 * Otherwise serialize the allocation: getnewvnode() can block,
420 	 * so only one thread may allocate the vnode while the others wait.
421 	 */
422 	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
423 		node->tn_vpstate |= TMPFS_VNODE_WANT;
424 		error = msleep((caddr_t) &node->tn_vpstate,
425 		    TMPFS_NODE_MTX(node), PDROP | PCATCH,
426 		    "tmpfs_alloc_vp", 0);
427 		if (error)
428 			return error;
429 
430 		goto loop;
431 	} else
432 		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
433 
434 	TMPFS_NODE_UNLOCK(node);
435 
436 	/* Get a new vnode and associate it with our node. */
437 	error = getnewvnode("tmpfs", mp, &tmpfs_vnodeop_entries, &vp);
438 	if (error != 0)
439 		goto unlock;
440 	MPASS(vp != NULL);
441 
442 	(void) vn_lock(vp, lkflag | LK_RETRY);
443 
444 	vp->v_data = node;
445 	vp->v_type = node->tn_type;
446 
447 	/* Type-specific initialization. */
448 	switch (node->tn_type) {
449 	case VBLK:
450 		/* FALLTHROUGH */
451 	case VCHR:
452 		/* FALLTHROUGH */
453 	case VLNK:
454 		/* FALLTHROUGH */
455 	case VREG:
456 		/* FALLTHROUGH */
457 	case VSOCK:
458 		break;
459 	case VFIFO:
460 		vp->v_op = &tmpfs_fifoop_entries;
461 		break;
462 	case VDIR:
463 		MPASS(node->tn_dir.tn_parent != NULL);
464 		if (node->tn_dir.tn_parent == node)
465 			vp->v_vflag |= VV_ROOT;
466 		break;
467 
468 	default:
469 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
470 	}
471 
472 	vnode_pager_setsize(vp, node->tn_size);
473 	error = insmntque(vp, mp);
474 	if (error)
475 		vp = NULL;
476 
477 unlock:
478 	TMPFS_NODE_LOCK(node);
479 
480 	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
481 	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
482 	node->tn_vnode = vp;
483 
484 	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
485 		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
486 		TMPFS_NODE_UNLOCK(node);
487 		wakeup((caddr_t) &node->tn_vpstate);
488 	} else
489 		TMPFS_NODE_UNLOCK(node);
490 
491 out:
492 	*vpp = vp;
493 
494 #ifdef INVARIANTS
495 	if (error == 0) {
496 		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
497 		TMPFS_NODE_LOCK(node);
498 		MPASS(*vpp == node->tn_vnode);
499 		TMPFS_NODE_UNLOCK(node);
500 	}
501 #endif
502 
503 	return error;
504 }
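
/*
 * Note (added for clarity): the tn_vpstate handshake above works roughly
 * as follows.  The first thread that needs a vnode sets
 * TMPFS_VNODE_ALLOCATING and drops the node lock before calling
 * getnewvnode(), which may sleep.  Any other thread arriving in the
 * meantime sets TMPFS_VNODE_WANT and msleep()s on &node->tn_vpstate.
 * Once the allocation finishes (or fails), the allocating thread clears
 * ALLOCATING, publishes tn_vnode and wakes the waiters, which then retry
 * from the "loop:" label.
 */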
505 
506 /* --------------------------------------------------------------------- */
507 
508 /*
509  * Destroys the association between the vnode vp and the node it
510  * references.
511  */
512 void
513 tmpfs_free_vp(struct vnode *vp)
514 {
515 	struct tmpfs_node *node;
516 
517 	node = VP_TO_TMPFS_NODE(vp);
518 
519 	mtx_assert(TMPFS_NODE_MTX(node), MA_OWNED);
520 	node->tn_vnode = NULL;
521 	vp->v_data = NULL;
522 }
523 
524 /* --------------------------------------------------------------------- */
525 
526 /*
527  * Allocates a new file of type 'type' and adds it to the parent directory
528  * 'dvp'; this addition is done using the component name given in 'cnp'.
529  * The ownership of the new file is automatically assigned based on the
530  * credentials of the caller (through 'cnp'), the group is set based on
531  * the parent directory and the mode is determined from the 'vap' argument.
532  * If successful, *vpp holds a vnode to the newly created file and zero
533  * is returned.  Otherwise *vpp is NULL and the function returns an
534  * appropriate error code.
535  */
536 int
537 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
538     struct componentname *cnp, char *target)
539 {
540 	int error;
541 	struct tmpfs_dirent *de;
542 	struct tmpfs_mount *tmp;
543 	struct tmpfs_node *dnode;
544 	struct tmpfs_node *node;
545 	struct tmpfs_node *parent;
546 
547 	MPASS(VOP_ISLOCKED(dvp));
548 	MPASS(cnp->cn_flags & HASBUF);
549 
550 	tmp = VFS_TO_TMPFS(dvp->v_mount);
551 	dnode = VP_TO_TMPFS_DIR(dvp);
552 	*vpp = NULL;
553 
554 	/* If the entry we are creating is a directory, we must not overflow
555 	 * the link count of its parent, because the parent will gain a new
556 	 * link. */
557 	if (vap->va_type == VDIR) {
558 		/* Ensure that we do not overflow the maximum number of links
559 		 * imposed by the system. */
560 		MPASS(dnode->tn_links <= LINK_MAX);
561 		if (dnode->tn_links == LINK_MAX) {
562 			error = EMLINK;
563 			goto out;
564 		}
565 
566 		parent = dnode;
567 		MPASS(parent != NULL);
568 	} else
569 		parent = NULL;
570 
571 	/* Allocate a node that represents the new file. */
572 	error = tmpfs_alloc_node(tmp, vap->va_type, cnp->cn_cred->cr_uid,
573 	    dnode->tn_gid, vap->va_mode, parent, target, vap->va_rdev, &node);
574 	if (error != 0)
575 		goto out;
576 
577 	/* Allocate a directory entry that points to the new file. */
578 	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
579 	    &de);
580 	if (error != 0) {
581 		tmpfs_free_node(tmp, node);
582 		goto out;
583 	}
584 
585 	/* Allocate a vnode for the new file. */
586 	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
587 	if (error != 0) {
588 		tmpfs_free_dirent(tmp, de, TRUE);
589 		tmpfs_free_node(tmp, node);
590 		goto out;
591 	}
592 
593 	/* Now that all required items are allocated, we can proceed to
594 	 * insert the new node into the directory, an operation that
595 	 * cannot fail. */
596 	if (cnp->cn_flags & ISWHITEOUT)
597 		tmpfs_dir_whiteout_remove(dvp, cnp);
598 	tmpfs_dir_attach(dvp, de);
599 
600 out:
601 
602 	return error;
603 }
604 
605 /* --------------------------------------------------------------------- */
606 
607 /*
608  * Attaches the directory entry de to the directory represented by vp.
609  * Note that this does not change the link count of the node pointed to by
610  * the directory entry, as this is done by tmpfs_alloc_dirent.
611  */
612 void
613 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
614 {
615 	struct tmpfs_node *dnode;
616 
617 	ASSERT_VOP_ELOCKED(vp, __func__);
618 	dnode = VP_TO_TMPFS_DIR(vp);
619 	TAILQ_INSERT_TAIL(&dnode->tn_dir.tn_dirhead, de, td_entries);
620 	dnode->tn_size += sizeof(struct tmpfs_dirent);
621 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
622 	    TMPFS_NODE_MODIFIED;
623 }
624 
625 /* --------------------------------------------------------------------- */
626 
627 /*
628  * Detaches the directory entry de from the directory represented by vp.
629  * Note that this does not change the link count of the node pointed to by
630  * the directory entry, as this is done by tmpfs_free_dirent.
631  */
632 void
633 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
634 {
635 	struct tmpfs_node *dnode;
636 
637 	ASSERT_VOP_ELOCKED(vp, __func__);
638 	dnode = VP_TO_TMPFS_DIR(vp);
639 
640 	if (dnode->tn_dir.tn_readdir_lastp == de) {
641 		dnode->tn_dir.tn_readdir_lastn = 0;
642 		dnode->tn_dir.tn_readdir_lastp = NULL;
643 	}
644 
645 	TAILQ_REMOVE(&dnode->tn_dir.tn_dirhead, de, td_entries);
646 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
647 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
648 	    TMPFS_NODE_MODIFIED;
649 }
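
/*
 * Note (added for clarity): a tmpfs directory's tn_size is maintained as
 * the number of attached entries multiplied by sizeof(struct tmpfs_dirent),
 * via tmpfs_dir_attach() and tmpfs_dir_detach() above.
 */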
650 
651 /* --------------------------------------------------------------------- */
652 
653 /*
654  * Looks for a directory entry in the directory represented by node.
655  * 'cnp' describes the name of the entry to look for.  If 'f' is not NULL,
656  * only entries whose node matches 'f' are considered.  The . and ..
657  * components are not allowed, as they do not physically exist in directories.
658  *
659  * Returns a pointer to the entry when found, otherwise NULL.
660  */
661 struct tmpfs_dirent *
662 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
663     struct componentname *cnp)
664 {
665 	boolean_t found;
666 	struct tmpfs_dirent *de;
667 
668 	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
669 	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
670 	    cnp->cn_nameptr[1] == '.')));
671 	TMPFS_VALIDATE_DIR(node);
672 
673 	found = 0;
674 	TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
675 		if (f != NULL && de->td_node != f)
676 		    continue;
677 		MPASS(cnp->cn_namelen < 0xffff);
678 		if (de->td_namelen == (uint16_t)cnp->cn_namelen &&
679 		    bcmp(de->td_name, cnp->cn_nameptr, de->td_namelen) == 0) {
680 			found = 1;
681 			break;
682 		}
683 	}
684 	node->tn_status |= TMPFS_NODE_ACCESSED;
685 
686 	return found ? de : NULL;
687 }
688 
689 /* --------------------------------------------------------------------- */
690 
691 /*
692  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
693  * directory and returns it in the uio space.  The function returns 0
694  * on success, -1 if there was not enough space in the uio structure to
695  * hold the directory entry or an appropriate error code if another
696  * error happens.
697  */
698 int
699 tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
700 {
701 	int error;
702 	struct dirent dent;
703 
704 	TMPFS_VALIDATE_DIR(node);
705 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
706 
707 	dent.d_fileno = node->tn_id;
708 	dent.d_type = DT_DIR;
709 	dent.d_namlen = 1;
710 	dent.d_name[0] = '.';
711 	dent.d_name[1] = '\0';
712 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
713 
714 	if (dent.d_reclen > uio->uio_resid)
715 		error = -1;
716 	else {
717 		error = uiomove(&dent, dent.d_reclen, uio);
718 		if (error == 0)
719 			uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
720 	}
721 
722 	node->tn_status |= TMPFS_NODE_ACCESSED;
723 
724 	return error;
725 }
726 
727 /* --------------------------------------------------------------------- */
728 
729 /*
730  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
731  * directory and returns it in the uio space.  The function returns 0
732  * on success, -1 if there was not enough space in the uio structure to
733  * hold the directory entry or an appropriate error code if another
734  * error happens.
735  */
736 int
737 tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
738 {
739 	int error;
740 	struct dirent dent;
741 
742 	TMPFS_VALIDATE_DIR(node);
743 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
744 
745 	/*
746 	 * Return ENOENT if the current node is already removed.
747 	 */
748 	TMPFS_ASSERT_LOCKED(node);
749 	if (node->tn_dir.tn_parent == NULL) {
750 		return (ENOENT);
751 	}
752 
753 	TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
754 	dent.d_fileno = node->tn_dir.tn_parent->tn_id;
755 	TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
756 
757 	dent.d_type = DT_DIR;
758 	dent.d_namlen = 2;
759 	dent.d_name[0] = '.';
760 	dent.d_name[1] = '.';
761 	dent.d_name[2] = '\0';
762 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
763 
764 	if (dent.d_reclen > uio->uio_resid)
765 		error = -1;
766 	else {
767 		error = uiomove(&dent, dent.d_reclen, uio);
768 		if (error == 0) {
769 			struct tmpfs_dirent *de;
770 
771 			de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
772 			if (de == NULL)
773 				uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
774 			else
775 				uio->uio_offset = tmpfs_dircookie(de);
776 		}
777 	}
778 
779 	node->tn_status |= TMPFS_NODE_ACCESSED;
780 
781 	return error;
782 }
783 
784 /* --------------------------------------------------------------------- */
785 
786 /*
787  * Lookup a directory entry by its associated cookie.
788  */
789 struct tmpfs_dirent *
790 tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
791 {
792 	struct tmpfs_dirent *de;
793 
794 	if (cookie == node->tn_dir.tn_readdir_lastn &&
795 	    node->tn_dir.tn_readdir_lastp != NULL) {
796 		return node->tn_dir.tn_readdir_lastp;
797 	}
798 
799 	TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
800 		if (tmpfs_dircookie(de) == cookie) {
801 			break;
802 		}
803 	}
804 
805 	return de;
806 }
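
/*
 * Note (added for clarity): readdir offsets in tmpfs are cookies rather
 * than byte offsets.  The sequence a full directory walk is expected to
 * observe is
 *
 *	TMPFS_DIRCOOKIE_DOT -> TMPFS_DIRCOOKIE_DOTDOT ->
 *	tmpfs_dircookie(first entry) -> ... -> TMPFS_DIRCOOKIE_EOF
 *
 * which is why tmpfs_dir_getdents() below asserts that it never sees the
 * "." or ".." cookies itself.
 */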
807 
808 /* --------------------------------------------------------------------- */
809 
810 /*
811  * Helper function for tmpfs_readdir.  Returns as many directory entries
812  * as can fit in the uio space.  The read starts at uio->uio_offset.
813  * The function returns 0 on success, -1 if there was not enough space
814  * in the uio structure to hold the directory entry or an appropriate
815  * error code if another error happens.
816  */
817 int
818 tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
819 {
820 	int error;
821 	off_t startcookie;
822 	struct tmpfs_dirent *de;
823 
824 	TMPFS_VALIDATE_DIR(node);
825 
826 	/* Locate the first directory entry we have to return.  We have cached
827 	 * the last readdir in the node, so use those values if appropriate.
828 	 * Otherwise do a linear scan to find the requested entry. */
829 	startcookie = uio->uio_offset;
830 	MPASS(startcookie != TMPFS_DIRCOOKIE_DOT);
831 	MPASS(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
832 	if (startcookie == TMPFS_DIRCOOKIE_EOF) {
833 		return 0;
834 	} else {
835 		de = tmpfs_dir_lookupbycookie(node, startcookie);
836 	}
837 	if (de == NULL) {
838 		return EINVAL;
839 	}
840 
841 	/* Read as many entries as possible; i.e., until we reach the end of
842 	 * the directory or we exhaust uio space. */
843 	do {
844 		struct dirent d;
845 
846 		/* Create a dirent structure representing the current
847 		 * tmpfs_node and fill it. */
848 		if (de->td_node == NULL) {
849 			d.d_fileno = 1;
850 			d.d_type = DT_WHT;
851 		} else {
852 			d.d_fileno = de->td_node->tn_id;
853 			switch (de->td_node->tn_type) {
854 			case VBLK:
855 				d.d_type = DT_BLK;
856 				break;
857 
858 			case VCHR:
859 				d.d_type = DT_CHR;
860 				break;
861 
862 			case VDIR:
863 				d.d_type = DT_DIR;
864 				break;
865 
866 			case VFIFO:
867 				d.d_type = DT_FIFO;
868 				break;
869 
870 			case VLNK:
871 				d.d_type = DT_LNK;
872 				break;
873 
874 			case VREG:
875 				d.d_type = DT_REG;
876 				break;
877 
878 			case VSOCK:
879 				d.d_type = DT_SOCK;
880 				break;
881 
882 			default:
883 				panic("tmpfs_dir_getdents: type %p %d",
884 				    de->td_node, (int)de->td_node->tn_type);
885 			}
886 		}
887 		d.d_namlen = de->td_namelen;
888 		MPASS(de->td_namelen < sizeof(d.d_name));
889 		(void)memcpy(d.d_name, de->td_name, de->td_namelen);
890 		d.d_name[de->td_namelen] = '\0';
891 		d.d_reclen = GENERIC_DIRSIZ(&d);
892 
893 		/* Stop reading if the directory entry we are processing is
894 		 * bigger than the amount of data that can be returned. */
895 		if (d.d_reclen > uio->uio_resid) {
896 			error = -1;
897 			break;
898 		}
899 
900 		/* Copy the new dirent structure into the output buffer and
901 		 * advance pointers. */
902 		error = uiomove(&d, d.d_reclen, uio);
903 		if (error == 0) {
904 			(*cntp)++;
905 			de = TAILQ_NEXT(de, td_entries);
906 		}
907 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
908 
909 	/* Update the offset and cache. */
910 	if (de == NULL) {
911 		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
912 		node->tn_dir.tn_readdir_lastn = 0;
913 		node->tn_dir.tn_readdir_lastp = NULL;
914 	} else {
915 		node->tn_dir.tn_readdir_lastn = uio->uio_offset = tmpfs_dircookie(de);
916 		node->tn_dir.tn_readdir_lastp = de;
917 	}
918 
919 	node->tn_status |= TMPFS_NODE_ACCESSED;
920 	return error;
921 }
922 
923 int
924 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
925 {
926 	struct tmpfs_dirent *de;
927 	int error;
928 
929 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
930 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
931 	if (error != 0)
932 		return (error);
933 	tmpfs_dir_attach(dvp, de);
934 	return (0);
935 }
936 
937 void
938 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
939 {
940 	struct tmpfs_dirent *de;
941 
942 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
943 	MPASS(de != NULL && de->td_node == NULL);
944 	tmpfs_dir_detach(dvp, de);
945 	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de, TRUE);
946 }
947 
948 /* --------------------------------------------------------------------- */
949 
950 /*
951  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
952  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
953  * 'newsize' must not be negative.
954  *
955  * Returns zero on success or an appropriate error code on failure.
956  */
957 int
958 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
959 {
960 	struct tmpfs_mount *tmp;
961 	struct tmpfs_node *node;
962 	vm_object_t uobj;
963 	vm_page_t m, ma[1];
964 	vm_pindex_t idx, newpages, oldpages;
965 	off_t oldsize;
966 	int base, rv;
967 
968 	MPASS(vp->v_type == VREG);
969 	MPASS(newsize >= 0);
970 
971 	node = VP_TO_TMPFS_NODE(vp);
972 	uobj = node->tn_reg.tn_aobj;
973 	tmp = VFS_TO_TMPFS(vp->v_mount);
974 
975 	/*
976 	 * Convert the old and new sizes to the number of pages needed to
977 	 * store them.  It may happen that we do not need to do anything
978 	 * because the last allocated page can accommodate the change on
979 	 * its own.
980 	 */
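	/*
	 * Worked example (illustrative): with 4 KB pages, growing a file
	 * from 5000 to 6000 bytes keeps oldpages == newpages == 2, so no
	 * extra page needs to be reserved; growing it to 9000 bytes raises
	 * newpages to 3, and the additional page must first pass
	 * tmpfs_pages_check_avail().
	 */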
981 	oldsize = node->tn_size;
982 	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
983 	MPASS(oldpages == uobj->size);
984 	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
985 	if (newpages > oldpages &&
986 	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
987 		return (ENOSPC);
988 
989 	VM_OBJECT_LOCK(uobj);
990 	if (newsize < oldsize) {
991 		/*
992 		 * Zero the truncated part of the last page.
993 		 */
994 		base = newsize & PAGE_MASK;
995 		if (base != 0) {
996 			idx = OFF_TO_IDX(newsize);
997 retry:
998 			m = vm_page_lookup(uobj, idx);
999 			if (m != NULL) {
1000 				if ((m->oflags & VPO_BUSY) != 0 ||
1001 				    m->busy != 0) {
1002 					vm_page_sleep(m, "tmfssz");
1003 					goto retry;
1004 				}
1005 				MPASS(m->valid == VM_PAGE_BITS_ALL);
1006 			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
1007 				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
1008 				if (m == NULL) {
1009 					VM_OBJECT_UNLOCK(uobj);
1010 					VM_WAIT;
1011 					VM_OBJECT_LOCK(uobj);
1012 					goto retry;
1013 				} else if (m->valid != VM_PAGE_BITS_ALL) {
1014 					ma[0] = m;
1015 					rv = vm_pager_get_pages(uobj, ma, 1, 0);
1016 					m = vm_page_lookup(uobj, idx);
1017 				} else
1018 					/* A cached page was reactivated. */
1019 					rv = VM_PAGER_OK;
1020 				vm_page_lock(m);
1021 				if (rv == VM_PAGER_OK) {
1022 					vm_page_deactivate(m);
1023 					vm_page_unlock(m);
1024 					vm_page_wakeup(m);
1025 				} else {
1026 					vm_page_free(m);
1027 					vm_page_unlock(m);
1028 					if (ignerr)
1029 						m = NULL;
1030 					else {
1031 						VM_OBJECT_UNLOCK(uobj);
1032 						return (EIO);
1033 					}
1034 				}
1035 			}
1036 			if (m != NULL) {
1037 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
1038 				vm_page_dirty(m);
1039 				vm_pager_page_unswapped(m);
1040 			}
1041 		}
1042 
1043 		/*
1044 		 * Release any swap space and free any whole pages.
1045 		 */
1046 		if (newpages < oldpages) {
1047 			swap_pager_freespace(uobj, newpages, oldpages -
1048 			    newpages);
1049 			vm_object_page_remove(uobj, newpages, 0, 0);
1050 		}
1051 	}
1052 	uobj->size = newpages;
1053 	VM_OBJECT_UNLOCK(uobj);
1054 
1055 	TMPFS_LOCK(tmp);
1056 	tmp->tm_pages_used += (newpages - oldpages);
1057 	TMPFS_UNLOCK(tmp);
1058 
1059 	node->tn_size = newsize;
1060 	vnode_pager_setsize(vp, newsize);
1061 	return (0);
1062 }
1063 
1064 /* --------------------------------------------------------------------- */
1065 
1066 /*
1067  * Change flags of the given vnode.
1068  * Caller should execute tmpfs_update on vp after a successful execution.
1069  * The vnode must be locked on entry and remain locked on exit.
1070  */
1071 int
1072 tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct thread *p)
1073 {
1074 	int error;
1075 	struct tmpfs_node *node;
1076 
1077 	MPASS(VOP_ISLOCKED(vp));
1078 
1079 	node = VP_TO_TMPFS_NODE(vp);
1080 
1081 	if ((flags & ~(UF_NODUMP | UF_IMMUTABLE | UF_APPEND | UF_OPAQUE |
1082 	    UF_NOUNLINK | SF_ARCHIVED | SF_IMMUTABLE | SF_APPEND |
1083 	    SF_NOUNLINK)) != 0)
1084 		return (EOPNOTSUPP);
1085 
1086 	/* Disallow this operation if the file system is mounted read-only. */
1087 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1088 		return EROFS;
1089 
1090 	/*
1091 	 * Callers may only modify the file flags on objects they
1092 	 * have VADMIN rights for.
1093 	 */
1094 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1095 		return (error);
1096 	/*
1097 	 * Unprivileged processes are not permitted to unset system
1098 	 * flags, or modify flags if any system flags are set.
1099 	 */
1100 	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) {
1101 		if (node->tn_flags &
1102 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
1103 			error = securelevel_gt(cred, 0);
1104 			if (error)
1105 				return (error);
1106 		}
1107 	} else {
1108 		if (node->tn_flags &
1109 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
1110 		    ((flags ^ node->tn_flags) & SF_SETTABLE))
1111 			return (EPERM);
1112 	}
1113 	node->tn_flags = flags;
1114 	node->tn_status |= TMPFS_NODE_CHANGED;
1115 
1116 	MPASS(VOP_ISLOCKED(vp));
1117 
1118 	return 0;
1119 }
1120 
1121 /* --------------------------------------------------------------------- */
1122 
1123 /*
1124  * Change access mode on the given vnode.
1125  * Caller should execute tmpfs_update on vp after a successful execution.
1126  * The vnode must be locked on entry and remain locked on exit.
1127  */
1128 int
1129 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
1130 {
1131 	int error;
1132 	struct tmpfs_node *node;
1133 
1134 	MPASS(VOP_ISLOCKED(vp));
1135 
1136 	node = VP_TO_TMPFS_NODE(vp);
1137 
1138 	/* Disallow this operation if the file system is mounted read-only. */
1139 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1140 		return EROFS;
1141 
1142 	/* Immutable or append-only files cannot be modified, either. */
1143 	if (node->tn_flags & (IMMUTABLE | APPEND))
1144 		return EPERM;
1145 
1146 	/*
1147 	 * To modify the permissions on a file, must possess VADMIN
1148 	 * for that file.
1149 	 */
1150 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1151 		return (error);
1152 
1153 	/*
1154 	 * Privileged processes may set the sticky bit on non-directories,
1155 	 * as well as set the setgid bit on a file with a group that the
1156 	 * process is not a member of.
1157 	 */
1158 	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
1159 		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE, 0))
1160 			return (EFTYPE);
1161 	}
1162 	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
1163 		error = priv_check_cred(cred, PRIV_VFS_SETGID, 0);
1164 		if (error)
1165 			return (error);
1166 	}
1167 
1168 
1169 	node->tn_mode &= ~ALLPERMS;
1170 	node->tn_mode |= mode & ALLPERMS;
1171 
1172 	node->tn_status |= TMPFS_NODE_CHANGED;
1173 
1174 	MPASS(VOP_ISLOCKED(vp));
1175 
1176 	return 0;
1177 }
1178 
1179 /* --------------------------------------------------------------------- */
1180 
1181 /*
1182  * Change ownership of the given vnode.  At least one of uid or gid must
1183  * be different than VNOVAL.  If one is set to that value, the attribute
1184  * be different from VNOVAL.  If one is set to that value, the attribute
1185  * Caller should execute tmpfs_update on vp after a successful execution.
1186  * The vnode must be locked on entry and remain locked on exit.
1187  */
1188 int
1189 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
1190     struct thread *p)
1191 {
1192 	int error;
1193 	struct tmpfs_node *node;
1194 	uid_t ouid;
1195 	gid_t ogid;
1196 
1197 	MPASS(VOP_ISLOCKED(vp));
1198 
1199 	node = VP_TO_TMPFS_NODE(vp);
1200 
1201 	/* Assign default values if they are unknown. */
1202 	MPASS(uid != VNOVAL || gid != VNOVAL);
1203 	if (uid == VNOVAL)
1204 		uid = node->tn_uid;
1205 	if (gid == VNOVAL)
1206 		gid = node->tn_gid;
1207 	MPASS(uid != VNOVAL && gid != VNOVAL);
1208 
1209 	/* Disallow this operation if the file system is mounted read-only. */
1210 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1211 		return EROFS;
1212 
1213 	/* Immutable or append-only files cannot be modified, either. */
1214 	if (node->tn_flags & (IMMUTABLE | APPEND))
1215 		return EPERM;
1216 
1217 	/*
1218 	 * To modify the ownership of a file, must possess VADMIN for that
1219 	 * file.
1220 	 */
1221 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1222 		return (error);
1223 
1224 	/*
1225 	 * To change the owner of a file, or change the group of a file to a
1226 	 * group of which we are not a member, the caller must have
1227 	 * privilege.
1228 	 */
1229 	if ((uid != node->tn_uid ||
1230 	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
1231 	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0)))
1232 		return (error);
1233 
1234 	ogid = node->tn_gid;
1235 	ouid = node->tn_uid;
1236 
1237 	node->tn_uid = uid;
1238 	node->tn_gid = gid;
1239 
1240 	node->tn_status |= TMPFS_NODE_CHANGED;
1241 
1242 	if ((node->tn_mode & (S_ISUID | S_ISGID)) && (ouid != uid || ogid != gid)) {
1243 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID, 0))
1244 			node->tn_mode &= ~(S_ISUID | S_ISGID);
1245 	}
1246 
1247 	MPASS(VOP_ISLOCKED(vp));
1248 
1249 	return 0;
1250 }
1251 
1252 /* --------------------------------------------------------------------- */
1253 
1254 /*
1255  * Change size of the given vnode.
1256  * Caller should execute tmpfs_update on vp after a successful execution.
1257  * The vnode must be locked on entry and remain locked on exit.
1258  */
1259 int
1260 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
1261     struct thread *p)
1262 {
1263 	int error;
1264 	struct tmpfs_node *node;
1265 
1266 	MPASS(VOP_ISLOCKED(vp));
1267 
1268 	node = VP_TO_TMPFS_NODE(vp);
1269 
1270 	/* Decide whether this is a valid operation based on the file type. */
1271 	error = 0;
1272 	switch (vp->v_type) {
1273 	case VDIR:
1274 		return EISDIR;
1275 
1276 	case VREG:
1277 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
1278 			return EROFS;
1279 		break;
1280 
1281 	case VBLK:
1282 		/* FALLTHROUGH */
1283 	case VCHR:
1284 		/* FALLTHROUGH */
1285 	case VFIFO:
1286 		/* Allow modifications of special files even if the file
1287 		 * system is mounted read-only (we are not modifying the
1288 		 * files themselves, but the objects they represent). */
1289 		return 0;
1290 
1291 	default:
1292 		/* Anything else is unsupported. */
1293 		return EOPNOTSUPP;
1294 	}
1295 
1296 	/* Immutable or append-only files cannot be modified, either. */
1297 	if (node->tn_flags & (IMMUTABLE | APPEND))
1298 		return EPERM;
1299 
1300 	error = tmpfs_truncate(vp, size);
1301 	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1302 	 * for us, and will update tn_status; no need to do that here. */
1303 
1304 	MPASS(VOP_ISLOCKED(vp));
1305 
1306 	return error;
1307 }
1308 
1309 /* --------------------------------------------------------------------- */
1310 
1311 /*
1312  * Change access and modification times of the given vnode.
1313  * Caller should execute tmpfs_update on vp after a successful execution.
1314  * The vnode must be locked on entry and remain locked on exit.
1315  */
1316 int
1317 tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
1318 	struct timespec *birthtime, int vaflags, struct ucred *cred, struct thread *l)
1319 {
1320 	int error;
1321 	struct tmpfs_node *node;
1322 
1323 	MPASS(VOP_ISLOCKED(vp));
1324 
1325 	node = VP_TO_TMPFS_NODE(vp);
1326 
1327 	/* Disallow this operation if the file system is mounted read-only. */
1328 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1329 		return EROFS;
1330 
1331 	/* Immutable or append-only files cannot be modified, either. */
1332 	if (node->tn_flags & (IMMUTABLE | APPEND))
1333 		return EPERM;
1334 
1335 	/* Determine whether the user has the proper privilege to update time. */
1336 	if (vaflags & VA_UTIMES_NULL) {
1337 		error = VOP_ACCESS(vp, VADMIN, cred, l);
1338 		if (error)
1339 			error = VOP_ACCESS(vp, VWRITE, cred, l);
1340 	} else
1341 		error = VOP_ACCESS(vp, VADMIN, cred, l);
1342 	if (error)
1343 		return (error);
1344 
1345 	if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
1346 		node->tn_status |= TMPFS_NODE_ACCESSED;
1347 
1348 	if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
1349 		node->tn_status |= TMPFS_NODE_MODIFIED;
1350 
1351 	if (birthtime->tv_sec != VNOVAL && birthtime->tv_nsec != VNOVAL)
1352 		node->tn_status |= TMPFS_NODE_MODIFIED;
1353 
1354 	tmpfs_itimes(vp, atime, mtime);
1355 
1356 	if (birthtime->tv_sec != VNOVAL && birthtime->tv_nsec != VNOVAL)
1357 		node->tn_birthtime = *birthtime;
1358 	MPASS(VOP_ISLOCKED(vp));
1359 
1360 	return 0;
1361 }
1362 
1363 /* --------------------------------------------------------------------- */
1364 /* Sync timestamps */
1365 void
1366 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
1367     const struct timespec *mod)
1368 {
1369 	struct tmpfs_node *node;
1370 	struct timespec now;
1371 
1372 	node = VP_TO_TMPFS_NODE(vp);
1373 
1374 	if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
1375 	    TMPFS_NODE_CHANGED)) == 0)
1376 		return;
1377 
1378 	vfs_timestamp(&now);
1379 	if (node->tn_status & TMPFS_NODE_ACCESSED) {
1380 		if (acc == NULL)
1381 			 acc = &now;
1382 		node->tn_atime = *acc;
1383 	}
1384 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
1385 		if (mod == NULL)
1386 			mod = &now;
1387 		node->tn_mtime = *mod;
1388 	}
1389 	if (node->tn_status & TMPFS_NODE_CHANGED) {
1390 		node->tn_ctime = now;
1391 	}
1392 	node->tn_status &=
1393 	    ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
1394 }
1395 
1396 /* --------------------------------------------------------------------- */
1397 
1398 void
1399 tmpfs_update(struct vnode *vp)
1400 {
1401 
1402 	tmpfs_itimes(vp, NULL, NULL);
1403 }
1404 
1405 /* --------------------------------------------------------------------- */
1406 
1407 int
1408 tmpfs_truncate(struct vnode *vp, off_t length)
1409 {
1410 	int error;
1411 	struct tmpfs_node *node;
1412 
1413 	node = VP_TO_TMPFS_NODE(vp);
1414 
1415 	if (length < 0) {
1416 		error = EINVAL;
1417 		goto out;
1418 	}
1419 
1420 	if (node->tn_size == length) {
1421 		error = 0;
1422 		goto out;
1423 	}
1424 
1425 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
1426 		return (EFBIG);
1427 
1428 	error = tmpfs_reg_resize(vp, length, FALSE);
1429 	if (error == 0) {
1430 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1431 	}
1432 
1433 out:
1434 	tmpfs_update(vp);
1435 
1436 	return error;
1437 }
1438