xref: /freebsd/sys/fs/tmpfs/tmpfs_subr.c (revision c6ec7d31830ab1c80edae95ad5e4b9dba10c47ac)
1 /*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9  * 2005 program.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Efficient memory file system supporting functions.
35  */
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/namei.h>
41 #include <sys/priv.h>
42 #include <sys/proc.h>
43 #include <sys/stat.h>
44 #include <sys/systm.h>
45 #include <sys/sysctl.h>
46 #include <sys/vnode.h>
47 #include <sys/vmmeter.h>
48 
49 #include <vm/vm.h>
50 #include <vm/vm_param.h>
51 #include <vm/vm_object.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_pageout.h>
54 #include <vm/vm_pager.h>
55 #include <vm/vm_extern.h>
56 
57 #include <fs/tmpfs/tmpfs.h>
58 #include <fs/tmpfs/tmpfs_fifoops.h>
59 #include <fs/tmpfs/tmpfs_vnops.h>
60 
61 SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmpfs file system");
62 
63 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
64 
65 static int
66 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
67 {
68 	int error;
69 	long pages, bytes;
70 
71 	pages = *(long *)arg1;
72 	bytes = pages * PAGE_SIZE;
73 
74 	error = sysctl_handle_long(oidp, &bytes, 0, req);
75 	if (error || !req->newptr)
76 		return (error);
77 
78 	pages = bytes / PAGE_SIZE;
79 	if (pages < TMPFS_PAGES_MINRESERVED)
80 		return (EINVAL);
81 
82 	*(long *)arg1 = pages;
83 	return (0);
84 }
85 
86 SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved, CTLTYPE_LONG|CTLFLAG_RW,
87     &tmpfs_pages_reserved, 0, sysctl_mem_reserved, "L",
88     "Amount of available memory and swap below which tmpfs growth stops");
89 
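/*
 * Added illustrative sketch (not part of the original source): the handler
 * above reports the reserve to userland in bytes but stores it in pages.  A
 * minimal userland reader/writer, assuming only the standard sysctlbyname(3)
 * interface, could look like this; error reporting is elided for brevity.
 */
#if 0	/* userland illustration only; not compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long reserved;
	size_t len;

	/* Read the current reserve; the value is reported in bytes. */
	len = sizeof(reserved);
	if (sysctlbyname("vfs.tmpfs.memory_reserved", &reserved, &len,
	    NULL, 0) == -1)
		return (1);
	printf("tmpfs reserve: %ld bytes\n", reserved);

	/* Raise the reserve to 64 MB; a value that converts to fewer than
	 * TMPFS_PAGES_MINRESERVED pages is rejected with EINVAL. */
	reserved = 64L * 1024 * 1024;
	if (sysctlbyname("vfs.tmpfs.memory_reserved", NULL, NULL,
	    &reserved, sizeof(reserved)) == -1)
		return (1);
	return (0);
}
#endif
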
90 size_t
91 tmpfs_mem_avail(void)
92 {
93 	vm_ooffset_t avail;
94 
95 	avail = swap_pager_avail + cnt.v_free_count + cnt.v_cache_count -
96 	    tmpfs_pages_reserved;
97 	if (__predict_false(avail < 0))
98 		avail = 0;
99 	return (avail);
100 }
101 
102 size_t
103 tmpfs_pages_used(struct tmpfs_mount *tmp)
104 {
105 	const size_t node_size = sizeof(struct tmpfs_node) +
106 	    sizeof(struct tmpfs_dirent);
107 	size_t meta_pages;
108 
109 	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
110 	    PAGE_SIZE);
111 	return (meta_pages + tmp->tm_pages_used);
112 }
113 
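/*
 * Worked example (added for illustration; the byte counts are assumptions,
 * not taken from this source): with PAGE_SIZE = 4096 and a combined per-node
 * metadata cost of sizeof(struct tmpfs_node) + sizeof(struct tmpfs_dirent) =
 * 320 bytes, a mount holding 10000 nodes is charged
 * howmany(10000 * 320, 4096) = 782 metadata pages on top of tm_pages_used,
 * and tmpfs_pages_check_avail() below compares that total, plus the pages
 * being requested, against both tm_pages_max and tmpfs_mem_avail().
 */
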
114 static size_t
115 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
116 {
117 	if (tmpfs_mem_avail() < req_pages)
118 		return (0);
119 
120 	if (tmp->tm_pages_max != SIZE_MAX &&
121 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
122 			return (0);
123 
124 	return (1);
125 }
126 
127 /* --------------------------------------------------------------------- */
128 
129 /*
130  * Allocates a new node of type 'type' inside the 'tmp' mount point, with
131  * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
132  * using the credentials of the process 'p'.
133  *
134  * If the node type is set to 'VDIR', then the parent parameter must point
135  * to the parent directory of the node being created.  It may only be NULL
136  * while allocating the root node.
137  *
138  * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
139  * specifies the device the node represents.
140  *
141  * If the node type is set to 'VLNK', then the parameter target specifies
142  * the file name of the target file for the symbolic link that is being
143  * created.
144  *
145  * Note that new nodes are allocated from the mount's node pool, as long
146  * as the per-mount limit on nodes has not been reached and there is
147  * enough memory available to back them.
148  *
149  * Returns zero on success or an appropriate error code on failure.
150  */
151 int
152 tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
153     uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
154     char *target, dev_t rdev, struct tmpfs_node **node)
155 {
156 	struct tmpfs_node *nnode;
157 
158 	/* If the root directory of the 'tmp' file system is not yet
159 	 * allocated, this must be the request to do it. */
160 	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
161 
162 	MPASS(IFF(type == VLNK, target != NULL));
163 	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
164 
165 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
166 		return (ENOSPC);
167 	if (tmpfs_pages_check_avail(tmp, 1) == 0)
168 		return (ENOSPC);
169 
170 	nnode = (struct tmpfs_node *)uma_zalloc_arg(
171 				tmp->tm_node_pool, tmp, M_WAITOK);
172 
173 	/* Generic initialization. */
174 	nnode->tn_type = type;
175 	vfs_timestamp(&nnode->tn_atime);
176 	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
177 	    nnode->tn_atime;
178 	nnode->tn_uid = uid;
179 	nnode->tn_gid = gid;
180 	nnode->tn_mode = mode;
181 	nnode->tn_id = alloc_unr(tmp->tm_ino_unr);
182 
183 	/* Type-specific initialization. */
184 	switch (nnode->tn_type) {
185 	case VBLK:
186 	case VCHR:
187 		nnode->tn_rdev = rdev;
188 		break;
189 
190 	case VDIR:
191 		TAILQ_INIT(&nnode->tn_dir.tn_dirhead);
192 		MPASS(parent != nnode);
193 		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
194 		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
195 		nnode->tn_dir.tn_readdir_lastn = 0;
196 		nnode->tn_dir.tn_readdir_lastp = NULL;
197 		nnode->tn_links++;
198 		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
199 		nnode->tn_dir.tn_parent->tn_links++;
200 		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
201 		break;
202 
203 	case VFIFO:
204 		/* FALLTHROUGH */
205 	case VSOCK:
206 		break;
207 
208 	case VLNK:
209 		MPASS(strlen(target) < MAXPATHLEN);
210 		nnode->tn_size = strlen(target);
211 		nnode->tn_link = malloc(nnode->tn_size, M_TMPFSNAME,
212 		    M_WAITOK);
213 		memcpy(nnode->tn_link, target, nnode->tn_size);
214 		break;
215 
216 	case VREG:
217 		nnode->tn_reg.tn_aobj =
218 		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
219 			NULL /* XXXKIB - tmpfs needs swap reservation */);
220 		break;
221 
222 	default:
223 		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
224 	}
225 
226 	TMPFS_LOCK(tmp);
227 	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
228 	tmp->tm_nodes_inuse++;
229 	TMPFS_UNLOCK(tmp);
230 
231 	*node = nnode;
232 	return 0;
233 }
234 
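/*
 * Added illustrative sketch (not part of the original source): how a
 * non-directory node, here a symbolic link, is allocated under the parameter
 * contract documented above.  The helper name, its arguments and the mode
 * are hypothetical stand-ins for a real caller such as the symlink VOP.
 */
#if 0	/* illustration only; not part of the build */
static int
tmpfs_example_alloc_symlink(struct tmpfs_mount *tmp, struct tmpfs_node *dnode,
    struct componentname *cnp, char *target, struct tmpfs_node **nodep)
{

	/* Non-directories pass a NULL parent; VLNK requires rdev == VNOVAL
	 * and a non-NULL target. */
	return (tmpfs_alloc_node(tmp, VLNK, cnp->cn_cred->cr_uid,
	    dnode->tn_gid, S_IRWXU | S_IRWXG | S_IRWXO, NULL, target,
	    VNOVAL, nodep));
}
#endif
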
235 /* --------------------------------------------------------------------- */
236 
237 /*
238  * Destroys the node pointed to by node from the file system 'tmp'.
239  * If the node does not belong to the given mount point, the results are
240  * unpredictable.
241  *
242  * If the node references a directory, it must contain no entries, because
243  * removing them would require a recursive algorithm, something forbidden in
244  * kernel space.  Furthermore, there is no need to provide such
245  * functionality (recursive removal) because the only primitives offered
246  * to the user are the removal of empty directories and the deletion of
247  * individual files.
248  *
249  * Note that the node is not freed directly.  Instead, its inode number
250  * is released and the node itself is returned to the mount's node pool,
251  * where it remains available for reuse by subsequent node
252  * allocations.
253  */
254 void
255 tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
256 {
257 	vm_object_t uobj;
258 
259 #ifdef INVARIANTS
260 	TMPFS_NODE_LOCK(node);
261 	MPASS(node->tn_vnode == NULL);
262 	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
263 	TMPFS_NODE_UNLOCK(node);
264 #endif
265 
266 	TMPFS_LOCK(tmp);
267 	LIST_REMOVE(node, tn_entries);
268 	tmp->tm_nodes_inuse--;
269 	TMPFS_UNLOCK(tmp);
270 
271 	switch (node->tn_type) {
272 	case VNON:
273 		/* Do not do anything.  VNON is provided to let the
274 		 * allocation routine clean up easily without
275 		 * duplicating code. */
276 		/* FALLTHROUGH */
277 	case VBLK:
278 		/* FALLTHROUGH */
279 	case VCHR:
280 		/* FALLTHROUGH */
281 	case VDIR:
282 		/* FALLTHROUGH */
283 	case VFIFO:
284 		/* FALLTHROUGH */
285 	case VSOCK:
286 		break;
287 
288 	case VLNK:
289 		free(node->tn_link, M_TMPFSNAME);
290 		break;
291 
292 	case VREG:
293 		uobj = node->tn_reg.tn_aobj;
294 		if (uobj != NULL) {
295 			TMPFS_LOCK(tmp);
296 			tmp->tm_pages_used -= uobj->size;
297 			TMPFS_UNLOCK(tmp);
298 			vm_object_deallocate(uobj);
299 		}
300 		break;
301 
302 	default:
303 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
304 	}
305 
306 	free_unr(tmp->tm_ino_unr, node->tn_id);
307 	uma_zfree(tmp->tm_node_pool, node);
308 }
309 
310 /* --------------------------------------------------------------------- */
311 
312 /*
313  * Allocates a new directory entry for the node 'node' with the name 'name'.
314  * The new directory entry is returned in *de.
315  *
316  * The link count of node is increased by one to reflect the new object
317  * referencing it.
318  *
319  * Returns zero on success or an appropriate error code on failure.
320  */
321 int
322 tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
323     const char *name, uint16_t len, struct tmpfs_dirent **de)
324 {
325 	struct tmpfs_dirent *nde;
326 
327 	nde = (struct tmpfs_dirent *)uma_zalloc(
328 					tmp->tm_dirent_pool, M_WAITOK);
329 	nde->td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
330 	nde->td_namelen = len;
331 	memcpy(nde->td_name, name, len);
332 
333 	nde->td_node = node;
334 	if (node != NULL)
335 		node->tn_links++;
336 
337 	*de = nde;
338 
339 	return 0;
340 }
341 
342 /* --------------------------------------------------------------------- */
343 
344 /*
345  * Frees a directory entry.  It is the caller's responsibility to destroy
346  * the node referenced by it if needed.
347  *
348  * The link count of node is decreased by one to reflect the removal of an
349  * object that referenced it.  This only happens if 'node_exists' is true;
350  * otherwise the function will not access the node referred to by the
351  * directory entry, as it may already have been released from the outside.
352  */
353 void
354 tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de,
355     boolean_t node_exists)
356 {
357 	if (node_exists) {
358 		struct tmpfs_node *node;
359 
360 		node = de->td_node;
361 		if (node != NULL) {
362 			MPASS(node->tn_links > 0);
363 			node->tn_links--;
364 		}
365 	}
366 
367 	free(de->td_name, M_TMPFSNAME);
368 	uma_zfree(tmp->tm_dirent_pool, de);
369 }
370 
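/*
 * Added illustrative sketch (not part of the original source): directory
 * entries are managed by paired calls.  A hypothetical removal path detaches
 * the entry from its directory first and then frees it while the node still
 * exists, so the node's link count is dropped; the helper name and its
 * arguments are assumptions.
 */
#if 0	/* illustration only; not part of the build */
static void
tmpfs_example_remove_entry(struct tmpfs_mount *tmp, struct vnode *dvp,
    struct tmpfs_dirent *de)
{

	tmpfs_dir_detach(dvp, de);
	tmpfs_free_dirent(tmp, de, TRUE);	/* decrements tn_links */
}
#endif
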
371 /* --------------------------------------------------------------------- */
372 
373 /*
374  * Allocates a new vnode for the node 'node' or returns a new reference to
375  * an existing one if the node already had a vnode referencing it.  The
376  * resulting locked vnode is returned in *vpp.
377  *
378  * Returns zero on success or an appropriate error code on failure.
379  */
380 int
381 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
382     struct vnode **vpp)
383 {
384 	int error = 0;
385 	struct vnode *vp;
386 
387 loop:
388 	TMPFS_NODE_LOCK(node);
389 	if ((vp = node->tn_vnode) != NULL) {
390 		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
391 		VI_LOCK(vp);
392 		TMPFS_NODE_UNLOCK(node);
393 		error = vget(vp, lkflag | LK_INTERLOCK, curthread);
394 		if (error != 0) {
395 			vp = NULL;
396 			goto out;
397 		}
398 
399 		/*
400 		 * Make sure the vnode is still there after
401 		 * getting the interlock to avoid racing a free.
402 		 */
403 		if (node->tn_vnode == NULL || node->tn_vnode != vp) {
404 			vput(vp);
405 			goto loop;
406 		}
407 
408 		goto out;
409 	}
410 
411 	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
412 	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
413 		TMPFS_NODE_UNLOCK(node);
414 		error = ENOENT;
415 		vp = NULL;
416 		goto out;
417 	}
418 
419 	/*
420 	 * Otherwise, serialize the vnode allocation for this node: only one
421 	 * thread may call getnewvnode(), which can block; the others wait.
422 	 */
423 	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
424 		node->tn_vpstate |= TMPFS_VNODE_WANT;
425 		error = msleep((caddr_t) &node->tn_vpstate,
426 		    TMPFS_NODE_MTX(node), PDROP | PCATCH,
427 		    "tmpfs_alloc_vp", 0);
428 		if (error)
429 			return error;
430 
431 		goto loop;
432 	} else
433 		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
434 
435 	TMPFS_NODE_UNLOCK(node);
436 
437 	/* Get a new vnode and associate it with our node. */
438 	error = getnewvnode("tmpfs", mp, &tmpfs_vnodeop_entries, &vp);
439 	if (error != 0)
440 		goto unlock;
441 	MPASS(vp != NULL);
442 
443 	(void) vn_lock(vp, lkflag | LK_RETRY);
444 
445 	vp->v_data = node;
446 	vp->v_type = node->tn_type;
447 
448 	/* Type-specific initialization. */
449 	switch (node->tn_type) {
450 	case VBLK:
451 		/* FALLTHROUGH */
452 	case VCHR:
453 		/* FALLTHROUGH */
454 	case VLNK:
455 		/* FALLTHROUGH */
456 	case VREG:
457 		/* FALLTHROUGH */
458 	case VSOCK:
459 		break;
460 	case VFIFO:
461 		vp->v_op = &tmpfs_fifoop_entries;
462 		break;
463 	case VDIR:
464 		MPASS(node->tn_dir.tn_parent != NULL);
465 		if (node->tn_dir.tn_parent == node)
466 			vp->v_vflag |= VV_ROOT;
467 		break;
468 
469 	default:
470 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
471 	}
472 
473 	vnode_pager_setsize(vp, node->tn_size);
474 	error = insmntque(vp, mp);
475 	if (error)
476 		vp = NULL;
477 
478 unlock:
479 	TMPFS_NODE_LOCK(node);
480 
481 	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
482 	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
483 	node->tn_vnode = vp;
484 
485 	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
486 		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
487 		TMPFS_NODE_UNLOCK(node);
488 		wakeup((caddr_t) &node->tn_vpstate);
489 	} else
490 		TMPFS_NODE_UNLOCK(node);
491 
492 out:
493 	*vpp = vp;
494 
495 #ifdef INVARIANTS
496 	if (error == 0) {
497 		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
498 		TMPFS_NODE_LOCK(node);
499 		MPASS(*vpp == node->tn_vnode);
500 		TMPFS_NODE_UNLOCK(node);
501 	}
502 #endif
503 
504 	return error;
505 }
506 
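/*
 * Added illustrative sketch (not part of the original source): a
 * lookup-style caller obtains a locked, referenced vnode from
 * tmpfs_alloc_vp() and releases both the lock and the reference with
 * vput() when done.  The helper name is hypothetical.
 */
#if 0	/* illustration only; not part of the build */
static int
tmpfs_example_use_vnode(struct mount *mp, struct tmpfs_node *node)
{
	struct vnode *vp;
	int error;

	error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, &vp);
	if (error == 0) {
		/* ... operate on the locked vnode ... */
		vput(vp);
	}
	return (error);
}
#endif
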
507 /* --------------------------------------------------------------------- */
508 
509 /*
510  * Destroys the association between the vnode vp and the node it
511  * references.
512  */
513 void
514 tmpfs_free_vp(struct vnode *vp)
515 {
516 	struct tmpfs_node *node;
517 
518 	node = VP_TO_TMPFS_NODE(vp);
519 
520 	mtx_assert(TMPFS_NODE_MTX(node), MA_OWNED);
521 	node->tn_vnode = NULL;
522 	vp->v_data = NULL;
523 }
524 
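/*
 * Added illustrative sketch (not part of the original source): callers of
 * tmpfs_free_vp() must hold the node mutex, as a reclaim path would; the
 * locking below shows the assumed caller context.
 */
#if 0	/* illustration only; not part of the build */
	TMPFS_NODE_LOCK(node);
	tmpfs_free_vp(vp);
	TMPFS_NODE_UNLOCK(node);
#endif
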
525 /* --------------------------------------------------------------------- */
526 
527 /*
528  * Allocates a new file of type 'type' and adds it to the parent directory
529  * 'dvp'; this addition is done using the component name given in 'cnp'.
530  * The ownership of the new file is automatically assigned based on the
531  * credentials of the caller (through 'cnp'), the group is set based on
532  * the parent directory and the mode is determined from the 'vap' argument.
533  * If successful, *vpp holds a vnode to the newly created file and zero
534  * is returned.  Otherwise *vpp is NULL and the function returns an
535  * appropriate error code.
536  */
537 int
538 tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
539     struct componentname *cnp, char *target)
540 {
541 	int error;
542 	struct tmpfs_dirent *de;
543 	struct tmpfs_mount *tmp;
544 	struct tmpfs_node *dnode;
545 	struct tmpfs_node *node;
546 	struct tmpfs_node *parent;
547 
548 	MPASS(VOP_ISLOCKED(dvp));
549 	MPASS(cnp->cn_flags & HASBUF);
550 
551 	tmp = VFS_TO_TMPFS(dvp->v_mount);
552 	dnode = VP_TO_TMPFS_DIR(dvp);
553 	*vpp = NULL;
554 
555 	/* If the entry we are creating is a directory, its parent will gain
556 	 * a new link, so make sure that the parent's link count cannot
557 	 * overflow. */
558 	if (vap->va_type == VDIR) {
559 		/* Ensure that we do not overflow the maximum number of links
560 		 * imposed by the system. */
561 		MPASS(dnode->tn_links <= LINK_MAX);
562 		if (dnode->tn_links == LINK_MAX) {
563 			error = EMLINK;
564 			goto out;
565 		}
566 
567 		parent = dnode;
568 		MPASS(parent != NULL);
569 	} else
570 		parent = NULL;
571 
572 	/* Allocate a node that represents the new file. */
573 	error = tmpfs_alloc_node(tmp, vap->va_type, cnp->cn_cred->cr_uid,
574 	    dnode->tn_gid, vap->va_mode, parent, target, vap->va_rdev, &node);
575 	if (error != 0)
576 		goto out;
577 
578 	/* Allocate a directory entry that points to the new file. */
579 	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
580 	    &de);
581 	if (error != 0) {
582 		tmpfs_free_node(tmp, node);
583 		goto out;
584 	}
585 
586 	/* Allocate a vnode for the new file. */
587 	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
588 	if (error != 0) {
589 		tmpfs_free_dirent(tmp, de, TRUE);
590 		tmpfs_free_node(tmp, node);
591 		goto out;
592 	}
593 
594 	/* Now that all required items are allocated, we can proceed to
595 	 * insert the new node into the directory, an operation that
596 	 * cannot fail. */
597 	if (cnp->cn_flags & ISWHITEOUT)
598 		tmpfs_dir_whiteout_remove(dvp, cnp);
599 	tmpfs_dir_attach(dvp, de);
600 
601 out:
602 
603 	return error;
604 }
605 
606 /* --------------------------------------------------------------------- */
607 
608 /*
609  * Attaches the directory entry de to the directory represented by vp.
610  * Note that this does not change the link count of the node pointed to by
611  * the directory entry, as this is done by tmpfs_alloc_dirent.
612  */
613 void
614 tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
615 {
616 	struct tmpfs_node *dnode;
617 
618 	ASSERT_VOP_ELOCKED(vp, __func__);
619 	dnode = VP_TO_TMPFS_DIR(vp);
620 	TAILQ_INSERT_TAIL(&dnode->tn_dir.tn_dirhead, de, td_entries);
621 	dnode->tn_size += sizeof(struct tmpfs_dirent);
622 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
623 	    TMPFS_NODE_MODIFIED;
624 }
625 
626 /* --------------------------------------------------------------------- */
627 
628 /*
629  * Detaches the directory entry de from the directory represented by vp.
630  * Note that this does not change the link count of the node pointed to by
631  * the directory entry, as this is done by tmpfs_free_dirent.
632  */
633 void
634 tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
635 {
636 	struct tmpfs_node *dnode;
637 
638 	ASSERT_VOP_ELOCKED(vp, __func__);
639 	dnode = VP_TO_TMPFS_DIR(vp);
640 
641 	if (dnode->tn_dir.tn_readdir_lastp == de) {
642 		dnode->tn_dir.tn_readdir_lastn = 0;
643 		dnode->tn_dir.tn_readdir_lastp = NULL;
644 	}
645 
646 	TAILQ_REMOVE(&dnode->tn_dir.tn_dirhead, de, td_entries);
647 	dnode->tn_size -= sizeof(struct tmpfs_dirent);
648 	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
649 	    TMPFS_NODE_MODIFIED;
650 }
651 
652 /* --------------------------------------------------------------------- */
653 
654 /*
655  * Looks for a directory entry in the directory represented by node.
656  * 'cnp' describes the name of the entry to look for.  Note that the .
657  * and .. components are not allowed as they do not physically exist
658  * within directories.
659  *
660  * Returns a pointer to the entry when found, otherwise NULL.
661  */
662 struct tmpfs_dirent *
663 tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
664     struct componentname *cnp)
665 {
666 	boolean_t found;
667 	struct tmpfs_dirent *de;
668 
669 	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
670 	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
671 	    cnp->cn_nameptr[1] == '.')));
672 	TMPFS_VALIDATE_DIR(node);
673 
674 	found = 0;
675 	TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
676 		if (f != NULL && de->td_node != f)
677 		    continue;
678 		MPASS(cnp->cn_namelen < 0xffff);
679 		if (de->td_namelen == (uint16_t)cnp->cn_namelen &&
680 		    bcmp(de->td_name, cnp->cn_nameptr, de->td_namelen) == 0) {
681 			found = 1;
682 			break;
683 		}
684 	}
685 	node->tn_status |= TMPFS_NODE_ACCESSED;
686 
687 	return found ? de : NULL;
688 }
689 
690 /* --------------------------------------------------------------------- */
691 
692 /*
693  * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
694  * directory and returns it in the uio space.  The function returns 0
695  * on success, -1 if there was not enough space in the uio structure to
696  * hold the directory entry or an appropriate error code if another
697  * error happens.
698  */
699 int
700 tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
701 {
702 	int error;
703 	struct dirent dent;
704 
705 	TMPFS_VALIDATE_DIR(node);
706 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
707 
708 	dent.d_fileno = node->tn_id;
709 	dent.d_type = DT_DIR;
710 	dent.d_namlen = 1;
711 	dent.d_name[0] = '.';
712 	dent.d_name[1] = '\0';
713 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
714 
715 	if (dent.d_reclen > uio->uio_resid)
716 		error = -1;
717 	else {
718 		error = uiomove(&dent, dent.d_reclen, uio);
719 		if (error == 0)
720 			uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
721 	}
722 
723 	node->tn_status |= TMPFS_NODE_ACCESSED;
724 
725 	return error;
726 }
727 
728 /* --------------------------------------------------------------------- */
729 
730 /*
731  * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
732  * directory and returns it in the uio space.  The function returns 0
733  * on success, -1 if there was not enough space in the uio structure to
734  * hold the directory entry or an appropriate error code if another
735  * error happens.
736  */
737 int
738 tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
739 {
740 	int error;
741 	struct dirent dent;
742 
743 	TMPFS_VALIDATE_DIR(node);
744 	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
745 
746 	/*
747 	 * Return ENOENT if the current node is already removed.
748 	 */
749 	TMPFS_ASSERT_LOCKED(node);
750 	if (node->tn_dir.tn_parent == NULL) {
751 		return (ENOENT);
752 	}
753 
754 	TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
755 	dent.d_fileno = node->tn_dir.tn_parent->tn_id;
756 	TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
757 
758 	dent.d_type = DT_DIR;
759 	dent.d_namlen = 2;
760 	dent.d_name[0] = '.';
761 	dent.d_name[1] = '.';
762 	dent.d_name[2] = '\0';
763 	dent.d_reclen = GENERIC_DIRSIZ(&dent);
764 
765 	if (dent.d_reclen > uio->uio_resid)
766 		error = -1;
767 	else {
768 		error = uiomove(&dent, dent.d_reclen, uio);
769 		if (error == 0) {
770 			struct tmpfs_dirent *de;
771 
772 			de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
773 			if (de == NULL)
774 				uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
775 			else
776 				uio->uio_offset = tmpfs_dircookie(de);
777 		}
778 	}
779 
780 	node->tn_status |= TMPFS_NODE_ACCESSED;
781 
782 	return error;
783 }
784 
785 /* --------------------------------------------------------------------- */
786 
787 /*
788  * Lookup a directory entry by its associated cookie.
789  */
790 struct tmpfs_dirent *
791 tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
792 {
793 	struct tmpfs_dirent *de;
794 
795 	if (cookie == node->tn_dir.tn_readdir_lastn &&
796 	    node->tn_dir.tn_readdir_lastp != NULL) {
797 		return node->tn_dir.tn_readdir_lastp;
798 	}
799 
800 	TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
801 		if (tmpfs_dircookie(de) == cookie) {
802 			break;
803 		}
804 	}
805 
806 	return de;
807 }
808 
809 /* --------------------------------------------------------------------- */
810 
811 /*
812  * Helper function for tmpfs_readdir.  Returns as many directory entries
813  * as can fit in the uio space.  The read starts at uio->uio_offset.
814  * The function returns 0 on success, -1 if there was not enough space
815  * in the uio structure to hold the directory entry or an appropriate
816  * error code if another error happens.
817  */
818 int
819 tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
820 {
821 	int error;
822 	off_t startcookie;
823 	struct tmpfs_dirent *de;
824 
825 	TMPFS_VALIDATE_DIR(node);
826 
827 	/* Locate the first directory entry we have to return.  We have cached
828 	 * the last readdir in the node, so use those values if appropriate.
829 	 * Otherwise do a linear scan to find the requested entry. */
830 	startcookie = uio->uio_offset;
831 	MPASS(startcookie != TMPFS_DIRCOOKIE_DOT);
832 	MPASS(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
833 	if (startcookie == TMPFS_DIRCOOKIE_EOF) {
834 		return 0;
835 	} else {
836 		de = tmpfs_dir_lookupbycookie(node, startcookie);
837 	}
838 	if (de == NULL) {
839 		return EINVAL;
840 	}
841 
842 	/* Read as many entries as possible; i.e., until we reach the end of
843 	 * the directory or we exhaust uio space. */
844 	do {
845 		struct dirent d;
846 
847 		/* Create a dirent structure representing the current
848 		 * tmpfs_node and fill it. */
849 		if (de->td_node == NULL) {
850 			d.d_fileno = 1;
851 			d.d_type = DT_WHT;
852 		} else {
853 			d.d_fileno = de->td_node->tn_id;
854 			switch (de->td_node->tn_type) {
855 			case VBLK:
856 				d.d_type = DT_BLK;
857 				break;
858 
859 			case VCHR:
860 				d.d_type = DT_CHR;
861 				break;
862 
863 			case VDIR:
864 				d.d_type = DT_DIR;
865 				break;
866 
867 			case VFIFO:
868 				d.d_type = DT_FIFO;
869 				break;
870 
871 			case VLNK:
872 				d.d_type = DT_LNK;
873 				break;
874 
875 			case VREG:
876 				d.d_type = DT_REG;
877 				break;
878 
879 			case VSOCK:
880 				d.d_type = DT_SOCK;
881 				break;
882 
883 			default:
884 				panic("tmpfs_dir_getdents: type %p %d",
885 				    de->td_node, (int)de->td_node->tn_type);
886 			}
887 		}
888 		d.d_namlen = de->td_namelen;
889 		MPASS(de->td_namelen < sizeof(d.d_name));
890 		(void)memcpy(d.d_name, de->td_name, de->td_namelen);
891 		d.d_name[de->td_namelen] = '\0';
892 		d.d_reclen = GENERIC_DIRSIZ(&d);
893 
894 		/* Stop reading if the directory entry we are processing is
895 		 * bigger than the amount of data that can be returned. */
896 		if (d.d_reclen > uio->uio_resid) {
897 			error = -1;
898 			break;
899 		}
900 
901 		/* Copy the new dirent structure into the output buffer and
902 		 * advance pointers. */
903 		error = uiomove(&d, d.d_reclen, uio);
904 		if (error == 0) {
905 			(*cntp)++;
906 			de = TAILQ_NEXT(de, td_entries);
907 		}
908 	} while (error == 0 && uio->uio_resid > 0 && de != NULL);
909 
910 	/* Update the offset and cache. */
911 	if (de == NULL) {
912 		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
913 		node->tn_dir.tn_readdir_lastn = 0;
914 		node->tn_dir.tn_readdir_lastp = NULL;
915 	} else {
916 		node->tn_dir.tn_readdir_lastn = uio->uio_offset = tmpfs_dircookie(de);
917 		node->tn_dir.tn_readdir_lastp = de;
918 	}
919 
920 	node->tn_status |= TMPFS_NODE_ACCESSED;
921 	return error;
922 }
923 
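/*
 * Added illustrative sketch (not part of the original source): the readdir
 * cookie protocol implemented by the helpers above.  A hypothetical caller
 * starts at TMPFS_DIRCOOKIE_DOT, emits the two synthetic entries and then
 * copies out the real ones; the helper name is an assumption.
 */
#if 0	/* illustration only; not part of the build */
static int
tmpfs_example_readdir(struct tmpfs_node *node, struct uio *uio)
{
	off_t cnt;
	int error;

	cnt = 0;
	error = 0;
	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT)
		error = tmpfs_dir_getdotdent(node, uio);
	if (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT)
		error = tmpfs_dir_getdotdotdent(node, uio);
	if (error == 0 && uio->uio_offset != TMPFS_DIRCOOKIE_EOF)
		error = tmpfs_dir_getdents(node, uio, &cnt);
	/* A return of -1 only means the uio buffer filled up. */
	if (error == -1)
		error = 0;
	return (error);
}
#endif
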
924 int
925 tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
926 {
927 	struct tmpfs_dirent *de;
928 	int error;
929 
930 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
931 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
932 	if (error != 0)
933 		return (error);
934 	tmpfs_dir_attach(dvp, de);
935 	return (0);
936 }
937 
938 void
939 tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
940 {
941 	struct tmpfs_dirent *de;
942 
943 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
944 	MPASS(de != NULL && de->td_node == NULL);
945 	tmpfs_dir_detach(dvp, de);
946 	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de, TRUE);
947 }
948 
949 /* --------------------------------------------------------------------- */
950 
951 /*
952  * Resizes the aobj associated with the regular file pointed to by 'vp' to the
953  * size 'newsize'.  'vp' must point to a vnode that represents a regular file.
954  * 'newsize' must not be negative.
955  *
956  * Returns zero on success or an appropriate error code on failure.
957  */
958 int
959 tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
960 {
961 	struct tmpfs_mount *tmp;
962 	struct tmpfs_node *node;
963 	vm_object_t uobj;
964 	vm_page_t m, ma[1];
965 	vm_pindex_t idx, newpages, oldpages;
966 	off_t oldsize;
967 	int base, rv;
968 
969 	MPASS(vp->v_type == VREG);
970 	MPASS(newsize >= 0);
971 
972 	node = VP_TO_TMPFS_NODE(vp);
973 	uobj = node->tn_reg.tn_aobj;
974 	tmp = VFS_TO_TMPFS(vp->v_mount);
975 
976 	/*
977 	 * Convert the old and new sizes to the number of pages needed to
978 	 * store them.  It may happen that we do not need to do anything
979 	 * because the last allocated page can accommodate the change on
980 	 * its own.
981 	 */
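	/*
	 * Worked example (added for illustration, assuming PAGE_SIZE = 4096):
	 * shrinking a 10000-byte file (3 pages) to 6000 bytes still needs 2
	 * pages, so only the tail of the last kept page is zeroed (base =
	 * 6000 & PAGE_MASK = 1904) and one whole page is freed; shrinking to
	 * 8192 bytes leaves base = 0 and skips the zeroing step entirely.
	 */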
982 	oldsize = node->tn_size;
983 	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
984 	MPASS(oldpages == uobj->size);
985 	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
986 	if (newpages > oldpages &&
987 	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
988 		return (ENOSPC);
989 
990 	VM_OBJECT_LOCK(uobj);
991 	if (newsize < oldsize) {
992 		/*
993 		 * Zero the truncated part of the last page.
994 		 */
995 		base = newsize & PAGE_MASK;
996 		if (base != 0) {
997 			idx = OFF_TO_IDX(newsize);
998 retry:
999 			m = vm_page_lookup(uobj, idx);
1000 			if (m != NULL) {
1001 				if ((m->oflags & VPO_BUSY) != 0 ||
1002 				    m->busy != 0) {
1003 					vm_page_sleep(m, "tmfssz");
1004 					goto retry;
1005 				}
1006 				MPASS(m->valid == VM_PAGE_BITS_ALL);
1007 			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
1008 				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
1009 				if (m == NULL) {
1010 					VM_OBJECT_UNLOCK(uobj);
1011 					VM_WAIT;
1012 					VM_OBJECT_LOCK(uobj);
1013 					goto retry;
1014 				} else if (m->valid != VM_PAGE_BITS_ALL) {
1015 					ma[0] = m;
1016 					rv = vm_pager_get_pages(uobj, ma, 1, 0);
1017 					m = vm_page_lookup(uobj, idx);
1018 				} else
1019 					/* A cached page was reactivated. */
1020 					rv = VM_PAGER_OK;
1021 				vm_page_lock(m);
1022 				if (rv == VM_PAGER_OK) {
1023 					vm_page_deactivate(m);
1024 					vm_page_unlock(m);
1025 					vm_page_wakeup(m);
1026 				} else {
1027 					vm_page_free(m);
1028 					vm_page_unlock(m);
1029 					if (ignerr)
1030 						m = NULL;
1031 					else {
1032 						VM_OBJECT_UNLOCK(uobj);
1033 						return (EIO);
1034 					}
1035 				}
1036 			}
1037 			if (m != NULL) {
1038 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
1039 				vm_page_dirty(m);
1040 				vm_pager_page_unswapped(m);
1041 			}
1042 		}
1043 
1044 		/*
1045 		 * Release any swap space and free any whole pages.
1046 		 */
1047 		if (newpages < oldpages) {
1048 			swap_pager_freespace(uobj, newpages, oldpages -
1049 			    newpages);
1050 			vm_object_page_remove(uobj, newpages, 0, 0);
1051 		}
1052 	}
1053 	uobj->size = newpages;
1054 	VM_OBJECT_UNLOCK(uobj);
1055 
1056 	TMPFS_LOCK(tmp);
1057 	tmp->tm_pages_used += (newpages - oldpages);
1058 	TMPFS_UNLOCK(tmp);
1059 
1060 	node->tn_size = newsize;
1061 	vnode_pager_setsize(vp, newsize);
1062 	return (0);
1063 }
1064 
1065 /* --------------------------------------------------------------------- */
1066 
1067 /*
1068  * Change flags of the given vnode.
1069  * Caller should execute tmpfs_update on vp after a successful execution.
1070  * The vnode must be locked on entry and remain locked on exit.
1071  */
1072 int
1073 tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct thread *p)
1074 {
1075 	int error;
1076 	struct tmpfs_node *node;
1077 
1078 	MPASS(VOP_ISLOCKED(vp));
1079 
1080 	node = VP_TO_TMPFS_NODE(vp);
1081 
1082 	if ((flags & ~(UF_NODUMP | UF_IMMUTABLE | UF_APPEND | UF_OPAQUE |
1083 	    UF_NOUNLINK | SF_ARCHIVED | SF_IMMUTABLE | SF_APPEND |
1084 	    SF_NOUNLINK)) != 0)
1085 		return (EOPNOTSUPP);
1086 
1087 	/* Disallow this operation if the file system is mounted read-only. */
1088 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1089 		return EROFS;
1090 
1091 	/*
1092 	 * Callers may only modify the file flags on objects they
1093 	 * have VADMIN rights for.
1094 	 */
1095 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1096 		return (error);
1097 	/*
1098 	 * Unprivileged processes are not permitted to unset system
1099 	 * flags, or modify flags if any system flags are set.
1100 	 */
1101 	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) {
1102 		if (node->tn_flags &
1103 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
1104 			error = securelevel_gt(cred, 0);
1105 			if (error)
1106 				return (error);
1107 		}
1108 	} else {
1109 		if (node->tn_flags &
1110 		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
1111 		    ((flags ^ node->tn_flags) & SF_SETTABLE))
1112 			return (EPERM);
1113 	}
1114 	node->tn_flags = flags;
1115 	node->tn_status |= TMPFS_NODE_CHANGED;
1116 
1117 	MPASS(VOP_ISLOCKED(vp));
1118 
1119 	return 0;
1120 }
1121 
1122 /* --------------------------------------------------------------------- */
1123 
1124 /*
1125  * Change access mode on the given vnode.
1126  * Caller should execute tmpfs_update on vp after a successful execution.
1127  * The vnode must be locked on entry and remain locked on exit.
1128  */
1129 int
1130 tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
1131 {
1132 	int error;
1133 	struct tmpfs_node *node;
1134 
1135 	MPASS(VOP_ISLOCKED(vp));
1136 
1137 	node = VP_TO_TMPFS_NODE(vp);
1138 
1139 	/* Disallow this operation if the file system is mounted read-only. */
1140 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1141 		return EROFS;
1142 
1143 	/* Immutable or append-only files cannot be modified, either. */
1144 	if (node->tn_flags & (IMMUTABLE | APPEND))
1145 		return EPERM;
1146 
1147 	/*
1148 	 * To modify the permissions on a file, must possess VADMIN
1149 	 * for that file.
1150 	 */
1151 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1152 		return (error);
1153 
1154 	/*
1155 	 * Privileged processes may set the sticky bit on non-directories,
1156 	 * as well as set the setgid bit on a file with a group that the
1157 	 * process is not a member of.
1158 	 */
1159 	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
1160 		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE, 0))
1161 			return (EFTYPE);
1162 	}
1163 	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
1164 		error = priv_check_cred(cred, PRIV_VFS_SETGID, 0);
1165 		if (error)
1166 			return (error);
1167 	}
1168 
1169 
1170 	node->tn_mode &= ~ALLPERMS;
1171 	node->tn_mode |= mode & ALLPERMS;
1172 
1173 	node->tn_status |= TMPFS_NODE_CHANGED;
1174 
1175 	MPASS(VOP_ISLOCKED(vp));
1176 
1177 	return 0;
1178 }
1179 
1180 /* --------------------------------------------------------------------- */
1181 
1182 /*
1183  * Change ownership of the given vnode.  At least one of uid or gid must
1184  * be different from VNOVAL.  If one is set to that value, the attribute
1185  * is unchanged.
1186  * Caller should execute tmpfs_update on vp after a successful execution.
1187  * The vnode must be locked on entry and remain locked on exit.
1188  */
1189 int
1190 tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
1191     struct thread *p)
1192 {
1193 	int error;
1194 	struct tmpfs_node *node;
1195 	uid_t ouid;
1196 	gid_t ogid;
1197 
1198 	MPASS(VOP_ISLOCKED(vp));
1199 
1200 	node = VP_TO_TMPFS_NODE(vp);
1201 
1202 	/* Assign default values if they are unknown. */
1203 	MPASS(uid != VNOVAL || gid != VNOVAL);
1204 	if (uid == VNOVAL)
1205 		uid = node->tn_uid;
1206 	if (gid == VNOVAL)
1207 		gid = node->tn_gid;
1208 	MPASS(uid != VNOVAL && gid != VNOVAL);
1209 
1210 	/* Disallow this operation if the file system is mounted read-only. */
1211 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1212 		return EROFS;
1213 
1214 	/* Immutable or append-only files cannot be modified, either. */
1215 	if (node->tn_flags & (IMMUTABLE | APPEND))
1216 		return EPERM;
1217 
1218 	/*
1219 	 * To modify the ownership of a file, must possess VADMIN for that
1220 	 * file.
1221 	 */
1222 	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
1223 		return (error);
1224 
1225 	/*
1226 	 * To change the owner of a file, or change the group of a file to a
1227 	 * group of which we are not a member, the caller must have
1228 	 * privilege.
1229 	 */
1230 	if ((uid != node->tn_uid ||
1231 	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
1232 	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0)))
1233 		return (error);
1234 
1235 	ogid = node->tn_gid;
1236 	ouid = node->tn_uid;
1237 
1238 	node->tn_uid = uid;
1239 	node->tn_gid = gid;
1240 
1241 	node->tn_status |= TMPFS_NODE_CHANGED;
1242 
1243 	if ((node->tn_mode & (S_ISUID | S_ISGID)) && (ouid != uid || ogid != gid)) {
1244 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID, 0))
1245 			node->tn_mode &= ~(S_ISUID | S_ISGID);
1246 	}
1247 
1248 	MPASS(VOP_ISLOCKED(vp));
1249 
1250 	return 0;
1251 }
1252 
1253 /* --------------------------------------------------------------------- */
1254 
1255 /*
1256  * Change size of the given vnode.
1257  * Caller should execute tmpfs_update on vp after a successful execution.
1258  * The vnode must be locked on entry and remain locked on exit.
1259  */
1260 int
1261 tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
1262     struct thread *p)
1263 {
1264 	int error;
1265 	struct tmpfs_node *node;
1266 
1267 	MPASS(VOP_ISLOCKED(vp));
1268 
1269 	node = VP_TO_TMPFS_NODE(vp);
1270 
1271 	/* Decide whether this is a valid operation based on the file type. */
1272 	error = 0;
1273 	switch (vp->v_type) {
1274 	case VDIR:
1275 		return EISDIR;
1276 
1277 	case VREG:
1278 		if (vp->v_mount->mnt_flag & MNT_RDONLY)
1279 			return EROFS;
1280 		break;
1281 
1282 	case VBLK:
1283 		/* FALLTHROUGH */
1284 	case VCHR:
1285 		/* FALLTHROUGH */
1286 	case VFIFO:
1287 		/* Allow modifications of special files even if the file
1288 		 * system is mounted read-only (we are not modifying the
1289 		 * files themselves, but the objects they represent). */
1290 		return 0;
1291 
1292 	default:
1293 		/* Anything else is unsupported. */
1294 		return EOPNOTSUPP;
1295 	}
1296 
1297 	/* Immutable or append-only files cannot be modified, either. */
1298 	if (node->tn_flags & (IMMUTABLE | APPEND))
1299 		return EPERM;
1300 
1301 	error = tmpfs_truncate(vp, size);
1302 	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1303 	 * for us, as well as update tn_status; no need to do that here. */
1304 
1305 	MPASS(VOP_ISLOCKED(vp));
1306 
1307 	return error;
1308 }
1309 
1310 /* --------------------------------------------------------------------- */
1311 
1312 /*
1313  * Change access and modification times of the given vnode.
1314  * Caller should execute tmpfs_update on vp after a successful execution.
1315  * The vnode must be locked on entry and remain locked on exit.
1316  */
1317 int
1318 tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
1319 	struct timespec *birthtime, int vaflags, struct ucred *cred, struct thread *l)
1320 {
1321 	int error;
1322 	struct tmpfs_node *node;
1323 
1324 	MPASS(VOP_ISLOCKED(vp));
1325 
1326 	node = VP_TO_TMPFS_NODE(vp);
1327 
1328 	/* Disallow this operation if the file system is mounted read-only. */
1329 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
1330 		return EROFS;
1331 
1332 	/* Immutable or append-only files cannot be modified, either. */
1333 	if (node->tn_flags & (IMMUTABLE | APPEND))
1334 		return EPERM;
1335 
1336 	/* Determine whether the user has proper privilege to update times. */
1337 	if (vaflags & VA_UTIMES_NULL) {
1338 		error = VOP_ACCESS(vp, VADMIN, cred, l);
1339 		if (error)
1340 			error = VOP_ACCESS(vp, VWRITE, cred, l);
1341 	} else
1342 		error = VOP_ACCESS(vp, VADMIN, cred, l);
1343 	if (error)
1344 		return (error);
1345 
1346 	if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
1347 		node->tn_status |= TMPFS_NODE_ACCESSED;
1348 
1349 	if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
1350 		node->tn_status |= TMPFS_NODE_MODIFIED;
1351 
1352 	if (birthtime->tv_sec != VNOVAL && birthtime->tv_nsec != VNOVAL)
1353 		node->tn_status |= TMPFS_NODE_MODIFIED;
1354 
1355 	tmpfs_itimes(vp, atime, mtime);
1356 
1357 	if (birthtime->tv_sec != VNOVAL && birthtime->tv_nsec != VNOVAL)
1358 		node->tn_birthtime = *birthtime;
1359 	MPASS(VOP_ISLOCKED(vp));
1360 
1361 	return 0;
1362 }
1363 
1364 /* --------------------------------------------------------------------- */
1365 /* Sync timestamps */
1366 void
1367 tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
1368     const struct timespec *mod)
1369 {
1370 	struct tmpfs_node *node;
1371 	struct timespec now;
1372 
1373 	node = VP_TO_TMPFS_NODE(vp);
1374 
1375 	if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
1376 	    TMPFS_NODE_CHANGED)) == 0)
1377 		return;
1378 
1379 	vfs_timestamp(&now);
1380 	if (node->tn_status & TMPFS_NODE_ACCESSED) {
1381 		if (acc == NULL)
1382 			 acc = &now;
1383 		node->tn_atime = *acc;
1384 	}
1385 	if (node->tn_status & TMPFS_NODE_MODIFIED) {
1386 		if (mod == NULL)
1387 			mod = &now;
1388 		node->tn_mtime = *mod;
1389 	}
1390 	if (node->tn_status & TMPFS_NODE_CHANGED) {
1391 		node->tn_ctime = now;
1392 	}
1393 	node->tn_status &=
1394 	    ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
1395 }
1396 
1397 /* --------------------------------------------------------------------- */
1398 
1399 void
1400 tmpfs_update(struct vnode *vp)
1401 {
1402 
1403 	tmpfs_itimes(vp, NULL, NULL);
1404 }
1405 
1406 /* --------------------------------------------------------------------- */
1407 
1408 int
1409 tmpfs_truncate(struct vnode *vp, off_t length)
1410 {
1411 	int error;
1412 	struct tmpfs_node *node;
1413 
1414 	node = VP_TO_TMPFS_NODE(vp);
1415 
1416 	if (length < 0) {
1417 		error = EINVAL;
1418 		goto out;
1419 	}
1420 
1421 	if (node->tn_size == length) {
1422 		error = 0;
1423 		goto out;
1424 	}
1425 
1426 	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
1427 		return (EFBIG);
1428 
1429 	error = tmpfs_reg_resize(vp, length, FALSE);
1430 	if (error == 0) {
1431 		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1432 	}
1433 
1434 out:
1435 	tmpfs_update(vp);
1436 
1437 	return error;
1438 }
1439
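/* --------------------------------------------------------------------- */

/*
 * Added illustrative sketch (not part of the original source): how the size
 * helpers above compose.  A hypothetical setattr-style handler routes a size
 * change through tmpfs_chsize(), which validates the vnode type and flags
 * and then calls tmpfs_truncate() -> tmpfs_reg_resize(); the helper name and
 * its arguments are assumptions.
 */
#if 0	/* illustration only; not part of the build */
static int
tmpfs_example_setattr_size(struct vnode *vp, struct vattr *vap,
    struct ucred *cred, struct thread *td)
{
	int error;

	error = 0;
	if (vap->va_size != (u_quad_t)VNOVAL)
		error = tmpfs_chsize(vp, vap->va_size, cred, td);
	return (error);
}
#endif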