1 /* $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause
5 *
6 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11 * 2005 program.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 /*
36 * tmpfs vnode interface.
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/dirent.h>
42 #include <sys/extattr.h>
43 #include <sys/fcntl.h>
44 #include <sys/file.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/lockf.h>
48 #include <sys/lock.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/sched.h>
55 #include <sys/smr.h>
56 #include <sys/stat.h>
57 #include <sys/sysctl.h>
58 #include <sys/unistd.h>
59 #include <sys/vnode.h>
60 #include <security/audit/audit.h>
61 #include <security/mac/mac_framework.h>
62
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_pager.h>
68 #include <vm/swap_pager.h>
69
70 #include <fs/tmpfs/tmpfs_vnops.h>
71 #include <fs/tmpfs/tmpfs.h>
72
73 SYSCTL_DECL(_vfs_tmpfs);
74 VFS_SMR_DECLARE;
75
76 static volatile int tmpfs_rename_restarts;
77 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
78 __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
79 "Times rename had to restart due to lock contention");
80
81 MALLOC_DEFINE(M_TMPFSEA, "tmpfs extattr", "tmpfs extattr structure");
82
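/*
 * Callback handed to vn_vget_ino_gen(): allocate (or find) the vnode backing
 * the tmpfs node passed in 'arg', using the caller-supplied lock flags.
 */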
83 static int
84 tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
85 struct vnode **rvp)
86 {
87
88 return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
89 }
90
91 static int
92 tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
93 {
94 struct tmpfs_dirent *de;
95 struct tmpfs_node *dnode, *pnode;
96 struct tmpfs_mount *tm;
97 int error;
98
99 /* Caller assumes responsibility for ensuring access (VEXEC). */
100 dnode = VP_TO_TMPFS_DIR(dvp);
101 *vpp = NULLVP;
102
103 /* We cannot be requesting the parent directory of the root node. */
104 MPASS(IMPLIES(dnode->tn_type == VDIR &&
105 dnode->tn_dir.tn_parent == dnode,
106 !(cnp->cn_flags & ISDOTDOT)));
107
108 TMPFS_ASSERT_LOCKED(dnode);
109 if (dnode->tn_dir.tn_parent == NULL) {
110 error = ENOENT;
111 goto out;
112 }
113 if (cnp->cn_flags & ISDOTDOT) {
114 tm = VFS_TO_TMPFS(dvp->v_mount);
115 pnode = dnode->tn_dir.tn_parent;
116 tmpfs_ref_node(pnode);
117 error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
118 pnode, cnp->cn_lkflags, vpp);
119 tmpfs_free_node(tm, pnode);
120 if (error != 0)
121 goto out;
122 } else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
123 VREF(dvp);
124 *vpp = dvp;
125 error = 0;
126 } else {
127 de = tmpfs_dir_lookup(dnode, NULL, cnp);
128 if (de != NULL && de->td_node == NULL)
129 cnp->cn_flags |= ISWHITEOUT;
130 if (de == NULL || de->td_node == NULL) {
131 /*
132 * The entry was not found in the directory.
133 * This is OK if we are creating or renaming an
134 * entry and are working on the last component of
135 * the path name.
136 */
137 if ((cnp->cn_flags & ISLASTCN) &&
138 (cnp->cn_nameiop == CREATE ||
139 cnp->cn_nameiop == RENAME ||
140 (cnp->cn_nameiop == DELETE &&
141 cnp->cn_flags & DOWHITEOUT &&
142 cnp->cn_flags & ISWHITEOUT))) {
143 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
144 curthread);
145 if (error != 0)
146 goto out;
147
148 error = EJUSTRETURN;
149 } else
150 error = ENOENT;
151 } else {
152 struct tmpfs_node *tnode;
153
154 /*
155 * The entry was found, so get its associated
156 * tmpfs_node.
157 */
158 tnode = de->td_node;
159
160 /*
161 * If we are not at the last path component and
162 * found a non-directory or non-link entry (which
163 * may itself be pointing to a directory), raise
164 * an error.
165 */
166 if ((tnode->tn_type != VDIR &&
167 tnode->tn_type != VLNK) &&
168 !(cnp->cn_flags & ISLASTCN)) {
169 error = ENOTDIR;
170 goto out;
171 }
172
173 /*
174 * If we are deleting or renaming the entry, keep
175 * track of its tmpfs_dirent so that it can be
176 * easily deleted later.
177 */
178 if ((cnp->cn_flags & ISLASTCN) &&
179 (cnp->cn_nameiop == DELETE ||
180 cnp->cn_nameiop == RENAME)) {
181 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
182 curthread);
183 if (error != 0)
184 goto out;
185
186 /* Allocate a new vnode on the matching entry. */
187 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
188 cnp->cn_lkflags, vpp);
189 if (error != 0)
190 goto out;
191
192 if ((dnode->tn_mode & S_ISTXT) &&
193 VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
194 curthread) && VOP_ACCESS(*vpp, VADMIN,
195 cnp->cn_cred, curthread)) {
196 error = EPERM;
197 vput(*vpp);
198 *vpp = NULL;
199 goto out;
200 }
201 } else {
202 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
203 cnp->cn_lkflags, vpp);
204 if (error != 0)
205 goto out;
206 }
207 }
208 }
209
210 /*
211 * Store the result of this lookup in the cache. Avoid this if the
212 * request was for creation, as it does not improve timings in
213 * empirical tests.
214 */
215 if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
216 cache_enter(dvp, *vpp, cnp);
217
218 out:
219 #ifdef INVARIANTS
220 /*
221 * If there were no errors, *vpp cannot be null and it must be
222 * locked.
223 */
224 if (error == 0) {
225 MPASS(*vpp != NULLVP);
226 ASSERT_VOP_LOCKED(*vpp, __func__);
227 } else {
228 MPASS(*vpp == NULL);
229 }
230 #endif
231
232 return (error);
233 }
234
235 static int
236 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
237 {
238
239 return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
240 }
241
242 static int
243 tmpfs_lookup(struct vop_lookup_args *v)
244 {
245 struct vnode *dvp = v->a_dvp;
246 struct vnode **vpp = v->a_vpp;
247 struct componentname *cnp = v->a_cnp;
248 int error;
249
250 /* Check accessibility of requested node as a first step. */
251 error = vn_dir_check_exec(dvp, cnp);
252 if (error != 0)
253 return (error);
254
255 return (tmpfs_lookup1(dvp, vpp, cnp));
256 }
257
258 static int
259 tmpfs_create(struct vop_create_args *v)
260 {
261 struct vnode *dvp = v->a_dvp;
262 struct vnode **vpp = v->a_vpp;
263 struct componentname *cnp = v->a_cnp;
264 struct vattr *vap = v->a_vap;
265 int error;
266
267 MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
268
269 error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
270 if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
271 cache_enter(dvp, *vpp, cnp);
272 return (error);
273 }
274
275 static int
276 tmpfs_mknod(struct vop_mknod_args *v)
277 {
278 struct vnode *dvp = v->a_dvp;
279 struct vnode **vpp = v->a_vpp;
280 struct componentname *cnp = v->a_cnp;
281 struct vattr *vap = v->a_vap;
282
283 if (vap->va_type != VBLK && vap->va_type != VCHR &&
284 vap->va_type != VFIFO)
285 return (EINVAL);
286
287 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
288 }
289
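/*
 * File operations installed by tmpfs_open() on regular files so that the
 * open file keeps its tmpfs node referenced; the table itself is filled in
 * outside this file, with tmpfs_fo_close() below dropping that reference.
 */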
290 struct fileops tmpfs_fnops;
291
292 static int
293 tmpfs_open(struct vop_open_args *v)
294 {
295 struct vnode *vp;
296 struct tmpfs_node *node;
297 struct file *fp;
298 int error, mode;
299
300 vp = v->a_vp;
301 mode = v->a_mode;
302 node = VP_TO_TMPFS_NODE(vp);
303
304 /*
305 * The file is still active but all its names have been removed
306 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
307 * it is about to die.
308 */
309 if (node->tn_links < 1)
310 return (ENOENT);
311
312 /* If the file is marked append-only, deny write requests. */
313 if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
314 error = EPERM;
315 else {
316 error = 0;
317 /* For regular files, the call below is a nop. */
318 KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
319 OBJ_DEAD) == 0, ("dead object"));
320 vnode_create_vobject(vp, node->tn_size, v->a_td);
321 }
322
323 fp = v->a_fp;
324 MPASS(fp == NULL || fp->f_data == NULL);
325 if (error == 0 && fp != NULL && vp->v_type == VREG) {
326 tmpfs_ref_node(node);
327 finit_vnode(fp, mode, node, &tmpfs_fnops);
328 }
329
330 return (error);
331 }
332
333 static int
334 tmpfs_close(struct vop_close_args *v)
335 {
336 struct vnode *vp = v->a_vp;
337
338 /* Update node times. */
339 tmpfs_update(vp);
340
341 return (0);
342 }
343
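/*
 * fo_close hook for regular tmpfs files: release the node reference taken by
 * tmpfs_open() before handing the file over to the generic vnode close.
 */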
344 int
345 tmpfs_fo_close(struct file *fp, struct thread *td)
346 {
347 struct tmpfs_node *node;
348
349 node = fp->f_data;
350 if (node != NULL) {
351 MPASS(node->tn_type == VREG);
352 tmpfs_free_node(node->tn_reg.tn_tmp, node);
353 }
354 return (vnops.fo_close(fp, td));
355 }
356
357 /*
358 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
359 * the comment above cache_fplookup for details.
360 */
361 int
362 tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
363 {
364 struct vnode *vp;
365 struct tmpfs_node *node;
366 struct ucred *cred;
367 mode_t all_x, mode;
368
369 vp = v->a_vp;
370 node = VP_TO_TMPFS_NODE_SMR(vp);
371 if (__predict_false(node == NULL))
372 return (EAGAIN);
373
374 all_x = S_IXUSR | S_IXGRP | S_IXOTH;
375 mode = atomic_load_short(&node->tn_mode);
376 if (__predict_true((mode & all_x) == all_x))
377 return (0);
378
379 cred = v->a_cred;
380 return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
381 }
382
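/*
 * Core permission check, usable with either the vnode locked or the node
 * interlock held: writes to immutable nodes are rejected outright, anything
 * else is decided by vaccess().
 */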
383 static int
384 tmpfs_access_locked(struct vnode *vp, struct tmpfs_node *node,
385 accmode_t accmode, struct ucred *cred)
386 {
387 #ifdef DEBUG_VFS_LOCKS
388 if (!mtx_owned(TMPFS_NODE_MTX(node))) {
389 ASSERT_VOP_LOCKED(vp,
390 "tmpfs_access_locked needs locked vnode or node");
391 }
392 #endif
393
394 if ((accmode & VWRITE) != 0 && (node->tn_flags & IMMUTABLE) != 0)
395 return (EPERM);
396 return (vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
397 accmode, cred));
398 }
399
400 int
401 tmpfs_access(struct vop_access_args *v)
402 {
403 struct vnode *vp = v->a_vp;
404 struct ucred *cred = v->a_cred;
405 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
406 mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
407 accmode_t accmode = v->a_accmode;
408
409 /*
410 * Common case path lookup.
411 */
412 if (__predict_true(accmode == VEXEC &&
413 (node->tn_mode & all_x) == all_x))
414 return (0);
415
416 switch (vp->v_type) {
417 case VDIR:
418 /* FALLTHROUGH */
419 case VLNK:
420 /* FALLTHROUGH */
421 case VREG:
422 if ((accmode & VWRITE) != 0 &&
423 (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
424 return (EROFS);
425 break;
426
427 case VBLK:
428 /* FALLTHROUGH */
429 case VCHR:
430 /* FALLTHROUGH */
431 case VSOCK:
432 /* FALLTHROUGH */
433 case VFIFO:
434 break;
435
436 default:
437 return (EINVAL);
438 }
439
440 return (tmpfs_access_locked(vp, node, accmode, cred));
441 }
442
443 int
444 tmpfs_stat(struct vop_stat_args *v)
445 {
446 struct vnode *vp = v->a_vp;
447 struct stat *sb = v->a_sb;
448 struct tmpfs_node *node;
449 int error;
450
451 node = VP_TO_TMPFS_NODE(vp);
452
453 tmpfs_update_getattr(vp);
454
455 error = vop_stat_helper_pre(v);
456 if (__predict_false(error))
457 return (error);
458
459 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
460 sb->st_ino = node->tn_id;
461 sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
462 sb->st_nlink = node->tn_links;
463 sb->st_uid = node->tn_uid;
464 sb->st_gid = node->tn_gid;
465 sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
466 node->tn_rdev : NODEV;
467 sb->st_size = node->tn_size;
468 sb->st_atim.tv_sec = node->tn_atime.tv_sec;
469 sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
470 sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
471 sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
472 sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
473 sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
474 sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
475 sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
476 sb->st_blksize = PAGE_SIZE;
477 sb->st_flags = node->tn_flags;
478 sb->st_gen = node->tn_gen;
479 if (vp->v_type == VREG) {
480 #ifdef __ILP32__
481 vm_object_t obj = node->tn_reg.tn_aobj;
482
483 /* Handle torn read */
484 VM_OBJECT_RLOCK(obj);
485 #endif
486 sb->st_blocks = ptoa(node->tn_reg.tn_pages);
487 #ifdef __ILP32__
488 VM_OBJECT_RUNLOCK(obj);
489 #endif
490 } else {
491 sb->st_blocks = node->tn_size;
492 }
493 sb->st_blocks /= S_BLKSIZE;
494 return (vop_stat_helper_post(v, error));
495 }
496
497 int
498 tmpfs_getattr(struct vop_getattr_args *v)
499 {
500 struct vnode *vp = v->a_vp;
501 struct vattr *vap = v->a_vap;
502 struct tmpfs_node *node;
503
504 node = VP_TO_TMPFS_NODE(vp);
505
506 tmpfs_update_getattr(vp);
507
508 vap->va_type = vp->v_type;
509 vap->va_mode = node->tn_mode;
510 vap->va_nlink = node->tn_links;
511 vap->va_uid = node->tn_uid;
512 vap->va_gid = node->tn_gid;
513 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
514 vap->va_fileid = node->tn_id;
515 vap->va_size = node->tn_size;
516 vap->va_blocksize = PAGE_SIZE;
517 vap->va_atime = node->tn_atime;
518 vap->va_mtime = node->tn_mtime;
519 vap->va_ctime = node->tn_ctime;
520 vap->va_birthtime = node->tn_birthtime;
521 vap->va_gen = node->tn_gen;
522 vap->va_flags = node->tn_flags;
523 vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
524 node->tn_rdev : NODEV;
525 if (vp->v_type == VREG) {
526 #ifdef __ILP32__
527 vm_object_t obj = node->tn_reg.tn_aobj;
528
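		/* Handle torn read of tn_pages, as in tmpfs_stat(). */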
529 VM_OBJECT_RLOCK(obj);
530 #endif
531 vap->va_bytes = ptoa(node->tn_reg.tn_pages);
532 #ifdef __ILP32__
533 VM_OBJECT_RUNLOCK(obj);
534 #endif
535 } else {
536 vap->va_bytes = node->tn_size;
537 }
538 vap->va_filerev = 0;
539
540 return (0);
541 }
542
543 int
544 tmpfs_setattr(struct vop_setattr_args *v)
545 {
546 struct vnode *vp = v->a_vp;
547 struct vattr *vap = v->a_vap;
548 struct ucred *cred = v->a_cred;
549 struct thread *td = curthread;
550
551 int error;
552
553 ASSERT_VOP_IN_SEQC(vp);
554
555 error = 0;
556
557 /* Abort if any unsettable attribute is given. */
558 if (vap->va_type != VNON ||
559 vap->va_nlink != VNOVAL ||
560 vap->va_fsid != VNOVAL ||
561 vap->va_fileid != VNOVAL ||
562 vap->va_blocksize != VNOVAL ||
563 vap->va_gen != VNOVAL ||
564 vap->va_rdev != VNOVAL ||
565 vap->va_bytes != VNOVAL)
566 error = EINVAL;
567
568 if (error == 0 && (vap->va_flags != VNOVAL))
569 error = tmpfs_chflags(vp, vap->va_flags, cred, td);
570
571 if (error == 0 && (vap->va_size != VNOVAL))
572 error = tmpfs_chsize(vp, vap->va_size, cred, td);
573
574 if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
575 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);
576
577 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
578 error = tmpfs_chmod(vp, vap->va_mode, cred, td);
579
580 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
581 vap->va_atime.tv_nsec != VNOVAL) ||
582 (vap->va_mtime.tv_sec != VNOVAL &&
583 vap->va_mtime.tv_nsec != VNOVAL) ||
584 (vap->va_birthtime.tv_sec != VNOVAL &&
585 vap->va_birthtime.tv_nsec != VNOVAL)))
586 error = tmpfs_chtimes(vp, vap, cred, td);
587
588 /*
589 * Update the node times. We give preference to the error codes
590 * generated by this function rather than the ones that may arise
591 * from tmpfs_update.
592 */
593 tmpfs_update(vp);
594
595 return (error);
596 }
597
598 static int
599 tmpfs_read(struct vop_read_args *v)
600 {
601 struct vnode *vp;
602 struct uio *uio;
603 struct tmpfs_node *node;
604
605 vp = v->a_vp;
606 if (vp->v_type != VREG)
607 return (EISDIR);
608 uio = v->a_uio;
609 if (uio->uio_offset < 0)
610 return (EINVAL);
611 node = VP_TO_TMPFS_NODE(vp);
612 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
613 return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
614 }
615
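/*
 * Lockless read path: the node and its anonymous object are resolved under
 * SMR, without taking the vnode lock.  Returning EJUSTRETURN makes the
 * caller fall back to the regular, locked read path.
 */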
616 static int
617 tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
618 {
619 struct vnode *vp;
620 struct tmpfs_node *node;
621 vm_object_t object;
622 off_t size;
623 int error;
624
625 vp = v->a_vp;
626 VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
627
628 if (v->a_uio->uio_offset < 0)
629 return (EINVAL);
630
631 error = EJUSTRETURN;
632 vfs_smr_enter();
633
634 node = VP_TO_TMPFS_NODE_SMR(vp);
635 if (node == NULL)
636 goto out_smr;
637 MPASS(node->tn_type == VREG);
638 MPASS(node->tn_refcount >= 1);
639 object = node->tn_reg.tn_aobj;
640 if (object == NULL)
641 goto out_smr;
642
643 MPASS(object->type == tmpfs_pager_type);
644 MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
645 OBJ_SWAP);
646 if (!VN_IS_DOOMED(vp)) {
647 /* size cannot become shorter due to rangelock. */
648 size = node->tn_size;
649 tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
650 vfs_smr_exit();
651 error = uiomove_object(object, size, v->a_uio);
652 return (error);
653 }
654 out_smr:
655 vfs_smr_exit();
656 return (error);
657 }
658
659 static int
660 tmpfs_write(struct vop_write_args *v)
661 {
662 struct vnode *vp;
663 struct uio *uio;
664 struct tmpfs_node *node;
665 off_t oldsize;
666 ssize_t r;
667 int error, ioflag;
668 mode_t newmode;
669
670 vp = v->a_vp;
671 uio = v->a_uio;
672 ioflag = v->a_ioflag;
673 error = 0;
674 node = VP_TO_TMPFS_NODE(vp);
675 oldsize = node->tn_size;
676
677 if (uio->uio_offset < 0 || vp->v_type != VREG)
678 return (EINVAL);
679 if (uio->uio_resid == 0)
680 return (0);
681 if (ioflag & IO_APPEND)
682 uio->uio_offset = node->tn_size;
683 error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
684 tm_maxfilesize, &r, uio->uio_td);
685 if (error != 0) {
686 vn_rlimit_fsizex_res(uio, r);
687 return (error);
688 }
689
690 if (uio->uio_offset + uio->uio_resid > node->tn_size) {
691 error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
692 FALSE);
693 if (error != 0)
694 goto out;
695 }
696
697 error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
698 node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
699 node->tn_accessed = true;
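	/*
	 * As on other filesystems, a write by a credential lacking
	 * PRIV_VFS_RETAINSUGID clears the setuid/setgid bits.
	 */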
700 if (node->tn_mode & (S_ISUID | S_ISGID)) {
701 if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
702 newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
703 vn_seqc_write_begin(vp);
704 atomic_store_short(&node->tn_mode, newmode);
705 vn_seqc_write_end(vp);
706 }
707 }
708 if (error != 0)
709 (void)tmpfs_reg_resize(vp, oldsize, TRUE);
710
711 out:
712 MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
713 MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
714
715 vn_rlimit_fsizex_res(uio, r);
716 return (error);
717 }
718
719 static int
720 tmpfs_deallocate(struct vop_deallocate_args *v)
721 {
722 return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
723 }
724
725 static int
726 tmpfs_fsync(struct vop_fsync_args *v)
727 {
728 struct vnode *vp = v->a_vp;
729
730 tmpfs_check_mtime(vp);
731 tmpfs_update(vp);
732
733 return (0);
734 }
735
736 static int
737 tmpfs_remove(struct vop_remove_args *v)
738 {
739 struct vnode *dvp = v->a_dvp;
740 struct vnode *vp = v->a_vp;
741
742 int error;
743 struct tmpfs_dirent *de;
744 struct tmpfs_mount *tmp;
745 struct tmpfs_node *dnode;
746 struct tmpfs_node *node;
747
748 if (vp->v_type == VDIR) {
749 error = EISDIR;
750 goto out;
751 }
752
753 dnode = VP_TO_TMPFS_DIR(dvp);
754 node = VP_TO_TMPFS_NODE(vp);
755 tmp = VFS_TO_TMPFS(vp->v_mount);
756 de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
757 MPASS(de != NULL);
758
759 /* Files marked as immutable or append-only cannot be deleted. */
760 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
761 (dnode->tn_flags & APPEND)) {
762 error = EPERM;
763 goto out;
764 }
765
766 /* Remove the entry from the directory; as it is a file, we do not
767 * have to change the number of hard links of the directory. */
768 tmpfs_dir_detach(dvp, de);
769 if (v->a_cnp->cn_flags & DOWHITEOUT)
770 tmpfs_dir_whiteout_add(dvp, v->a_cnp);
771
772 /* Free the directory entry we just deleted. Note that the node
773 * referred by it will not be removed until the vnode is really
774 * reclaimed. */
775 tmpfs_free_dirent(tmp, de);
776
777 node->tn_status |= TMPFS_NODE_CHANGED;
778 node->tn_accessed = true;
779 error = 0;
780
781 out:
782 return (error);
783 }
784
785 static int
786 tmpfs_link(struct vop_link_args *v)
787 {
788 struct vnode *dvp = v->a_tdvp;
789 struct vnode *vp = v->a_vp;
790 struct componentname *cnp = v->a_cnp;
791
792 int error;
793 struct tmpfs_dirent *de;
794 struct tmpfs_node *node;
795
796 MPASS(dvp != vp); /* XXX When can this be false? */
797 node = VP_TO_TMPFS_NODE(vp);
798
799 /* Ensure that we do not overflow the maximum number of links imposed
800 * by the system. */
801 MPASS(node->tn_links <= TMPFS_LINK_MAX);
802 if (node->tn_links == TMPFS_LINK_MAX) {
803 error = EMLINK;
804 goto out;
805 }
806
807 /* We cannot create links of files marked immutable or append-only. */
808 if (node->tn_flags & (IMMUTABLE | APPEND)) {
809 error = EPERM;
810 goto out;
811 }
812
813 /* Allocate a new directory entry to represent the node. */
814 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
815 cnp->cn_nameptr, cnp->cn_namelen, &de);
816 if (error != 0)
817 goto out;
818
819 /* Insert the new directory entry into the appropriate directory. */
820 if (cnp->cn_flags & ISWHITEOUT)
821 tmpfs_dir_whiteout_remove(dvp, cnp);
822 tmpfs_dir_attach(dvp, de);
823
824 /* vp link count has changed, so update node times. */
825 node->tn_status |= TMPFS_NODE_CHANGED;
826 tmpfs_update(vp);
827
828 error = 0;
829
830 out:
831 return (error);
832 }
833
834 /*
835 * We acquire all but fdvp locks using non-blocking acquisitions. If we
836 * fail to acquire any lock in the path we will drop all held locks,
837 * acquire the new lock in a blocking fashion, and then release it and
838 * restart the rename. This acquire/release step ensures that we do not
839 * spin on a lock waiting for release. On error release all vnode locks
840 * and decrement references the way tmpfs_rename() would do.
841 */
842 static int
843 tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
844 struct vnode *tdvp, struct vnode **tvpp,
845 struct componentname *fcnp, struct componentname *tcnp)
846 {
847 struct vnode *nvp;
848 struct mount *mp;
849 struct tmpfs_dirent *de;
850 int error, restarts = 0;
851
852 VOP_UNLOCK(tdvp);
853 if (*tvpp != NULL && *tvpp != tdvp)
854 VOP_UNLOCK(*tvpp);
855 mp = fdvp->v_mount;
856
857 relock:
858 restarts += 1;
859 error = vn_lock(fdvp, LK_EXCLUSIVE);
860 if (error)
861 goto releout;
862 if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
863 VOP_UNLOCK(fdvp);
864 error = vn_lock(tdvp, LK_EXCLUSIVE);
865 if (error)
866 goto releout;
867 VOP_UNLOCK(tdvp);
868 goto relock;
869 }
870 /*
871 * Re-resolve fvp to be certain it still exists and fetch the
872 * correct vnode.
873 */
874 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
875 if (de == NULL) {
876 VOP_UNLOCK(fdvp);
877 VOP_UNLOCK(tdvp);
878 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
879 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
880 error = EINVAL;
881 else
882 error = ENOENT;
883 goto releout;
884 }
885 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
886 if (error != 0) {
887 VOP_UNLOCK(fdvp);
888 VOP_UNLOCK(tdvp);
889 if (error != EBUSY)
890 goto releout;
891 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
892 if (error != 0)
893 goto releout;
894 VOP_UNLOCK(nvp);
895 /*
896 * Concurrent rename race.
897 */
898 if (nvp == tdvp) {
899 vrele(nvp);
900 error = EINVAL;
901 goto releout;
902 }
903 vrele(*fvpp);
904 *fvpp = nvp;
905 goto relock;
906 }
907 vrele(*fvpp);
908 *fvpp = nvp;
909 VOP_UNLOCK(*fvpp);
910 /*
911 * Re-resolve tvp and acquire the vnode lock if present.
912 */
913 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
914 /*
915 * If tvp disappeared we just carry on.
916 */
917 if (de == NULL && *tvpp != NULL) {
918 vrele(*tvpp);
919 *tvpp = NULL;
920 }
921 /*
922 * Get the tvp ino if the lookup succeeded. We may have to restart
923 * if the non-blocking acquire fails.
924 */
925 if (de != NULL) {
926 nvp = NULL;
927 error = tmpfs_alloc_vp(mp, de->td_node,
928 LK_EXCLUSIVE | LK_NOWAIT, &nvp);
929 if (*tvpp != NULL)
930 vrele(*tvpp);
931 *tvpp = nvp;
932 if (error != 0) {
933 VOP_UNLOCK(fdvp);
934 VOP_UNLOCK(tdvp);
935 if (error != EBUSY)
936 goto releout;
937 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
938 &nvp);
939 if (error != 0)
940 goto releout;
941 VOP_UNLOCK(nvp);
942 /*
943 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
944 */
945 if (nvp == fdvp) {
946 error = ENOTEMPTY;
947 goto releout;
948 }
949 goto relock;
950 }
951 }
952 tmpfs_rename_restarts += restarts;
953
954 return (0);
955
956 releout:
957 vrele(fdvp);
958 vrele(*fvpp);
959 vrele(tdvp);
960 if (*tvpp != NULL)
961 vrele(*tvpp);
962 tmpfs_rename_restarts += restarts;
963
964 return (error);
965 }
966
967 static int
968 tmpfs_rename(struct vop_rename_args *v)
969 {
970 struct vnode *fdvp = v->a_fdvp;
971 struct vnode *fvp = v->a_fvp;
972 struct componentname *fcnp = v->a_fcnp;
973 struct vnode *tdvp = v->a_tdvp;
974 struct vnode *tvp = v->a_tvp;
975 struct componentname *tcnp = v->a_tcnp;
976 char *newname;
977 struct tmpfs_dirent *de;
978 struct tmpfs_mount *tmp;
979 struct tmpfs_node *fdnode;
980 struct tmpfs_node *fnode;
981 struct tmpfs_node *tnode;
982 struct tmpfs_node *tdnode;
983 int error;
984 bool want_seqc_end;
985
986 want_seqc_end = false;
987
988 /*
989 * Disallow cross-device renames.
990 * XXX Why isn't this done by the caller?
991 */
992 if (fvp->v_mount != tdvp->v_mount ||
993 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
994 error = EXDEV;
995 goto out;
996 }
997
998 /* If source and target are the same file, there is nothing to do. */
999 if (fvp == tvp) {
1000 error = 0;
1001 goto out;
1002 }
1003
1004 /*
1005 * If we need to move the directory between entries, lock the
1006 * source so that we can safely operate on it.
1007 */
1008 if (fdvp != tdvp && fdvp != tvp) {
1009 if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1010 error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1011 fcnp, tcnp);
1012 if (error != 0)
1013 return (error);
1014 ASSERT_VOP_ELOCKED(fdvp,
1015 "tmpfs_rename: fdvp not locked");
1016 ASSERT_VOP_ELOCKED(tdvp,
1017 "tmpfs_rename: tdvp not locked");
1018 if (tvp != NULL)
1019 ASSERT_VOP_ELOCKED(tvp,
1020 "tmpfs_rename: tvp not locked");
1021 if (fvp == tvp) {
1022 error = 0;
1023 goto out_locked;
1024 }
1025 }
1026 }
1027
1028 /*
1029 * Avoid manipulating '.' and '..' entries.
1030 */
1031 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1032 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.')) {
1033 error = EINVAL;
1034 goto out_locked;
1035 }
1036
1037 if (tvp != NULL)
1038 vn_seqc_write_begin(tvp);
1039 vn_seqc_write_begin(tdvp);
1040 vn_seqc_write_begin(fvp);
1041 vn_seqc_write_begin(fdvp);
1042 want_seqc_end = true;
1043
1044 tmp = VFS_TO_TMPFS(tdvp->v_mount);
1045 tdnode = VP_TO_TMPFS_DIR(tdvp);
1046 tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1047 fdnode = VP_TO_TMPFS_DIR(fdvp);
1048 fnode = VP_TO_TMPFS_NODE(fvp);
1049 de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1050
1051 /*
1052 * Entry can disappear before we lock fdvp.
1053 */
1054 if (de == NULL) {
1055 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1056 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1057 error = EINVAL;
1058 else
1059 error = ENOENT;
1060 goto out_locked;
1061 }
1062 MPASS(de->td_node == fnode);
1063
1064 /*
1065 * If renaming a directory to another preexisting directory,
1066 * ensure that the target directory is empty so that its
1067 * removal causes no side effects.
1068 * Kern_rename guarantees the destination to be a directory
1069 * if the source is one.
1070 */
1071 if (tvp != NULL) {
1072 MPASS(tnode != NULL);
1073
1074 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1075 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1076 error = EPERM;
1077 goto out_locked;
1078 }
1079
1080 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1081 if (tnode->tn_size != 0 &&
1082 ((tcnp->cn_flags & IGNOREWHITEOUT) == 0 ||
1083 tnode->tn_size > tnode->tn_dir.tn_wht_size)) {
1084 error = ENOTEMPTY;
1085 goto out_locked;
1086 }
1087 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1088 error = ENOTDIR;
1089 goto out_locked;
1090 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1091 error = EISDIR;
1092 goto out_locked;
1093 } else {
1094 MPASS(fnode->tn_type != VDIR &&
1095 tnode->tn_type != VDIR);
1096 }
1097 }
1098
1099 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1100 || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1101 error = EPERM;
1102 goto out_locked;
1103 }
1104
1105 /*
1106 * Ensure that we have enough memory to hold the new name, if it
1107 * has to be changed.
1108 */
1109 if (fcnp->cn_namelen != tcnp->cn_namelen ||
1110 bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1111 newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1112 } else
1113 newname = NULL;
1114
1115 /*
1116 * If the node is being moved to another directory, we have to do
1117 * the move.
1118 */
1119 if (fdnode != tdnode) {
1120 /*
1121 * In case we are moving a directory, we have to adjust its
1122 * parent to point to the new parent.
1123 */
1124 if (de->td_node->tn_type == VDIR) {
1125 struct tmpfs_node *n;
1126
1127 TMPFS_NODE_LOCK(fnode);
1128 error = tmpfs_access_locked(fvp, fnode, VWRITE,
1129 tcnp->cn_cred);
1130 TMPFS_NODE_UNLOCK(fnode);
1131 if (error) {
1132 if (newname != NULL)
1133 free(newname, M_TMPFSNAME);
1134 goto out_locked;
1135 }
1136
1137 /*
1138 * Ensure the target directory is not a child of the
1139 * directory being moved. Otherwise, we'd end up
1140 * with stale nodes.
1141 */
1142 n = tdnode;
1143 /*
1144 * TMPFS_LOCK guarantees that no nodes are freed while
1145 * traversing the list. Nodes can only be marked as
1146 * removed: tn_parent == NULL.
1147 */
1148 TMPFS_LOCK(tmp);
1149 TMPFS_NODE_LOCK(n);
1150 while (n != n->tn_dir.tn_parent) {
1151 struct tmpfs_node *parent;
1152
1153 if (n == fnode) {
1154 TMPFS_NODE_UNLOCK(n);
1155 TMPFS_UNLOCK(tmp);
1156 error = EINVAL;
1157 if (newname != NULL)
1158 free(newname, M_TMPFSNAME);
1159 goto out_locked;
1160 }
1161 parent = n->tn_dir.tn_parent;
1162 TMPFS_NODE_UNLOCK(n);
1163 if (parent == NULL) {
1164 n = NULL;
1165 break;
1166 }
1167 TMPFS_NODE_LOCK(parent);
1168 if (parent->tn_dir.tn_parent == NULL) {
1169 TMPFS_NODE_UNLOCK(parent);
1170 n = NULL;
1171 break;
1172 }
1173 n = parent;
1174 }
1175 TMPFS_UNLOCK(tmp);
1176 if (n == NULL) {
1177 error = EINVAL;
1178 if (newname != NULL)
1179 free(newname, M_TMPFSNAME);
1180 goto out_locked;
1181 }
1182 TMPFS_NODE_UNLOCK(n);
1183
1184 /* Adjust the parent pointer. */
1185 TMPFS_VALIDATE_DIR(fnode);
1186 TMPFS_NODE_LOCK(de->td_node);
1187 de->td_node->tn_dir.tn_parent = tdnode;
1188 TMPFS_NODE_UNLOCK(de->td_node);
1189
1190 /*
1191 * As a result of changing the target of the '..'
1192 * entry, the link count of the source and target
1193 * directories has to be adjusted.
1194 */
1195 TMPFS_NODE_LOCK(tdnode);
1196 TMPFS_ASSERT_LOCKED(tdnode);
1197 tdnode->tn_links++;
1198 TMPFS_NODE_UNLOCK(tdnode);
1199
1200 TMPFS_NODE_LOCK(fdnode);
1201 TMPFS_ASSERT_LOCKED(fdnode);
1202 fdnode->tn_links--;
1203 TMPFS_NODE_UNLOCK(fdnode);
1204 }
1205 }
1206
1207 /*
1208 * Do the move: just remove the entry from the source directory
1209 * and insert it into the target one.
1210 */
1211 tmpfs_dir_detach(fdvp, de);
1212
1213 if (fcnp->cn_flags & DOWHITEOUT)
1214 tmpfs_dir_whiteout_add(fdvp, fcnp);
1215 if (tcnp->cn_flags & ISWHITEOUT)
1216 tmpfs_dir_whiteout_remove(tdvp, tcnp);
1217
1218 /*
1219 * If the name has changed, we need to make it effective by changing
1220 * it in the directory entry.
1221 */
1222 if (newname != NULL) {
1223 MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1224
1225 free(de->ud.td_name, M_TMPFSNAME);
1226 de->ud.td_name = newname;
1227 tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1228
1229 fnode->tn_status |= TMPFS_NODE_CHANGED;
1230 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1231 }
1232
1233 /*
1234 * If we are overwriting an entry, we have to remove the old one
1235 * from the target directory.
1236 */
1237 if (tvp != NULL) {
1238 struct tmpfs_dirent *tde;
1239
1240 /* Remove the old entry from the target directory. */
1241 tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1242 tmpfs_dir_detach(tdvp, tde);
1243
1244 /*
1245 * If we are overwriting a directory, per the ENOTEMPTY check
1246 * above it must either be empty or contain only whiteout
1247 * entries. In the latter case (which can only happen if
1248 * IGNOREWHITEOUT was passed in tcnp->cn_flags), clear the
1249 * whiteout entries to avoid leaking memory.
1250 */
1251 if (tnode->tn_type == VDIR && tnode->tn_size > 0)
1252 tmpfs_dir_clear_whiteouts(tvp);
1253
1254 /* Update node's ctime because of possible hardlinks. */
1255 tnode->tn_status |= TMPFS_NODE_CHANGED;
1256 tmpfs_update(tvp);
1257
1258 /*
1259 * Free the directory entry we just deleted. Note that the
1260 * node referred by it will not be removed until the vnode is
1261 * really reclaimed.
1262 */
1263 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1264 }
1265
1266 tmpfs_dir_attach(tdvp, de);
1267
1268 if (tmpfs_use_nc(fvp)) {
1269 cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1270 }
1271
1272 error = 0;
1273
1274 out_locked:
1275 if (fdvp != tdvp && fdvp != tvp)
1276 VOP_UNLOCK(fdvp);
1277
1278 out:
1279 if (want_seqc_end) {
1280 if (tvp != NULL)
1281 vn_seqc_write_end(tvp);
1282 vn_seqc_write_end(tdvp);
1283 vn_seqc_write_end(fvp);
1284 vn_seqc_write_end(fdvp);
1285 }
1286
1287 /*
1288 * Release target nodes.
1289 * XXX: I don't understand when tdvp can be the same as tvp, but
1290 * other code takes care of this...
1291 */
1292 if (tdvp == tvp)
1293 vrele(tdvp);
1294 else
1295 vput(tdvp);
1296 if (tvp != NULL)
1297 vput(tvp);
1298
1299 /* Release source nodes. */
1300 vrele(fdvp);
1301 vrele(fvp);
1302
1303 return (error);
1304 }
1305
1306 static int
1307 tmpfs_mkdir(struct vop_mkdir_args *v)
1308 {
1309 struct vnode *dvp = v->a_dvp;
1310 struct vnode **vpp = v->a_vpp;
1311 struct componentname *cnp = v->a_cnp;
1312 struct vattr *vap = v->a_vap;
1313
1314 MPASS(vap->va_type == VDIR);
1315
1316 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
1317 }
1318
1319 static int
1320 tmpfs_rmdir(struct vop_rmdir_args *v)
1321 {
1322 struct vnode *dvp = v->a_dvp;
1323 struct vnode *vp = v->a_vp;
1324 struct componentname *cnp = v->a_cnp;
1325
1326 int error;
1327 struct tmpfs_dirent *de;
1328 struct tmpfs_mount *tmp;
1329 struct tmpfs_node *dnode;
1330 struct tmpfs_node *node;
1331
1332 tmp = VFS_TO_TMPFS(dvp->v_mount);
1333 dnode = VP_TO_TMPFS_DIR(dvp);
1334 node = VP_TO_TMPFS_DIR(vp);
1335
1336 /*
1337 * Directories with more than two non-whiteout entries ('.' and '..')
1338 * cannot be removed.
1339 */
1340 if (node->tn_size != 0 &&
1341 ((cnp->cn_flags & IGNOREWHITEOUT) == 0 ||
1342 node->tn_size > node->tn_dir.tn_wht_size)) {
1343 error = ENOTEMPTY;
1344 goto out;
1345 }
1346
1347 /* Check flags to see if we are allowed to remove the directory. */
1348 if ((dnode->tn_flags & APPEND)
1349 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1350 error = EPERM;
1351 goto out;
1352 }
1353
1354 /* This invariant holds only if we are not trying to remove "..".
1355 * We checked for that above so this is safe now. */
1356 MPASS(node->tn_dir.tn_parent == dnode);
1357
1358 /* Get the directory entry associated with node (vp). This was
1359 * filled by tmpfs_lookup while looking up the entry. */
1360 de = tmpfs_dir_lookup(dnode, node, cnp);
1361 MPASS(TMPFS_DIRENT_MATCHES(de,
1362 cnp->cn_nameptr,
1363 cnp->cn_namelen));
1364
1365 /* Detach the directory entry from the directory (dnode). */
1366 tmpfs_dir_detach(dvp, de);
1367
1368 /*
1369 * If we are removing a directory, per the ENOTEMPTY check above it
1370 * must either be empty or contain only whiteout entries. In the
1371 * latter case (which can only happen if IGNOREWHITEOUT was passed
1372 * in cnp->cn_flags), clear the whiteout entries to avoid leaking
1373 * memory.
1374 */
1375 if (node->tn_size > 0)
1376 tmpfs_dir_clear_whiteouts(vp);
1377
1378 if (cnp->cn_flags & DOWHITEOUT)
1379 tmpfs_dir_whiteout_add(dvp, cnp);
1380
1381 /* No vnode should be allocated for this entry from this point */
1382 TMPFS_NODE_LOCK(node);
1383 node->tn_links--;
1384 node->tn_dir.tn_parent = NULL;
1385 node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1386 node->tn_accessed = true;
1387
1388 TMPFS_NODE_UNLOCK(node);
1389
1390 TMPFS_NODE_LOCK(dnode);
1391 dnode->tn_links--;
1392 dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1393 dnode->tn_accessed = true;
1394 TMPFS_NODE_UNLOCK(dnode);
1395
1396 if (tmpfs_use_nc(dvp)) {
1397 cache_vop_rmdir(dvp, vp);
1398 }
1399
1400 /* Free the directory entry we just deleted. Note that the node
1401 * referred by it will not be removed until the vnode is really
1402 * reclaimed. */
1403 tmpfs_free_dirent(tmp, de);
1404
1405 /* Release the deleted vnode (will destroy the node, notify
1406 * interested parties and clean it from the cache). */
1407
1408 dnode->tn_status |= TMPFS_NODE_CHANGED;
1409 tmpfs_update(dvp);
1410
1411 error = 0;
1412
1413 out:
1414 return (error);
1415 }
1416
1417 static int
1418 tmpfs_symlink(struct vop_symlink_args *v)
1419 {
1420 struct vnode *dvp = v->a_dvp;
1421 struct vnode **vpp = v->a_vpp;
1422 struct componentname *cnp = v->a_cnp;
1423 struct vattr *vap = v->a_vap;
1424 const char *target = v->a_target;
1425
1426 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1427 MPASS(vap->va_type == VLNK);
1428 #else
1429 vap->va_type = VLNK;
1430 #endif
1431
1432 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
1433 }
1434
1435 static int
1436 tmpfs_readdir(struct vop_readdir_args *va)
1437 {
1438 struct vnode *vp;
1439 struct uio *uio;
1440 struct tmpfs_mount *tm;
1441 struct tmpfs_node *node;
1442 uint64_t **cookies;
1443 int *eofflag, *ncookies;
1444 ssize_t startresid;
1445 int error, maxcookies;
1446
1447 vp = va->a_vp;
1448 uio = va->a_uio;
1449 eofflag = va->a_eofflag;
1450 cookies = va->a_cookies;
1451 ncookies = va->a_ncookies;
1452
1453 /* This operation only makes sense on directory nodes. */
1454 if (vp->v_type != VDIR)
1455 return (ENOTDIR);
1456
1457 maxcookies = 0;
1458 node = VP_TO_TMPFS_DIR(vp);
1459 tm = VFS_TO_TMPFS(vp->v_mount);
1460
1461 startresid = uio->uio_resid;
1462
1463 /* Allocate cookies for NFS and compat modules. */
1464 if (cookies != NULL && ncookies != NULL) {
1465 maxcookies = howmany(node->tn_size,
1466 sizeof(struct tmpfs_dirent)) + 2;
1467 *cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
1468 M_WAITOK);
1469 *ncookies = 0;
1470 }
1471
1472 if (cookies == NULL)
1473 error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
1474 else
1475 error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
1476 ncookies);
1477
1478 /* Buffer was filled without hitting EOF. */
1479 if (error == EJUSTRETURN)
1480 error = (uio->uio_resid != startresid) ? 0 : EINVAL;
1481
1482 if (error != 0 && cookies != NULL && ncookies != NULL) {
1483 free(*cookies, M_TEMP);
1484 *cookies = NULL;
1485 *ncookies = 0;
1486 }
1487
1488 if (eofflag != NULL)
1489 *eofflag =
1490 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1491
1492 return (error);
1493 }
1494
1495 static int
1496 tmpfs_readlink(struct vop_readlink_args *v)
1497 {
1498 struct vnode *vp = v->a_vp;
1499 struct uio *uio = v->a_uio;
1500
1501 int error;
1502 struct tmpfs_node *node;
1503
1504 MPASS(uio->uio_offset == 0);
1505 MPASS(vp->v_type == VLNK);
1506
1507 node = VP_TO_TMPFS_NODE(vp);
1508
1509 error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1510 uio);
1511 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1512
1513 return (error);
1514 }
1515
1516 /*
1517 * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
1518 * the comment above cache_fplookup for details.
1519 *
1520 * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
1521 */
1522 static int
1523 tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
1524 {
1525 struct vnode *vp;
1526 struct tmpfs_node *node;
1527 char *symlink;
1528
1529 vp = v->a_vp;
1530 node = VP_TO_TMPFS_NODE_SMR(vp);
1531 if (__predict_false(node == NULL))
1532 return (EAGAIN);
1533 if (!atomic_load_char(&node->tn_link_smr))
1534 return (EAGAIN);
1535 symlink = atomic_load_ptr(&node->tn_link_target);
1536 if (symlink == NULL)
1537 return (EAGAIN);
1538
1539 return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
1540 }
1541
1542 static int
1543 tmpfs_inactive(struct vop_inactive_args *v)
1544 {
1545 struct vnode *vp;
1546 struct tmpfs_node *node;
1547
1548 vp = v->a_vp;
1549 node = VP_TO_TMPFS_NODE(vp);
1550 if (node->tn_links == 0)
1551 vrecycle(vp);
1552 else
1553 tmpfs_check_mtime(vp);
1554 return (0);
1555 }
1556
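/*
 * Cheap check used to decide whether VOP_INACTIVE is worth calling: only
 * unlinked nodes and regular files with dirty pages need the full pass.
 */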
1557 static int
1558 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1559 {
1560 struct vnode *vp;
1561 struct tmpfs_node *node;
1562 struct vm_object *obj;
1563
1564 vp = ap->a_vp;
1565 node = VP_TO_TMPFS_NODE(vp);
1566 if (node->tn_links == 0)
1567 goto need;
1568 if (vp->v_type == VREG) {
1569 obj = vp->v_object;
1570 if (obj->generation != obj->cleangeneration)
1571 goto need;
1572 }
1573 return (0);
1574 need:
1575 return (1);
1576 }
1577
1578 int
1579 tmpfs_reclaim(struct vop_reclaim_args *v)
1580 {
1581 struct vnode *vp;
1582 struct tmpfs_mount *tmp;
1583 struct tmpfs_node *node;
1584 bool unlock;
1585
1586 vp = v->a_vp;
1587 node = VP_TO_TMPFS_NODE(vp);
1588 tmp = VFS_TO_TMPFS(vp->v_mount);
1589
1590 if (vp->v_type == VREG)
1591 tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
1592 vp->v_object = NULL;
1593
1594 TMPFS_LOCK(tmp);
1595 TMPFS_NODE_LOCK(node);
1596 tmpfs_free_vp(vp);
1597
1598 /*
1599 * If the node referenced by this vnode was deleted by the user,
1600 * we must free its associated data structures (now that the vnode
1601 * is being reclaimed).
1602 */
1603 unlock = true;
1604 if (node->tn_links == 0 &&
1605 (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1606 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1607 unlock = !tmpfs_free_node_locked(tmp, node, true);
1608 }
1609
1610 if (unlock) {
1611 TMPFS_NODE_UNLOCK(node);
1612 TMPFS_UNLOCK(tmp);
1613 }
1614
1615 MPASS(vp->v_data == NULL);
1616 return (0);
1617 }
1618
1619 int
1620 tmpfs_print(struct vop_print_args *v)
1621 {
1622 struct vnode *vp = v->a_vp;
1623
1624 struct tmpfs_node *node;
1625
1626 node = VP_TO_TMPFS_NODE(vp);
1627
1628 printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1629 node, node->tn_flags, (uintmax_t)node->tn_links);
1630 printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1631 node->tn_mode, node->tn_uid, node->tn_gid,
1632 (intmax_t)node->tn_size, node->tn_status);
1633
1634 if (vp->v_type == VFIFO)
1635 fifo_printinfo(vp);
1636
1637 printf("\n");
1638
1639 return (0);
1640 }
1641
1642 int
1643 tmpfs_pathconf(struct vop_pathconf_args *v)
1644 {
1645 struct vnode *vp = v->a_vp;
1646 int name = v->a_name;
1647 long *retval = v->a_retval;
1648
1649 int error;
1650
1651 error = 0;
1652
1653 switch (name) {
1654 case _PC_LINK_MAX:
1655 *retval = TMPFS_LINK_MAX;
1656 break;
1657
1658 case _PC_SYMLINK_MAX:
1659 *retval = MAXPATHLEN;
1660 break;
1661
1662 case _PC_NAME_MAX:
1663 *retval = NAME_MAX;
1664 break;
1665
1666 case _PC_PIPE_BUF:
1667 if (vp->v_type == VDIR || vp->v_type == VFIFO)
1668 *retval = PIPE_BUF;
1669 else
1670 error = EINVAL;
1671 break;
1672
1673 case _PC_CHOWN_RESTRICTED:
1674 *retval = 1;
1675 break;
1676
1677 case _PC_NO_TRUNC:
1678 *retval = 1;
1679 break;
1680
1681 case _PC_SYNC_IO:
1682 *retval = 1;
1683 break;
1684
1685 case _PC_FILESIZEBITS:
1686 *retval = 64;
1687 break;
1688
1689 case _PC_MIN_HOLE_SIZE:
1690 *retval = PAGE_SIZE;
1691 break;
1692
1693 default:
1694 error = vop_stdpathconf(v);
1695 }
1696
1697 return (error);
1698 }
1699
1700 static int
1701 tmpfs_vptofh(struct vop_vptofh_args *ap)
1702 /*
1703 vop_vptofh {
1704 IN struct vnode *a_vp;
1705 IN struct fid *a_fhp;
1706 };
1707 */
1708 {
1709 struct tmpfs_fid_data tfd;
1710 struct tmpfs_node *node;
1711 struct fid *fhp;
1712 _Static_assert(sizeof(struct tmpfs_fid_data) <= sizeof(struct fid),
1713 "struct tmpfs_fid_data cannot be larger than struct fid");
1714
1715 node = VP_TO_TMPFS_NODE(ap->a_vp);
1716 fhp = ap->a_fhp;
1717 fhp->fid_len = sizeof(tfd);
1718
1719 /*
1720 * Copy into fid_data from the stack to avoid unaligned pointer use.
1721 * See the comment in sys/mount.h on struct fid for details.
1722 */
1723 tfd.tfd_id = node->tn_id;
1724 tfd.tfd_gen = node->tn_gen;
1725 memcpy(fhp->fid_data, &tfd, fhp->fid_len);
1726
1727 return (0);
1728 }
1729
1730 static int
1731 tmpfs_whiteout(struct vop_whiteout_args *ap)
1732 {
1733 struct vnode *dvp = ap->a_dvp;
1734 struct componentname *cnp = ap->a_cnp;
1735 struct tmpfs_dirent *de;
1736
1737 switch (ap->a_flags) {
1738 case LOOKUP:
1739 return (0);
1740 case CREATE:
1741 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1742 if (de != NULL)
1743 return (de->td_node == NULL ? 0 : EEXIST);
1744 return (tmpfs_dir_whiteout_add(dvp, cnp));
1745 case DELETE:
1746 tmpfs_dir_whiteout_remove(dvp, cnp);
1747 return (0);
1748 default:
1749 panic("tmpfs_whiteout: unknown op");
1750 }
1751 }
1752
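/*
 * Scan directory 'tnp' for an entry pointing at node 'tn'; this is the
 * reverse lookup used by the vptocnp (name reconstruction) path below.
 */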
1753 static int
1754 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1755 struct tmpfs_dirent **pde)
1756 {
1757 struct tmpfs_dir_cursor dc;
1758 struct tmpfs_dirent *de;
1759
1760 for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1761 de = tmpfs_dir_next(tnp, &dc)) {
1762 if (de->td_node == tn) {
1763 *pde = de;
1764 return (0);
1765 }
1766 }
1767 return (ENOENT);
1768 }
1769
1770 static int
1771 tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
1772 struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
1773 {
1774 struct tmpfs_dirent *de;
1775 int error, i;
1776
1777 error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
1778 dvp);
1779 if (error != 0)
1780 return (error);
1781 error = tmpfs_vptocnp_dir(tn, tnp, &de);
1782 if (error == 0) {
1783 i = *buflen;
1784 i -= de->td_namelen;
1785 if (i < 0) {
1786 error = ENOMEM;
1787 } else {
1788 bcopy(de->ud.td_name, buf + i, de->td_namelen);
1789 *buflen = i;
1790 }
1791 }
1792 if (error == 0) {
1793 if (vp != *dvp)
1794 VOP_UNLOCK(*dvp);
1795 } else {
1796 if (vp != *dvp)
1797 vput(*dvp);
1798 else
1799 vrele(vp);
1800 }
1801 return (error);
1802 }
1803
1804 static int
1805 tmpfs_vptocnp(struct vop_vptocnp_args *ap)
1806 {
1807 struct vnode *vp, **dvp;
1808 struct tmpfs_node *tn, *tnp, *tnp1;
1809 struct tmpfs_dirent *de;
1810 struct tmpfs_mount *tm;
1811 char *buf;
1812 size_t *buflen;
1813 int error;
1814
1815 vp = ap->a_vp;
1816 dvp = ap->a_vpp;
1817 buf = ap->a_buf;
1818 buflen = ap->a_buflen;
1819
1820 tm = VFS_TO_TMPFS(vp->v_mount);
1821 tn = VP_TO_TMPFS_NODE(vp);
1822 if (tn->tn_type == VDIR) {
1823 tnp = tn->tn_dir.tn_parent;
1824 if (tnp == NULL)
1825 return (ENOENT);
1826 tmpfs_ref_node(tnp);
1827 error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
1828 buflen, dvp);
1829 tmpfs_free_node(tm, tnp);
1830 return (error);
1831 }
1832 restart:
1833 TMPFS_LOCK(tm);
1834 restart_locked:
1835 LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
1836 if (tnp->tn_type != VDIR)
1837 continue;
1838 TMPFS_NODE_LOCK(tnp);
1839 tmpfs_ref_node(tnp);
1840
1841 /*
1842 * tn_vnode cannot be instantiated while we hold the
1843 * node lock, so the directory cannot be changed while
1844 * we iterate over it. Do this to avoid instantiating
1845 * a vnode for directories which cannot point to our
1846 * node.
1847 */
1848 error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
1849 &de) : 0;
1850
1851 if (error == 0) {
1852 TMPFS_NODE_UNLOCK(tnp);
1853 TMPFS_UNLOCK(tm);
1854 error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
1855 dvp);
1856 if (error == 0) {
1857 tmpfs_free_node(tm, tnp);
1858 return (0);
1859 }
1860 if (VN_IS_DOOMED(vp)) {
1861 tmpfs_free_node(tm, tnp);
1862 return (ENOENT);
1863 }
1864 TMPFS_LOCK(tm);
1865 TMPFS_NODE_LOCK(tnp);
1866 }
1867 if (tmpfs_free_node_locked(tm, tnp, false)) {
1868 goto restart;
1869 } else {
1870 KASSERT(tnp->tn_refcount > 0,
1871 ("node %p refcount zero", tnp));
1872 if (tnp->tn_attached) {
1873 tnp1 = LIST_NEXT(tnp, tn_entries);
1874 TMPFS_NODE_UNLOCK(tnp);
1875 } else {
1876 TMPFS_NODE_UNLOCK(tnp);
1877 goto restart_locked;
1878 }
1879 }
1880 }
1881 TMPFS_UNLOCK(tm);
1882 return (ENOENT);
1883 }
1884
1885 void
1886 tmpfs_extattr_free(struct tmpfs_extattr *ea)
1887 {
1888 free(ea->ea_name, M_TMPFSEA);
1889 free(ea->ea_value, M_TMPFSEA);
1890 free(ea, M_TMPFSEA);
1891 }
1892
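/*
 * Charge (or, when 'size' is negative, uncharge) extended attribute memory
 * against the per-mount accounting; returns false if the change would
 * exceed the configured limits.
 */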
1893 static bool
1894 tmpfs_extattr_update_mem(struct tmpfs_mount *tmp, ssize_t size)
1895 {
1896 TMPFS_LOCK(tmp);
1897 if (size > 0 &&
1898 !tmpfs_pages_check_avail(tmp, howmany(size, PAGE_SIZE))) {
1899 TMPFS_UNLOCK(tmp);
1900 return (false);
1901 }
1902 if (tmp->tm_ea_memory_inuse + size > tmp->tm_ea_memory_max) {
1903 TMPFS_UNLOCK(tmp);
1904 return (false);
1905 }
1906 tmp->tm_ea_memory_inuse += size;
1907 TMPFS_UNLOCK(tmp);
1908 return (true);
1909 }
1910
1911 static int
1912 tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
1913 {
1914 struct vnode *vp = ap->a_vp;
1915 struct tmpfs_mount *tmp;
1916 struct tmpfs_node *node;
1917 struct tmpfs_extattr *ea;
1918 size_t namelen;
1919 ssize_t diff;
1920 int error;
1921
1922 node = VP_TO_TMPFS_NODE(vp);
1923 tmp = VFS_TO_TMPFS(vp->v_mount);
1924 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1925 return (EOPNOTSUPP);
1926 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1927 ap->a_cred, ap->a_td, VWRITE);
1928 if (error != 0)
1929 return (error);
1930 if (ap->a_name == NULL || ap->a_name[0] == '\0')
1931 return (EINVAL);
1932 namelen = strlen(ap->a_name);
1933 if (namelen > EXTATTR_MAXNAMELEN)
1934 return (EINVAL);
1935
1936 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1937 if (ea->ea_namespace == ap->a_attrnamespace &&
1938 namelen == ea->ea_namelen &&
1939 memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1940 break;
1941 }
1942
1943 if (ea == NULL)
1944 return (ENOATTR);
1945 LIST_REMOVE(ea, ea_extattrs);
1946 diff = -(sizeof(struct tmpfs_extattr) + namelen + ea->ea_size);
1947 tmpfs_extattr_update_mem(tmp, diff);
1948 tmpfs_extattr_free(ea);
1949 return (0);
1950 }
1951
1952 static int
1953 tmpfs_getextattr(struct vop_getextattr_args *ap)
1954 {
1955 struct vnode *vp = ap->a_vp;
1956 struct tmpfs_node *node;
1957 struct tmpfs_extattr *ea;
1958 size_t namelen;
1959 int error;
1960
1961 node = VP_TO_TMPFS_NODE(vp);
1962 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1963 return (EOPNOTSUPP);
1964 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1965 ap->a_cred, ap->a_td, VREAD);
1966 if (error != 0)
1967 return (error);
1968 if (ap->a_name == NULL || ap->a_name[0] == '\0')
1969 return (EINVAL);
1970 namelen = strlen(ap->a_name);
1971 if (namelen > EXTATTR_MAXNAMELEN)
1972 return (EINVAL);
1973
1974 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1975 if (ea->ea_namespace == ap->a_attrnamespace &&
1976 namelen == ea->ea_namelen &&
1977 memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1978 break;
1979 }
1980
1981 if (ea == NULL)
1982 return (ENOATTR);
1983 if (ap->a_size != NULL)
1984 *ap->a_size = ea->ea_size;
1985 if (ap->a_uio != NULL && ea->ea_size != 0)
1986 error = uiomove(ea->ea_value, ea->ea_size, ap->a_uio);
1987 return (error);
1988 }
1989
1990 static int
1991 tmpfs_listextattr(struct vop_listextattr_args *ap)
1992 {
1993 struct vnode *vp = ap->a_vp;
1994 struct tmpfs_node *node;
1995 struct tmpfs_extattr *ea;
1996 int error;
1997
1998 node = VP_TO_TMPFS_NODE(vp);
1999 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
2000 return (EOPNOTSUPP);
2001 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2002 ap->a_cred, ap->a_td, VREAD);
2003 if (error != 0)
2004 return (error);
2005 if (ap->a_size != NULL)
2006 *ap->a_size = 0;
2007
2008 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2009 if (ea->ea_namespace != ap->a_attrnamespace)
2010 continue;
2011 if (ap->a_size != NULL)
2012 *ap->a_size += ea->ea_namelen + 1;
2013 if (ap->a_uio != NULL) {
2014 error = uiomove(&ea->ea_namelen, 1, ap->a_uio);
2015 if (error != 0)
2016 break;
2017 error = uiomove(ea->ea_name, ea->ea_namelen, ap->a_uio);
2018 if (error != 0)
2019 break;
2020 }
2021 }
2022
2023 return (error);
2024 }
2025
2026 static int
2027 tmpfs_setextattr(struct vop_setextattr_args *ap)
2028 {
2029 struct vnode *vp = ap->a_vp;
2030 struct tmpfs_mount *tmp;
2031 struct tmpfs_node *node;
2032 struct tmpfs_extattr *ea;
2033 struct tmpfs_extattr *new_ea;
2034 size_t attr_size;
2035 size_t namelen;
2036 ssize_t diff;
2037 int error;
2038
2039 node = VP_TO_TMPFS_NODE(vp);
2040 tmp = VFS_TO_TMPFS(vp->v_mount);
2041 attr_size = ap->a_uio->uio_resid;
2042 diff = 0;
2043 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
2044 return (EOPNOTSUPP);
2045 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2046 ap->a_cred, ap->a_td, VWRITE);
2047 if (error != 0)
2048 return (error);
2049 if (ap->a_name == NULL || ap->a_name[0] == '\0')
2050 return (EINVAL);
2051 namelen = strlen(ap->a_name);
2052 if (namelen > EXTATTR_MAXNAMELEN)
2053 return (EINVAL);
2054
2055 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2056 if (ea->ea_namespace == ap->a_attrnamespace &&
2057 namelen == ea->ea_namelen &&
2058 memcmp(ap->a_name, ea->ea_name, namelen) == 0) {
2059 diff -= sizeof(struct tmpfs_extattr) + ea->ea_namelen +
2060 ea->ea_size;
2061 break;
2062 }
2063 }
2064
2065 diff += sizeof(struct tmpfs_extattr) + namelen + attr_size;
2066 if (!tmpfs_extattr_update_mem(tmp, diff))
2067 return (ENOSPC);
2068 new_ea = malloc(sizeof(struct tmpfs_extattr), M_TMPFSEA, M_WAITOK);
2069 new_ea->ea_namespace = ap->a_attrnamespace;
2070 new_ea->ea_name = malloc(namelen, M_TMPFSEA, M_WAITOK);
2071 new_ea->ea_namelen = namelen;
2072 memcpy(new_ea->ea_name, ap->a_name, namelen);
2073 if (attr_size != 0) {
2074 new_ea->ea_value = malloc(attr_size, M_TMPFSEA, M_WAITOK);
2075 new_ea->ea_size = attr_size;
2076 error = uiomove(new_ea->ea_value, attr_size, ap->a_uio);
2077 } else {
2078 new_ea->ea_value = NULL;
2079 new_ea->ea_size = 0;
2080 }
2081 if (error != 0) {
2082 tmpfs_extattr_update_mem(tmp, -diff);
2083 tmpfs_extattr_free(new_ea);
2084 return (error);
2085 }
2086 if (ea != NULL) {
2087 LIST_REMOVE(ea, ea_extattrs);
2088 tmpfs_extattr_free(ea);
2089 }
2090 LIST_INSERT_HEAD(&node->tn_extattrs, new_ea, ea_extattrs);
2091 return (0);
2092 }
2093
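/*
 * Helpers for FIOSEEKDATA/FIOSEEKHOLE: data and hole offsets are resolved by
 * asking the swap pager about the pages of the backing object.
 */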
2094 static off_t
2095 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
2096 {
2097 vm_pindex_t p;
2098
2099 p = swap_pager_seek_data(obj, OFF_TO_IDX(noff));
2100 return (p == OFF_TO_IDX(noff) ? noff : IDX_TO_OFF(p));
2101 }
2102
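/*
 * Clamp the offset against the file size: seeking for data at or beyond EOF
 * fails with ENXIO, while a hole is always reported at EOF.
 */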
2103 static int
2104 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
2105 {
2106 if (*noff < tn->tn_size)
2107 return (0);
2108 if (seekdata)
2109 return (ENXIO);
2110 *noff = tn->tn_size;
2111 return (0);
2112 }
2113
2114 static off_t
2115 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
2116 {
2117
2118 return (IDX_TO_OFF(swap_pager_seek_hole(obj, OFF_TO_IDX(noff))));
2119 }
2120
2121 static int
2122 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
2123 {
2124 struct tmpfs_node *tn;
2125 vm_object_t obj;
2126 off_t noff;
2127 int error;
2128
2129 if (vp->v_type != VREG)
2130 return (ENOTTY);
2131 tn = VP_TO_TMPFS_NODE(vp);
2132 noff = *off;
2133 if (noff < 0)
2134 return (ENXIO);
2135 error = tmpfs_seek_clamp(tn, &noff, seekdata);
2136 if (error != 0)
2137 return (error);
2138 obj = tn->tn_reg.tn_aobj;
2139
2140 VM_OBJECT_RLOCK(obj);
2141 noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
2142 tmpfs_seek_hole_locked(obj, noff);
2143 VM_OBJECT_RUNLOCK(obj);
2144
2145 error = tmpfs_seek_clamp(tn, &noff, seekdata);
2146 if (error == 0)
2147 *off = noff;
2148 return (error);
2149 }
2150
2151 static int
2152 tmpfs_ioctl(struct vop_ioctl_args *ap)
2153 {
2154 struct vnode *vp = ap->a_vp;
2155 int error = 0;
2156
2157 switch (ap->a_command) {
2158 case FIOSEEKDATA:
2159 case FIOSEEKHOLE:
2160 error = vn_lock(vp, LK_SHARED);
2161 if (error != 0) {
2162 error = EBADF;
2163 break;
2164 }
2165 error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
2166 ap->a_command == FIOSEEKDATA);
2167 VOP_UNLOCK(vp);
2168 break;
2169 default:
2170 error = ENOTTY;
2171 break;
2172 }
2173 return (error);
2174 }
2175
2176 /*
2177 * Vnode operations vector used for files stored in a tmpfs file system.
2178 */
2179 struct vop_vector tmpfs_vnodeop_entries = {
2180 .vop_default = &default_vnodeops,
2181 .vop_lookup = vfs_cache_lookup,
2182 .vop_cachedlookup = tmpfs_cached_lookup,
2183 .vop_create = tmpfs_create,
2184 .vop_mknod = tmpfs_mknod,
2185 .vop_open = tmpfs_open,
2186 .vop_close = tmpfs_close,
2187 .vop_fplookup_vexec = tmpfs_fplookup_vexec,
2188 .vop_fplookup_symlink = tmpfs_fplookup_symlink,
2189 .vop_access = tmpfs_access,
2190 .vop_stat = tmpfs_stat,
2191 .vop_getattr = tmpfs_getattr,
2192 .vop_setattr = tmpfs_setattr,
2193 .vop_read = tmpfs_read,
2194 .vop_read_pgcache = tmpfs_read_pgcache,
2195 .vop_write = tmpfs_write,
2196 .vop_deallocate = tmpfs_deallocate,
2197 .vop_fsync = tmpfs_fsync,
2198 .vop_remove = tmpfs_remove,
2199 .vop_link = tmpfs_link,
2200 .vop_rename = tmpfs_rename,
2201 .vop_mkdir = tmpfs_mkdir,
2202 .vop_rmdir = tmpfs_rmdir,
2203 .vop_symlink = tmpfs_symlink,
2204 .vop_readdir = tmpfs_readdir,
2205 .vop_readlink = tmpfs_readlink,
2206 .vop_inactive = tmpfs_inactive,
2207 .vop_need_inactive = tmpfs_need_inactive,
2208 .vop_reclaim = tmpfs_reclaim,
2209 .vop_print = tmpfs_print,
2210 .vop_pathconf = tmpfs_pathconf,
2211 .vop_vptofh = tmpfs_vptofh,
2212 .vop_whiteout = tmpfs_whiteout,
2213 .vop_bmap = VOP_EOPNOTSUPP,
2214 .vop_vptocnp = tmpfs_vptocnp,
2215 .vop_lock1 = vop_lock,
2216 .vop_unlock = vop_unlock,
2217 .vop_islocked = vop_islocked,
2218 .vop_deleteextattr = tmpfs_deleteextattr,
2219 .vop_getextattr = tmpfs_getextattr,
2220 .vop_listextattr = tmpfs_listextattr,
2221 .vop_setextattr = tmpfs_setextattr,
2222 .vop_add_writecount = vop_stdadd_writecount_nomsync,
2223 .vop_ioctl = tmpfs_ioctl,
2224 };
2225 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2226
2227 /*
2228 * Same vector for mounts which do not use namecache.
2229 */
2230 struct vop_vector tmpfs_vnodeop_nonc_entries = {
2231 .vop_default = &tmpfs_vnodeop_entries,
2232 .vop_lookup = tmpfs_lookup,
2233 };
2234 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);
2235