1 /* 2 * Copyright (c) 2007-2009 Google Inc. and Amit Singh 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: 8 * 9 * * Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * * Redistributions in binary form must reproduce the above 12 * copyright notice, this list of conditions and the following disclaimer 13 * in the documentation and/or other materials provided with the 14 * distribution. 15 * * Neither the name of Google Inc. nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 * 31 * Copyright (C) 2005 Csaba Henk. 32 * All rights reserved. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. 
Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 43 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 46 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 53 * SUCH DAMAGE. 
54 */ 55 56 #include <sys/cdefs.h> 57 __FBSDID("$FreeBSD$"); 58 59 #include <sys/types.h> 60 #include <sys/module.h> 61 #include <sys/systm.h> 62 #include <sys/errno.h> 63 #include <sys/param.h> 64 #include <sys/kernel.h> 65 #include <sys/conf.h> 66 #include <sys/uio.h> 67 #include <sys/malloc.h> 68 #include <sys/queue.h> 69 #include <sys/lock.h> 70 #include <sys/mutex.h> 71 #include <sys/sx.h> 72 #include <sys/proc.h> 73 #include <sys/mount.h> 74 #include <sys/vnode.h> 75 #include <sys/namei.h> 76 #include <sys/stat.h> 77 #include <sys/unistd.h> 78 #include <sys/filedesc.h> 79 #include <sys/file.h> 80 #include <sys/fcntl.h> 81 #include <sys/dirent.h> 82 #include <sys/bio.h> 83 #include <sys/buf.h> 84 #include <sys/sysctl.h> 85 86 #include <vm/vm.h> 87 #include <vm/vm_extern.h> 88 #include <vm/pmap.h> 89 #include <vm/vm_map.h> 90 #include <vm/vm_page.h> 91 #include <vm/vm_param.h> 92 #include <vm/vm_object.h> 93 #include <vm/vm_pager.h> 94 #include <vm/vnode_pager.h> 95 #include <vm/vm_object.h> 96 97 #include "fuse.h" 98 #include "fuse_file.h" 99 #include "fuse_internal.h" 100 #include "fuse_ipc.h" 101 #include "fuse_node.h" 102 #include "fuse_param.h" 103 #include "fuse_io.h" 104 105 #include <sys/priv.h> 106 107 #define FUSE_DEBUG_MODULE VNOPS 108 #include "fuse_debug.h" 109 110 /* vnode ops */ 111 static vop_access_t fuse_vnop_access; 112 static vop_close_t fuse_vnop_close; 113 static vop_create_t fuse_vnop_create; 114 static vop_fsync_t fuse_vnop_fsync; 115 static vop_getattr_t fuse_vnop_getattr; 116 static vop_inactive_t fuse_vnop_inactive; 117 static vop_link_t fuse_vnop_link; 118 static vop_lookup_t fuse_vnop_lookup; 119 static vop_mkdir_t fuse_vnop_mkdir; 120 static vop_mknod_t fuse_vnop_mknod; 121 static vop_open_t fuse_vnop_open; 122 static vop_read_t fuse_vnop_read; 123 static vop_readdir_t fuse_vnop_readdir; 124 static vop_readlink_t fuse_vnop_readlink; 125 static vop_reclaim_t fuse_vnop_reclaim; 126 static vop_remove_t fuse_vnop_remove; 127 static 
vop_rename_t fuse_vnop_rename; 128 static vop_rmdir_t fuse_vnop_rmdir; 129 static vop_setattr_t fuse_vnop_setattr; 130 static vop_strategy_t fuse_vnop_strategy; 131 static vop_symlink_t fuse_vnop_symlink; 132 static vop_write_t fuse_vnop_write; 133 static vop_getpages_t fuse_vnop_getpages; 134 static vop_putpages_t fuse_vnop_putpages; 135 static vop_print_t fuse_vnop_print; 136 137 struct vop_vector fuse_vnops = { 138 .vop_default = &default_vnodeops, 139 .vop_access = fuse_vnop_access, 140 .vop_close = fuse_vnop_close, 141 .vop_create = fuse_vnop_create, 142 .vop_fsync = fuse_vnop_fsync, 143 .vop_getattr = fuse_vnop_getattr, 144 .vop_inactive = fuse_vnop_inactive, 145 .vop_link = fuse_vnop_link, 146 .vop_lookup = fuse_vnop_lookup, 147 .vop_mkdir = fuse_vnop_mkdir, 148 .vop_mknod = fuse_vnop_mknod, 149 .vop_open = fuse_vnop_open, 150 .vop_pathconf = vop_stdpathconf, 151 .vop_read = fuse_vnop_read, 152 .vop_readdir = fuse_vnop_readdir, 153 .vop_readlink = fuse_vnop_readlink, 154 .vop_reclaim = fuse_vnop_reclaim, 155 .vop_remove = fuse_vnop_remove, 156 .vop_rename = fuse_vnop_rename, 157 .vop_rmdir = fuse_vnop_rmdir, 158 .vop_setattr = fuse_vnop_setattr, 159 .vop_strategy = fuse_vnop_strategy, 160 .vop_symlink = fuse_vnop_symlink, 161 .vop_write = fuse_vnop_write, 162 .vop_getpages = fuse_vnop_getpages, 163 .vop_putpages = fuse_vnop_putpages, 164 .vop_print = fuse_vnop_print, 165 }; 166 167 static u_long fuse_lookup_cache_hits = 0; 168 169 SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_hits, CTLFLAG_RD, 170 &fuse_lookup_cache_hits, 0, ""); 171 172 static u_long fuse_lookup_cache_misses = 0; 173 174 SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_misses, CTLFLAG_RD, 175 &fuse_lookup_cache_misses, 0, ""); 176 177 int fuse_lookup_cache_enable = 1; 178 179 SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW, 180 &fuse_lookup_cache_enable, 0, ""); 181 182 /* 183 * XXX: This feature is highly experimental and can bring to instabilities, 184 * needs revisiting 
before to be enabled by default. 185 */ 186 static int fuse_reclaim_revoked = 0; 187 188 SYSCTL_INT(_vfs_fuse, OID_AUTO, reclaim_revoked, CTLFLAG_RW, 189 &fuse_reclaim_revoked, 0, ""); 190 191 int fuse_pbuf_freecnt = -1; 192 193 #define fuse_vm_page_lock(m) vm_page_lock((m)); 194 #define fuse_vm_page_unlock(m) vm_page_unlock((m)); 195 #define fuse_vm_page_lock_queues() ((void)0) 196 #define fuse_vm_page_unlock_queues() ((void)0) 197 198 /* 199 struct vnop_access_args { 200 struct vnode *a_vp; 201 #if VOP_ACCESS_TAKES_ACCMODE_T 202 accmode_t a_accmode; 203 #else 204 int a_mode; 205 #endif 206 struct ucred *a_cred; 207 struct thread *a_td; 208 }; 209 */ 210 static int 211 fuse_vnop_access(struct vop_access_args *ap) 212 { 213 struct vnode *vp = ap->a_vp; 214 int accmode = ap->a_accmode; 215 struct ucred *cred = ap->a_cred; 216 217 struct fuse_access_param facp; 218 struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); 219 220 int err; 221 222 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp)); 223 224 if (fuse_isdeadfs(vp)) { 225 if (vnode_isvroot(vp)) { 226 return 0; 227 } 228 return ENXIO; 229 } 230 if (!(data->dataflags & FSESS_INITED)) { 231 if (vnode_isvroot(vp)) { 232 if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) || 233 (fuse_match_cred(data->daemoncred, cred) == 0)) { 234 return 0; 235 } 236 } 237 return EBADF; 238 } 239 if (vnode_islnk(vp)) { 240 return 0; 241 } 242 bzero(&facp, sizeof(facp)); 243 244 err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred); 245 FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode); 246 return err; 247 } 248 249 /* 250 struct vnop_close_args { 251 struct vnode *a_vp; 252 int a_fflag; 253 struct ucred *a_cred; 254 struct thread *a_td; 255 }; 256 */ 257 static int 258 fuse_vnop_close(struct vop_close_args *ap) 259 { 260 struct vnode *vp = ap->a_vp; 261 struct ucred *cred = ap->a_cred; 262 int fflag = ap->a_fflag; 263 fufh_type_t fufh_type; 264 265 fuse_trace_printf_vnop(); 266 267 if (fuse_isdeadfs(vp)) { 268 return 0; 
269 } 270 if (vnode_isdir(vp)) { 271 if (fuse_filehandle_valid(vp, FUFH_RDONLY)) { 272 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred); 273 } 274 return 0; 275 } 276 if (fflag & IO_NDELAY) { 277 return 0; 278 } 279 fufh_type = fuse_filehandle_xlate_from_fflags(fflag); 280 281 if (!fuse_filehandle_valid(vp, fufh_type)) { 282 int i; 283 284 for (i = 0; i < FUFH_MAXTYPE; i++) 285 if (fuse_filehandle_valid(vp, i)) 286 break; 287 if (i == FUFH_MAXTYPE) 288 panic("FUSE: fufh type %d found to be invalid in close" 289 " (fflag=0x%x)\n", 290 fufh_type, fflag); 291 } 292 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) { 293 fuse_vnode_savesize(vp, cred); 294 } 295 return 0; 296 } 297 298 /* 299 struct vnop_create_args { 300 struct vnode *a_dvp; 301 struct vnode **a_vpp; 302 struct componentname *a_cnp; 303 struct vattr *a_vap; 304 }; 305 */ 306 static int 307 fuse_vnop_create(struct vop_create_args *ap) 308 { 309 struct vnode *dvp = ap->a_dvp; 310 struct vnode **vpp = ap->a_vpp; 311 struct componentname *cnp = ap->a_cnp; 312 struct vattr *vap = ap->a_vap; 313 struct thread *td = cnp->cn_thread; 314 struct ucred *cred = cnp->cn_cred; 315 316 struct fuse_open_in *foi; 317 struct fuse_entry_out *feo; 318 struct fuse_dispatcher fdi; 319 struct fuse_dispatcher *fdip = &fdi; 320 321 int err; 322 323 struct mount *mp = vnode_mount(dvp); 324 uint64_t parentnid = VTOFUD(dvp)->nid; 325 mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode); 326 uint64_t x_fh_id; 327 uint32_t x_open_flags; 328 329 fuse_trace_printf_vnop(); 330 331 if (fuse_isdeadfs(dvp)) { 332 return ENXIO; 333 } 334 bzero(&fdi, sizeof(fdi)); 335 336 /* XXX: Will we ever want devices ? 
	 */
	if ((vap->va_type != VREG)) {
		/*
		 * NOTE(review): this jumps to bringup with fdi still
		 * bzero'ed, so feo below would be NULL — presumably this
		 * path is unreachable for the types the VFS hands us here;
		 * confirm before relying on it.
		 */
		MPASS(vap->va_type != VFIFO);
		goto bringup;
	}
	debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
	    mode);

	fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1);
	if (!fsess_isimpl(mp, FUSE_CREATE)) {
		debug_printf("eh, daemon doesn't implement create?\n");
		return (EINVAL);
	}
	fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred);

	/* Message layout: fuse_open_in followed by the NUL-terminated name. */
	foi = fdip->indata;
	foi->mode = mode;
	foi->flags = O_CREAT | O_RDWR;

	memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr,
	    cnp->cn_namelen);
	((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0';

	err = fdisp_wait_answ(fdip);

	if (err == ENOSYS) {
		/*
		 * NOTE(review): after fdisp_destroy() here, control falls
		 * through to bringup and dereferences fdip->answ — looks
		 * like a use-after-teardown; verify the dispatcher's
		 * lifetime rules before changing.
		 */
		debug_printf("create: got ENOSYS from daemon\n");
		fsess_set_notimpl(mp, FUSE_CREATE);
		fdisp_destroy(fdip);
	} else if (err) {
		debug_printf("create: darn, got err=%d from daemon\n", err);
		goto out;
	}
bringup:
	feo = fdip->answ;

	if ((err = fuse_internal_checkentry(feo, VREG))) {
		goto out;
	}
	err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
	if (err) {
		/*
		 * Could not bind a vnode to the new node: tell the daemon
		 * to release the handle it opened for us, and forget it.
		 */
		struct fuse_release_in *fri;
		uint64_t nodeid = feo->nodeid;
		uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh;

		fdisp_init(fdip, sizeof(*fri));
		fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred);
		fri = fdip->indata;
		fri->fh = fh_id;
		fri->flags = OFLAGS(mode);
		fuse_insert_callback(fdip->tick, fuse_internal_forget_callback);
		fuse_insert_message(fdip->tick);
		return err;
	}
	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create");

	/* The fuse_open_out follows the fuse_entry_out in the answer. */
	fdip->answ = feo + 1;

	x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
	x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags;
	fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id);
	fuse_vnode_open(*vpp, x_open_flags, td);
	cache_purge_negative(dvp);

out:
	fdisp_destroy(fdip);
	return err;
}

/*
 * Our vnop_fsync roughly corresponds to the FUSE_FSYNC method. The Linux
 * version of FUSE also has a FUSE_FLUSH method.
 *
 * On Linux, fsync() synchronizes a file's complete in-core state with that
 * on disk. The call is not supposed to return until the system has completed
 * that action or until an error is detected.
 *
 * Linux also has an fdatasync() call that is similar to fsync() but is not
 * required to update the metadata such as access time and modification time.
 */

/*
    struct vnop_fsync_args {
	struct vnodeop_desc *a_desc;
	struct vnode * a_vp;
	struct ucred * a_cred;
	int  a_waitfor;
	struct thread * a_td;
    };
*/
/*
 * Flush dirty buffers with vop_stdfsync(), then issue FUSE_FSYNC (or
 * FUSE_FSYNCDIR) for each valid filehandle when the daemon implements it.
 */
static int
fuse_vnop_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	struct fuse_filehandle *fufh;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	int type, err = 0;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(vp)) {
		return 0;
	}
	if ((err = vop_stdfsync(ap)))
		return err;

	if (!fsess_isimpl(vnode_mount(vp),
	    (vnode_vtype(vp) == VDIR ?
	    FUSE_FSYNCDIR : FUSE_FSYNC))) {
		goto out;
	}
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			/*
			 * NOTE(review): per-handle fsync errors are
			 * discarded and 0 is returned regardless —
			 * apparently deliberate best-effort; confirm.
			 */
			fuse_internal_fsync(vp, td, NULL, fufh);
		}
	}

out:
	return 0;
}

/*
    struct vnop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
/*
 * Return a vnode's attributes, serving from the per-vnode attribute cache
 * when valid and otherwise issuing FUSE_GETATTR.  A locally pending size
 * change (FN_SIZECHANGE) overrides whatever the daemon reports.  For an
 * unreachable daemon the root vnode gets fabricated ("fake") attributes
 * so the mount stays unmountable/recoverable.
 */
static int
fuse_vnop_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	int err = 0;
	int dataflags;
	struct fuse_dispatcher fdi;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;

	/* Note that we are not bailing out on a dead file system just yet. */

	/* look for cached attributes */
	if (fuse_isvalid_attr(vp)) {
		if (vap != VTOVA(vp)) {
			memcpy(vap, VTOVA(vp), sizeof(*vap));
		}
		if ((fvdat->flag & FN_SIZECHANGE) != 0) {
			vap->va_size = fvdat->filesize;
		}
		debug_printf("return cached: inode=%ju\n", (uintmax_t)VTOI(vp));
		return 0;
	}
	if (!(dataflags & FSESS_INITED)) {
		if (!vnode_isvroot(vp)) {
			/* Daemon never initialized: mark the session dead. */
			fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
			err = ENOTCONN;
			debug_printf("fuse_getattr b: returning ENOTCONN\n");
			return err;
		} else {
			goto fake;
		}
	}
	fdisp_init(&fdi, 0);
	if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
		if ((err == ENOTCONN) && vnode_isvroot(vp)) {
			/* see comment at similar place in fuse_statfs() */
			fdisp_destroy(&fdi);
			goto fake;
		}
		if (err == ENOENT) {
			fuse_internal_vnode_disappear(vp);
		}
		goto out;
	}
	cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	if (vap != VTOVA(vp)) {
		memcpy(vap, VTOVA(vp), sizeof(*vap));
	}
	if ((fvdat->flag & FN_SIZECHANGE) != 0)
		vap->va_size = fvdat->filesize;

	if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
		/*
		 * This is for those cases when the file size changed without us
		 * knowing, and we want to catch up.
		 */
		off_t new_filesize = ((struct fuse_attr_out *)
		    fdi.answ)->attr.size;

		if (fvdat->filesize != new_filesize) {
			fuse_vnode_setsize(vp, cred, new_filesize);
		}
	}
	KASSERT(vnode_vtype(vp) == vap->va_type, ("stale vnode"));
	debug_printf("fuse_getattr e: returning 0\n");

out:
	fdisp_destroy(&fdi);
	return err;

fake:
	bzero(vap, sizeof(*vap));
	vap->va_type = vnode_vtype(vp);

	return 0;
}

/*
    struct vnop_inactive_args {
	struct vnode *a_vp;
	struct thread *a_td;
    };
*/
/*
 * Last close of a vnode: flush (or invalidate, if revoked) its buffers
 * once, close every valid filehandle, and recycle revoked vnodes when
 * the fuse_reclaim_revoked tunable is on.
 */
static int
fuse_vnop_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int type, need_flush = 1;

	FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));

	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			if (need_flush && vp->v_type == VREG) {
				if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
					fuse_vnode_savesize(vp, NULL);
				}
				if (fuse_data_cache_invalidate ||
				    (fvdat->flag & FN_REVOKED) != 0)
					fuse_io_invalbuf(vp, td);
				else
					fuse_io_flushbuf(vp, MNT_WAIT, td);
				/* Buffers handled once for all handle types. */
				need_flush = 0;
			}
			fuse_filehandle_close(vp, type, td, NULL);
		}
	}

	if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
		vrecycle(vp);
	}
	return 0;
}

/*
    struct vnop_link_args {
	struct vnode *a_tdvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
    };
*/
/*
 * Create a hard link via FUSE_LINK and invalidate the cached attributes
 * of both the target directory and the linked vnode.
 */
static int
fuse_vnop_link(struct vop_link_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;

	struct vattr *vap = VTOVA(vp);

	struct fuse_dispatcher fdi;
	struct fuse_entry_out *feo;
	struct fuse_link_in fli;

	int err;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if (vnode_mount(tdvp) != vnode_mount(vp)) {
		return EXDEV;
	}
	if (vap->va_nlink >= FUSE_LINK_MAX) {
		return EMLINK;
	}
	fli.oldnodeid = VTOI(vp);

	fdisp_init(&fdi, 0);
	fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
	    FUSE_LINK, &fli, sizeof(fli), &fdi);
	if ((err = fdisp_wait_answ(&fdi))) {
		goto out;
	}
	feo = fdi.answ;

	err = fuse_internal_checkentry(feo, vnode_vtype(vp));
	fuse_invalidate_attr(tdvp);
	fuse_invalidate_attr(vp);

out:
	fdisp_destroy(&fdi);
	return err;
}

/*
    struct vnop_lookup_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
    };
*/
/*
 * Resolve one pathname component.  "." and ".." resolve locally (via
 * FUSE_GETATTR on the known nodeid); other names optionally hit the VFS
 * name cache and otherwise go to the daemon as FUSE_LOOKUP.  The result
 * handling follows the standard VOP_LOOKUP contract: EJUSTRETURN for
 * creatable last components, SAVENAME for CREATE/RENAME/DELETE, and the
 * careful unlock/relock dance for "..".
 */
int
fuse_vnop_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct ucred *cred = cnp->cn_cred;

	int nameiop = cnp->cn_nameiop;
	int flags = cnp->cn_flags;
	int wantparent = flags & (LOCKPARENT | WANTPARENT);
	int islastcn = flags & ISLASTCN;
	struct mount *mp = vnode_mount(dvp);

	int err = 0;
	int lookup_err = 0;
	struct vnode *vp = NULL;

	struct fuse_dispatcher fdi;
	enum fuse_opcode op;

	uint64_t nid;
	struct fuse_access_param facp;

	FS_DEBUG2G("parent_inode=%ju - %*s\n",
	    (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);

	if (fuse_isdeadfs(dvp)) {
		*vpp = NULL;
		return ENXIO;
	}
	if (!vnode_isdir(dvp)) {
		return ENOTDIR;
	}
	if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
		return EROFS;
	}
	/*
	 * We do access check prior to doing anything else only in the case
	 * when we are at fs root (we'd like to say, "we are at the first
	 * component", but that's not exactly the same... nevermind).
	 * See further comments at further access checks.
	 */

	bzero(&facp, sizeof(facp));
	if (vnode_isvroot(dvp)) {	/* early permission check hack */
		if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
			return err;
		}
	}
	if (flags & ISDOTDOT) {
		nid = VTOFUD(dvp)->parent_nid;
		if (nid == 0) {
			return ENOENT;
		}
		fdisp_init(&fdi, 0);
		op = FUSE_GETATTR;
		goto calldaemon;
	} else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
		nid = VTOI(dvp);
		fdisp_init(&fdi, 0);
		op = FUSE_GETATTR;
		goto calldaemon;
	} else if (fuse_lookup_cache_enable) {
		err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
		switch (err) {

		case -1:		/* positive match */
			atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
			return 0;

		case 0:		/* no match in cache */
			atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
			break;

		case ENOENT:		/* negative match */
			/* fall through */
		default:
			return err;
		}
	}
	nid = VTOI(dvp);
	fdisp_init(&fdi, cnp->cn_namelen + 1);
	op = FUSE_LOOKUP;

calldaemon:
	fdisp_make(&fdi, op, mp, nid, td, cred);

	if (op == FUSE_LOOKUP) {
		/* Payload is the NUL-terminated component name. */
		memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
		((char *)fdi.indata)[cnp->cn_namelen] = '\0';
	}
	lookup_err = fdisp_wait_answ(&fdi);

	if ((op == FUSE_LOOKUP) && !lookup_err) {	/* lookup call succeeded */
		nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
		if (!nid) {
			/*
			 * zero nodeid is the same as "not found",
			 * but it's also cacheable (which we keep
			 * keep on doing not as of writing this)
			 */
			lookup_err = ENOENT;
		} else if (nid == FUSE_ROOT_ID) {
			lookup_err = EINVAL;
		}
	}
	if (lookup_err &&
	    (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
		fdisp_destroy(&fdi);
		return lookup_err;
	}
	/* lookup_err, if non-zero, must be ENOENT at this point */

	if (lookup_err) {

		if ((nameiop == CREATE || nameiop == RENAME) && islastcn
		    /* && directory dvp has not been removed */ ) {

			if (vfs_isrdonly(mp)) {
				err = EROFS;
				goto out;
			}
#if 0 /* THINK_ABOUT_THIS */
			if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
				goto out;
			}
#endif

			/*
			 * Possibly record the position of a slot in the
			 * directory large enough for the new component name.
			 * This can be recorded in the vnode private data for
			 * dvp. Set the SAVENAME flag to hold onto the
			 * pathname for use later in VOP_CREATE or VOP_RENAME.
			 */
			cnp->cn_flags |= SAVENAME;

			err = EJUSTRETURN;
			goto out;
		}
		/* Consider inserting name into cache. */

		/*
		 * No we can't use negative caching, as the fs
		 * changes are out of our control.
		 * False positives' falseness turns out just as things
		 * go by, but false negatives' falseness doesn't.
		 * (and aiding the caching mechanism with extra control
		 * mechanisms comes quite close to beating the whole purpose
		 * caching...)
		 */
#if 0
		if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
			FS_DEBUG("inserting NULL into cache\n");
			cache_enter(dvp, NULL, cnp);
		}
#endif
		err = ENOENT;
		goto out;

	} else {

		/* !lookup_err */

		struct fuse_entry_out *feo = NULL;
		struct fuse_attr *fattr = NULL;

		if (op == FUSE_GETATTR) {
			fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
		} else {
			feo = (struct fuse_entry_out *)fdi.answ;
			fattr = &(feo->attr);
		}

		/*
		 * If deleting, and at end of pathname, return parameters
		 * which can be used to remove file.  If the wantparent flag
		 * isn't set, we return only the directory, otherwise we go on
		 * and lock the inode, being careful with ".".
		 */
		if (nameiop == DELETE && islastcn) {
			/*
			 * Check for write access on directory.
			 */
			facp.xuid = fattr->uid;
			facp.facc_flags |= FACCESS_STICKY;
			err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
			facp.facc_flags &= ~FACCESS_XQUERIES;

			if (err) {
				goto out;
			}
			if (nid == VTOI(dvp)) {
				vref(dvp);
				*vpp = dvp;
			} else {
				err = fuse_vnode_get(dvp->v_mount, nid, dvp,
				    &vp, cnp, IFTOVT(fattr->mode));
				if (err)
					goto out;
				*vpp = vp;
			}

			/*
			 * Save the name for use in VOP_RMDIR and VOP_REMOVE
			 * later.
			 */
			cnp->cn_flags |= SAVENAME;
			goto out;

		}
		/*
		 * If rewriting (RENAME), return the inode and the
		 * information required to rewrite the present directory
		 * Must get inode of directory entry to verify it's a
		 * regular file, or empty directory.
		 */
		if (nameiop == RENAME && wantparent && islastcn) {

#if 0 /* THINK_ABOUT_THIS */
			if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
				goto out;
			}
#endif

			/*
			 * Check for "."
			 */
			if (nid == VTOI(dvp)) {
				err = EISDIR;
				goto out;
			}
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    dvp,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			if (err) {
				goto out;
			}
			*vpp = vp;
			/*
			 * Save the name for use in VOP_RENAME later.
			 */
			cnp->cn_flags |= SAVENAME;

			goto out;
		}
		if (flags & ISDOTDOT) {
			struct mount *mp;
			int ltype;

			/*
			 * Expanded copy of vn_vget_ino() so that
			 * fuse_vnode_get() can be used.
			 */
			mp = dvp->v_mount;
			ltype = VOP_ISLOCKED(dvp);
			err = vfs_busy(mp, MBF_NOWAIT);
			if (err != 0) {
				vfs_ref(mp);
				VOP_UNLOCK(dvp, 0);
				err = vfs_busy(mp, 0);
				vn_lock(dvp, ltype | LK_RETRY);
				vfs_rel(mp);
				if (err)
					goto out;
				if ((dvp->v_iflag & VI_DOOMED) != 0) {
					err = ENOENT;
					vfs_unbusy(mp);
					goto out;
				}
			}
			VOP_UNLOCK(dvp, 0);
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    NULL,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			vfs_unbusy(mp);
			vn_lock(dvp, ltype | LK_RETRY);
			if ((dvp->v_iflag & VI_DOOMED) != 0) {
				/* dvp went away while it was unlocked. */
				if (err == 0)
					vput(vp);
				err = ENOENT;
			}
			if (err)
				goto out;
			*vpp = vp;
		} else if (nid == VTOI(dvp)) {
			vref(dvp);
			*vpp = dvp;
		} else {
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    dvp,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			if (err) {
				goto out;
			}
			fuse_vnode_setparent(vp, dvp);
			*vpp = vp;
		}

		if (op == FUSE_GETATTR) {
			cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
		} else {
			cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
		}

		/* Insert name into cache if appropriate. */

		/*
		 * Nooo, caching is evil. With caching, we can't avoid stale
		 * information taking over the playground (cached info is not
		 * just positive/negative, it does have qualitative aspects,
		 * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
		 * walking down along cached path components, and that's not
		 * any cheaper than FUSE_LOOKUP. This might change with
		 * implementing kernel side attr caching, but... In Linux,
		 * lookup results are not cached, and the daemon is bombarded
		 * with FUSE_LOOKUPS on and on. This shows that by design, the
		 * daemon is expected to handle frequent lookup queries
		 * efficiently, do its caching in userspace, and so on.
		 *
		 * So just leave the name cache alone.
		 */

		/*
		 * Well, now I know, Linux caches lookups, but with a
		 * timeout... So it's the same thing as attribute caching:
		 * we can deal with it when implement timeouts.
		 */
#if 0
		if (cnp->cn_flags & MAKEENTRY) {
			cache_enter(dvp, *vpp, cnp);
		}
#endif
	}
out:
	if (!lookup_err) {

		/* No lookup error; need to clean up. */

		if (err) {		/* Found inode; exit with no vnode. */
			if (op == FUSE_LOOKUP) {
				fuse_internal_forget_send(vnode_mount(dvp), td, cred,
				    nid, 1);
			}
			fdisp_destroy(&fdi);
			return err;
		} else {
#ifndef NO_EARLY_PERM_CHECK_HACK
			if (!islastcn) {
				/*
				 * We have the attributes of the next item
				 * *now*, and it's a fact, and we do not
				 * have to do extra work for it (ie, beg the
				 * daemon), and it neither depends on such
				 * accidental things like attr caching. So
				 * the big idea: check credentials *now*,
				 * not at the beginning of the next call to
				 * lookup.
				 *
				 * The first item of the lookup chain (fs root)
				 * won't be checked then here, of course, as
				 * its never "the next". But go and see that
				 * the root is taken care about at the very
				 * beginning of this function.
				 *
				 * Now, given we want to do the access check
				 * this way, one might ask: so then why not
				 * do the access check just after fetching
				 * the inode and its attributes from the
				 * daemon? Why bother with producing the
				 * corresponding vnode at all if something
				 * is not OK? We know what's the deal as
				 * soon as we get those attrs... There is
				 * one bit of info though not given us by
				 * the daemon: whether his response is
				 * authorative or not... His response should
				 * be ignored if something is mounted over
				 * the dir in question. But that can be
				 * known only by having the vnode...
				 */
				int tmpvtype = vnode_vtype(*vpp);

				bzero(&facp, sizeof(facp));
				/*the early perm check hack */
				facp.facc_flags |= FACCESS_VA_VALID;

				if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
					err = ENOTDIR;
				}
				if (!err && !vnode_mountedhere(*vpp)) {
					err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
				}
				if (err) {
					if (tmpvtype == VLNK)
						FS_DEBUG("weird, permission error with a symlink?\n");
					vput(*vpp);
					*vpp = NULL;
				}
			}
#endif
		}
	}
	fdisp_destroy(&fdi);

	return err;
}

/*
    struct vnop_mkdir_args {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
	struct vattr *a_vap;
    };
*/
/*
 * Create a directory via FUSE_MKDIR and invalidate the parent's cached
 * attributes on success.
 */
static int
fuse_vnop_mkdir(struct vop_mkdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;

	int err = 0;

	struct fuse_mkdir_in fmdi;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(dvp)) {
		return ENXIO;
	}
	fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode);

	err = fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
	    sizeof(fmdi), VDIR);

	if (err == 0) {
		fuse_invalidate_attr(dvp);
	}
	return err;
}

/*
    struct vnop_mknod_args {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
	struct vattr *a_vap;
    };
*/
/* Device/special nodes are not supported by this FUSE implementation. */
static int
fuse_vnop_mknod(struct vop_mknod_args *ap)
{

	return (EINVAL);
}


/*
    struct vnop_open_args {
	struct vnode *a_vp;
	int  a_mode;
	struct ucred *a_cred;
	struct thread *a_td;
	int a_fdidx; / struct file *a_fp;
    };
*/
/*
 * Open a vnode: reuse an already-valid filehandle of the right type, or
 * ask the daemon (FUSE_OPEN/FUSE_OPENDIR) for a new one.
 */
static int
fuse_vnop_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int mode = ap->a_mode;
	struct thread *td = ap->a_td;
	struct ucred *cred = ap->a_cred;

	fufh_type_t fufh_type;
	struct fuse_vnode_data *fvdat;

	int error, isdir = 0;

	FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	fvdat = VTOFUD(vp);

	if (vnode_isdir(vp)) {
		isdir = 1;
	}
	/* Directories always use the read-only handle type. */
	if (isdir) {
		fufh_type = FUFH_RDONLY;
	} else {
		fufh_type = fuse_filehandle_xlate_from_fflags(mode);
	}

	if (fuse_filehandle_valid(vp, fufh_type)) {
		/* Handle already open: just refresh vnode open state. */
		fuse_vnode_open(vp, 0, td);
		return 0;
	}
	error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);

	return error;
}

/*
    struct vnop_read_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	int  a_ioflag;
	struct ucred *a_cred;
    };
*/
/* Read from a vnode; all real work is delegated to the fuse I/O layer. */
static int
fuse_vnop_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int ioflag = ap->a_ioflag;
	struct ucred *cred = ap->a_cred;

	FS_DEBUG2G("inode=%ju offset=%jd resid=%zd\n",
	    (uintmax_t)VTOI(vp), uio->uio_offset, uio->uio_resid);

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	return fuse_io_dispatch(vp, uio, ioflag, cred);
}

/*
    struct vnop_readdir_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
	int *a_eofflag;
	int *ncookies;
	u_long **a_cookies;
    };
*/
/*
 * Read directory entries.  Opens a transient read-only handle if the
 * caller somehow reads before opening, and closes it again afterwards.
 */
static int
fuse_vnop_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct ucred *cred = ap->a_cred;

	struct fuse_filehandle *fufh = NULL;
	struct fuse_vnode_data *fvdat;
	struct fuse_iov cookediov;

	int err = 0;
	int freefufh = 0;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
	    (uio_resid(uio) < sizeof(struct dirent))) {
		return EINVAL;
	}
	fvdat = VTOFUD(vp);

	if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
		FS_DEBUG("calling readdir() before open()");
		err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
		freefufh = 1;
	} else {
		err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
	}
	if (err) {
		return (err);
	}
	/* Scratch buffer large enough for one cooked (aligned) dirent. */
#define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
	fiov_init(&cookediov, DIRCOOKEDSIZE);

	err = fuse_internal_readdir(vp, uio, fufh, &cookediov);

	fiov_teardown(&cookediov);
	if (freefufh) {
		fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
	}
	return err;
}

/*
    struct vnop_readlink_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
    };
*/
/*
 * Read a symlink's target via FUSE_READLINK.  With FSESS_PUSH_SYMLINKS_IN,
 * absolute targets are prefixed with the mountpoint path so they resolve
 * inside the FUSE namespace.
 */
static int
fuse_vnop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct ucred *cred = ap->a_cred;

	struct fuse_dispatcher fdi;
	int err;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if (!vnode_islnk(vp)) {
		return EINVAL;
	}
	fdisp_init(&fdi, 0);
	err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred);
	if (err) {
		goto out;
	}
	if (((char *)fdi.answ)[0] == '/' &&
	    fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) {
		char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname;

		/*
		 * NOTE(review): the daemon's answer still begins with '/',
		 * so the result is "<mntonname>/..." with a doubled slash
		 * unless mntonname has no trailing slash — presumably
		 * harmless to namei; confirm.
		 */
		err = uiomove(mpth, strlen(mpth), uio);
	}
	if (!err) {
		err = uiomove(fdi.answ, fdi.iosize, uio);
	}
out:
	fdisp_destroy(&fdi);
	return err;
}

/*
    struct vnop_reclaim_args {
	struct vnode *a_vp;
	struct thread *a_td;
    };
*/
/*
 * Reclaim a vnode: close any (unexpectedly) surviving filehandles, send
 * the accumulated FUSE_FORGET count to the daemon, and tear down all
 * per-vnode state.
 */
static int
fuse_vnop_reclaim(struct
vop_reclaim_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct thread *td = ap->a_td;

    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;

    int type;

    if (!fvdat) {
        panic("FUSE: no vnode data during recycling");
    }
    FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));

    /* Close any handle that is somehow still open at reclaim time. */
    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            printf("FUSE: vnode being reclaimed but fufh (type=%d) is valid",
                type);
            fuse_filehandle_close(vp, type, td, NULL);
        }
    }

    /* Tell the daemon to drop its reference counts, unless it's gone. */
    if ((!fuse_isdeadfs(vp)) && (fvdat->nlookup)) {
        fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp),
            fvdat->nlookup);
    }
    fuse_vnode_setparent(vp, NULL);
    cache_purge(vp);
    vfs_hash_remove(vp);
    vnode_destroy_vobject(vp);
    fuse_vnode_destroy(vp);

    return 0;
}

/*
    struct vnop_remove_args {
        struct vnode *a_dvp;
        struct vnode *a_vp;
        struct componentname *a_cnp;
    };
*/
/*
 * Unlink a non-directory entry via FUSE_UNLINK; directories must go
 * through rmdir (EPERM here). On success the vnode is revoked internally
 * and the parent's cached attributes are invalidated.
 */
static int
fuse_vnop_remove(struct vop_remove_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode *vp = ap->a_vp;
    struct componentname *cnp = ap->a_cnp;

    int err;

    FS_DEBUG2G("inode=%ju name=%*s\n",
        (uintmax_t)VTOI(vp), (int)cnp->cn_namelen, cnp->cn_nameptr);

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    if (vnode_isdir(vp)) {
        return EPERM;
    }
    cache_purge(vp);

    err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);

    if (err == 0) {
        fuse_internal_vnode_disappear(vp);
        fuse_invalidate_attr(dvp);
    }
    return err;
}

/*
    struct vnop_rename_args {
        struct vnode *a_fdvp;
        struct vnode *a_fvp;
        struct componentname *a_fcnp;
        struct vnode *a_tdvp;
        struct vnode *a_tvp;
        struct componentname *a_tcnp;
    };
*/
/*
 * Rename. Cross-mount renames are rejected with EXDEV. The actual
 * FUSE_RENAME exchange is serialized under the per-mount rename_lock.
 * Per the VOP_RENAME contract the function must release all four vnode
 * references on every path (see the out: label).
 */
static int
fuse_vnop_rename(struct vop_rename_args *ap)
{
    struct vnode *fdvp = ap->a_fdvp;
    struct vnode *fvp = ap->a_fvp;
    struct componentname *fcnp = ap->a_fcnp;
    struct vnode *tdvp = ap->a_tdvp;
    struct vnode *tvp = ap->a_tvp;
    struct componentname *tcnp = ap->a_tcnp;
    struct fuse_data *data;

    int err = 0;

    FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n",
        (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr,
        (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)),
        (int)tcnp->cn_namelen, tcnp->cn_nameptr);

    if (fuse_isdeadfs(fdvp)) {
        return ENXIO;
    }
    if (fvp->v_mount != tdvp->v_mount ||
        (tvp && fvp->v_mount != tvp->v_mount)) {
        FS_DEBUG("cross-device rename: %s -> %s\n",
            fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)"));
        err = EXDEV;
        goto out;
    }
    cache_purge(fvp);

    /*
     * FUSE library is expected to check if target directory is not
     * under the source directory in the file system tree.
     * Linux performs this check at VFS level.
*/
    data = fuse_get_mpdata(vnode_mount(tdvp));
    sx_xlock(&data->rename_lock);
    err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
    if (err == 0) {
        fuse_invalidate_attr(fdvp);
        if (tdvp != fdvp) {
            /* The entry moved to a new parent directory. */
            fuse_vnode_setparent(fvp, tdvp);
            fuse_invalidate_attr(tdvp);
        }
        if (tvp != NULL)
            fuse_vnode_setparent(tvp, NULL);
    }
    sx_unlock(&data->rename_lock);

    if (tvp != NULL && tvp != fvp) {
        cache_purge(tvp);
    }
    if (vnode_isdir(fvp)) {
        if ((tvp != NULL) && vnode_isdir(tvp)) {
            cache_purge(tdvp);
        }
        cache_purge(fdvp);
    }
out:
    /* VOP_RENAME must drop all four references itself, success or not. */
    if (tdvp == tvp) {
        vrele(tdvp);
    } else {
        vput(tdvp);
    }
    if (tvp != NULL) {
        vput(tvp);
    }
    vrele(fdvp);
    vrele(fvp);

    return err;
}

/*
    struct vnop_rmdir_args {
        struct vnode *a_dvp;
        struct vnode *a_vp;
        struct componentname *a_cnp;
    } *ap;
*/
/*
 * Remove a directory via FUSE_RMDIR. Removing "." (vp == dvp) is
 * rejected with EINVAL. On success the vnode is revoked and the parent's
 * cached attributes invalidated.
 */
static int
fuse_vnop_rmdir(struct vop_rmdir_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode *vp = ap->a_vp;

    int err;

    FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    if (VTOFUD(vp) == VTOFUD(dvp)) {
        return EINVAL;
    }
    err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);

    if (err == 0) {
        fuse_internal_vnode_disappear(vp);
        fuse_invalidate_attr(dvp);
    }
    return err;
}

/*
    struct vnop_setattr_args {
        struct vnode *a_vp;
        struct vattr *a_vap;
        struct ucred *a_cred;
        struct thread *a_td;
    };
*/
/*
 * Set attributes. Builds a FUSE_SETATTR request from whichever vattr
 * fields are not VNOVAL (uid/gid/size/times/mode), sends it, and
 * reconciles the cached vnode state with the daemon's answer. A size
 * change is propagated to the VM object after the request completes.
 */
static int
fuse_vnop_setattr(struct vop_setattr_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct vattr *vap = ap->a_vap;
    struct ucred *cred = ap->a_cred;
    struct thread *td = curthread;

    struct fuse_dispatcher fdi;
    struct fuse_setattr_in *fsai;
    struct fuse_access_param facp;

    int err = 0;
    enum vtype vtyp;
    int sizechanged = 0;
    uint64_t newsize = 0;

    FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    fdisp_init(&fdi, sizeof(*fsai));
    fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
    fsai = fdi.indata;
    fsai->valid = 0;

    bzero(&facp, sizeof(facp));

    facp.xuid = vap->va_uid;
    facp.xgid = vap->va_gid;

    if (vap->va_uid != (uid_t)VNOVAL) {
        facp.facc_flags |= FACCESS_CHOWN;
        fsai->uid = vap->va_uid;
        fsai->valid |= FATTR_UID;
    }
    if (vap->va_gid != (gid_t)VNOVAL) {
        facp.facc_flags |= FACCESS_CHOWN;
        fsai->gid = vap->va_gid;
        fsai->valid |= FATTR_GID;
    }
    if (vap->va_size != VNOVAL) {

        struct fuse_filehandle *fufh = NULL;

        /* Truncate to a new value. */
        fsai->size = vap->va_size;
        sizechanged = 1;
        newsize = vap->va_size;
        fsai->valid |= FATTR_SIZE;

        /* Pass along a writable handle if we have one (ftruncate case). */
        fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
        if (fufh) {
            fsai->fh = fufh->fh_id;
            fsai->valid |= FATTR_FH;
        }
    }
    if (vap->va_atime.tv_sec != VNOVAL) {
        fsai->atime = vap->va_atime.tv_sec;
        fsai->atimensec = vap->va_atime.tv_nsec;
        fsai->valid |= FATTR_ATIME;
    }
    if (vap->va_mtime.tv_sec != VNOVAL) {
        fsai->mtime = vap->va_mtime.tv_sec;
        fsai->mtimensec = vap->va_mtime.tv_nsec;
        fsai->valid |= FATTR_MTIME;
    }
    if (vap->va_mode != (mode_t)VNOVAL) {
        fsai->mode = vap->va_mode & ALLPERMS;
        fsai->valid |= FATTR_MODE;
    }
    if (!fsai->valid) {
        /* Nothing to change — skip the round trip. */
        goto out;
    }
    vtyp = vnode_vtype(vp);

    if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
        err = EISDIR;
        goto out;
    }
    if (vfs_isrdonly(vnode_mount(vp)) && (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
        err = EROFS;
        goto out;
    }
    if (fsai->valid & ~FATTR_SIZE) {
        /* err = fuse_internal_access(vp, VADMIN,
context, &facp); */
        /* XXX */
        err = 0;
    }
    facp.facc_flags &= ~FACCESS_XQUERIES;

    /* Utimes-with-NULL may be allowed with mere write permission. */
    if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
        vap->va_vaflags & VA_UTIMES_NULL) {
        err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
    }
    if (err) {
        fuse_invalidate_attr(vp);
        goto out;
    }
    if ((err = fdisp_wait_answ(&fdi))) {
        fuse_invalidate_attr(vp);
        goto out;
    }
    vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);

    if (vnode_vtype(vp) != vtyp) {
        if (vnode_vtype(vp) == VNON && vtyp != VNON) {
            debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
        } else {
            /*
             * STALE vnode, ditch
             *
             * The vnode has changed its type "behind our back". There's
             * nothing really we can do, so let us just force an internal
             * revocation and tell the caller to try again, if interested.
             */
            fuse_internal_vnode_disappear(vp);
            err = EAGAIN;
        }
    }
    if (!err && !sizechanged) {
        cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
    }
out:
    fdisp_destroy(&fdi);
    if (!err && sizechanged) {
        /* Push the new size into the VM object / buffer cache. */
        fuse_invalidate_attr(vp);
        fuse_vnode_setsize(vp, cred, newsize);
        VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
    }
    return err;
}

/*
    struct vnop_strategy_args {
        struct vnode *a_vp;
        struct buf *a_bp;
    };
*/
/*
 * Buffer-cache strategy entry point; delegates to fuse_io_strategy().
 * Deliberately always returns 0 — see the comment at the bottom.
 */
static int
fuse_vnop_strategy(struct vop_strategy_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct buf *bp = ap->a_bp;

    fuse_trace_printf_vnop();

    if (!vp || fuse_isdeadfs(vp)) {
        bp->b_ioflags |= BIO_ERROR;
        bp->b_error = ENXIO;
        bufdone(bp);
        return ENXIO;
    }
    if (bp->b_iocmd == BIO_WRITE)
        fuse_vnode_refreshsize(vp, NOCRED);

    (void)fuse_io_strategy(vp, bp);

    /*
     * This is a dangerous function. If returns error, that might mean a
     * panic. We prefer pretty much anything over being forced to panic
     * by a malicious daemon (a demon?). So we just return 0 anyway. You
     * should never mind this: this function has its own error
     * propagation mechanism via the argument buffer, so
     * not-that-melodramatic residents of the call chain still will be
     * able to know what to do.
     */
    return 0;
}


/*
    struct vnop_symlink_args {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
        struct vattr *a_vap;
        char *a_target;
    };
*/
/*
 * Create a symlink via FUSE_SYMLINK. The request payload is the entry
 * name followed by the NUL-terminated target string, assembled by hand
 * (see the comment below for why fuse_internal_newentry() can't be used).
 */
static int
fuse_vnop_symlink(struct vop_symlink_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode **vpp = ap->a_vpp;
    struct componentname *cnp = ap->a_cnp;
    char *target = ap->a_target;

    struct fuse_dispatcher fdi;

    int err;
    size_t len;

    FS_DEBUG2G("inode=%ju name=%*s\n",
        (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);

    if (fuse_isdeadfs(dvp)) {
        return ENXIO;
    }
    /*
     * Unlike the other creator type calls, here we have to create a message
     * where the name of the new entry comes first, and the data describing
     * the entry comes second.
     * Hence we can't rely on our handy fuse_internal_newentry() routine,
     * but put together the message manually and just call the core part.
*/

    len = strlen(target) + 1;	/* include the terminating NUL */
    fdisp_init(&fdi, len + cnp->cn_namelen + 1);
    fdisp_make_vp(&fdi, FUSE_SYMLINK, dvp, curthread, NULL);

    /* Payload layout: "<name>\0<target>\0". */
    memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
    ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
    memcpy((char *)fdi.indata + cnp->cn_namelen + 1, target, len);

    err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi);
    fdisp_destroy(&fdi);

    if (err == 0) {
        fuse_invalidate_attr(dvp);
    }
    return err;
}

/*
    struct vnop_write_args {
        struct vnode *a_vp;
        struct uio *a_uio;
        int a_ioflag;
        struct ucred *a_cred;
    };
*/
/*
 * Write: refresh the cached size first (the daemon may have changed it
 * behind our back), then delegate to the common I/O dispatcher.
 */
static int
fuse_vnop_write(struct vop_write_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct uio *uio = ap->a_uio;
    int ioflag = ap->a_ioflag;
    struct ucred *cred = ap->a_cred;

    fuse_trace_printf_vnop();

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    fuse_vnode_refreshsize(vp, cred);

    return fuse_io_dispatch(vp, uio, ioflag, cred);
}

/*
    struct vnop_getpages_args {
        struct vnode *a_vp;
        vm_page_t *a_m;
        int a_count;
        int a_reqpage;
        vm_ooffset_t a_offset;
    };
*/
/*
 * Pager read: fill the requested VM pages by issuing a direct read
 * through fuse_io_dispatch() into a pbuf-mapped KVA window, then mark
 * each page's valid range according to how many bytes actually arrived.
 */
static int
fuse_vnop_getpages(struct vop_getpages_args *ap)
{
    int i, error, nextoff, size, toff, count, npages;
    struct uio uio;
    struct iovec iov;
    vm_offset_t kva;
    struct buf *bp;
    struct vnode *vp;
    struct thread *td;
    struct ucred *cred;
    vm_page_t *pages;

    FS_DEBUG2G("heh\n");

    vp = ap->a_vp;
    KASSERT(vp->v_object, ("objectless vp passed to getpages"));
    td = curthread;			/* XXX */
    cred = curthread->td_ucred;		/* XXX */
    pages = ap->a_m;
    count = ap->a_count;

    if (!fsess_opt_mmap(vnode_mount(vp))) {
        FS_DEBUG("called on non-cacheable vnode??\n");
        return (VM_PAGER_ERROR);
    }
    npages = btoc(count);

    /*
     * If the requested page is partially valid, just return it and
     * allow the pager to zero-out the blanks. Partially valid pages
     * can only occur at the file EOF.
     */

    VM_OBJECT_LOCK(vp->v_object);
    fuse_vm_page_lock_queues();
    if (pages[ap->a_reqpage]->valid != 0) {
        /* Free every page except the one the caller asked for. */
        for (i = 0; i < npages; ++i) {
            if (i != ap->a_reqpage) {
                fuse_vm_page_lock(pages[i]);
                vm_page_free(pages[i]);
                fuse_vm_page_unlock(pages[i]);
            }
        }
        fuse_vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(vp->v_object);
        return 0;
    }
    fuse_vm_page_unlock_queues();
    VM_OBJECT_UNLOCK(vp->v_object);

    /*
     * We use only the kva address for the buffer, but this is extremely
     * convenient and fast.
     */
    bp = getpbuf(&fuse_pbuf_freecnt);

    kva = (vm_offset_t)bp->b_data;
    pmap_qenter(kva, pages, npages);
    PCPU_INC(cnt.v_vnodein);
    PCPU_ADD(cnt.v_vnodepgsin, npages);

    iov.iov_base = (caddr_t)kva;
    iov.iov_len = count;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
    uio.uio_resid = count;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_READ;
    uio.uio_td = td;

    error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
    pmap_qremove(kva, npages);

    relpbuf(bp, &fuse_pbuf_freecnt);

    if (error && (uio.uio_resid == count)) {
        /* Hard failure: nothing was read at all. */
        FS_DEBUG("error %d\n", error);
        VM_OBJECT_LOCK(vp->v_object);
        fuse_vm_page_lock_queues();
        for (i = 0; i < npages; ++i) {
            if (i != ap->a_reqpage) {
                fuse_vm_page_lock(pages[i]);
                vm_page_free(pages[i]);
                fuse_vm_page_unlock(pages[i]);
            }
        }
        fuse_vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(vp->v_object);
        return VM_PAGER_ERROR;
    }
    /*
     * Calculate the number of bytes read and validate only that number
     * of bytes. Note that due to pending writes, size may be 0. This
     * does not mean that the remaining data is invalid!
     */

    size = count - uio.uio_resid;
    VM_OBJECT_LOCK(vp->v_object);
    fuse_vm_page_lock_queues();
    for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
        vm_page_t m;

        nextoff = toff + PAGE_SIZE;
        m = pages[i];

        if (nextoff <= size) {
            /*
             * Read operation filled an entire page
             */
            m->valid = VM_PAGE_BITS_ALL;
            KASSERT(m->dirty == 0,
                ("fuse_getpages: page %p is dirty", m));
        } else if (size > toff) {
            /*
             * Read operation filled a partial page.
             */
            m->valid = 0;
            vm_page_set_valid_range(m, 0, size - toff);
            KASSERT(m->dirty == 0,
                ("fuse_getpages: page %p is dirty", m));
        } else {
            /*
             * Read operation was short. If no error occurred
             * we may have hit a zero-fill section. We simply
             * leave valid set to 0.
             */
            ;
        }
        if (i != ap->a_reqpage) {
            /*
             * Whether or not to leave the page activated is up in
             * the air, but we should put the page on a page queue
             * somewhere (it already is in the object). Result:
             * It appears that empirical results show that
             * deactivating pages is best.
             */

            /*
             * Just in case someone was asking for this page we
             * now tell them that it is ok to use.
*/
            if (!error) {
                if (m->oflags & VPO_WANTED) {
                    fuse_vm_page_lock(m);
                    vm_page_activate(m);
                    fuse_vm_page_unlock(m);
                } else {
                    fuse_vm_page_lock(m);
                    vm_page_deactivate(m);
                    fuse_vm_page_unlock(m);
                }
                vm_page_wakeup(m);
            } else {
                fuse_vm_page_lock(m);
                vm_page_free(m);
                fuse_vm_page_unlock(m);
            }
        }
    }
    fuse_vm_page_unlock_queues();
    VM_OBJECT_UNLOCK(vp->v_object);
    return 0;
}

/*
    struct vnop_putpages_args {
        struct vnode *a_vp;
        vm_page_t *a_m;
        int a_count;
        int a_sync;
        int *a_rtvals;
        vm_ooffset_t a_offset;
    };
*/
/*
 * Pager write: push dirty VM pages to the daemon with a direct write
 * through a pbuf-mapped KVA window. The write is clipped so it never
 * extends the file past EOF; successfully written pages are undirtied
 * and marked VM_PAGER_OK in rtvals.
 */
static int
fuse_vnop_putpages(struct vop_putpages_args *ap)
{
    struct uio uio;
    struct iovec iov;
    vm_offset_t kva;
    struct buf *bp;
    int i, error, npages, count;
    off_t offset;
    int *rtvals;
    struct vnode *vp;
    struct thread *td;
    struct ucred *cred;
    vm_page_t *pages;
    vm_ooffset_t fsize;

    FS_DEBUG2G("heh\n");

    vp = ap->a_vp;
    KASSERT(vp->v_object, ("objectless vp passed to putpages"));
    fsize = vp->v_object->un_pager.vnp.vnp_size;
    td = curthread;			/* XXX */
    cred = curthread->td_ucred;		/* XXX */
    pages = ap->a_m;
    count = ap->a_count;
    rtvals = ap->a_rtvals;
    npages = btoc(count);
    offset = IDX_TO_OFF(pages[0]->pindex);

    if (!fsess_opt_mmap(vnode_mount(vp))) {
        FS_DEBUG("called on non-cacheable vnode??\n");
    }
    for (i = 0; i < npages; i++)
        rtvals[i] = VM_PAGER_AGAIN;

    /*
     * When putting pages, do not extend file past EOF.
     */

    if (offset + count > fsize) {
        count = fsize - offset;
        if (count < 0)
            count = 0;
    }
    /*
     * We use only the kva address for the buffer, but this is extremely
     * convenient and fast.
     */
    bp = getpbuf(&fuse_pbuf_freecnt);

    kva = (vm_offset_t)bp->b_data;
    pmap_qenter(kva, pages, npages);
    PCPU_INC(cnt.v_vnodeout);
    PCPU_ADD(cnt.v_vnodepgsout, count);

    iov.iov_base = (caddr_t)kva;
    iov.iov_len = count;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = offset;
    uio.uio_resid = count;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_WRITE;
    uio.uio_td = td;

    error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);

    pmap_qremove(kva, npages);
    relpbuf(bp, &fuse_pbuf_freecnt);

    if (!error) {
        int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;

        for (i = 0; i < nwritten; i++) {
            rtvals[i] = VM_PAGER_OK;
            VM_OBJECT_LOCK(pages[i]->object);
            vm_page_undirty(pages[i]);
            VM_OBJECT_UNLOCK(pages[i]->object);
        }
    }
    return rtvals[0];
}

/*
    struct vnop_print_args {
        struct vnode *a_vp;
    };
*/
/* Dump the FUSE-specific vnode state for debugging (VOP_PRINT). */
static int
fuse_vnop_print(struct vop_print_args *ap)
{
    struct fuse_vnode_data *fvdat = VTOFUD(ap->a_vp);

    printf("nodeid: %ju, parent nodeid: %ju, nlookup: %ju, flag: %#x\n",
        (uintmax_t)VTOILLU(ap->a_vp), (uintmax_t)fvdat->parent_nid,
        (uintmax_t)fvdat->nlookup,
        fvdat->flag);

    return 0;
}