/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * Portions of this software were developed by BFF Storage Systems, LLC under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/dirent.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysctl.h>
#include <sys/priv.h>

#include "fuse.h"
#include "fuse_file.h"
#include "fuse_internal.h"
#include "fuse_io.h"
#include "fuse_ipc.h"
#include "fuse_node.h"

SDT_PROVIDER_DECLARE(fusefs);
/*
 * Fuse trace probe:
 * arg0: verbosity.  Higher numbers give more verbose messages
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*");

#ifdef ZERO_PAD_INCOMPLETE_BUFS
static int isbzero(void *buf, size_t len);

#endif

counter_u64_t fuse_lookup_cache_hits;
counter_u64_t fuse_lookup_cache_misses;

SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
    &fuse_lookup_cache_hits, "number of positive cache hits in lookup");

SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
    &fuse_lookup_cache_misses, "number of cache misses in lookup");
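
/*
 * Look up a vnode by its FUSE inode number in the vfs hash.  The vnode is
 * returned in *vpp only if it is cached and its lookup cache entry has not
 * yet expired; otherwise *vpp is set to NULL.
 */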
int
fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags,
    struct vnode **vpp)
{
	struct bintime now;
	struct thread *td = curthread;
	uint64_t nodeid = ino;
	int error;

	*vpp = NULL;

	error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp,
	    fuse_vnode_cmp, &nodeid);
	if (error)
		return error;
	/*
	 * Check the entry cache timeout.  We have to do this within fusefs
	 * instead of by using cache_enter_time/cache_lookup because those
	 * routines are only intended to work with pathnames, not inodes
	 */
	if (*vpp != NULL) {
		getbinuptime(&now);
		if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){
			counter_u64_add(fuse_lookup_cache_hits, 1);
			return 0;
		} else {
			/* Entry cache timeout */
			counter_u64_add(fuse_lookup_cache_misses, 1);
			cache_purge(*vpp);
			vput(*vpp);
			*vpp = NULL;
		}
	}
	return 0;
}

SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin);
/* Synchronously send a FUSE_ACCESS operation */
int
fuse_internal_access(struct vnode *vp,
	accmode_t mode,
	struct thread *td,
	struct ucred *cred)
{
	int err = 0;
	uint32_t mask = F_OK;
	int dataflags;
	int vtype;
	struct mount *mp;
	struct fuse_dispatcher fdi;
	struct fuse_access_in *fai;
	struct fuse_data *data;

	mp = vnode_mount(vp);
	vtype = vnode_vtype(vp);

	data = fuse_get_mpdata(mp);
	dataflags = data->dataflags;

	if (mode == 0)
		return 0;

	if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) {
		switch (vp->v_type) {
		case VDIR:
			/* FALLTHROUGH */
		case VLNK:
			/* FALLTHROUGH */
		case VREG:
			return EROFS;
		default:
			break;
		}
	}

	/* Unless explicitly permitted, deny everyone except the fs owner. */
	if (!(dataflags & FSESS_DAEMON_CAN_SPY)) {
		if (fuse_match_cred(data->daemoncred, cred))
			return EPERM;
	}

	if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
		struct vattr va;

		fuse_internal_getattr(vp, &va, cred, td);
		return vaccess(vp->v_type, va.va_mode, va.va_uid,
		    va.va_gid, mode, cred);
	}

	if (mode & VADMIN) {
		/*
		 * The FUSE protocol doesn't have an equivalent of VADMIN, so
		 * it's a bug if we ever reach this point with that bit set.
		 */
		SDT_PROBE0(fusefs, , internal, access_vadmin);
	}

	if (fsess_not_impl(mp, FUSE_ACCESS))
		return 0;

	if ((mode & (VWRITE | VAPPEND)) != 0)
		mask |= W_OK;
	if ((mode & VREAD) != 0)
		mask |= R_OK;
	if ((mode & VEXEC) != 0)
		mask |= X_OK;

	fdisp_init(&fdi, sizeof(*fai));
	fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred);

	fai = fdi.indata;
	fai->mask = mask;

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

	if (err == ENOSYS) {
		fsess_set_notimpl(mp, FUSE_ACCESS);
		err = 0;
	}
	return err;
}

/*
 * Cache FUSE attributes from attr, in attribute cache associated with vnode
 * 'vp'.  Optionally, if argument 'vap' is not NULL, store a copy of the
 * converted attributes there as well.
 *
 * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do
 * return the result to the caller).
 */
void
fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr,
	uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap,
	bool from_server)
{
	struct mount *mp;
	struct fuse_vnode_data *fvdat;
	struct fuse_data *data;
	struct vattr *vp_cache_at;

	mp = vnode_mount(vp);
	fvdat = VTOFUD(vp);
	data = fuse_get_mpdata(mp);

	ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs");

	fuse_validity_2_bintime(attr_valid, attr_valid_nsec,
	    &fvdat->attr_cache_timeout);

	if (vnode_isreg(vp) &&
	    fvdat->cached_attrs.va_size != VNOVAL &&
	    attr->size != fvdat->cached_attrs.va_size) {
		if (data->cache_mode == FUSE_CACHE_WB &&
		    fvdat->flag & FN_SIZECHANGE) {
			const char *msg;

			/*
			 * The server changed the file's size even though we're
			 * using writeback caching and we have outstanding
			 * dirty writes!  That's a server bug.
			 */
			if (fuse_libabi_geq(data, 7, 23)) {
				msg = "writeback cache incoherent! "
				    "To prevent data corruption, disable "
				    "the writeback cache according to your "
				    "FUSE server's documentation.";
			} else {
				msg = "writeback cache incoherent! "
				    "To prevent data corruption, disable "
				    "the writeback cache by setting "
				    "vfs.fusefs.data_cache_mode to 0 or 1.";
			}
			fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg);
		}
		if (fuse_vnode_attr_cache_valid(vp) &&
		    data->cache_mode != FUSE_CACHE_UC) {
			/*
			 * The server changed the file's size even though we
			 * have it cached and our cache has not yet expired.
			 * That's a bug.
			 */
			fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT,
			    "cache incoherent! "
			    "To prevent "
			    "data corruption, disable the data cache "
			    "by mounting with -o direct_io, or as "
			    "directed otherwise by your FUSE server's "
			    "documentation.");
		}
	}

	/* Fix our buffers if the filesize changed without us knowing */
	if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) {
		(void)fuse_vnode_setsize(vp, attr->size, from_server);
		fvdat->cached_attrs.va_size = attr->size;
	}

	if (attr_valid > 0 || attr_valid_nsec > 0)
		vp_cache_at = &(fvdat->cached_attrs);
	else if (vap != NULL)
		vp_cache_at = vap;
	else
		return;

	vattr_null(vp_cache_at);
	vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0];
	vp_cache_at->va_fileid = attr->ino;
	vp_cache_at->va_mode = attr->mode & ~S_IFMT;
	vp_cache_at->va_nlink = attr->nlink;
	vp_cache_at->va_uid = attr->uid;
	vp_cache_at->va_gid = attr->gid;
	vp_cache_at->va_rdev = attr->rdev;
	vp_cache_at->va_size = attr->size;
	/* XXX on i386, seconds are truncated to 32 bits */
	vp_cache_at->va_atime.tv_sec = attr->atime;
	vp_cache_at->va_atime.tv_nsec = attr->atimensec;
	vp_cache_at->va_mtime.tv_sec = attr->mtime;
	vp_cache_at->va_mtime.tv_nsec = attr->mtimensec;
	vp_cache_at->va_ctime.tv_sec = attr->ctime;
	vp_cache_at->va_ctime.tv_nsec = attr->ctimensec;
	if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0)
		vp_cache_at->va_blocksize = attr->blksize;
	else
		vp_cache_at->va_blocksize = PAGE_SIZE;
	vp_cache_at->va_type = IFTOVT(attr->mode);
	vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE;
	vp_cache_at->va_flags = 0;

	if (vap != vp_cache_at && vap != NULL)
		memcpy(vap, vp_cache_at, sizeof(*vap));
}

/* fsync */
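
/*
 * Callback for asynchronous FUSE_FSYNC/FUSE_FSYNCDIR tickets: if the server
 * answered ENOSYS, record that the operation is unimplemented so it won't be
 * sent again.
 */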
int
fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio)
{
	if (tick->tk_aw_ohead.error == ENOSYS) {
		fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick));
	}
	return 0;
}

int
fuse_internal_fsync(struct vnode *vp,
	struct thread *td,
	int waitfor,
	bool datasync)
{
	struct fuse_fsync_in *ffsi = NULL;
	struct fuse_dispatcher fdi;
	struct fuse_filehandle *fufh;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct mount *mp = vnode_mount(vp);
	int op = FUSE_FSYNC;
	int err = 0;

	if (fsess_not_impl(vnode_mount(vp),
	    (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
		return 0;
	}
	if (vnode_isdir(vp))
		op = FUSE_FSYNCDIR;

	if (fsess_not_impl(mp, op))
		return 0;

	fdisp_init(&fdi, sizeof(*ffsi));
	/*
	 * fsync every open file handle for this file, because we can't be sure
	 * which file handle the caller is really referring to.
	 */
	LIST_FOREACH(fufh, &fvdat->handles, next) {
		fdi.iosize = sizeof(*ffsi);
		if (ffsi == NULL)
			fdisp_make_vp(&fdi, op, vp, td, NULL);
		else
			fdisp_refresh_vp(&fdi, op, vp, td, NULL);
		ffsi = fdi.indata;
		ffsi->fh = fufh->fh_id;
		ffsi->fsync_flags = 0;

		if (datasync)
			ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC;

		if (waitfor == MNT_WAIT) {
			err = fdisp_wait_answ(&fdi);
		} else {
			fuse_insert_callback(fdi.tick,
			    fuse_internal_fsync_callback);
			fuse_insert_message(fdi.tick, false);
		}
		if (err == ENOSYS) {
			/* ENOSYS means "success, and don't call again" */
			fsess_set_notimpl(mp, op);
			err = 0;
			break;
		}
	}
	fdisp_destroy(&fdi);

	return err;
}

/* Asynchronous invalidation */
SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry,
    "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*");
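/*
 * Handle a FUSE_NOTIFY_INVAL_ENTRY message: remove the named entry from the
 * namecache and clear the parent directory's attribute cache.
 */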
int
fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio)
{
	struct fuse_notify_inval_entry_out fnieo;
	struct componentname cn;
	struct vnode *dvp, *vp;
	char name[PATH_MAX];
	int err;

	if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0)
		return (err);

	if (fnieo.namelen >= sizeof(name))
		return (EINVAL);

	if ((err = uiomove(name, fnieo.namelen, uio)) != 0)
		return (err);
	name[fnieo.namelen] = '\0';
	/* fusefs does not cache "." or ".." entries */
	if (strncmp(name, ".", sizeof(".")) == 0 ||
	    strncmp(name, "..", sizeof("..")) == 0)
		return (0);

	if (fnieo.parent == FUSE_ROOT_ID)
		err = VFS_ROOT(mp, LK_SHARED, &dvp);
	else
		err = fuse_internal_get_cached_vnode(mp, fnieo.parent,
		    LK_SHARED, &dvp);
	SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name);
	/*
	 * If dvp is not in the cache, then it must've been reclaimed.  And
	 * since fuse_vnop_reclaim does a cache_purge, name's entry must've
	 * been invalidated already.  So we can safely return if dvp == NULL.
	 */
	if (err != 0 || dvp == NULL)
		return (err);
	/*
	 * XXX we can't check dvp's generation because the FUSE invalidate
	 * entry message doesn't include it.  Worst case is that we invalidate
	 * an entry that didn't need to be invalidated.
	 */

	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = 0;	/* !MAKEENTRY means free cached entry */
	cn.cn_cred = curthread->td_ucred;
	cn.cn_lkflags = LK_SHARED;
	cn.cn_pnbuf = NULL;
	cn.cn_nameptr = name;
	cn.cn_namelen = fnieo.namelen;
	err = cache_lookup(dvp, &vp, &cn, NULL, NULL);
	MPASS(err == 0);
	fuse_vnode_clear_attr_cache(dvp);
	vput(dvp);
	return (0);
}

SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode,
    "struct vnode*", "struct fuse_notify_inval_inode_out *");
int
fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio)
{
	struct fuse_notify_inval_inode_out fniio;
	struct vnode *vp;
	int err;

	if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0)
		return (err);

	if (fniio.ino == FUSE_ROOT_ID)
		err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp);
	else
		err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED,
		    &vp);
	SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio);
	if (err != 0 || vp == NULL)
		return (err);
	/*
	 * XXX we can't check vp's generation because the FUSE invalidate
	 * entry message doesn't include it.  Worst case is that we invalidate
	 * an inode that didn't need to be invalidated.
	 */

	/*
	 * Flush and invalidate buffers if off >= 0.  Technically we only need
	 * to flush and invalidate the range of offsets [off, off + len), but
	 * for simplicity's sake we do everything.
	 */
	if (fniio.off >= 0)
		fuse_io_invalbuf(vp, curthread);
	fuse_vnode_clear_attr_cache(vp);
	vput(vp);
	return (0);
}

/* mknod */
int
fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp,
	struct componentname *cnp, struct vattr *vap)
{
	struct fuse_data *data;
	struct fuse_mknod_in fmni;
	size_t insize;

	data = fuse_get_mpdata(dvp->v_mount);

	fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode);
	fmni.rdev = vap->va_rdev;
	if (fuse_libabi_geq(data, 7, 12)) {
		insize = sizeof(fmni);
		fmni.umask = curthread->td_proc->p_pd->pd_cmask;
	} else {
		insize = FUSE_COMPAT_MKNOD_IN_SIZE;
	}
	return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni,
	    insize, vap->va_type));
}

/* readdir */
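
/*
 * Read directory entries from the server with FUSE_READDIR requests and
 * convert them to struct dirents for the caller, starting at directory
 * offset startoff.
 */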
int
fuse_internal_readdir(struct vnode *vp,
	struct uio *uio,
	off_t startoff,
	struct fuse_filehandle *fufh,
	struct fuse_iov *cookediov,
	int *ncookies,
	uint64_t *cookies)
{
	int err = 0;
	struct fuse_dispatcher fdi;
	struct fuse_read_in *fri = NULL;
	int fnd_start;

	if (uio_resid(uio) == 0)
		return 0;
	fdisp_init(&fdi, 0);

	/*
	 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p
	 * I/O).
	 */

	/*
	 * fnd_start is set non-zero once the offset in the directory gets
	 * to the startoff.  This is done because directories must be read
	 * from the beginning (offset == 0) when fuse_vnop_readdir() needs
	 * to do an open of the directory.
	 * If it is not set non-zero here, it will be set non-zero in
	 * fuse_internal_readdir_processdata() when uio_offset == startoff.
	 */
	fnd_start = 0;
	if (uio->uio_offset == startoff)
		fnd_start = 1;
	while (uio_resid(uio) > 0) {
		fdi.iosize = sizeof(*fri);
		if (fri == NULL)
			fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);
		else
			fdisp_refresh_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);

		fri = fdi.indata;
		fri->fh = fufh->fh_id;
		fri->offset = uio_offset(uio);
		fri->size = MIN(uio->uio_resid,
		    fuse_get_mpdata(vp->v_mount)->max_read);

		if ((err = fdisp_wait_answ(&fdi)))
			break;
		if ((err = fuse_internal_readdir_processdata(uio, startoff,
		    &fnd_start, fri->size, fdi.answ, fdi.iosize, cookediov,
		    ncookies, &cookies)))
			break;
	}

	fdisp_destroy(&fdi);
	return ((err == -1) ? 0 : err);
}

/*
 * Return -1 to indicate that this readdir is finished, 0 if it copied
 * all the directory data read in and it may be possible to read more
 * and greater than 0 for a failure.
 */
int
fuse_internal_readdir_processdata(struct uio *uio,
	off_t startoff,
	int *fnd_start,
	size_t reqsize,
	void *buf,
	size_t bufsize,
	struct fuse_iov *cookediov,
	int *ncookies,
	uint64_t **cookiesp)
{
	int err = 0;
	int oreclen;
	size_t freclen;

	struct dirent *de;
	struct fuse_dirent *fudge;
	uint64_t *cookies;

	cookies = *cookiesp;
	if (bufsize < FUSE_NAME_OFFSET)
		return -1;
	for (;;) {
		if (bufsize < FUSE_NAME_OFFSET) {
			err = -1;
			break;
		}
		fudge = (struct fuse_dirent *)buf;
		freclen = FUSE_DIRENT_SIZE(fudge);

		if (bufsize < freclen) {
			/*
			 * This indicates a partial directory entry at the
			 * end of the directory data.
			 */
			err = -1;
			break;
		}
#ifdef ZERO_PAD_INCOMPLETE_BUFS
		if (isbzero(buf, FUSE_NAME_OFFSET)) {
			err = -1;
			break;
		}
#endif

		if (!fudge->namelen || fudge->namelen > MAXNAMLEN) {
			err = EINVAL;
			break;
		}
		oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *)
		    &fudge->namelen);

		if (oreclen > uio_resid(uio)) {
			/* Out of space for the dir so we are done. */
			err = -1;
			break;
		}
		/*
		 * Don't start to copy the directory entries out until
		 * the requested offset in the directory is found.
		 */
		if (*fnd_start != 0) {
			fiov_adjust(cookediov, oreclen);
			bzero(cookediov->base, oreclen);

			de = (struct dirent *)cookediov->base;
			de->d_fileno = fudge->ino;
			de->d_off = fudge->off;
			de->d_reclen = oreclen;
			de->d_type = fudge->type;
			de->d_namlen = fudge->namelen;
			memcpy((char *)cookediov->base +
			    sizeof(struct dirent) - MAXNAMLEN - 1,
			    (char *)buf + FUSE_NAME_OFFSET, fudge->namelen);
			dirent_terminate(de);

			err = uiomove(cookediov->base, cookediov->len, uio);
			if (err)
				break;
			if (cookies != NULL) {
				if (*ncookies == 0) {
					err = -1;
					break;
				}
				*cookies = fudge->off;
				cookies++;
				(*ncookies)--;
			}
		} else if (startoff == fudge->off)
			*fnd_start = 1;
		buf = (char *)buf + freclen;
		bufsize -= freclen;
		uio_setoffset(uio, fudge->off);
	}
	*cookiesp = cookies;

	return err;
}

/* remove */
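
/*
 * Send a FUSE_UNLINK or FUSE_RMDIR request for the named entry, then adjust
 * the kernel's caches to match the expected result.
 */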
int
fuse_internal_remove(struct vnode *dvp,
	struct vnode *vp,
	struct componentname *cnp,
	enum fuse_opcode op)
{
	struct fuse_dispatcher fdi;
	nlink_t nlink;
	int err = 0;

	fdisp_init(&fdi, cnp->cn_namelen + 1);
	fdisp_make_vp(&fdi, op, dvp, curthread, cnp->cn_cred);

	memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
	((char *)fdi.indata)[cnp->cn_namelen] = '\0';

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

	if (err)
		return (err);

	/*
	 * Access the cached nlink even if the attr cache has expired.  If
	 * it's inaccurate, the worst that will happen is:
	 * 1) We'll recycle the vnode even though the file has another link we
	 *    don't know about, costing a bit of cpu time, or
	 * 2) We won't recycle the vnode even though all of its links are gone.
	 *    It will linger around until vnlru reclaims it, costing a bit of
	 *    temporary memory.
	 */
	nlink = VTOFUD(vp)->cached_attrs.va_nlink--;

	/*
	 * Purge the parent's attribute cache because the daemon
	 * should've updated its mtime and ctime.
	 */
	fuse_vnode_clear_attr_cache(dvp);

	/* NB: nlink could be zero if it was never cached */
	if (nlink <= 1 || vnode_vtype(vp) == VDIR) {
		fuse_internal_vnode_disappear(vp);
	} else {
		cache_purge(vp);
		fuse_vnode_update(vp, FN_CTIMECHANGE);
	}

	return err;
}

/* rename */

int
fuse_internal_rename(struct vnode *fdvp,
	struct componentname *fcnp,
	struct vnode *tdvp,
	struct componentname *tcnp)
{
	struct fuse_dispatcher fdi;
	struct fuse_rename_in *fri;
	int err = 0;

	fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2);
	fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, curthread, tcnp->cn_cred);

	fri = fdi.indata;
	fri->newdir = VTOI(tdvp);
	memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr,
	    fcnp->cn_namelen);
	((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0';
	memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1,
	    tcnp->cn_nameptr, tcnp->cn_namelen);
	((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen +
	    tcnp->cn_namelen + 1] = '\0';

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);
	return err;
}

/* strategy */

/* entity creation */

void
fuse_internal_newentry_makerequest(struct mount *mp,
	uint64_t dnid,
	struct componentname *cnp,
	enum fuse_opcode op,
	void *buf,
	size_t bufsize,
	struct fuse_dispatcher *fdip)
{
	fdip->iosize = bufsize + cnp->cn_namelen + 1;

	fdisp_make(fdip, op, mp, dnid, curthread, cnp->cn_cred);
	memcpy(fdip->indata, buf, bufsize);
	memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr,
	    cnp->cn_namelen);
	((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0';
}

int
fuse_internal_newentry_core(struct vnode *dvp,
	struct vnode **vpp,
	struct componentname *cnp,
	enum vtype vtyp,
	struct fuse_dispatcher *fdip)
{
	int err = 0;
	struct fuse_entry_out *feo;
	struct mount *mp = vnode_mount(dvp);

	if ((err = fdisp_wait_answ(fdip))) {
		return err;
	}
	feo = fdip->answ;

	if ((err = fuse_internal_checkentry(feo, vtyp))) {
		return err;
	}
	err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp);
	if (err) {
		fuse_internal_forget_send(mp, curthread, cnp->cn_cred,
		    feo->nodeid, 1);
		return err;
	}

	/*
	 * Purge the parent's attribute cache because the daemon should've
	 * updated its mtime and ctime.
	 */
	fuse_vnode_clear_attr_cache(dvp);

	fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid,
	    feo->attr_valid_nsec, NULL, true);

	return err;
}

int
fuse_internal_newentry(struct vnode *dvp,
	struct vnode **vpp,
	struct componentname *cnp,
	enum fuse_opcode op,
	void *buf,
	size_t bufsize,
	enum vtype vtype)
{
	int err;
	struct fuse_dispatcher fdi;
	struct mount *mp = vnode_mount(dvp);

	fdisp_init(&fdi, 0);
	fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf,
	    bufsize, &fdi);
	err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi);
	fdisp_destroy(&fdi);

	return err;
}

/* entity destruction */

int
fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio)
{
	fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL,
	    ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1);

	return 0;
}
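
/*
 * Send an asynchronous FUSE_FORGET message, dropping nlookup references to
 * the node named by nodeid.
 */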
void
fuse_internal_forget_send(struct mount *mp,
	struct thread *td,
	struct ucred *cred,
	uint64_t nodeid,
	uint64_t nlookup)
{

	struct fuse_dispatcher fdi;
	struct fuse_forget_in *ffi;

	/*
	 * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu",
	 *         (long long unsigned) nodeid));
	 */

	fdisp_init(&fdi, sizeof(*ffi));
	fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred);

	ffi = fdi.indata;
	ffi->nlookup = nlookup;

	fuse_insert_message(fdi.tick, false);
	fdisp_destroy(&fdi);
}

/* Fetch the vnode's attributes from the daemon */
int
fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap,
	struct ucred *cred, struct thread *td)
{
	struct fuse_dispatcher fdi;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_getattr_in *fgai;
	struct fuse_attr_out *fao;
	off_t old_filesize = fvdat->cached_attrs.va_size;
	struct timespec old_atime = fvdat->cached_attrs.va_atime;
	struct timespec old_ctime = fvdat->cached_attrs.va_ctime;
	struct timespec old_mtime = fvdat->cached_attrs.va_mtime;
	enum vtype vtyp;
	int err;

	fdisp_init(&fdi, sizeof(*fgai));
	fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred);
	fgai = fdi.indata;
	/*
	 * We could look up a file handle and set it in fgai->fh, but that
	 * involves extra runtime work and I'm unaware of any file systems
	 * that care.
	 */
	fgai->getattr_flags = 0;
	if ((err = fdisp_wait_answ(&fdi))) {
		if (err == ENOENT)
			fuse_internal_vnode_disappear(vp);
		goto out;
	}

	fao = (struct fuse_attr_out *)fdi.answ;
	vtyp = IFTOVT(fao->attr.mode);
	if (fvdat->flag & FN_SIZECHANGE)
		fao->attr.size = old_filesize;
	if (fvdat->flag & FN_ATIMECHANGE) {
		fao->attr.atime = old_atime.tv_sec;
		fao->attr.atimensec = old_atime.tv_nsec;
	}
	if (fvdat->flag & FN_CTIMECHANGE) {
		fao->attr.ctime = old_ctime.tv_sec;
		fao->attr.ctimensec = old_ctime.tv_nsec;
	}
	if (fvdat->flag & FN_MTIMECHANGE) {
		fao->attr.mtime = old_mtime.tv_sec;
		fao->attr.mtimensec = old_mtime.tv_nsec;
	}
	fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
	    fao->attr_valid_nsec, vap, true);
	if (vtyp != vnode_vtype(vp)) {
		fuse_internal_vnode_disappear(vp);
		err = ENOENT;
	}

out:
	fdisp_destroy(&fdi);
	return err;
}

/* Read a vnode's attributes from cache or fetch them from the fuse daemon */
int
fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred,
	struct thread *td)
{
	struct vattr *attrs;

	if ((attrs = VTOVA(vp)) != NULL) {
		*vap = *attrs;	/* struct copy */
		return 0;
	}

	return fuse_internal_do_getattr(vp, vap, cred, td);
}

void
fuse_internal_vnode_disappear(struct vnode *vp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
	fvdat->flag |= FN_REVOKED;
	cache_purge(vp);
}

/* fuse start/stop */

SDT_PROBE_DEFINE2(fusefs, , internal, init_done,
    "struct fuse_data*", "struct fuse_init_out*");
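/*
 * Process the server's reply to the FUSE_INIT request: record the negotiated
 * protocol version and feature flags, then mark the session as initialized.
 */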
int
fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio)
{
	int err = 0;
	struct fuse_data *data = tick->tk_data;
	struct fuse_init_out *fiio;

	if ((err = tick->tk_aw_ohead.error)) {
		goto out;
	}
	if ((err = fticket_pull(tick, uio))) {
		goto out;
	}
	fiio = fticket_resp(tick)->base;

	data->fuse_libabi_major = fiio->major;
	data->fuse_libabi_minor = fiio->minor;
	if (!fuse_libabi_geq(data, 7, 4)) {
		/*
		 * With a little work we could support servers as old as 7.1.
		 * But there would be little payoff.
		 */
		SDT_PROBE2(fusefs, , internal, trace, 1,
		    "userspace version too low");
		err = EPROTONOSUPPORT;
		goto out;
	}

	if (fuse_libabi_geq(data, 7, 5)) {
		if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) ||
		    fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) {
			data->max_write = fiio->max_write;
			if (fiio->flags & FUSE_ASYNC_READ)
				data->dataflags |= FSESS_ASYNC_READ;
			if (fiio->flags & FUSE_POSIX_LOCKS)
				data->dataflags |= FSESS_POSIX_LOCKS;
			if (fiio->flags & FUSE_EXPORT_SUPPORT)
				data->dataflags |= FSESS_EXPORT_SUPPORT;
			if (fiio->flags & FUSE_NO_OPEN_SUPPORT)
				data->dataflags |= FSESS_NO_OPEN_SUPPORT;
			if (fiio->flags & FUSE_NO_OPENDIR_SUPPORT)
				data->dataflags |= FSESS_NO_OPENDIR_SUPPORT;
			/*
			 * Don't bother to check FUSE_BIG_WRITES, because it's
			 * redundant with max_write
			 */
			/*
			 * max_background and congestion_threshold are not
			 * implemented
			 */
		} else {
			err = EINVAL;
		}
	} else {
		/* Old fixed values */
		data->max_write = 4096;
	}

	if (fuse_libabi_geq(data, 7, 6))
		data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf;

	if (!fuse_libabi_geq(data, 7, 7))
		fsess_set_notimpl(data->mp, FUSE_INTERRUPT);

	if (!fuse_libabi_geq(data, 7, 8)) {
		fsess_set_notimpl(data->mp, FUSE_BMAP);
		fsess_set_notimpl(data->mp, FUSE_DESTROY);
	}

	if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 &&
	    fiio->time_gran <= 1000000000)
		data->time_gran = fiio->time_gran;
	else
		data->time_gran = 1;

	if (!fuse_libabi_geq(data, 7, 23))
		data->cache_mode = fuse_data_cache_mode;
	else if (fiio->flags & FUSE_WRITEBACK_CACHE)
		data->cache_mode = FUSE_CACHE_WB;
	else
		data->cache_mode = FUSE_CACHE_WT;

	if (!fuse_libabi_geq(data, 7, 24))
		fsess_set_notimpl(data->mp, FUSE_LSEEK);

	if (!fuse_libabi_geq(data, 7, 28))
		fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE);

out:
	if (err) {
		fdata_set_dead(data);
	}
	FUSE_LOCK();
	data->dataflags |= FSESS_INITED;
	SDT_PROBE2(fusefs, , internal, init_done, data, fiio);
	wakeup(&data->ticketer);
	FUSE_UNLOCK();

	return 0;
}
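
/*
 * Send the FUSE_INIT request that starts a session, advertising the kernel's
 * protocol version and the feature flags this driver supports.
 */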
void
fuse_internal_send_init(struct fuse_data *data, struct thread *td)
{
	struct fuse_init_in *fiii;
	struct fuse_dispatcher fdi;

	fdisp_init(&fdi, sizeof(*fiii));
	fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL);
	fiii = fdi.indata;
	fiii->major = FUSE_KERNEL_VERSION;
	fiii->minor = FUSE_KERNEL_MINOR_VERSION;
	/*
	 * fusefs currently reads ahead no more than one cache block at a time.
	 * See fuse_read_biobackend.
	 */
	fiii->max_readahead = maxbcachebuf;
	/*
	 * Unsupported features:
	 * FUSE_FILE_OPS: No known FUSE server or client supports it
	 * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it
	 * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even
	 *	when default ACLs are in use.
	 * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD
	 *	doesn't have splice(2).
	 * FUSE_FLOCK_LOCKS: not yet implemented
	 * FUSE_HAS_IOCTL_DIR: not yet implemented
	 * FUSE_AUTO_INVAL_DATA: not yet implemented
	 * FUSE_DO_READDIRPLUS: not yet implemented
	 * FUSE_READDIRPLUS_AUTO: not yet implemented
	 * FUSE_ASYNC_DIO: not yet implemented
	 * FUSE_PARALLEL_DIROPS: not yet implemented
	 * FUSE_HANDLE_KILLPRIV: not yet implemented
	 * FUSE_POSIX_ACL: not yet implemented
	 * FUSE_ABORT_ERROR: not yet implemented
	 * FUSE_CACHE_SYMLINKS: not yet implemented
	 * FUSE_MAX_PAGES: not yet implemented
	 */
	fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT
	    | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE
	    | FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT;

	fuse_insert_callback(fdi.tick, fuse_internal_init_callback);
	fuse_insert_message(fdi.tick, false);
	fdisp_destroy(&fdi);
}

/*
 * Send a FUSE_SETATTR operation with no permissions checks.  If cred is NULL,
 * send the request with root credentials.
 */
int fuse_internal_setattr(struct vnode *vp, struct vattr *vap,
	struct thread *td, struct ucred *cred)
{
	struct fuse_vnode_data *fvdat;
	struct fuse_dispatcher fdi;
	struct fuse_setattr_in *fsai;
	struct mount *mp;
	pid_t pid = td->td_proc->p_pid;
	struct fuse_data *data;
	int dataflags;
	int err = 0;
	enum vtype vtyp;
	int sizechanged = -1;
	uint64_t newsize = 0;

	mp = vnode_mount(vp);
	fvdat = VTOFUD(vp);
	data = fuse_get_mpdata(mp);
	dataflags = data->dataflags;

	fdisp_init(&fdi, sizeof(*fsai));
	fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
	if (!cred) {
		fdi.finh->uid = 0;
		fdi.finh->gid = 0;
	}
	fsai = fdi.indata;
	fsai->valid = 0;

	if (vap->va_uid != (uid_t)VNOVAL) {
		fsai->uid = vap->va_uid;
		fsai->valid |= FATTR_UID;
	}
	if (vap->va_gid != (gid_t)VNOVAL) {
		fsai->gid = vap->va_gid;
		fsai->valid |= FATTR_GID;
	}
	if (vap->va_size != VNOVAL) {
		struct fuse_filehandle *fufh = NULL;

		/* Truncate to a new value. */
		fsai->size = vap->va_size;
		sizechanged = 1;
		newsize = vap->va_size;
		fsai->valid |= FATTR_SIZE;

		fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid);
		if (fufh) {
			fsai->fh = fufh->fh_id;
			fsai->valid |= FATTR_FH;
		}
		VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		fsai->atime = vap->va_atime.tv_sec;
		fsai->atimensec = vap->va_atime.tv_nsec;
		fsai->valid |= FATTR_ATIME;
		if (vap->va_vaflags & VA_UTIMES_NULL)
			fsai->valid |= FATTR_ATIME_NOW;
	} else if (fvdat->flag & FN_ATIMECHANGE) {
		fsai->atime = fvdat->cached_attrs.va_atime.tv_sec;
		fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec;
		fsai->valid |= FATTR_ATIME;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		fsai->mtime = vap->va_mtime.tv_sec;
		fsai->mtimensec = vap->va_mtime.tv_nsec;
		fsai->valid |= FATTR_MTIME;
		if (vap->va_vaflags & VA_UTIMES_NULL)
			fsai->valid |= FATTR_MTIME_NOW;
	} else if (fvdat->flag & FN_MTIMECHANGE) {
		fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec;
		fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec;
		fsai->valid |= FATTR_MTIME;
	}
	if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) {
		fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec;
		fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec;
		fsai->valid |= FATTR_CTIME;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		fsai->mode = vap->va_mode & ALLPERMS;
		fsai->valid |= FATTR_MODE;
	}
	if (!fsai->valid) {
		goto out;
	}

	if ((err = fdisp_wait_answ(&fdi)))
		goto out;
	vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);

	if (vnode_vtype(vp) != vtyp) {
		if (vnode_vtype(vp) == VNON && vtyp != VNON) {
			SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! "
			    "vnode_vtype is VNON and vtype isn't.");
		} else {
			/*
			 * STALE vnode, ditch
			 *
			 * The vnode has changed its type "behind our back".
			 * This probably means that the file got deleted and
			 * recreated on the server, with the same inode.
			 * There's nothing really we can do, so let us just
			 * return ENOENT.  After all, the entry must not have
			 * existed in the recent past.  If the user tries
			 * again, it will work.
			 */
			fuse_internal_vnode_disappear(vp);
			err = ENOENT;
		}
	}
	if (err == 0) {
		struct fuse_attr_out *fao = (struct fuse_attr_out *)fdi.answ;

		fuse_vnode_undirty_cached_timestamps(vp, true);
		fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
		    fao->attr_valid_nsec, NULL, false);
	}

out:
	fdisp_destroy(&fdi);
	return err;
}

/*
 * FreeBSD clears the SUID and SGID bits on any write by a non-root user.
 */
void
fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
	struct thread *td)
{
	struct fuse_data *data;
	struct mount *mp;
	struct vattr va;
	int dataflags;

	mp = vnode_mount(vp);
	data = fuse_get_mpdata(mp);
	dataflags = data->dataflags;

	ASSERT_VOP_LOCKED(vp, __func__);

	if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
			fuse_internal_getattr(vp, &va, cred, td);
			if (va.va_mode & (S_ISUID | S_ISGID)) {
				mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID);
				/* Clear all vattr fields except mode */
				vattr_null(&va);
				va.va_mode = mode;

				/*
				 * Ignore fuse_internal_setattr's return value,
				 * because at this point the write operation
				 * has already succeeded and we don't want to
				 * return failing status for that.
				 */
				(void)fuse_internal_setattr(vp, &va, td, NULL);
			}
		}
	}
}

#ifdef ZERO_PAD_INCOMPLETE_BUFS
static int
isbzero(void *buf, size_t len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (((char *)buf)[i])
			return (0);
	}

	return (1);
}

#endif

void
fuse_internal_init(void)
{
	fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK);
	fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK);
}

void
fuse_internal_destroy(void)
{
	counter_u64_free(fuse_lookup_cache_hits);
	counter_u64_free(fuse_lookup_cache_misses);
}