1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2007-2009 Google Inc. and Amit Singh 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are 9 * met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following disclaimer 15 * in the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Google Inc. nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Copyright (C) 2005 Csaba Henk. 34 * All rights reserved. 35 * 36 * Copyright (c) 2019 The FreeBSD Foundation 37 * 38 * Portions of this software were developed by BFF Storage Systems, LLC under 39 * sponsorship from the FreeBSD Foundation. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 50 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 61 */ 62 63 #include <sys/cdefs.h> 64 #include <sys/param.h> 65 #include <sys/systm.h> 66 #include <sys/counter.h> 67 #include <sys/module.h> 68 #include <sys/errno.h> 69 #include <sys/kernel.h> 70 #include <sys/conf.h> 71 #include <sys/uio.h> 72 #include <sys/malloc.h> 73 #include <sys/queue.h> 74 #include <sys/lock.h> 75 #include <sys/mutex.h> 76 #include <sys/sdt.h> 77 #include <sys/sx.h> 78 #include <sys/proc.h> 79 #include <sys/mount.h> 80 #include <sys/vnode.h> 81 #include <sys/namei.h> 82 #include <sys/stat.h> 83 #include <sys/unistd.h> 84 #include <sys/filedesc.h> 85 #include <sys/file.h> 86 #include <sys/fcntl.h> 87 #include <sys/dirent.h> 88 #include <sys/bio.h> 89 #include <sys/buf.h> 90 #include <sys/sysctl.h> 91 #include <sys/priv.h> 92 93 #include "fuse.h" 94 #include "fuse_file.h" 95 #include "fuse_internal.h" 96 #include "fuse_io.h" 97 #include "fuse_ipc.h" 98 #include "fuse_node.h" 99 #include "fuse_file.h" 100 101 SDT_PROVIDER_DECLARE(fusefs); 102 /* 103 * Fuse trace probe: 104 * arg0: verbosity. Higher numbers give more verbose messages 105 * arg1: Textual message 106 */ 107 SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*"); 108 109 #ifdef ZERO_PAD_INCOMPLETE_BUFS 110 static int isbzero(void *buf, size_t len); 111 112 #endif 113 114 counter_u64_t fuse_lookup_cache_hits; 115 counter_u64_t fuse_lookup_cache_misses; 116 117 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD, 118 &fuse_lookup_cache_hits, "number of positive cache hits in lookup"); 119 120 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD, 121 &fuse_lookup_cache_misses, "number of cache misses in lookup"); 122 123 int 124 fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags, 125 struct vnode **vpp) 126 { 127 struct bintime now; 128 struct thread *td = curthread; 129 uint64_t nodeid = ino; 130 int error; 131 132 *vpp = NULL; 133 134 error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp, 135 fuse_vnode_cmp, &nodeid); 136 if (error) 137 return error; 138 /* 139 * Check the entry cache timeout. We have to do this within fusefs 140 * instead of by using cache_enter_time/cache_lookup because those 141 * routines are only intended to work with pathnames, not inodes 142 */ 143 if (*vpp != NULL) { 144 getbinuptime(&now); 145 if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){ 146 counter_u64_add(fuse_lookup_cache_hits, 1); 147 return 0; 148 } else { 149 /* Entry cache timeout */ 150 counter_u64_add(fuse_lookup_cache_misses, 1); 151 cache_purge(*vpp); 152 vput(*vpp); 153 *vpp = NULL; 154 } 155 } 156 return 0; 157 } 158 159 SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin); 160 /* Synchronously send a FUSE_ACCESS operation */ 161 int 162 fuse_internal_access(struct vnode *vp, 163 accmode_t mode, 164 struct thread *td, 165 struct ucred *cred) 166 { 167 int err = 0; 168 uint32_t mask = F_OK; 169 int dataflags; 170 struct mount *mp; 171 struct fuse_dispatcher fdi; 172 struct fuse_access_in *fai; 173 struct fuse_data *data; 174 175 mp = vnode_mount(vp); 176 177 data = fuse_get_mpdata(mp); 178 dataflags = data->dataflags; 179 180 if (mode == 0) 181 return 0; 182 183 if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) { 184 switch (vp->v_type) { 185 case VDIR: 186 /* FALLTHROUGH */ 187 case VLNK: 188 /* FALLTHROUGH */ 189 case VREG: 190 return EROFS; 191 default: 192 break; 193 } 194 } 195 196 /* Unless explicitly permitted, deny everyone except the fs owner. */ 197 if (!(dataflags & FSESS_DAEMON_CAN_SPY)) { 198 if (fuse_match_cred(data->daemoncred, cred)) 199 return EPERM; 200 } 201 202 if (dataflags & FSESS_DEFAULT_PERMISSIONS) { 203 struct vattr va; 204 205 fuse_internal_getattr(vp, &va, cred, td); 206 return vaccess(vp->v_type, va.va_mode, va.va_uid, 207 va.va_gid, mode, cred); 208 } 209 210 if (mode & VADMIN) { 211 /* 212 * The FUSE protocol doesn't have an equivalent of VADMIN, so 213 * it's a bug if we ever reach this point with that bit set. 214 */ 215 SDT_PROBE0(fusefs, , internal, access_vadmin); 216 } 217 218 if (fsess_not_impl(mp, FUSE_ACCESS)) 219 return 0; 220 221 if ((mode & (VWRITE | VAPPEND)) != 0) 222 mask |= W_OK; 223 if ((mode & VREAD) != 0) 224 mask |= R_OK; 225 if ((mode & VEXEC) != 0) 226 mask |= X_OK; 227 228 fdisp_init(&fdi, sizeof(*fai)); 229 fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred); 230 231 fai = fdi.indata; 232 fai->mask = mask; 233 234 err = fdisp_wait_answ(&fdi); 235 fdisp_destroy(&fdi); 236 237 if (err == ENOSYS) { 238 fsess_set_notimpl(mp, FUSE_ACCESS); 239 err = 0; 240 } 241 return err; 242 } 243 244 /* 245 * Cache FUSE attributes from attr, in attribute cache associated with vnode 246 * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the 247 * converted attributes there as well. 248 * 249 * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do 250 * return the result to the caller). 251 */ 252 void 253 fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr, 254 uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap, 255 bool from_server) 256 { 257 struct mount *mp; 258 struct fuse_vnode_data *fvdat; 259 struct fuse_data *data; 260 struct vattr *vp_cache_at; 261 262 mp = vnode_mount(vp); 263 fvdat = VTOFUD(vp); 264 data = fuse_get_mpdata(mp); 265 266 ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs"); 267 268 fuse_validity_2_bintime(attr_valid, attr_valid_nsec, 269 &fvdat->attr_cache_timeout); 270 271 if (vnode_isreg(vp) && 272 fvdat->cached_attrs.va_size != VNOVAL && 273 attr->size != fvdat->cached_attrs.va_size) 274 { 275 if ( data->cache_mode == FUSE_CACHE_WB && 276 fvdat->flag & FN_SIZECHANGE) 277 { 278 const char *msg; 279 280 /* 281 * The server changed the file's size even though we're 282 * using writeback cacheing and and we have outstanding 283 * dirty writes! That's a server bug. 284 */ 285 if (fuse_libabi_geq(data, 7, 23)) { 286 msg = "writeback cache incoherent!." 287 "To prevent data corruption, disable " 288 "the writeback cache according to your " 289 "FUSE server's documentation."; 290 } else { 291 msg = "writeback cache incoherent!." 292 "To prevent data corruption, disable " 293 "the writeback cache by setting " 294 "vfs.fusefs.data_cache_mode to 0 or 1."; 295 } 296 fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg); 297 } 298 if (fuse_vnode_attr_cache_valid(vp) && 299 data->cache_mode != FUSE_CACHE_UC) 300 { 301 /* 302 * The server changed the file's size even though we 303 * have it cached and our cache has not yet expired. 304 * That's a bug. 305 */ 306 fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT, 307 "cache incoherent! " 308 "To prevent " 309 "data corruption, disable the data cache " 310 "by mounting with -o direct_io, or as " 311 "directed otherwise by your FUSE server's " 312 "documentation."); 313 } 314 } 315 316 /* Fix our buffers if the filesize changed without us knowing */ 317 if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) { 318 (void)fuse_vnode_setsize(vp, attr->size, from_server); 319 fvdat->cached_attrs.va_size = attr->size; 320 } 321 322 if (attr_valid > 0 || attr_valid_nsec > 0) 323 vp_cache_at = &(fvdat->cached_attrs); 324 else if (vap != NULL) 325 vp_cache_at = vap; 326 else 327 return; 328 329 vattr_null(vp_cache_at); 330 vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0]; 331 vp_cache_at->va_fileid = attr->ino; 332 vp_cache_at->va_mode = attr->mode & ~S_IFMT; 333 vp_cache_at->va_nlink = attr->nlink; 334 vp_cache_at->va_uid = attr->uid; 335 vp_cache_at->va_gid = attr->gid; 336 vp_cache_at->va_rdev = attr->rdev; 337 vp_cache_at->va_size = attr->size; 338 /* XXX on i386, seconds are truncated to 32 bits */ 339 vp_cache_at->va_atime.tv_sec = attr->atime; 340 vp_cache_at->va_atime.tv_nsec = attr->atimensec; 341 vp_cache_at->va_mtime.tv_sec = attr->mtime; 342 vp_cache_at->va_mtime.tv_nsec = attr->mtimensec; 343 vp_cache_at->va_ctime.tv_sec = attr->ctime; 344 vp_cache_at->va_ctime.tv_nsec = attr->ctimensec; 345 if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0) 346 vp_cache_at->va_blocksize = attr->blksize; 347 else 348 vp_cache_at->va_blocksize = PAGE_SIZE; 349 vp_cache_at->va_type = IFTOVT(attr->mode); 350 vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE; 351 vp_cache_at->va_flags = 0; 352 353 if (vap != vp_cache_at && vap != NULL) 354 memcpy(vap, vp_cache_at, sizeof(*vap)); 355 } 356 357 /* fsync */ 358 359 int 360 fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio) 361 { 362 if (tick->tk_aw_ohead.error == ENOSYS) { 363 fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick)); 364 } 365 return 0; 366 } 367 368 int 369 fuse_internal_fsync(struct vnode *vp, 370 struct thread *td, 371 int waitfor, 372 bool datasync) 373 { 374 struct fuse_fsync_in *ffsi = NULL; 375 struct fuse_dispatcher fdi; 376 struct fuse_filehandle *fufh; 377 struct fuse_vnode_data *fvdat = VTOFUD(vp); 378 struct mount *mp = vnode_mount(vp); 379 int op = FUSE_FSYNC; 380 int err = 0; 381 382 if (fsess_not_impl(vnode_mount(vp), 383 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) { 384 return 0; 385 } 386 if (vnode_isdir(vp)) 387 op = FUSE_FSYNCDIR; 388 389 if (fsess_not_impl(mp, op)) 390 return 0; 391 392 fdisp_init(&fdi, sizeof(*ffsi)); 393 /* 394 * fsync every open file handle for this file, because we can't be sure 395 * which file handle the caller is really referring to. 396 */ 397 LIST_FOREACH(fufh, &fvdat->handles, next) { 398 fdi.iosize = sizeof(*ffsi); 399 if (ffsi == NULL) 400 fdisp_make_vp(&fdi, op, vp, td, NULL); 401 else 402 fdisp_refresh_vp(&fdi, op, vp, td, NULL); 403 ffsi = fdi.indata; 404 ffsi->fh = fufh->fh_id; 405 ffsi->fsync_flags = 0; 406 407 if (datasync) 408 ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC; 409 410 if (waitfor == MNT_WAIT) { 411 err = fdisp_wait_answ(&fdi); 412 } else { 413 fuse_insert_callback(fdi.tick, 414 fuse_internal_fsync_callback); 415 fuse_insert_message(fdi.tick, false); 416 } 417 if (err == ENOSYS) { 418 /* ENOSYS means "success, and don't call again" */ 419 fsess_set_notimpl(mp, op); 420 err = 0; 421 break; 422 } 423 } 424 fdisp_destroy(&fdi); 425 426 return err; 427 } 428 429 /* Asynchronous invalidation */ 430 SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry, 431 "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*"); 432 int 433 fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio) 434 { 435 struct fuse_notify_inval_entry_out fnieo; 436 struct componentname cn; 437 struct vnode *dvp, *vp; 438 char name[PATH_MAX]; 439 int err; 440 441 if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0) 442 return (err); 443 444 if (fnieo.namelen >= sizeof(name)) 445 return (EINVAL); 446 447 if ((err = uiomove(name, fnieo.namelen, uio)) != 0) 448 return (err); 449 name[fnieo.namelen] = '\0'; 450 /* fusefs does not cache "." or ".." entries */ 451 if (strncmp(name, ".", sizeof(".")) == 0 || 452 strncmp(name, "..", sizeof("..")) == 0) 453 return (0); 454 455 if (fnieo.parent == FUSE_ROOT_ID) 456 err = VFS_ROOT(mp, LK_SHARED, &dvp); 457 else 458 err = fuse_internal_get_cached_vnode( mp, fnieo.parent, 459 LK_SHARED, &dvp); 460 SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name); 461 /* 462 * If dvp is not in the cache, then it must've been reclaimed. And 463 * since fuse_vnop_reclaim does a cache_purge, name's entry must've 464 * been invalidated already. So we can safely return if dvp == NULL 465 */ 466 if (err != 0 || dvp == NULL) 467 return (err); 468 /* 469 * XXX we can't check dvp's generation because the FUSE invalidate 470 * entry message doesn't include it. Worse case is that we invalidate 471 * an entry that didn't need to be invalidated. 472 */ 473 474 cn.cn_nameiop = LOOKUP; 475 cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */ 476 cn.cn_cred = curthread->td_ucred; 477 cn.cn_lkflags = LK_SHARED; 478 cn.cn_pnbuf = NULL; 479 cn.cn_nameptr = name; 480 cn.cn_namelen = fnieo.namelen; 481 err = cache_lookup(dvp, &vp, &cn, NULL, NULL); 482 MPASS(err == 0); 483 fuse_vnode_clear_attr_cache(dvp); 484 vput(dvp); 485 return (0); 486 } 487 488 SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode, 489 "struct vnode*", "struct fuse_notify_inval_inode_out *"); 490 int 491 fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio) 492 { 493 struct fuse_notify_inval_inode_out fniio; 494 struct vnode *vp; 495 int err; 496 497 if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0) 498 return (err); 499 500 if (fniio.ino == FUSE_ROOT_ID) 501 err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp); 502 else 503 err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED, 504 &vp); 505 SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio); 506 if (err != 0 || vp == NULL) 507 return (err); 508 /* 509 * XXX we can't check vp's generation because the FUSE invalidate 510 * entry message doesn't include it. Worse case is that we invalidate 511 * an inode that didn't need to be invalidated. 512 */ 513 514 /* 515 * Flush and invalidate buffers if off >= 0. Technically we only need 516 * to flush and invalidate the range of offsets [off, off + len), but 517 * for simplicity's sake we do everything. 518 */ 519 if (fniio.off >= 0) 520 fuse_io_invalbuf(vp, curthread); 521 fuse_vnode_clear_attr_cache(vp); 522 vput(vp); 523 return (0); 524 } 525 526 /* mknod */ 527 int 528 fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp, 529 struct componentname *cnp, struct vattr *vap) 530 { 531 struct fuse_data *data; 532 struct fuse_mknod_in fmni; 533 size_t insize; 534 535 data = fuse_get_mpdata(dvp->v_mount); 536 537 fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode); 538 fmni.rdev = vap->va_rdev; 539 if (fuse_libabi_geq(data, 7, 12)) { 540 insize = sizeof(fmni); 541 fmni.umask = curthread->td_proc->p_pd->pd_cmask; 542 fmni.padding = 0; 543 } else { 544 insize = FUSE_COMPAT_MKNOD_IN_SIZE; 545 } 546 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni, 547 insize, vap->va_type)); 548 } 549 550 /* readdir */ 551 552 int 553 fuse_internal_readdir(struct vnode *vp, 554 struct uio *uio, 555 struct fuse_filehandle *fufh, 556 struct fuse_iov *cookediov, 557 int *ncookies, 558 uint64_t *cookies) 559 { 560 int err = 0; 561 struct fuse_dispatcher fdi; 562 struct fuse_read_in *fri = NULL; 563 564 if (uio_resid(uio) == 0) 565 return 0; 566 fdisp_init(&fdi, 0); 567 568 /* 569 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p 570 * I/O). 571 */ 572 while (uio_resid(uio) > 0) { 573 fdi.iosize = sizeof(*fri); 574 fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); 575 fri = fdi.indata; 576 fri->fh = fufh->fh_id; 577 fri->offset = uio_offset(uio); 578 fri->size = MIN(uio->uio_resid, 579 fuse_get_mpdata(vp->v_mount)->max_read); 580 581 if ((err = fdisp_wait_answ(&fdi))) 582 break; 583 if ((err = fuse_internal_readdir_processdata(uio, fri->size, 584 fdi.answ, fdi.iosize, cookediov, ncookies, &cookies))) 585 break; 586 } 587 588 fdisp_destroy(&fdi); 589 return ((err == -1) ? 0 : err); 590 } 591 592 /* 593 * Return -1 to indicate that this readdir is finished, 0 if it copied 594 * all the directory data read in and it may be possible to read more 595 * and greater than 0 for a failure. 596 */ 597 int 598 fuse_internal_readdir_processdata(struct uio *uio, 599 size_t reqsize, 600 void *buf, 601 size_t bufsize, 602 struct fuse_iov *cookediov, 603 int *ncookies, 604 uint64_t **cookiesp) 605 { 606 int err = 0; 607 int oreclen; 608 size_t freclen; 609 610 struct dirent *de; 611 struct fuse_dirent *fudge; 612 uint64_t *cookies; 613 614 cookies = *cookiesp; 615 if (bufsize < FUSE_NAME_OFFSET) 616 return -1; 617 for (;;) { 618 if (bufsize < FUSE_NAME_OFFSET) { 619 err = -1; 620 break; 621 } 622 fudge = (struct fuse_dirent *)buf; 623 freclen = FUSE_DIRENT_SIZE(fudge); 624 625 if (bufsize < freclen) { 626 /* 627 * This indicates a partial directory entry at the 628 * end of the directory data. 629 */ 630 err = -1; 631 break; 632 } 633 #ifdef ZERO_PAD_INCOMPLETE_BUFS 634 if (isbzero(buf, FUSE_NAME_OFFSET)) { 635 err = -1; 636 break; 637 } 638 #endif 639 640 if (!fudge->namelen || fudge->namelen > MAXNAMLEN) { 641 err = EINVAL; 642 break; 643 } 644 oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *) 645 &fudge->namelen); 646 647 if (oreclen > uio_resid(uio)) { 648 /* Out of space for the dir so we are done. */ 649 err = -1; 650 break; 651 } 652 fiov_adjust(cookediov, oreclen); 653 bzero(cookediov->base, oreclen); 654 655 de = (struct dirent *)cookediov->base; 656 de->d_fileno = fudge->ino; 657 de->d_off = fudge->off; 658 de->d_reclen = oreclen; 659 de->d_type = fudge->type; 660 de->d_namlen = fudge->namelen; 661 memcpy((char *)cookediov->base + sizeof(struct dirent) - 662 MAXNAMLEN - 1, 663 (char *)buf + FUSE_NAME_OFFSET, fudge->namelen); 664 dirent_terminate(de); 665 666 err = uiomove(cookediov->base, cookediov->len, uio); 667 if (err) 668 break; 669 if (cookies != NULL) { 670 if (*ncookies == 0) { 671 err = -1; 672 break; 673 } 674 *cookies = fudge->off; 675 cookies++; 676 (*ncookies)--; 677 } 678 buf = (char *)buf + freclen; 679 bufsize -= freclen; 680 uio_setoffset(uio, fudge->off); 681 } 682 *cookiesp = cookies; 683 684 return err; 685 } 686 687 /* remove */ 688 689 int 690 fuse_internal_remove(struct vnode *dvp, 691 struct vnode *vp, 692 struct componentname *cnp, 693 enum fuse_opcode op) 694 { 695 struct fuse_dispatcher fdi; 696 nlink_t nlink; 697 int err = 0; 698 699 fdisp_init(&fdi, cnp->cn_namelen + 1); 700 fdisp_make_vp(&fdi, op, dvp, curthread, cnp->cn_cred); 701 702 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); 703 ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; 704 705 err = fdisp_wait_answ(&fdi); 706 fdisp_destroy(&fdi); 707 708 if (err) 709 return (err); 710 711 /* 712 * Access the cached nlink even if the attr cached has expired. If 713 * it's inaccurate, the worst that will happen is: 714 * 1) We'll recycle the vnode even though the file has another link we 715 * don't know about, costing a bit of cpu time, or 716 * 2) We won't recycle the vnode even though all of its links are gone. 717 * It will linger around until vnlru reclaims it, costing a bit of 718 * temporary memory. 719 */ 720 nlink = VTOFUD(vp)->cached_attrs.va_nlink--; 721 722 /* 723 * Purge the parent's attribute cache because the daemon 724 * should've updated its mtime and ctime. 725 */ 726 fuse_vnode_clear_attr_cache(dvp); 727 728 /* NB: nlink could be zero if it was never cached */ 729 if (nlink <= 1 || vnode_vtype(vp) == VDIR) { 730 fuse_internal_vnode_disappear(vp); 731 } else { 732 cache_purge(vp); 733 fuse_vnode_update(vp, FN_CTIMECHANGE); 734 } 735 736 return err; 737 } 738 739 /* rename */ 740 741 int 742 fuse_internal_rename(struct vnode *fdvp, 743 struct componentname *fcnp, 744 struct vnode *tdvp, 745 struct componentname *tcnp) 746 { 747 struct fuse_dispatcher fdi; 748 struct fuse_rename_in *fri; 749 int err = 0; 750 751 fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2); 752 fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, curthread, tcnp->cn_cred); 753 754 fri = fdi.indata; 755 fri->newdir = VTOI(tdvp); 756 memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr, 757 fcnp->cn_namelen); 758 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0'; 759 memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1, 760 tcnp->cn_nameptr, tcnp->cn_namelen); 761 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen + 762 tcnp->cn_namelen + 1] = '\0'; 763 764 err = fdisp_wait_answ(&fdi); 765 fdisp_destroy(&fdi); 766 return err; 767 } 768 769 /* strategy */ 770 771 /* entity creation */ 772 773 void 774 fuse_internal_newentry_makerequest(struct mount *mp, 775 uint64_t dnid, 776 struct componentname *cnp, 777 enum fuse_opcode op, 778 void *buf, 779 size_t bufsize, 780 struct fuse_dispatcher *fdip) 781 { 782 fdip->iosize = bufsize + cnp->cn_namelen + 1; 783 784 fdisp_make(fdip, op, mp, dnid, curthread, cnp->cn_cred); 785 memcpy(fdip->indata, buf, bufsize); 786 memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen); 787 ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0'; 788 } 789 790 int 791 fuse_internal_newentry_core(struct vnode *dvp, 792 struct vnode **vpp, 793 struct componentname *cnp, 794 __enum_uint8(vtype) vtyp, 795 struct fuse_dispatcher *fdip) 796 { 797 int err = 0; 798 struct fuse_entry_out *feo; 799 struct mount *mp = vnode_mount(dvp); 800 801 if ((err = fdisp_wait_answ(fdip))) { 802 return err; 803 } 804 feo = fdip->answ; 805 806 if ((err = fuse_internal_checkentry(feo, vtyp))) { 807 return err; 808 } 809 err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp); 810 if (err) { 811 fuse_internal_forget_send(mp, curthread, cnp->cn_cred, 812 feo->nodeid, 1); 813 return err; 814 } 815 816 /* 817 * Purge the parent's attribute cache because the daemon should've 818 * updated its mtime and ctime 819 */ 820 fuse_vnode_clear_attr_cache(dvp); 821 822 fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, 823 feo->attr_valid_nsec, NULL, true); 824 825 return err; 826 } 827 828 int 829 fuse_internal_newentry(struct vnode *dvp, 830 struct vnode **vpp, 831 struct componentname *cnp, 832 enum fuse_opcode op, 833 void *buf, 834 size_t bufsize, 835 __enum_uint8(vtype) vtype) 836 { 837 int err; 838 struct fuse_dispatcher fdi; 839 struct mount *mp = vnode_mount(dvp); 840 841 fdisp_init(&fdi, 0); 842 fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf, 843 bufsize, &fdi); 844 err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi); 845 fdisp_destroy(&fdi); 846 847 return err; 848 } 849 850 /* entity destruction */ 851 852 int 853 fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio) 854 { 855 fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL, 856 ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1); 857 858 return 0; 859 } 860 861 void 862 fuse_internal_forget_send(struct mount *mp, 863 struct thread *td, 864 struct ucred *cred, 865 uint64_t nodeid, 866 uint64_t nlookup) 867 { 868 869 struct fuse_dispatcher fdi; 870 struct fuse_forget_in *ffi; 871 872 /* 873 * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu", 874 * (long long unsigned) nodeid)); 875 */ 876 877 fdisp_init(&fdi, sizeof(*ffi)); 878 fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred); 879 880 ffi = fdi.indata; 881 ffi->nlookup = nlookup; 882 883 fuse_insert_message(fdi.tick, false); 884 fdisp_destroy(&fdi); 885 } 886 887 /* Fetch the vnode's attributes from the daemon*/ 888 int 889 fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap, 890 struct ucred *cred, struct thread *td) 891 { 892 struct fuse_dispatcher fdi; 893 struct fuse_vnode_data *fvdat = VTOFUD(vp); 894 struct fuse_getattr_in *fgai; 895 struct fuse_attr_out *fao; 896 off_t old_filesize = fvdat->cached_attrs.va_size; 897 struct timespec old_atime = fvdat->cached_attrs.va_atime; 898 struct timespec old_ctime = fvdat->cached_attrs.va_ctime; 899 struct timespec old_mtime = fvdat->cached_attrs.va_mtime; 900 __enum_uint8(vtype) vtyp; 901 int err; 902 903 ASSERT_VOP_LOCKED(vp, __func__); 904 905 fdisp_init(&fdi, sizeof(*fgai)); 906 fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred); 907 fgai = fdi.indata; 908 /* 909 * We could look up a file handle and set it in fgai->fh, but that 910 * involves extra runtime work and I'm unaware of any file systems that 911 * care. 912 */ 913 fgai->getattr_flags = 0; 914 if ((err = fdisp_wait_answ(&fdi))) { 915 if (err == ENOENT) 916 fuse_internal_vnode_disappear(vp); 917 goto out; 918 } 919 920 fao = (struct fuse_attr_out *)fdi.answ; 921 vtyp = IFTOVT(fao->attr.mode); 922 if (fvdat->flag & FN_SIZECHANGE) 923 fao->attr.size = old_filesize; 924 if (fvdat->flag & FN_ATIMECHANGE) { 925 fao->attr.atime = old_atime.tv_sec; 926 fao->attr.atimensec = old_atime.tv_nsec; 927 } 928 if (fvdat->flag & FN_CTIMECHANGE) { 929 fao->attr.ctime = old_ctime.tv_sec; 930 fao->attr.ctimensec = old_ctime.tv_nsec; 931 } 932 if (fvdat->flag & FN_MTIMECHANGE) { 933 fao->attr.mtime = old_mtime.tv_sec; 934 fao->attr.mtimensec = old_mtime.tv_nsec; 935 } 936 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, 937 fao->attr_valid_nsec, vap, true); 938 if (vtyp != vnode_vtype(vp)) { 939 fuse_internal_vnode_disappear(vp); 940 err = ENOENT; 941 } 942 943 out: 944 fdisp_destroy(&fdi); 945 return err; 946 } 947 948 /* Read a vnode's attributes from cache or fetch them from the fuse daemon */ 949 int 950 fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, 951 struct thread *td) 952 { 953 struct vattr *attrs; 954 955 if ((attrs = VTOVA(vp)) != NULL) { 956 *vap = *attrs; /* struct copy */ 957 return 0; 958 } 959 960 return fuse_internal_do_getattr(vp, vap, cred, td); 961 } 962 963 void 964 fuse_internal_vnode_disappear(struct vnode *vp) 965 { 966 struct fuse_vnode_data *fvdat = VTOFUD(vp); 967 968 ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear"); 969 fvdat->flag |= FN_REVOKED; 970 cache_purge(vp); 971 } 972 973 /* fuse start/stop */ 974 975 SDT_PROBE_DEFINE2(fusefs, , internal, init_done, 976 "struct fuse_data*", "struct fuse_init_out*"); 977 int 978 fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio) 979 { 980 int err = 0; 981 struct fuse_data *data = tick->tk_data; 982 struct fuse_init_out *fiio = NULL; 983 984 if ((err = tick->tk_aw_ohead.error)) { 985 goto out; 986 } 987 if ((err = fticket_pull(tick, uio))) { 988 goto out; 989 } 990 fiio = fticket_resp(tick)->base; 991 992 data->fuse_libabi_major = fiio->major; 993 data->fuse_libabi_minor = fiio->minor; 994 if (!fuse_libabi_geq(data, 7, 4)) { 995 /* 996 * With a little work we could support servers as old as 7.1. 997 * But there would be little payoff. 998 */ 999 SDT_PROBE2(fusefs, , internal, trace, 1, 1000 "userpace version too low"); 1001 err = EPROTONOSUPPORT; 1002 goto out; 1003 } 1004 1005 if (fuse_libabi_geq(data, 7, 5)) { 1006 if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) || 1007 fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) { 1008 data->max_write = fiio->max_write; 1009 if (fiio->flags & FUSE_ASYNC_READ) 1010 data->dataflags |= FSESS_ASYNC_READ; 1011 if (fiio->flags & FUSE_POSIX_LOCKS) 1012 data->dataflags |= FSESS_POSIX_LOCKS; 1013 if (fiio->flags & FUSE_EXPORT_SUPPORT) 1014 data->dataflags |= FSESS_EXPORT_SUPPORT; 1015 if (fiio->flags & FUSE_NO_OPEN_SUPPORT) 1016 data->dataflags |= FSESS_NO_OPEN_SUPPORT; 1017 if (fiio->flags & FUSE_NO_OPENDIR_SUPPORT) 1018 data->dataflags |= FSESS_NO_OPENDIR_SUPPORT; 1019 /* 1020 * Don't bother to check FUSE_BIG_WRITES, because it's 1021 * redundant with max_write 1022 */ 1023 /* 1024 * max_background and congestion_threshold are not 1025 * implemented 1026 */ 1027 } else { 1028 err = EINVAL; 1029 } 1030 } else { 1031 /* Old fixed values */ 1032 data->max_write = 4096; 1033 } 1034 1035 if (fuse_libabi_geq(data, 7, 6)) 1036 data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf; 1037 1038 if (!fuse_libabi_geq(data, 7, 7)) 1039 fsess_set_notimpl(data->mp, FUSE_INTERRUPT); 1040 1041 if (!fuse_libabi_geq(data, 7, 8)) { 1042 fsess_set_notimpl(data->mp, FUSE_BMAP); 1043 fsess_set_notimpl(data->mp, FUSE_DESTROY); 1044 } 1045 1046 if (!fuse_libabi_geq(data, 7, 19)) { 1047 fsess_set_notimpl(data->mp, FUSE_FALLOCATE); 1048 } 1049 1050 if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 && 1051 fiio->time_gran <= 1000000000) 1052 data->time_gran = fiio->time_gran; 1053 else 1054 data->time_gran = 1; 1055 1056 if (!fuse_libabi_geq(data, 7, 23)) 1057 data->cache_mode = fuse_data_cache_mode; 1058 else if (fiio->flags & FUSE_WRITEBACK_CACHE) 1059 data->cache_mode = FUSE_CACHE_WB; 1060 else 1061 data->cache_mode = FUSE_CACHE_WT; 1062 1063 if (!fuse_libabi_geq(data, 7, 24)) 1064 fsess_set_notimpl(data->mp, FUSE_LSEEK); 1065 1066 if (!fuse_libabi_geq(data, 7, 28)) 1067 fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE); 1068 1069 out: 1070 if (err) { 1071 fdata_set_dead(data); 1072 } 1073 FUSE_LOCK(); 1074 data->dataflags |= FSESS_INITED; 1075 SDT_PROBE2(fusefs, , internal, init_done, data, fiio); 1076 wakeup(&data->ticketer); 1077 FUSE_UNLOCK(); 1078 1079 return 0; 1080 } 1081 1082 void 1083 fuse_internal_send_init(struct fuse_data *data, struct thread *td) 1084 { 1085 struct fuse_init_in *fiii; 1086 struct fuse_dispatcher fdi; 1087 1088 fdisp_init(&fdi, sizeof(*fiii)); 1089 fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL); 1090 fiii = fdi.indata; 1091 fiii->major = FUSE_KERNEL_VERSION; 1092 fiii->minor = FUSE_KERNEL_MINOR_VERSION; 1093 /* 1094 * fusefs currently reads ahead no more than one cache block at a time. 1095 * See fuse_read_biobackend 1096 */ 1097 fiii->max_readahead = maxbcachebuf; 1098 /* 1099 * Unsupported features: 1100 * FUSE_FILE_OPS: No known FUSE server or client supports it 1101 * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it 1102 * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even 1103 * when default ACLs are in use. 1104 * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD 1105 * doesn't have splice(2). 1106 * FUSE_FLOCK_LOCKS: not yet implemented 1107 * FUSE_HAS_IOCTL_DIR: not yet implemented 1108 * FUSE_AUTO_INVAL_DATA: not yet implemented 1109 * FUSE_DO_READDIRPLUS: not yet implemented 1110 * FUSE_READDIRPLUS_AUTO: not yet implemented 1111 * FUSE_ASYNC_DIO: not yet implemented 1112 * FUSE_PARALLEL_DIROPS: not yet implemented 1113 * FUSE_HANDLE_KILLPRIV: not yet implemented 1114 * FUSE_POSIX_ACL: not yet implemented 1115 * FUSE_ABORT_ERROR: not yet implemented 1116 * FUSE_CACHE_SYMLINKS: not yet implemented 1117 * FUSE_MAX_PAGES: not yet implemented 1118 */ 1119 fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT 1120 | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE 1121 | FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT; 1122 1123 fuse_insert_callback(fdi.tick, fuse_internal_init_callback); 1124 fuse_insert_message(fdi.tick, false); 1125 fdisp_destroy(&fdi); 1126 } 1127 1128 /* 1129 * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL, 1130 * send the request with root credentials 1131 */ 1132 int fuse_internal_setattr(struct vnode *vp, struct vattr *vap, 1133 struct thread *td, struct ucred *cred) 1134 { 1135 struct fuse_vnode_data *fvdat; 1136 struct fuse_dispatcher fdi; 1137 struct fuse_setattr_in *fsai; 1138 struct mount *mp; 1139 pid_t pid = td->td_proc->p_pid; 1140 struct fuse_data *data; 1141 int err = 0; 1142 __enum_uint8(vtype) vtyp; 1143 1144 ASSERT_VOP_ELOCKED(vp, __func__); 1145 1146 mp = vnode_mount(vp); 1147 fvdat = VTOFUD(vp); 1148 data = fuse_get_mpdata(mp); 1149 1150 fdisp_init(&fdi, sizeof(*fsai)); 1151 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); 1152 if (!cred) { 1153 fdi.finh->uid = 0; 1154 fdi.finh->gid = 0; 1155 } 1156 fsai = fdi.indata; 1157 fsai->valid = 0; 1158 1159 if (vap->va_uid != (uid_t)VNOVAL) { 1160 fsai->uid = vap->va_uid; 1161 fsai->valid |= FATTR_UID; 1162 } 1163 if (vap->va_gid != (gid_t)VNOVAL) { 1164 fsai->gid = vap->va_gid; 1165 fsai->valid |= FATTR_GID; 1166 } 1167 if (vap->va_size != VNOVAL) { 1168 struct fuse_filehandle *fufh = NULL; 1169 1170 /*Truncate to a new value. */ 1171 fsai->size = vap->va_size; 1172 fsai->valid |= FATTR_SIZE; 1173 1174 fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); 1175 if (fufh) { 1176 fsai->fh = fufh->fh_id; 1177 fsai->valid |= FATTR_FH; 1178 } 1179 VTOFUD(vp)->flag &= ~FN_SIZECHANGE; 1180 } 1181 if (vap->va_atime.tv_sec != VNOVAL) { 1182 fsai->atime = vap->va_atime.tv_sec; 1183 fsai->atimensec = vap->va_atime.tv_nsec; 1184 fsai->valid |= FATTR_ATIME; 1185 if (vap->va_vaflags & VA_UTIMES_NULL) 1186 fsai->valid |= FATTR_ATIME_NOW; 1187 } else if (fvdat->flag & FN_ATIMECHANGE) { 1188 fsai->atime = fvdat->cached_attrs.va_atime.tv_sec; 1189 fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec; 1190 fsai->valid |= FATTR_ATIME; 1191 } 1192 if (vap->va_mtime.tv_sec != VNOVAL) { 1193 fsai->mtime = vap->va_mtime.tv_sec; 1194 fsai->mtimensec = vap->va_mtime.tv_nsec; 1195 fsai->valid |= FATTR_MTIME; 1196 if (vap->va_vaflags & VA_UTIMES_NULL) 1197 fsai->valid |= FATTR_MTIME_NOW; 1198 } else if (fvdat->flag & FN_MTIMECHANGE) { 1199 fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec; 1200 fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec; 1201 fsai->valid |= FATTR_MTIME; 1202 } 1203 if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) { 1204 fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec; 1205 fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec; 1206 fsai->valid |= FATTR_CTIME; 1207 } 1208 if (vap->va_mode != (mode_t)VNOVAL) { 1209 fsai->mode = vap->va_mode & ALLPERMS; 1210 fsai->valid |= FATTR_MODE; 1211 } 1212 if (!fsai->valid) { 1213 goto out; 1214 } 1215 1216 if ((err = fdisp_wait_answ(&fdi))) 1217 goto out; 1218 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode); 1219 1220 if (vnode_vtype(vp) != vtyp) { 1221 if (vnode_vtype(vp) == VNON && vtyp != VNON) { 1222 SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! " 1223 "vnode_vtype is VNON and vtype isn't."); 1224 } else { 1225 /* 1226 * STALE vnode, ditch 1227 * 1228 * The vnode has changed its type "behind our back". 1229 * This probably means that the file got deleted and 1230 * recreated on the server, with the same inode. 1231 * There's nothing really we can do, so let us just 1232 * return ENOENT. After all, the entry must not have 1233 * existed in the recent past. If the user tries 1234 * again, it will work. 1235 */ 1236 fuse_internal_vnode_disappear(vp); 1237 err = ENOENT; 1238 } 1239 } 1240 if (err == 0) { 1241 struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ; 1242 fuse_vnode_undirty_cached_timestamps(vp, true); 1243 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, 1244 fao->attr_valid_nsec, NULL, false); 1245 getnanouptime(&fvdat->last_local_modify); 1246 } 1247 1248 out: 1249 fdisp_destroy(&fdi); 1250 return err; 1251 } 1252 1253 /* 1254 * FreeBSD clears the SUID and SGID bits on any write by a non-root user. 1255 */ 1256 void 1257 fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred, 1258 struct thread *td) 1259 { 1260 struct fuse_data *data; 1261 struct mount *mp; 1262 struct vattr va; 1263 int dataflags; 1264 1265 mp = vnode_mount(vp); 1266 data = fuse_get_mpdata(mp); 1267 dataflags = data->dataflags; 1268 1269 ASSERT_VOP_LOCKED(vp, __func__); 1270 1271 if (dataflags & FSESS_DEFAULT_PERMISSIONS) { 1272 if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) { 1273 fuse_internal_getattr(vp, &va, cred, td); 1274 if (va.va_mode & (S_ISUID | S_ISGID)) { 1275 mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID); 1276 /* Clear all vattr fields except mode */ 1277 vattr_null(&va); 1278 va.va_mode = mode; 1279 1280 /* 1281 * Ignore fuse_internal_setattr's return value, 1282 * because at this point the write operation has 1283 * already succeeded and we don't want to return 1284 * failing status for that. 1285 */ 1286 (void)fuse_internal_setattr(vp, &va, td, NULL); 1287 } 1288 } 1289 } 1290 } 1291 1292 #ifdef ZERO_PAD_INCOMPLETE_BUFS 1293 static int 1294 isbzero(void *buf, size_t len) 1295 { 1296 int i; 1297 1298 for (i = 0; i < len; i++) { 1299 if (((char *)buf)[i]) 1300 return (0); 1301 } 1302 1303 return (1); 1304 } 1305 1306 #endif 1307 1308 void 1309 fuse_internal_init(void) 1310 { 1311 fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK); 1312 fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK); 1313 } 1314 1315 void 1316 fuse_internal_destroy(void) 1317 { 1318 counter_u64_free(fuse_lookup_cache_hits); 1319 counter_u64_free(fuse_lookup_cache_misses); 1320 } 1321