1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2007-2009 Google Inc. and Amit Singh 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are 9 * met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following disclaimer 15 * in the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Google Inc. nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Copyright (C) 2005 Csaba Henk. 34 * All rights reserved. 35 * 36 * Copyright (c) 2019 The FreeBSD Foundation 37 * 38 * Portions of this software were developed by BFF Storage Systems, LLC under 39 * sponsorship from the FreeBSD Foundation. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 50 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 61 */ 62 63 #include <sys/param.h> 64 #include <sys/systm.h> 65 #include <sys/counter.h> 66 #include <sys/module.h> 67 #include <sys/errno.h> 68 #include <sys/kernel.h> 69 #include <sys/conf.h> 70 #include <sys/uio.h> 71 #include <sys/malloc.h> 72 #include <sys/queue.h> 73 #include <sys/lock.h> 74 #include <sys/mutex.h> 75 #include <sys/sdt.h> 76 #include <sys/sx.h> 77 #include <sys/proc.h> 78 #include <sys/mount.h> 79 #include <sys/vnode.h> 80 #include <sys/namei.h> 81 #include <sys/stat.h> 82 #include <sys/unistd.h> 83 #include <sys/filedesc.h> 84 #include <sys/file.h> 85 #include <sys/fcntl.h> 86 #include <sys/dirent.h> 87 #include <sys/bio.h> 88 #include <sys/buf.h> 89 #include <sys/sysctl.h> 90 #include <sys/priv.h> 91 92 #include "fuse.h" 93 #include "fuse_file.h" 94 #include "fuse_internal.h" 95 #include "fuse_io.h" 96 #include "fuse_ipc.h" 97 #include "fuse_node.h" 98 #include "fuse_file.h" 99 100 SDT_PROVIDER_DECLARE(fusefs); 101 /* 102 * Fuse trace probe: 103 * arg0: verbosity. Higher numbers give more verbose messages 104 * arg1: Textual message 105 */ 106 SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*"); 107 108 #ifdef ZERO_PAD_INCOMPLETE_BUFS 109 static int isbzero(void *buf, size_t len); 110 111 #endif 112 113 counter_u64_t fuse_lookup_cache_hits; 114 counter_u64_t fuse_lookup_cache_misses; 115 116 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD, 117 &fuse_lookup_cache_hits, "number of positive cache hits in lookup"); 118 119 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD, 120 &fuse_lookup_cache_misses, "number of cache misses in lookup"); 121 122 int 123 fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags, 124 struct vnode **vpp) 125 { 126 struct bintime now; 127 struct thread *td = curthread; 128 uint64_t nodeid = ino; 129 int error; 130 131 *vpp = NULL; 132 133 error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp, 134 fuse_vnode_cmp, &nodeid); 135 if (error) 136 return error; 137 /* 138 * Check the entry cache timeout. We have to do this within fusefs 139 * instead of by using cache_enter_time/cache_lookup because those 140 * routines are only intended to work with pathnames, not inodes 141 */ 142 if (*vpp != NULL) { 143 getbinuptime(&now); 144 if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){ 145 counter_u64_add(fuse_lookup_cache_hits, 1); 146 return 0; 147 } else { 148 /* Entry cache timeout */ 149 counter_u64_add(fuse_lookup_cache_misses, 1); 150 cache_purge(*vpp); 151 vput(*vpp); 152 *vpp = NULL; 153 } 154 } 155 return 0; 156 } 157 158 SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin); 159 /* Synchronously send a FUSE_ACCESS operation */ 160 int 161 fuse_internal_access(struct vnode *vp, 162 accmode_t mode, 163 struct thread *td, 164 struct ucred *cred) 165 { 166 int err = 0; 167 uint32_t mask = F_OK; 168 int dataflags; 169 struct mount *mp; 170 struct fuse_dispatcher fdi; 171 struct fuse_access_in *fai; 172 struct fuse_data *data; 173 174 mp = vnode_mount(vp); 175 176 data = fuse_get_mpdata(mp); 177 dataflags = data->dataflags; 178 179 if (mode == 0) 180 return 0; 181 182 if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) { 183 switch (vp->v_type) { 184 case VDIR: 185 /* FALLTHROUGH */ 186 case VLNK: 187 /* FALLTHROUGH */ 188 case VREG: 189 return EROFS; 190 default: 191 break; 192 } 193 } 194 195 /* Unless explicitly permitted, deny everyone except the fs owner. */ 196 if (!(dataflags & FSESS_DAEMON_CAN_SPY)) { 197 if (fuse_match_cred(data->daemoncred, cred)) 198 return EPERM; 199 } 200 201 if (dataflags & FSESS_DEFAULT_PERMISSIONS) { 202 struct vattr va; 203 204 fuse_internal_getattr(vp, &va, cred, td); 205 return vaccess(vp->v_type, va.va_mode, va.va_uid, 206 va.va_gid, mode, cred); 207 } 208 209 if (mode & VADMIN) { 210 /* 211 * The FUSE protocol doesn't have an equivalent of VADMIN, so 212 * it's a bug if we ever reach this point with that bit set. 213 */ 214 SDT_PROBE0(fusefs, , internal, access_vadmin); 215 } 216 217 if (fsess_not_impl(mp, FUSE_ACCESS)) 218 return 0; 219 220 if ((mode & (VWRITE | VAPPEND)) != 0) 221 mask |= W_OK; 222 if ((mode & VREAD) != 0) 223 mask |= R_OK; 224 if ((mode & VEXEC) != 0) 225 mask |= X_OK; 226 227 fdisp_init(&fdi, sizeof(*fai)); 228 fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred); 229 230 fai = fdi.indata; 231 fai->mask = mask; 232 233 err = fdisp_wait_answ(&fdi); 234 fdisp_destroy(&fdi); 235 236 if (err == ENOSYS) { 237 fsess_set_notimpl(mp, FUSE_ACCESS); 238 err = 0; 239 } 240 return err; 241 } 242 243 /* 244 * Cache FUSE attributes from attr, in attribute cache associated with vnode 245 * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the 246 * converted attributes there as well. 247 * 248 * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do 249 * return the result to the caller). 250 */ 251 void 252 fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr, 253 uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap, 254 bool from_server) 255 { 256 struct mount *mp; 257 struct fuse_vnode_data *fvdat; 258 struct fuse_data *data; 259 struct vattr *vp_cache_at; 260 261 mp = vnode_mount(vp); 262 fvdat = VTOFUD(vp); 263 data = fuse_get_mpdata(mp); 264 265 ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs"); 266 267 fuse_validity_2_bintime(attr_valid, attr_valid_nsec, 268 &fvdat->attr_cache_timeout); 269 270 if (vnode_isreg(vp) && 271 fvdat->cached_attrs.va_size != VNOVAL && 272 attr->size != fvdat->cached_attrs.va_size) 273 { 274 if ( data->cache_mode == FUSE_CACHE_WB && 275 fvdat->flag & FN_SIZECHANGE) 276 { 277 const char *msg; 278 279 /* 280 * The server changed the file's size even though we're 281 * using writeback cacheing and and we have outstanding 282 * dirty writes! That's a server bug. 283 */ 284 if (fuse_libabi_geq(data, 7, 23)) { 285 msg = "writeback cache incoherent!." 286 "To prevent data corruption, disable " 287 "the writeback cache according to your " 288 "FUSE server's documentation."; 289 } else { 290 msg = "writeback cache incoherent!." 291 "To prevent data corruption, disable " 292 "the writeback cache by setting " 293 "vfs.fusefs.data_cache_mode to 0 or 1."; 294 } 295 fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg); 296 } 297 if (fuse_vnode_attr_cache_valid(vp) && 298 data->cache_mode != FUSE_CACHE_UC) 299 { 300 /* 301 * The server changed the file's size even though we 302 * have it cached and our cache has not yet expired. 303 * That's a bug. 304 */ 305 fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT, 306 "cache incoherent! " 307 "To prevent " 308 "data corruption, disable the data cache " 309 "by mounting with -o direct_io, or as " 310 "directed otherwise by your FUSE server's " 311 "documentation."); 312 } 313 } 314 315 /* Fix our buffers if the filesize changed without us knowing */ 316 if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) { 317 (void)fuse_vnode_setsize(vp, attr->size, from_server); 318 fvdat->cached_attrs.va_size = attr->size; 319 } 320 321 if (attr_valid > 0 || attr_valid_nsec > 0) 322 vp_cache_at = &(fvdat->cached_attrs); 323 else if (vap != NULL) 324 vp_cache_at = vap; 325 else 326 return; 327 328 vattr_null(vp_cache_at); 329 vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0]; 330 vp_cache_at->va_fileid = attr->ino; 331 vp_cache_at->va_mode = attr->mode & ~S_IFMT; 332 vp_cache_at->va_nlink = attr->nlink; 333 vp_cache_at->va_uid = attr->uid; 334 vp_cache_at->va_gid = attr->gid; 335 vp_cache_at->va_rdev = attr->rdev; 336 vp_cache_at->va_size = attr->size; 337 /* XXX on i386, seconds are truncated to 32 bits */ 338 vp_cache_at->va_atime.tv_sec = attr->atime; 339 vp_cache_at->va_atime.tv_nsec = attr->atimensec; 340 vp_cache_at->va_mtime.tv_sec = attr->mtime; 341 vp_cache_at->va_mtime.tv_nsec = attr->mtimensec; 342 vp_cache_at->va_ctime.tv_sec = attr->ctime; 343 vp_cache_at->va_ctime.tv_nsec = attr->ctimensec; 344 if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0) 345 vp_cache_at->va_blocksize = attr->blksize; 346 else 347 vp_cache_at->va_blocksize = PAGE_SIZE; 348 vp_cache_at->va_type = IFTOVT(attr->mode); 349 vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE; 350 vp_cache_at->va_flags = 0; 351 352 if (vap != vp_cache_at && vap != NULL) 353 memcpy(vap, vp_cache_at, sizeof(*vap)); 354 } 355 356 /* fsync */ 357 358 int 359 fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio) 360 { 361 if (tick->tk_aw_ohead.error == ENOSYS) { 362 fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick)); 363 } 364 return 0; 365 } 366 367 int 368 fuse_internal_fsync(struct vnode *vp, 369 struct thread *td, 370 int waitfor, 371 bool datasync) 372 { 373 struct fuse_fsync_in *ffsi = NULL; 374 struct fuse_dispatcher fdi; 375 struct fuse_filehandle *fufh; 376 struct fuse_vnode_data *fvdat = VTOFUD(vp); 377 struct mount *mp = vnode_mount(vp); 378 int op = FUSE_FSYNC; 379 int err = 0; 380 381 if (fsess_not_impl(vnode_mount(vp), 382 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) { 383 return 0; 384 } 385 if (vnode_isdir(vp)) 386 op = FUSE_FSYNCDIR; 387 388 if (fsess_not_impl(mp, op)) 389 return 0; 390 391 fdisp_init(&fdi, sizeof(*ffsi)); 392 /* 393 * fsync every open file handle for this file, because we can't be sure 394 * which file handle the caller is really referring to. 395 */ 396 LIST_FOREACH(fufh, &fvdat->handles, next) { 397 fdi.iosize = sizeof(*ffsi); 398 if (ffsi == NULL) 399 fdisp_make_vp(&fdi, op, vp, td, NULL); 400 else 401 fdisp_refresh_vp(&fdi, op, vp, td, NULL); 402 ffsi = fdi.indata; 403 ffsi->fh = fufh->fh_id; 404 ffsi->fsync_flags = 0; 405 406 if (datasync) 407 ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC; 408 409 if (waitfor == MNT_WAIT) { 410 err = fdisp_wait_answ(&fdi); 411 } else { 412 fuse_insert_callback(fdi.tick, 413 fuse_internal_fsync_callback); 414 fuse_insert_message(fdi.tick, false); 415 } 416 if (err == ENOSYS) { 417 /* ENOSYS means "success, and don't call again" */ 418 fsess_set_notimpl(mp, op); 419 err = 0; 420 break; 421 } 422 } 423 fdisp_destroy(&fdi); 424 425 return err; 426 } 427 428 /* Asynchronous invalidation */ 429 SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry, 430 "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*"); 431 int 432 fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio) 433 { 434 struct fuse_notify_inval_entry_out fnieo; 435 struct componentname cn; 436 struct vnode *dvp, *vp; 437 char name[PATH_MAX]; 438 int err; 439 440 if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0) 441 return (err); 442 443 if (fnieo.namelen >= sizeof(name)) 444 return (EINVAL); 445 446 if ((err = uiomove(name, fnieo.namelen, uio)) != 0) 447 return (err); 448 name[fnieo.namelen] = '\0'; 449 /* fusefs does not cache "." or ".." entries */ 450 if (strncmp(name, ".", sizeof(".")) == 0 || 451 strncmp(name, "..", sizeof("..")) == 0) 452 return (0); 453 454 if (fnieo.parent == FUSE_ROOT_ID) 455 err = VFS_ROOT(mp, LK_SHARED, &dvp); 456 else 457 err = fuse_internal_get_cached_vnode( mp, fnieo.parent, 458 LK_SHARED, &dvp); 459 SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name); 460 /* 461 * If dvp is not in the cache, then it must've been reclaimed. And 462 * since fuse_vnop_reclaim does a cache_purge, name's entry must've 463 * been invalidated already. So we can safely return if dvp == NULL 464 */ 465 if (err != 0 || dvp == NULL) 466 return (err); 467 /* 468 * XXX we can't check dvp's generation because the FUSE invalidate 469 * entry message doesn't include it. Worse case is that we invalidate 470 * an entry that didn't need to be invalidated. 471 */ 472 473 cn.cn_nameiop = LOOKUP; 474 cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */ 475 cn.cn_cred = curthread->td_ucred; 476 cn.cn_lkflags = LK_SHARED; 477 cn.cn_pnbuf = NULL; 478 cn.cn_nameptr = name; 479 cn.cn_namelen = fnieo.namelen; 480 err = cache_lookup(dvp, &vp, &cn, NULL, NULL); 481 MPASS(err == 0); 482 fuse_vnode_clear_attr_cache(dvp); 483 vput(dvp); 484 return (0); 485 } 486 487 SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode, 488 "struct vnode*", "struct fuse_notify_inval_inode_out *"); 489 int 490 fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio) 491 { 492 struct fuse_notify_inval_inode_out fniio; 493 struct vnode *vp; 494 int err; 495 496 if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0) 497 return (err); 498 499 if (fniio.ino == FUSE_ROOT_ID) 500 err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp); 501 else 502 err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED, 503 &vp); 504 SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio); 505 if (err != 0 || vp == NULL) 506 return (err); 507 /* 508 * XXX we can't check vp's generation because the FUSE invalidate 509 * entry message doesn't include it. Worse case is that we invalidate 510 * an inode that didn't need to be invalidated. 511 */ 512 513 /* 514 * Flush and invalidate buffers if off >= 0. Technically we only need 515 * to flush and invalidate the range of offsets [off, off + len), but 516 * for simplicity's sake we do everything. 517 */ 518 if (fniio.off >= 0) 519 fuse_io_invalbuf(vp, curthread); 520 fuse_vnode_clear_attr_cache(vp); 521 vput(vp); 522 return (0); 523 } 524 525 /* mknod */ 526 int 527 fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp, 528 struct componentname *cnp, struct vattr *vap) 529 { 530 struct fuse_data *data; 531 struct fuse_mknod_in fmni; 532 size_t insize; 533 534 data = fuse_get_mpdata(dvp->v_mount); 535 536 fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode); 537 fmni.rdev = vap->va_rdev; 538 if (fuse_libabi_geq(data, 7, 12)) { 539 insize = sizeof(fmni); 540 fmni.umask = curthread->td_proc->p_pd->pd_cmask; 541 fmni.padding = 0; 542 } else { 543 insize = FUSE_COMPAT_MKNOD_IN_SIZE; 544 } 545 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni, 546 insize, vap->va_type)); 547 } 548 549 /* readdir */ 550 551 int 552 fuse_internal_readdir(struct vnode *vp, 553 struct uio *uio, 554 struct fuse_filehandle *fufh, 555 struct fuse_iov *cookediov, 556 int *ncookies, 557 uint64_t *cookies) 558 { 559 int err = 0; 560 struct fuse_dispatcher fdi; 561 struct fuse_read_in *fri = NULL; 562 563 if (uio_resid(uio) == 0) 564 return 0; 565 fdisp_init(&fdi, 0); 566 567 /* 568 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p 569 * I/O). 570 */ 571 while (uio_resid(uio) > 0) { 572 fdi.iosize = sizeof(*fri); 573 fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); 574 fri = fdi.indata; 575 fri->fh = fufh->fh_id; 576 fri->offset = uio_offset(uio); 577 fri->size = MIN(uio->uio_resid, 578 fuse_get_mpdata(vp->v_mount)->max_read); 579 580 if ((err = fdisp_wait_answ(&fdi))) 581 break; 582 if ((err = fuse_internal_readdir_processdata(uio, fri->size, 583 fdi.answ, fdi.iosize, cookediov, ncookies, &cookies))) 584 break; 585 } 586 587 fdisp_destroy(&fdi); 588 return ((err == -1) ? 0 : err); 589 } 590 591 /* 592 * Return -1 to indicate that this readdir is finished, 0 if it copied 593 * all the directory data read in and it may be possible to read more 594 * and greater than 0 for a failure. 595 */ 596 int 597 fuse_internal_readdir_processdata(struct uio *uio, 598 size_t reqsize, 599 void *buf, 600 size_t bufsize, 601 struct fuse_iov *cookediov, 602 int *ncookies, 603 uint64_t **cookiesp) 604 { 605 int err = 0; 606 int oreclen; 607 size_t freclen; 608 609 struct dirent *de; 610 struct fuse_dirent *fudge; 611 uint64_t *cookies; 612 613 cookies = *cookiesp; 614 if (bufsize < FUSE_NAME_OFFSET) 615 return -1; 616 for (;;) { 617 if (bufsize < FUSE_NAME_OFFSET) { 618 err = -1; 619 break; 620 } 621 fudge = (struct fuse_dirent *)buf; 622 freclen = FUSE_DIRENT_SIZE(fudge); 623 624 if (bufsize < freclen) { 625 /* 626 * This indicates a partial directory entry at the 627 * end of the directory data. 628 */ 629 err = -1; 630 break; 631 } 632 #ifdef ZERO_PAD_INCOMPLETE_BUFS 633 if (isbzero(buf, FUSE_NAME_OFFSET)) { 634 err = -1; 635 break; 636 } 637 #endif 638 639 if (!fudge->namelen || fudge->namelen > MAXNAMLEN) { 640 err = EINVAL; 641 break; 642 } 643 oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *) 644 &fudge->namelen); 645 646 if (oreclen > uio_resid(uio)) { 647 /* Out of space for the dir so we are done. */ 648 err = -1; 649 break; 650 } 651 fiov_adjust(cookediov, oreclen); 652 bzero(cookediov->base, oreclen); 653 654 de = (struct dirent *)cookediov->base; 655 de->d_fileno = fudge->ino; 656 de->d_off = fudge->off; 657 de->d_reclen = oreclen; 658 de->d_type = fudge->type; 659 de->d_namlen = fudge->namelen; 660 memcpy((char *)cookediov->base + sizeof(struct dirent) - 661 MAXNAMLEN - 1, 662 (char *)buf + FUSE_NAME_OFFSET, fudge->namelen); 663 dirent_terminate(de); 664 665 err = uiomove(cookediov->base, cookediov->len, uio); 666 if (err) 667 break; 668 if (cookies != NULL) { 669 if (*ncookies == 0) { 670 err = -1; 671 break; 672 } 673 *cookies = fudge->off; 674 cookies++; 675 (*ncookies)--; 676 } 677 buf = (char *)buf + freclen; 678 bufsize -= freclen; 679 uio_setoffset(uio, fudge->off); 680 } 681 *cookiesp = cookies; 682 683 return err; 684 } 685 686 /* remove */ 687 688 int 689 fuse_internal_remove(struct vnode *dvp, 690 struct vnode *vp, 691 struct componentname *cnp, 692 enum fuse_opcode op) 693 { 694 struct fuse_dispatcher fdi; 695 nlink_t nlink; 696 int err = 0; 697 698 fdisp_init(&fdi, cnp->cn_namelen + 1); 699 fdisp_make_vp(&fdi, op, dvp, curthread, cnp->cn_cred); 700 701 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); 702 ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; 703 704 err = fdisp_wait_answ(&fdi); 705 fdisp_destroy(&fdi); 706 707 if (err) 708 return (err); 709 710 /* 711 * Access the cached nlink even if the attr cached has expired. If 712 * it's inaccurate, the worst that will happen is: 713 * 1) We'll recycle the vnode even though the file has another link we 714 * don't know about, costing a bit of cpu time, or 715 * 2) We won't recycle the vnode even though all of its links are gone. 716 * It will linger around until vnlru reclaims it, costing a bit of 717 * temporary memory. 718 */ 719 nlink = VTOFUD(vp)->cached_attrs.va_nlink--; 720 721 /* 722 * Purge the parent's attribute cache because the daemon 723 * should've updated its mtime and ctime. 724 */ 725 fuse_vnode_clear_attr_cache(dvp); 726 727 /* NB: nlink could be zero if it was never cached */ 728 if (nlink <= 1 || vnode_vtype(vp) == VDIR) { 729 fuse_internal_vnode_disappear(vp); 730 } else { 731 cache_purge(vp); 732 fuse_vnode_update(vp, FN_CTIMECHANGE); 733 } 734 735 return err; 736 } 737 738 /* rename */ 739 740 int 741 fuse_internal_rename(struct vnode *fdvp, 742 struct componentname *fcnp, 743 struct vnode *tdvp, 744 struct componentname *tcnp) 745 { 746 struct fuse_dispatcher fdi; 747 struct fuse_rename_in *fri; 748 int err = 0; 749 750 fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2); 751 fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, curthread, tcnp->cn_cred); 752 753 fri = fdi.indata; 754 fri->newdir = VTOI(tdvp); 755 memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr, 756 fcnp->cn_namelen); 757 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0'; 758 memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1, 759 tcnp->cn_nameptr, tcnp->cn_namelen); 760 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen + 761 tcnp->cn_namelen + 1] = '\0'; 762 763 err = fdisp_wait_answ(&fdi); 764 fdisp_destroy(&fdi); 765 return err; 766 } 767 768 /* strategy */ 769 770 /* entity creation */ 771 772 void 773 fuse_internal_newentry_makerequest(struct mount *mp, 774 uint64_t dnid, 775 struct componentname *cnp, 776 enum fuse_opcode op, 777 void *buf, 778 size_t bufsize, 779 struct fuse_dispatcher *fdip) 780 { 781 fdip->iosize = bufsize + cnp->cn_namelen + 1; 782 783 fdisp_make(fdip, op, mp, dnid, curthread, cnp->cn_cred); 784 memcpy(fdip->indata, buf, bufsize); 785 memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen); 786 ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0'; 787 } 788 789 int 790 fuse_internal_newentry_core(struct vnode *dvp, 791 struct vnode **vpp, 792 struct componentname *cnp, 793 __enum_uint8(vtype) vtyp, 794 struct fuse_dispatcher *fdip) 795 { 796 int err = 0; 797 struct fuse_entry_out *feo; 798 struct mount *mp = vnode_mount(dvp); 799 800 if ((err = fdisp_wait_answ(fdip))) { 801 return err; 802 } 803 feo = fdip->answ; 804 805 if ((err = fuse_internal_checkentry(feo, vtyp))) { 806 return err; 807 } 808 err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp); 809 if (err) { 810 fuse_internal_forget_send(mp, curthread, cnp->cn_cred, 811 feo->nodeid, 1); 812 return err; 813 } 814 815 /* 816 * Purge the parent's attribute cache because the daemon should've 817 * updated its mtime and ctime 818 */ 819 fuse_vnode_clear_attr_cache(dvp); 820 821 fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, 822 feo->attr_valid_nsec, NULL, true); 823 824 return err; 825 } 826 827 int 828 fuse_internal_newentry(struct vnode *dvp, 829 struct vnode **vpp, 830 struct componentname *cnp, 831 enum fuse_opcode op, 832 void *buf, 833 size_t bufsize, 834 __enum_uint8(vtype) vtype) 835 { 836 int err; 837 struct fuse_dispatcher fdi; 838 struct mount *mp = vnode_mount(dvp); 839 840 fdisp_init(&fdi, 0); 841 fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf, 842 bufsize, &fdi); 843 err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi); 844 fdisp_destroy(&fdi); 845 846 return err; 847 } 848 849 /* entity destruction */ 850 851 int 852 fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio) 853 { 854 fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL, 855 ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1); 856 857 return 0; 858 } 859 860 void 861 fuse_internal_forget_send(struct mount *mp, 862 struct thread *td, 863 struct ucred *cred, 864 uint64_t nodeid, 865 uint64_t nlookup) 866 { 867 868 struct fuse_dispatcher fdi; 869 struct fuse_forget_in *ffi; 870 871 /* 872 * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu", 873 * (long long unsigned) nodeid)); 874 */ 875 876 fdisp_init(&fdi, sizeof(*ffi)); 877 fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred); 878 879 ffi = fdi.indata; 880 ffi->nlookup = nlookup; 881 882 fuse_insert_message(fdi.tick, false); 883 fdisp_destroy(&fdi); 884 } 885 886 /* Fetch the vnode's attributes from the daemon*/ 887 int 888 fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap, 889 struct ucred *cred, struct thread *td) 890 { 891 struct fuse_dispatcher fdi; 892 struct fuse_vnode_data *fvdat = VTOFUD(vp); 893 struct fuse_getattr_in *fgai; 894 struct fuse_attr_out *fao; 895 off_t old_filesize = fvdat->cached_attrs.va_size; 896 struct timespec old_atime = fvdat->cached_attrs.va_atime; 897 struct timespec old_ctime = fvdat->cached_attrs.va_ctime; 898 struct timespec old_mtime = fvdat->cached_attrs.va_mtime; 899 __enum_uint8(vtype) vtyp; 900 int err; 901 902 ASSERT_VOP_LOCKED(vp, __func__); 903 904 fdisp_init(&fdi, sizeof(*fgai)); 905 fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred); 906 fgai = fdi.indata; 907 /* 908 * We could look up a file handle and set it in fgai->fh, but that 909 * involves extra runtime work and I'm unaware of any file systems that 910 * care. 911 */ 912 fgai->getattr_flags = 0; 913 if ((err = fdisp_wait_answ(&fdi))) { 914 if (err == ENOENT) 915 fuse_internal_vnode_disappear(vp); 916 goto out; 917 } 918 919 fao = (struct fuse_attr_out *)fdi.answ; 920 vtyp = IFTOVT(fao->attr.mode); 921 if (fvdat->flag & FN_SIZECHANGE) 922 fao->attr.size = old_filesize; 923 if (fvdat->flag & FN_ATIMECHANGE) { 924 fao->attr.atime = old_atime.tv_sec; 925 fao->attr.atimensec = old_atime.tv_nsec; 926 } 927 if (fvdat->flag & FN_CTIMECHANGE) { 928 fao->attr.ctime = old_ctime.tv_sec; 929 fao->attr.ctimensec = old_ctime.tv_nsec; 930 } 931 if (fvdat->flag & FN_MTIMECHANGE) { 932 fao->attr.mtime = old_mtime.tv_sec; 933 fao->attr.mtimensec = old_mtime.tv_nsec; 934 } 935 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, 936 fao->attr_valid_nsec, vap, true); 937 if (vtyp != vnode_vtype(vp)) { 938 fuse_internal_vnode_disappear(vp); 939 err = ENOENT; 940 } 941 942 out: 943 fdisp_destroy(&fdi); 944 return err; 945 } 946 947 /* Read a vnode's attributes from cache or fetch them from the fuse daemon */ 948 int 949 fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, 950 struct thread *td) 951 { 952 struct vattr *attrs; 953 954 if ((attrs = VTOVA(vp)) != NULL) { 955 *vap = *attrs; /* struct copy */ 956 return 0; 957 } 958 959 return fuse_internal_do_getattr(vp, vap, cred, td); 960 } 961 962 void 963 fuse_internal_vnode_disappear(struct vnode *vp) 964 { 965 struct fuse_vnode_data *fvdat = VTOFUD(vp); 966 967 ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear"); 968 fvdat->flag |= FN_REVOKED; 969 cache_purge(vp); 970 } 971 972 /* fuse start/stop */ 973 974 SDT_PROBE_DEFINE2(fusefs, , internal, init_done, 975 "struct fuse_data*", "struct fuse_init_out*"); 976 int 977 fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio) 978 { 979 int err = 0; 980 struct fuse_data *data = tick->tk_data; 981 struct fuse_init_out *fiio = NULL; 982 983 if ((err = tick->tk_aw_ohead.error)) { 984 goto out; 985 } 986 if ((err = fticket_pull(tick, uio))) { 987 goto out; 988 } 989 fiio = fticket_resp(tick)->base; 990 991 data->fuse_libabi_major = fiio->major; 992 data->fuse_libabi_minor = fiio->minor; 993 if (!fuse_libabi_geq(data, 7, 4)) { 994 /* 995 * With a little work we could support servers as old as 7.1. 996 * But there would be little payoff. 997 */ 998 SDT_PROBE2(fusefs, , internal, trace, 1, 999 "userpace version too low"); 1000 err = EPROTONOSUPPORT; 1001 goto out; 1002 } 1003 1004 if (fuse_libabi_geq(data, 7, 5)) { 1005 if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) || 1006 fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) { 1007 data->max_write = fiio->max_write; 1008 if (fiio->flags & FUSE_ASYNC_READ) 1009 data->dataflags |= FSESS_ASYNC_READ; 1010 if (fiio->flags & FUSE_POSIX_LOCKS) 1011 data->dataflags |= FSESS_POSIX_LOCKS; 1012 if (fiio->flags & FUSE_EXPORT_SUPPORT) 1013 data->dataflags |= FSESS_EXPORT_SUPPORT; 1014 if (fiio->flags & FUSE_NO_OPEN_SUPPORT) 1015 data->dataflags |= FSESS_NO_OPEN_SUPPORT; 1016 if (fiio->flags & FUSE_NO_OPENDIR_SUPPORT) 1017 data->dataflags |= FSESS_NO_OPENDIR_SUPPORT; 1018 /* 1019 * Don't bother to check FUSE_BIG_WRITES, because it's 1020 * redundant with max_write 1021 */ 1022 /* 1023 * max_background and congestion_threshold are not 1024 * implemented 1025 */ 1026 } else { 1027 err = EINVAL; 1028 } 1029 } else { 1030 /* Old fixed values */ 1031 data->max_write = 4096; 1032 } 1033 1034 if (fuse_libabi_geq(data, 7, 6)) 1035 data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf; 1036 1037 if (!fuse_libabi_geq(data, 7, 7)) 1038 fsess_set_notimpl(data->mp, FUSE_INTERRUPT); 1039 1040 if (!fuse_libabi_geq(data, 7, 8)) { 1041 fsess_set_notimpl(data->mp, FUSE_BMAP); 1042 fsess_set_notimpl(data->mp, FUSE_DESTROY); 1043 } 1044 1045 if (!fuse_libabi_geq(data, 7, 19)) { 1046 fsess_set_notimpl(data->mp, FUSE_FALLOCATE); 1047 } 1048 1049 if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 && 1050 fiio->time_gran <= 1000000000) 1051 data->time_gran = fiio->time_gran; 1052 else 1053 data->time_gran = 1; 1054 1055 if (!fuse_libabi_geq(data, 7, 23)) 1056 data->cache_mode = fuse_data_cache_mode; 1057 else if (fiio->flags & FUSE_WRITEBACK_CACHE) 1058 data->cache_mode = FUSE_CACHE_WB; 1059 else 1060 data->cache_mode = FUSE_CACHE_WT; 1061 1062 if (!fuse_libabi_geq(data, 7, 24)) 1063 fsess_set_notimpl(data->mp, FUSE_LSEEK); 1064 1065 if (!fuse_libabi_geq(data, 7, 28)) 1066 fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE); 1067 1068 out: 1069 if (err) { 1070 fdata_set_dead(data); 1071 } 1072 FUSE_LOCK(); 1073 data->dataflags |= FSESS_INITED; 1074 SDT_PROBE2(fusefs, , internal, init_done, data, fiio); 1075 wakeup(&data->ticketer); 1076 FUSE_UNLOCK(); 1077 1078 return 0; 1079 } 1080 1081 void 1082 fuse_internal_send_init(struct fuse_data *data, struct thread *td) 1083 { 1084 struct fuse_init_in *fiii; 1085 struct fuse_dispatcher fdi; 1086 1087 fdisp_init(&fdi, sizeof(*fiii)); 1088 fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL); 1089 fiii = fdi.indata; 1090 fiii->major = FUSE_KERNEL_VERSION; 1091 fiii->minor = FUSE_KERNEL_MINOR_VERSION; 1092 /* 1093 * fusefs currently reads ahead no more than one cache block at a time. 1094 * See fuse_read_biobackend 1095 */ 1096 fiii->max_readahead = maxbcachebuf; 1097 /* 1098 * Unsupported features: 1099 * FUSE_FILE_OPS: No known FUSE server or client supports it 1100 * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it 1101 * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even 1102 * when default ACLs are in use. 1103 * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD 1104 * doesn't have splice(2). 1105 * FUSE_FLOCK_LOCKS: not yet implemented 1106 * FUSE_HAS_IOCTL_DIR: not yet implemented 1107 * FUSE_AUTO_INVAL_DATA: not yet implemented 1108 * FUSE_DO_READDIRPLUS: not yet implemented 1109 * FUSE_READDIRPLUS_AUTO: not yet implemented 1110 * FUSE_ASYNC_DIO: not yet implemented 1111 * FUSE_PARALLEL_DIROPS: not yet implemented 1112 * FUSE_HANDLE_KILLPRIV: not yet implemented 1113 * FUSE_POSIX_ACL: not yet implemented 1114 * FUSE_ABORT_ERROR: not yet implemented 1115 * FUSE_CACHE_SYMLINKS: not yet implemented 1116 * FUSE_MAX_PAGES: not yet implemented 1117 */ 1118 fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT 1119 | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE 1120 | FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT; 1121 1122 fuse_insert_callback(fdi.tick, fuse_internal_init_callback); 1123 fuse_insert_message(fdi.tick, false); 1124 fdisp_destroy(&fdi); 1125 } 1126 1127 /* 1128 * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL, 1129 * send the request with root credentials 1130 */ 1131 int fuse_internal_setattr(struct vnode *vp, struct vattr *vap, 1132 struct thread *td, struct ucred *cred) 1133 { 1134 struct fuse_vnode_data *fvdat; 1135 struct fuse_dispatcher fdi; 1136 struct fuse_setattr_in *fsai; 1137 struct mount *mp; 1138 pid_t pid = td->td_proc->p_pid; 1139 struct fuse_data *data; 1140 int err = 0; 1141 __enum_uint8(vtype) vtyp; 1142 1143 ASSERT_VOP_ELOCKED(vp, __func__); 1144 1145 mp = vnode_mount(vp); 1146 fvdat = VTOFUD(vp); 1147 data = fuse_get_mpdata(mp); 1148 1149 fdisp_init(&fdi, sizeof(*fsai)); 1150 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); 1151 if (!cred) { 1152 fdi.finh->uid = 0; 1153 fdi.finh->gid = 0; 1154 } 1155 fsai = fdi.indata; 1156 fsai->valid = 0; 1157 1158 if (vap->va_uid != (uid_t)VNOVAL) { 1159 fsai->uid = vap->va_uid; 1160 fsai->valid |= FATTR_UID; 1161 } 1162 if (vap->va_gid != (gid_t)VNOVAL) { 1163 fsai->gid = vap->va_gid; 1164 fsai->valid |= FATTR_GID; 1165 } 1166 if (vap->va_size != VNOVAL) { 1167 struct fuse_filehandle *fufh = NULL; 1168 1169 /*Truncate to a new value. */ 1170 fsai->size = vap->va_size; 1171 fsai->valid |= FATTR_SIZE; 1172 1173 fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); 1174 if (fufh) { 1175 fsai->fh = fufh->fh_id; 1176 fsai->valid |= FATTR_FH; 1177 } 1178 VTOFUD(vp)->flag &= ~FN_SIZECHANGE; 1179 } 1180 if (vap->va_atime.tv_sec != VNOVAL) { 1181 fsai->atime = vap->va_atime.tv_sec; 1182 fsai->atimensec = vap->va_atime.tv_nsec; 1183 fsai->valid |= FATTR_ATIME; 1184 if (vap->va_vaflags & VA_UTIMES_NULL) 1185 fsai->valid |= FATTR_ATIME_NOW; 1186 } else if (fvdat->flag & FN_ATIMECHANGE) { 1187 fsai->atime = fvdat->cached_attrs.va_atime.tv_sec; 1188 fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec; 1189 fsai->valid |= FATTR_ATIME; 1190 } 1191 if (vap->va_mtime.tv_sec != VNOVAL) { 1192 fsai->mtime = vap->va_mtime.tv_sec; 1193 fsai->mtimensec = vap->va_mtime.tv_nsec; 1194 fsai->valid |= FATTR_MTIME; 1195 if (vap->va_vaflags & VA_UTIMES_NULL) 1196 fsai->valid |= FATTR_MTIME_NOW; 1197 } else if (fvdat->flag & FN_MTIMECHANGE) { 1198 fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec; 1199 fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec; 1200 fsai->valid |= FATTR_MTIME; 1201 } 1202 if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) { 1203 fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec; 1204 fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec; 1205 fsai->valid |= FATTR_CTIME; 1206 } 1207 if (vap->va_mode != (mode_t)VNOVAL) { 1208 fsai->mode = vap->va_mode & ALLPERMS; 1209 fsai->valid |= FATTR_MODE; 1210 } 1211 if (!fsai->valid) { 1212 goto out; 1213 } 1214 1215 if ((err = fdisp_wait_answ(&fdi))) 1216 goto out; 1217 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode); 1218 1219 if (vnode_vtype(vp) != vtyp) { 1220 if (vnode_vtype(vp) == VNON && vtyp != VNON) { 1221 SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! " 1222 "vnode_vtype is VNON and vtype isn't."); 1223 } else { 1224 /* 1225 * STALE vnode, ditch 1226 * 1227 * The vnode has changed its type "behind our back". 1228 * This probably means that the file got deleted and 1229 * recreated on the server, with the same inode. 1230 * There's nothing really we can do, so let us just 1231 * return ENOENT. After all, the entry must not have 1232 * existed in the recent past. If the user tries 1233 * again, it will work. 1234 */ 1235 fuse_internal_vnode_disappear(vp); 1236 err = ENOENT; 1237 } 1238 } 1239 if (err == 0) { 1240 struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ; 1241 fuse_vnode_undirty_cached_timestamps(vp, true); 1242 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, 1243 fao->attr_valid_nsec, NULL, false); 1244 getnanouptime(&fvdat->last_local_modify); 1245 } 1246 1247 out: 1248 fdisp_destroy(&fdi); 1249 return err; 1250 } 1251 1252 /* 1253 * FreeBSD clears the SUID and SGID bits on any write by a non-root user. 1254 */ 1255 void 1256 fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred, 1257 struct thread *td) 1258 { 1259 struct fuse_data *data; 1260 struct mount *mp; 1261 struct vattr va; 1262 int dataflags; 1263 1264 mp = vnode_mount(vp); 1265 data = fuse_get_mpdata(mp); 1266 dataflags = data->dataflags; 1267 1268 ASSERT_VOP_LOCKED(vp, __func__); 1269 1270 if (dataflags & FSESS_DEFAULT_PERMISSIONS) { 1271 if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) { 1272 fuse_internal_getattr(vp, &va, cred, td); 1273 if (va.va_mode & (S_ISUID | S_ISGID)) { 1274 mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID); 1275 /* Clear all vattr fields except mode */ 1276 vattr_null(&va); 1277 va.va_mode = mode; 1278 1279 /* 1280 * Ignore fuse_internal_setattr's return value, 1281 * because at this point the write operation has 1282 * already succeeded and we don't want to return 1283 * failing status for that. 1284 */ 1285 (void)fuse_internal_setattr(vp, &va, td, NULL); 1286 } 1287 } 1288 } 1289 } 1290 1291 #ifdef ZERO_PAD_INCOMPLETE_BUFS 1292 static int 1293 isbzero(void *buf, size_t len) 1294 { 1295 int i; 1296 1297 for (i = 0; i < len; i++) { 1298 if (((char *)buf)[i]) 1299 return (0); 1300 } 1301 1302 return (1); 1303 } 1304 1305 #endif 1306 1307 void 1308 fuse_internal_init(void) 1309 { 1310 fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK); 1311 fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK); 1312 } 1313 1314 void 1315 fuse_internal_destroy(void) 1316 { 1317 counter_u64_free(fuse_lookup_cache_hits); 1318 counter_u64_free(fuse_lookup_cache_misses); 1319 } 1320