/*
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include "fuse.h"
#include "fuse_file.h"
#include "fuse_node.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"
#include "fuse_io.h"

#define FUSE_DEBUG_MODULE IO
#include "fuse_debug.h"

static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag);

int
fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred)
{
	struct fuse_filehandle *fufh;
	int err, directio;

	MPASS(vp->v_type == VREG || vp->v_type == VDIR);

	err = fuse_filehandle_getrw(vp,
	    (uio->uio_rw == UIO_READ) ? FUFH_RDONLY : FUFH_WRONLY, &fufh);
	if (err) {
		printf("FUSE: io dispatch: filehandles are closed\n");
		return (err);
	}

	/*
	 * Ideally, when the daemon asks for direct I/O at open time, the
	 * standard file flag would be set accordingly; that would merely
	 * change the default mode, which could later be changed via
	 * fcntl(2).  But this doesn't work: the O_DIRECT flag gets cleared
	 * somewhere along the way.  So to make any use of the FUSE
	 * direct_io option, we hardwire it into the file's private data
	 * (similarly to Linux, by the way).
	 */
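	/*
	 * For illustration only (daemon side, not this driver): with the
	 * high-level libfuse API, a daemon opts into direct I/O per open
	 * file by setting fi->direct_io in its open handler, which reaches
	 * us as the FOPEN_DIRECT_IO flag in the open reply.  A hypothetical
	 * handler:
	 *
	 *	static int example_open(const char *path,
	 *	    struct fuse_file_info *fi)
	 *	{
	 *		fi->direct_io = 1;	// becomes FOPEN_DIRECT_IO
	 *		return (0);
	 *	}
	 */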
	directio = (ioflag & IO_DIRECT) ||
	    !fsess_opt_datacache(vnode_mount(vp));

	switch (uio->uio_rw) {
	case UIO_READ:
		if (directio) {
			FS_DEBUG("direct read of vnode %ju via file handle %ju\n",
			    (uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
			err = fuse_read_directbackend(vp, uio, cred, fufh);
		} else {
			FS_DEBUG("buffered read of vnode %ju\n",
			    (uintmax_t)VTOILLU(vp));
			err = fuse_read_biobackend(vp, uio, cred, fufh);
		}
		break;
	case UIO_WRITE:
		if (directio) {
			FS_DEBUG("direct write of vnode %ju via file handle %ju\n",
			    (uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
			err = fuse_write_directbackend(vp, uio, cred, fufh);
		} else {
			FS_DEBUG("buffered write of vnode %ju\n",
			    (uintmax_t)VTOILLU(vp));
			err = fuse_write_biobackend(vp, uio, cred, fufh, ioflag);
		}
		break;
	default:
		panic("uninterpreted mode passed to fuse_io_dispatch");
	}

	return (err);
}

static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
	struct buf *bp;
	daddr_t lbn;
	int bcount;
	int err = 0, n = 0, on = 0;
	off_t filesize;

	const int biosize = fuse_iosize(vp);

	FS_DEBUG("resid=%zx offset=%jx fsize=%jx\n",
	    uio->uio_resid, uio->uio_offset, VTOFUD(vp)->filesize);

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	bcount = MIN(MAXBSIZE, biosize);
	filesize = VTOFUD(vp)->filesize;

	do {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		FS_DEBUG2G("biosize %d, lbn %d, on %d\n", biosize, (int)lbn, on);

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition, we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */
		if ((off_t)lbn * biosize >= filesize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > filesize) {
			bcount = filesize - (off_t)lbn * biosize;
		}
		bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

		if (!bp)
			return (EINTR);

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			err = fuse_io_strategy(vp, bp);
			if (err) {
				brelse(bp);
				return (err);
			}
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */
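		/*
		 * Note: uiomove(9) copies between the kernel buffer and the
		 * uio target (user or kernel space, as uio_segflg dictates)
		 * and, as a side effect, advances uio_offset and shrinks
		 * uio_resid, which is what eventually terminates this loop.
		 */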
		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);
		if (n > 0) {
			FS_DEBUG2G("feeding buffeater with %d bytes of buffer %p,"
			    " saying %d was asked for\n",
			    n, bp->b_data + on, n + (int)bp->b_resid);
			err = uiomove(bp->b_data + on, n, uio);
		}
		brelse(bp);
		FS_DEBUG2G("end of turn, err %d, uio->uio_resid %zd, n %d\n",
		    err, uio->uio_resid, n);
	} while (err == 0 && uio->uio_resid > 0 && n > 0);

	return (err);
}

static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
	struct fuse_dispatcher fdi;
	struct fuse_read_in *fri;
	int err = 0;

	if (uio->uio_resid == 0)
		return (0);

	fdisp_init(&fdi, 0);

	/*
	 * XXX In the "normal" case we use an intermediate kernel buffer for
	 * transmitting data from the daemon's context to ours.  Eventually,
	 * we should get rid of this.  Anyway, if the target uio lives in
	 * sysspace (we are called from pageops), and the input data doesn't
	 * need kernel-side processing (we are not called from readdir), we
	 * could already invoke an optimized, "peer-to-peer" I/O routine.
	 */
	while (uio->uio_resid > 0) {
		fdi.iosize = sizeof(*fri);
		fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred);
		fri = fdi.indata;
		fri->fh = fufh->fh_id;
		fri->offset = uio->uio_offset;
		fri->size = MIN(uio->uio_resid,
		    fuse_get_mpdata(vp->v_mount)->max_read);

		FS_DEBUG2G("fri->fh %ju, fri->offset %ju, fri->size %ju\n",
		    (uintmax_t)fri->fh, (uintmax_t)fri->offset,
		    (uintmax_t)fri->size);

		if ((err = fdisp_wait_answ(&fdi)))
			goto out;

		FS_DEBUG2G("complete: requested fri.size=%d, got iosize=%zd; "
		    "resid=%zd offset=%ju\n",
		    fri->size, fdi.iosize, uio->uio_resid,
		    (uintmax_t)uio->uio_offset);

		/*
		 * Copy out what we got; a reply shorter than requested
		 * (fdi.iosize < fri->size) means EOF on the daemon's side,
		 * so stop then.
		 */
		if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio)))
			break;
		if (fdi.iosize < fri->size)
			break;
	}

out:
	fdisp_destroy(&fdi);
	return (err);
}

static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_write_in *fwi;
	struct fuse_dispatcher fdi;
	size_t chunksize;
	int diff;
	int err = 0;

	if (!uio->uio_resid)
		return (0);

	fdisp_init(&fdi, 0);

	while (uio->uio_resid > 0) {
		chunksize = MIN(uio->uio_resid,
		    fuse_get_mpdata(vp->v_mount)->max_write);

		fdi.iosize = sizeof(*fwi) + chunksize;
		fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

		fwi = fdi.indata;
		fwi->fh = fufh->fh_id;
		fwi->offset = uio->uio_offset;
		fwi->size = chunksize;

		if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
		    chunksize, uio)))
			break;

		if ((err = fdisp_wait_answ(&fdi)))
			break;

		/*
		 * If the daemon accepted fewer bytes than we sent, rewind
		 * the uio by the difference so the unwritten tail is
		 * retried on the next iteration.
		 */
		diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
		if (diff < 0) {
			err = EINVAL;
			break;
		}
		uio->uio_resid += diff;
		uio->uio_offset -= diff;
		if (uio->uio_offset > fvdat->filesize)
			fuse_vnode_setsize(vp, cred, uio->uio_offset);
	}

	fdisp_destroy(&fdi);

	return (err);
}

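/*
 * Buffered write path.  Writes are staged in buffer-cache blocks of
 * fuse_iosize(vp) bytes and pushed to the daemon by the strategy routine.
 * The logic below is modeled on the NFS client's write path (hence the
 * nfsnode references and NFS-flavored commit machinery in the comments).
 */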
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct buf *bp;
	daddr_t lbn;
	int bcount;
	int n, on, err = 0;

	const int biosize = fuse_iosize(vp);

	KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode"));
	FS_DEBUG("resid=%zx offset=%jx fsize=%jx\n",
	    uio->uio_resid, uio->uio_offset, fvdat->filesize);
	if (vp->v_type != VREG)
		return (EIO);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return err.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	do {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);

		FS_DEBUG2G("lbn %ju, on %d, n %d, uio offset %ju, uio resid %zd\n",
		    (uintmax_t)lbn, on, n,
		    (uintmax_t)uio->uio_offset, uio->uio_resid);

again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		if (uio->uio_offset == fvdat->filesize && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

			if (bp != NULL) {
				long save;

				err = fuse_vnode_setsize(vp, cred,
				    uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
				if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
					bcount = biosize;
				else
					bcount = fvdat->filesize -
					    (off_t)lbn * biosize;
			}
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
			if (bp && uio->uio_offset + n > fvdat->filesize) {
				err = fuse_vnode_setsize(vp, cred,
				    uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
			}
		}

		if (!bp) {
			err = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */
		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			fuse_io_strategy(vp, bp);
			if ((err = bp->b_error)) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			FS_DEBUG("FUSE append race @%lx:%d\n",
			    (long)bp->b_blkno * biosize,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update b_dirtyoff and b_dirtyend;
		 * otherwise force a write RPC of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't, because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately, so there isn't much
		 * advantage to it except perhaps a bit of asynchrony.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			/*
			 * Yes, we mean it.  Write out everything to "storage"
			 * immediately, without hesitation.  (Apart from other
			 * reasons: the only way to know whether a write is
			 * valid is to actually write it out.)
			 */
			bwrite(bp);
			if (bp->b_error == EINTR) {
				err = EINTR;
				break;
			}
			goto again;
		}
		err = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit RPC, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (err) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = err;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
				bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}
		err = bwrite(bp);
		if (err)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
		fuse_vnode_savesize(vp, cred);

	return (err);
}

int
fuse_io_strategy(struct vnode *vp, struct buf *bp)
{
	struct fuse_filehandle *fufh;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct ucred *cred;
	struct uio *uiop;
	struct uio uio;
	struct iovec io;
	int error = 0;

	const int biosize = fuse_iosize(vp);

	MPASS(vp->v_type == VREG || vp->v_type == VDIR);
	MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE);
	FS_DEBUG("inode=%ju offset=%jd resid=%ld\n",
	    (uintmax_t)VTOI(vp), (intmax_t)(((off_t)bp->b_blkno) * biosize),
	    bp->b_bcount);

	error = fuse_filehandle_getrw(vp,
	    (bp->b_iocmd == BIO_READ) ? FUFH_RDONLY : FUFH_WRONLY, &fufh);
	if (error) {
		printf("FUSE: strategy: filehandles are closed\n");
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
		return (error);
	}
	cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred;

	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = curthread;

	/*
	 * Clear BIO_ERROR and B_INVAL state prior to initiating the I/O.
	 * We do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE),
	    ("fuse_io_strategy: bp %p already marked done", bp));
	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		uiop->uio_offset = ((off_t)bp->b_blkno) * biosize;
		error = fuse_read_directbackend(vp, uiop, cred, fufh);

		if ((!error && uiop->uio_resid) ||
		    (fsess_opt_brokenio(vnode_mount(vp)) && error == EIO &&
		    uiop->uio_offset < fvdat->filesize && fvdat->filesize > 0 &&
		    uiop->uio_offset >= fvdat->cached_attrs.va_size)) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
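			/*
			 * The second arm of the condition above is the
			 * "brokenio" mount-option workaround: some daemons
			 * return a spurious EIO for reads past their idea
			 * of EOF.  If such an EIO falls within the file size
			 * we know locally, we log it, squash the error, and
			 * zero-fill just as for an ordinary short read.
			 */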
			int nread = bp->b_bcount - uiop->uio_resid;
			int left = uiop->uio_resid;

			if (error != 0) {
				printf("FUSE: Fix broken io: offset %ju, "
				    "resid %zd, file size %ju/%ju\n",
				    (uintmax_t)uiop->uio_offset,
				    uiop->uio_resid,
				    (uintmax_t)fvdat->filesize,
				    (uintmax_t)fvdat->cached_attrs.va_size);
				error = 0;
			}
			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit.
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			FS_DEBUG("write: B_NEEDCOMMIT flags set\n");
		}

		/*
		 * Set up for the actual write.
		 */
		if ((off_t)bp->b_blkno * biosize + bp->b_dirtyend >
		    fvdat->filesize)
			bp->b_dirtyend = fvdat->filesize -
			    (off_t)bp->b_blkno * biosize;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * biosize
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;

			error = fuse_write_directbackend(vp, uiop, cred, fufh);

			if (error == EINTR || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {

				bp->b_flags &= ~(B_INVAL | B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return (error);
}

int
fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td)
{
	struct vop_fsync_args a = {
		.a_vp = vp,
		.a_waitfor = waitfor,
		.a_td = td,
	};

	return (vop_stdfsync(&a));
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
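/*
 * The FN_FLUSHINPROG/FN_FLUSHWANT pair below implements a small
 * hand-rolled flush lock: FN_FLUSHINPROG marks a flush in progress and
 * serializes flushers, while FN_FLUSHWANT records that someone is
 * sleeping on the flag word and should be woken with wakeup() once the
 * flush completes or backs out.
 */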
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return (0);

	ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

	while (fvdat->flag & FN_FLUSHINPROG) {
		struct proc *p = td->td_proc;

		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
			return (EIO);
		fvdat->flag |= FN_FLUSHWANT;
		tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
		error = 0;
		if (p != NULL) {
			PROC_LOCK(p);
			if (SIGNOTEMPTY(p->p_siglist) ||
			    SIGNOTEMPTY(td->td_siglist))
				error = EINTR;
			PROC_UNLOCK(p);
		}
		if (error == EINTR)
			return (EINTR);
	}
	fvdat->flag |= FN_FLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			fvdat->flag &= ~FN_FLUSHINPROG;
			if (fvdat->flag & FN_FLUSHWANT) {
				fvdat->flag &= ~FN_FLUSHWANT;
				wakeup(&fvdat->flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	fvdat->flag &= ~FN_FLUSHINPROG;
	if (fvdat->flag & FN_FLUSHWANT) {
		fvdat->flag &= ~FN_FLUSHWANT;
		wakeup(&fvdat->flag);
	}
	return (error);
}