/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);
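
/*
 * Buffer cache I/O routines for the new NFS client: VM getpages/putpages
 * for mapped files, bio-based read and write paths, direct (uncached) I/O,
 * hand-off of buffers to the nfsiod daemons, and buffer invalidation and
 * truncation helpers.
 */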

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED) {
					vm_page_lock(m);
					vm_page_activate(m);
					vm_page_unlock(m);
				} else {
					vm_page_lock(m);
					vm_page_deactivate(m);
					vm_page_unlock(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/ no readaheads. Just read data into the user buffer */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */
			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuch!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		};

		if (n > 0) {
			error = uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed.  The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
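/*
 * For the async case below, ownership of the malloc'd uio/iovec and the
 * staging data passes to the nfsiod via bp->b_caller1; they are freed in
 * ncl_doio_directwrite() once the write RPC completes.
 */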
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * as an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = NFS_PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	mtx_lock(&ncl_iod_mutex);
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
		mtx_unlock(&ncl_iod_mutex);
		return(EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == NFS_PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);
		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t	iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(newnfsstats.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					ssize_t left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && (vp->v_vflag & VV_TEXT)) {
				mtx_lock(&np->n_mtx);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					mtx_unlock(&np->n_mtx);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					mtx_unlock(&np->n_mtx);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(newnfsstats.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		};
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		mtx_lock(&np->n_mtx);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		mtx_unlock(&np->n_mtx);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(newnfsstats.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */

			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused. This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set. One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 *
			 * The logic below breaks up errors into recoverable and
			 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
			 * and keep the buffer around for potential write retries.
			 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
			 * and save the error in the nfsnode. This is less than ideal
			 * but necessary. Keeping such buffers around could potentially
			 * cause buffer exhaustion eventually (they can never be written
			 * out, so they will constantly be re-dirtied). It also causes
			 * all sorts of vfs panics. For non-recoverable write errors,
			 * also invalidate the attrcache, so we'll be forced to go over
			 * the wire for this object, returning an error to user on next
			 * call (most of the time).
			 */
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					mtx_lock(&np->n_mtx);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
					mtx_unlock(&np->n_mtx);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
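/*
 * The buffer straddling the truncation point is handled below by fetching
 * it at its truncated size and clamping b_dirtyoff/b_dirtyend, so no dirty
 * data beyond the new EOF survives.
 */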
int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return EINTR;
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}