/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED) {
					vm_page_lock(m);
					vm_page_activate(m);
					vm_page_unlock(m);
				} else {
					vm_page_lock(m);
					vm_page_deactivate(m);
					vm_page_unlock(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/no readaheads. Just read data into the user buffer */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */

			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */

			n = 0;
			if (on < bcount)
				n = min((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server. The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuch!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed: the first is a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_mount->mnt_stat.f_iosize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag;
		int needrestart = 0;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;

			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = NFS_PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	mtx_lock(&ncl_iod_mutex);
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
		mtx_unlock(&ncl_iod_mutex);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == NFS_PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);

		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(newnfsstats.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					int left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && (vp->v_vflag & VV_TEXT)) {
				mtx_lock(&np->n_mtx);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					mtx_unlock(&np->n_mtx);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					mtx_unlock(&np->n_mtx);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(newnfsstats.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend - bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		mtx_lock(&np->n_mtx);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		mtx_unlock(&np->n_mtx);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(newnfsstats.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */

			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused. This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set. One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 *
			 * The logic below breaks up errors into recoverable and
			 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
			 * and keep the buffer around for potential write retries.
			 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
			 * and save the error in the nfsnode. This is less than ideal
			 * but necessary. Keeping such buffers around could potentially
			 * cause buffer exhaustion eventually (they can never be written
			 * out, so they will constantly get re-dirtied).  It also causes
			 * all sorts of vfs panics.  For non-recoverable write errors,
			 * also invalidate the attrcache, so we'll be forced to go over
			 * the wire for this object, returning an error to user on next
			 * call (most of the time).
			 */
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					mtx_lock(&np->n_mtx);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					mtx_unlock(&np->n_mtx);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return EINTR;
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}