/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstatsv1 nfsstatsv1;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

SYSCTL_DECL(_vfs_nfs);
static int use_buf_pager = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN,
    &use_buf_pager, 0,
    "Use buffer pager instead of direct readrpc call");

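/*
 * Helper callbacks handed to vfs_bio_getpages() when use_buf_pager is
 * enabled: translate a byte offset into a logical block number, and report
 * the usable size of a block, clamped so that it does not extend past the
 * current file size.
 */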
static daddr_t
ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

    return (off / vp->v_bufobj.bo_bsize);
}

static int
ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn)
{
    struct nfsnode *np;
    u_quad_t nsize;
    int biosize, bcount;

    np = VTONFS(vp);
    mtx_lock(&np->n_mtx);
    nsize = np->n_size;
    mtx_unlock(&np->n_mtx);

    biosize = vp->v_bufobj.bo_bsize;
    bcount = biosize;
    if ((off_t)lbn * biosize >= nsize)
        bcount = 0;
    else if ((off_t)(lbn + 1) * biosize > nsize)
        bcount = nsize - (off_t)lbn * biosize;
    return (bcount);
}

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
    int i, error, nextoff, size, toff, count, npages;
    struct uio uio;
    struct iovec iov;
    vm_offset_t kva;
    struct buf *bp;
    struct vnode *vp;
    struct thread *td;
    struct ucred *cred;
    struct nfsmount *nmp;
    vm_object_t object;
    vm_page_t *pages;
    struct nfsnode *np;

    vp = ap->a_vp;
    np = VTONFS(vp);
    td = curthread;
    cred = curthread->td_ucred;
    nmp = VFSTONFS(vp->v_mount);
    pages = ap->a_m;
    npages = ap->a_count;

    if ((object = vp->v_object) == NULL) {
        printf("ncl_getpages: called with non-merged cache vnode\n");
        return (VM_PAGER_ERROR);
    }

    if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
        mtx_lock(&np->n_mtx);
        if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
            mtx_unlock(&np->n_mtx);
            printf("ncl_getpages: called on non-cacheable vnode\n");
            return (VM_PAGER_ERROR);
        } else
            mtx_unlock(&np->n_mtx);
    }

    mtx_lock(&nmp->nm_mtx);
    if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
        (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
        mtx_unlock(&nmp->nm_mtx);
        /* We'll never get here for v4, because we always have fsinfo */
        (void)ncl_fsinfo(nmp, vp, cred, td);
    } else
        mtx_unlock(&nmp->nm_mtx);

    if (use_buf_pager)
        return (vfs_bio_getpages(vp, pages, npages, ap->a_rbehind,
            ap->a_rahead, ncl_gbp_getblkno, ncl_gbp_getblksz));

    /*
     * If the requested page is partially valid, just return it and
     * allow the pager to zero-out the blanks.  Partially valid pages
     * can only occur at the file EOF.
     *
     * XXXGL: is that true for NFS, where short read can occur???
     */
    VM_OBJECT_WLOCK(object);
    if (pages[npages - 1]->valid != 0 && --npages == 0)
        goto out;
    VM_OBJECT_WUNLOCK(object);

    /*
     * We use only the kva address for the buffer, but this is extremely
     * convenient and fast.
     */
    bp = getpbuf(&ncl_pbuf_freecnt);

    kva = (vm_offset_t) bp->b_data;
    pmap_qenter(kva, pages, npages);
    VM_CNT_INC(v_vnodein);
    VM_CNT_ADD(v_vnodepgsin, npages);

    count = npages << PAGE_SHIFT;
    iov.iov_base = (caddr_t) kva;
    iov.iov_len = count;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
    uio.uio_resid = count;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_READ;
    uio.uio_td = td;

    error = ncl_readrpc(vp, &uio, cred);
    pmap_qremove(kva, npages);

    relpbuf(bp, &ncl_pbuf_freecnt);

    if (error && (uio.uio_resid == count)) {
        printf("ncl_getpages: error %d\n", error);
        return (VM_PAGER_ERROR);
    }

    /*
     * Calculate the number of bytes read and validate only that number
     * of bytes.  Note that due to pending writes, size may be 0.  This
     * does not mean that the remaining data is invalid!
     */

    size = count - uio.uio_resid;
    VM_OBJECT_WLOCK(object);
    for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
        vm_page_t m;
        nextoff = toff + PAGE_SIZE;
        m = pages[i];

        if (nextoff <= size) {
            /*
             * Read operation filled an entire page
             */
            m->valid = VM_PAGE_BITS_ALL;
            KASSERT(m->dirty == 0,
                ("nfs_getpages: page %p is dirty", m));
        } else if (size > toff) {
            /*
             * Read operation filled a partial page.
             */
            m->valid = 0;
            vm_page_set_valid_range(m, 0, size - toff);
            KASSERT(m->dirty == 0,
                ("nfs_getpages: page %p is dirty", m));
        } else {
            /*
             * Read operation was short.  If no error
             * occurred we may have hit a zero-fill
             * section.  We leave valid set to 0, and page
             * is freed by vm_page_readahead_finish() if
             * its index is not equal to requested, or
             * page is zeroed and set valid by
             * vm_pager_get_pages() for requested page.
             */
            ;
        }
    }
out:
    VM_OBJECT_WUNLOCK(object);
    if (ap->a_rbehind)
        *ap->a_rbehind = 0;
    if (ap->a_rahead)
        *ap->a_rahead = 0;
    return (VM_PAGER_OK);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
    struct uio uio;
    struct iovec iov;
    int i, error, npages, count;
    off_t offset;
    int *rtvals;
    struct vnode *vp;
    struct thread *td;
    struct ucred *cred;
    struct nfsmount *nmp;
    struct nfsnode *np;
    vm_page_t *pages;

    vp = ap->a_vp;
    np = VTONFS(vp);
    td = curthread;				/* XXX */
    /* Set the cred to n_writecred for the write rpcs. */
    if (np->n_writecred != NULL)
        cred = crhold(np->n_writecred);
    else
        cred = crhold(curthread->td_ucred);	/* XXX */
    nmp = VFSTONFS(vp->v_mount);
    pages = ap->a_m;
    count = ap->a_count;
    rtvals = ap->a_rtvals;
    npages = btoc(count);
    offset = IDX_TO_OFF(pages[0]->pindex);

    mtx_lock(&nmp->nm_mtx);
    if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
        (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
        mtx_unlock(&nmp->nm_mtx);
        (void)ncl_fsinfo(nmp, vp, cred, td);
    } else
        mtx_unlock(&nmp->nm_mtx);

    mtx_lock(&np->n_mtx);
    if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
        (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
        mtx_unlock(&np->n_mtx);
        printf("ncl_putpages: called on non-cacheable vnode\n");
        mtx_lock(&np->n_mtx);
    }
    /*
     * When putting pages, do not extend file past EOF.
     */
    if (offset + count > np->n_size) {
        count = np->n_size - offset;
        if (count < 0)
            count = 0;
    }
    mtx_unlock(&np->n_mtx);

    for (i = 0; i < npages; i++)
        rtvals[i] = VM_PAGER_ERROR;

    VM_CNT_INC(v_vnodeout);
    VM_CNT_ADD(v_vnodepgsout, count);

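    /*
     * Write the pages out through VOP_WRITE() with a UIO_NOCOPY uio.
     * The data already resides in the vnode's VM object pages (which
     * also back the VMIO buffers), so the write path dirties and pushes
     * the corresponding buffers without copying the data again.
     */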
    iov.iov_base = unmapped_buf;
    iov.iov_len = count;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = offset;
    uio.uio_resid = count;
    uio.uio_segflg = UIO_NOCOPY;
    uio.uio_rw = UIO_WRITE;
    uio.uio_td = td;

    error = VOP_WRITE(vp, &uio, vnode_pager_putpages_ioflags(ap->a_sync),
        cred);
    crfree(cred);

    if (error == 0 || !nfs_keep_dirty_on_error) {
        vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
            np->n_size - offset, npages * PAGE_SIZE);
    }
    return (rtvals[0]);
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
    int error = 0;
    struct vattr vattr;
    struct nfsnode *np = VTONFS(vp);
    int old_lock;

    /*
     * Grab the exclusive lock before checking whether the cache is
     * consistent.
     * XXX - We can make this cheaper later (by acquiring cheaper locks).
     * But for now, this suffices.
     */
    old_lock = ncl_upgrade_vnlock(vp);
    if (vp->v_iflag & VI_DOOMED) {
        error = EBADF;
        goto out;
    }

    mtx_lock(&np->n_mtx);
    if (np->n_flag & NMODIFIED) {
        mtx_unlock(&np->n_mtx);
        if (vp->v_type != VREG) {
            if (vp->v_type != VDIR)
                panic("nfs: bioread, not dir");
            ncl_invaldir(vp);
            error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
            if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
                error = EBADF;
            if (error != 0)
                goto out;
        }
        np->n_attrstamp = 0;
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
        error = VOP_GETATTR(vp, &vattr, cred);
        if (error)
            goto out;
        mtx_lock(&np->n_mtx);
        np->n_mtime = vattr.va_mtime;
        mtx_unlock(&np->n_mtx);
    } else {
        mtx_unlock(&np->n_mtx);
        error = VOP_GETATTR(vp, &vattr, cred);
        if (error)
            return (error);
        mtx_lock(&np->n_mtx);
        if ((np->n_flag & NSIZECHANGED)
            || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
            mtx_unlock(&np->n_mtx);
            if (vp->v_type == VDIR)
                ncl_invaldir(vp);
            error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
            if (error == 0 && (vp->v_iflag &
                VI_DOOMED) != 0)
                error = EBADF;
            if (error != 0)
                goto out;
            mtx_lock(&np->n_mtx);
            np->n_mtime = vattr.va_mtime;
            np->n_flag &= ~NSIZECHANGED;
        }
        mtx_unlock(&np->n_mtx);
    }
out:
    ncl_downgrade_vnlock(vp, old_lock);
    return (error);
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
    struct nfsnode *np = VTONFS(vp);
    int biosize, i;
    struct buf *bp, *rabp;
    struct thread *td;
    struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    daddr_t lbn, rabn;
    int bcount;
    int seqcount;
    int nra, error = 0, n = 0, on = 0;
    off_t tmp_off;

    KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
    if (uio->uio_resid == 0)
        return (0);
    if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
        return (EINVAL);
    td = uio->uio_td;

    mtx_lock(&nmp->nm_mtx);
    if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
        (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
        mtx_unlock(&nmp->nm_mtx);
        (void)ncl_fsinfo(nmp, vp, cred, td);
        mtx_lock(&nmp->nm_mtx);
    }
    if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
        (void) newnfs_iosize(nmp);

    tmp_off = uio->uio_offset + uio->uio_resid;
    if (vp->v_type != VDIR &&
        (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
        mtx_unlock(&nmp->nm_mtx);
        return (EFBIG);
    }
    mtx_unlock(&nmp->nm_mtx);

    if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
        /* No caching/no readaheads.  Just read data into the user buffer */
        return ncl_readrpc(vp, uio, cred);

    biosize = vp->v_bufobj.bo_bsize;
    seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

    error = nfs_bioread_check_cons(vp, td, cred);
    if (error)
        return error;

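    /*
     * Main read loop: each pass brings one logical block (or directory
     * block, or the symlink buffer) into the buffer cache, issuing the
     * read RPC if it is not already cached, and then copies the bytes
     * requested by the caller out to the uio.
     */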
    do {
        u_quad_t nsize;

        mtx_lock(&np->n_mtx);
        nsize = np->n_size;
        mtx_unlock(&np->n_mtx);

        switch (vp->v_type) {
        case VREG:
            NFSINCRGLOBAL(nfsstatsv1.biocache_reads);
            lbn = uio->uio_offset / biosize;
            on = uio->uio_offset - (lbn * biosize);

            /*
             * Start the read ahead(s), as required.
             */
            if (nmp->nm_readahead > 0) {
                for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
                    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
                    rabn = lbn + 1 + nra;
                    if (incore(&vp->v_bufobj, rabn) == NULL) {
                        rabp = nfs_getcacheblk(vp, rabn, biosize, td);
                        if (!rabp) {
                            error = newnfs_sigintr(nmp, td);
                            return (error ? error : EINTR);
                        }
                        if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
                            rabp->b_flags |= B_ASYNC;
                            rabp->b_iocmd = BIO_READ;
                            vfs_busy_pages(rabp, 0);
                            if (ncl_asyncio(nmp, rabp, cred, td)) {
                                rabp->b_flags |= B_INVAL;
                                rabp->b_ioflags |= BIO_ERROR;
                                vfs_unbusy_pages(rabp);
                                brelse(rabp);
                                break;
                            }
                        } else {
                            brelse(rabp);
                        }
                    }
                }
            }

            /* Note that bcount is *not* DEV_BSIZE aligned. */
            bcount = biosize;
            if ((off_t)lbn * biosize >= nsize) {
                bcount = 0;
            } else if ((off_t)(lbn + 1) * biosize > nsize) {
                bcount = nsize - (off_t)lbn * biosize;
            }
            bp = nfs_getcacheblk(vp, lbn, bcount, td);

            if (!bp) {
                error = newnfs_sigintr(nmp, td);
                return (error ? error : EINTR);
            }

            /*
             * If B_CACHE is not set, we must issue the read.  If this
             * fails, we return an error.
             */

            if ((bp->b_flags & B_CACHE) == 0) {
                bp->b_iocmd = BIO_READ;
                vfs_busy_pages(bp, 0);
                error = ncl_doio(vp, bp, cred, td, 0);
                if (error) {
                    brelse(bp);
                    return (error);
                }
            }

            /*
             * on is the offset into the current bp.  Figure out how many
             * bytes we can copy out of the bp.  Note that bcount is
             * NOT DEV_BSIZE aligned.
             *
             * Then figure out how many bytes we can copy into the uio.
             */

            n = 0;
            if (on < bcount)
                n = MIN((unsigned)(bcount - on), uio->uio_resid);
            break;
        case VLNK:
            NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks);
            bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
            if (!bp) {
                error = newnfs_sigintr(nmp, td);
                return (error ? error : EINTR);
            }
            if ((bp->b_flags & B_CACHE) == 0) {
                bp->b_iocmd = BIO_READ;
                vfs_busy_pages(bp, 0);
                error = ncl_doio(vp, bp, cred, td, 0);
                if (error) {
                    bp->b_ioflags |= BIO_ERROR;
                    brelse(bp);
                    return (error);
                }
            }
            n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
            on = 0;
            break;
        case VDIR:
            NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs);
            if (np->n_direofoffset
                && uio->uio_offset >= np->n_direofoffset) {
                return (0);
            }
            lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
            on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
            bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
            if (!bp) {
                error = newnfs_sigintr(nmp, td);
                return (error ? error : EINTR);
            }
            if ((bp->b_flags & B_CACHE) == 0) {
                bp->b_iocmd = BIO_READ;
                vfs_busy_pages(bp, 0);
                error = ncl_doio(vp, bp, cred, td, 0);
                if (error) {
                    brelse(bp);
                }
                while (error == NFSERR_BAD_COOKIE) {
                    ncl_invaldir(vp);
                    error = ncl_vinvalbuf(vp, 0, td, 1);
                    if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
                        return (EBADF);

                    /*
                     * Yuck! The directory has been modified on the
                     * server.  The only way to get the block is by
                     * reading from the beginning to get all the
                     * offset cookies.
                     *
                     * Leave the last bp intact unless there is an error.
                     * Loop back up to the while if the error is another
                     * NFSERR_BAD_COOKIE (double yuck!).
                     */
                    for (i = 0; i <= lbn && !error; i++) {
                        if (np->n_direofoffset
                            && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
                            return (0);
                        bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
                        if (!bp) {
                            error = newnfs_sigintr(nmp, td);
                            return (error ? error : EINTR);
                        }
                        if ((bp->b_flags & B_CACHE) == 0) {
                            bp->b_iocmd = BIO_READ;
                            vfs_busy_pages(bp, 0);
                            error = ncl_doio(vp, bp, cred, td, 0);
                            /*
                             * no error + B_INVAL == directory EOF,
                             * use the block.
                             */
                            if (error == 0 && (bp->b_flags & B_INVAL))
                                break;
                        }
                        /*
                         * An error will throw away the block and the
                         * for loop will break out.  If no error and this
                         * is not the block we want, we throw away the
                         * block and go for the next one via the for loop.
                         */
                        if (error || i < lbn)
                            brelse(bp);
                    }
                }
                /*
                 * The above while is repeated if we hit another cookie
                 * error.  If we hit an error and it wasn't a cookie error,
                 * we give up.
                 */
                if (error)
                    return (error);
            }

            /*
             * If not eof and read aheads are enabled, start one.
             * (You need the current block first, so that you have the
             *  directory offset cookie of the next block.)
             */
            if (nmp->nm_readahead > 0 &&
                (bp->b_flags & B_INVAL) == 0 &&
                (np->n_direofoffset == 0 ||
                (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
                incore(&vp->v_bufobj, lbn + 1) == NULL) {
                rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
                if (rabp) {
                    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
                        rabp->b_flags |= B_ASYNC;
                        rabp->b_iocmd = BIO_READ;
                        vfs_busy_pages(rabp, 0);
                        if (ncl_asyncio(nmp, rabp, cred, td)) {
                            rabp->b_flags |= B_INVAL;
                            rabp->b_ioflags |= BIO_ERROR;
                            vfs_unbusy_pages(rabp);
                            brelse(rabp);
                        }
                    } else {
                        brelse(rabp);
                    }
                }
            }
            /*
             * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
             * chopped for the EOF condition, we cannot tell how large
             * NFS directories are going to be until we hit EOF.  So
             * an NFS directory buffer is *not* chopped to its EOF.  Now,
             * it just so happens that b_resid will effectively chop it
             * to EOF.  *BUT* this information is lost if the buffer goes
             * away and is reconstituted into a B_CACHE state ( due to
             * being VMIO ) later.  So we keep track of the directory eof
             * in np->n_direofoffset and chop it off as an extra step
             * right here.
             */
            n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
            if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
                n = np->n_direofoffset - uio->uio_offset;
            break;
        default:
            printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
            bp = NULL;
            break;
        }

        if (n > 0) {
            error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
        }
        if (vp->v_type == VLNK)
            n = 0;
        if (bp != NULL)
            brelse(bp);
    } while (error == 0 && uio->uio_resid > 0 && n > 0);
    return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed.  The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
    int error;
    struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    struct thread *td = uiop->uio_td;
    int size;
    int wsize;

    mtx_lock(&nmp->nm_mtx);
    wsize = nmp->nm_wsize;
    mtx_unlock(&nmp->nm_mtx);
    if (ioflag & IO_SYNC) {
        int iomode, must_commit;
        struct uio uio;
        struct iovec iov;
do_sync:
        while (uiop->uio_resid > 0) {
            size = MIN(uiop->uio_resid, wsize);
            size = MIN(uiop->uio_iov->iov_len, size);
            iov.iov_base = uiop->uio_iov->iov_base;
            iov.iov_len = size;
            uio.uio_iov = &iov;
            uio.uio_iovcnt = 1;
            uio.uio_offset = uiop->uio_offset;
            uio.uio_resid = size;
            uio.uio_segflg = UIO_USERSPACE;
            uio.uio_rw = UIO_WRITE;
            uio.uio_td = td;
            iomode = NFSWRITE_FILESYNC;
            error = ncl_writerpc(vp, &uio, cred, &iomode,
                &must_commit, 0);
            KASSERT((must_commit == 0),
                ("ncl_directio_write: Did not commit write"));
            if (error)
                return (error);
            uiop->uio_offset += size;
            uiop->uio_resid -= size;
            if (uiop->uio_iov->iov_len <= size) {
                uiop->uio_iovcnt--;
                uiop->uio_iov++;
            } else {
                uiop->uio_iov->iov_base =
                    (char *)uiop->uio_iov->iov_base + size;
                uiop->uio_iov->iov_len -= size;
            }
        }
    } else {
        struct uio *t_uio;
        struct iovec *t_iov;
        struct buf *bp;

        /*
         * Break up the write into blocksize chunks and hand these
         * over to nfsiod's for write back.
         * Unfortunately, this incurs a copy of the data, since the
         * user could modify the buffer before the write is
         * initiated.
         *
         * The obvious optimization here is that one of the 2 copies
         * in the async write path can be eliminated by copying the
         * data here directly into mbufs and passing the mbuf chain
         * down.  But that will require a fair amount of re-working
         * of the code and can be done if there's enough interest
         * in NFS directio access.
         */
        while (uiop->uio_resid > 0) {
            size = MIN(uiop->uio_resid, wsize);
            size = MIN(uiop->uio_iov->iov_len, size);
            bp = getpbuf(&ncl_pbuf_freecnt);
            t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
            t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
            t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
            t_iov->iov_len = size;
            t_uio->uio_iov = t_iov;
            t_uio->uio_iovcnt = 1;
            t_uio->uio_offset = uiop->uio_offset;
            t_uio->uio_resid = size;
            t_uio->uio_segflg = UIO_SYSSPACE;
            t_uio->uio_rw = UIO_WRITE;
            t_uio->uio_td = td;
            KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
                uiop->uio_segflg == UIO_SYSSPACE,
                ("nfs_directio_write: Bad uio_segflg"));
            if (uiop->uio_segflg == UIO_USERSPACE) {
                error = copyin(uiop->uio_iov->iov_base,
                    t_iov->iov_base, size);
                if (error != 0)
                    goto err_free;
            } else
                /*
                 * UIO_SYSSPACE may never happen, but handle
                 * it just in case it does.
8314cf7d128SRick Macklem */ 8324cf7d128SRick Macklem bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, 8334cf7d128SRick Macklem size); 8349ec7b004SRick Macklem bp->b_flags |= B_DIRECT; 8359ec7b004SRick Macklem bp->b_iocmd = BIO_WRITE; 8369ec7b004SRick Macklem if (cred != NOCRED) { 8379ec7b004SRick Macklem crhold(cred); 8389ec7b004SRick Macklem bp->b_wcred = cred; 8399ec7b004SRick Macklem } else 8409ec7b004SRick Macklem bp->b_wcred = NOCRED; 8419ec7b004SRick Macklem bp->b_caller1 = (void *)t_uio; 8429ec7b004SRick Macklem bp->b_vp = vp; 8439ec7b004SRick Macklem error = ncl_asyncio(nmp, bp, NOCRED, td); 8444cf7d128SRick Macklem err_free: 8459ec7b004SRick Macklem if (error) { 8469ec7b004SRick Macklem free(t_iov->iov_base, M_NFSDIRECTIO); 8479ec7b004SRick Macklem free(t_iov, M_NFSDIRECTIO); 8489ec7b004SRick Macklem free(t_uio, M_NFSDIRECTIO); 8499ec7b004SRick Macklem bp->b_vp = NULL; 8509ec7b004SRick Macklem relpbuf(bp, &ncl_pbuf_freecnt); 8519ec7b004SRick Macklem if (error == EINTR) 8529ec7b004SRick Macklem return (error); 8539ec7b004SRick Macklem goto do_sync; 8549ec7b004SRick Macklem } 8559ec7b004SRick Macklem uiop->uio_offset += size; 8569ec7b004SRick Macklem uiop->uio_resid -= size; 8579ec7b004SRick Macklem if (uiop->uio_iov->iov_len <= size) { 8589ec7b004SRick Macklem uiop->uio_iovcnt--; 8599ec7b004SRick Macklem uiop->uio_iov++; 8609ec7b004SRick Macklem } else { 8619ec7b004SRick Macklem uiop->uio_iov->iov_base = 8629ec7b004SRick Macklem (char *)uiop->uio_iov->iov_base + size; 8639ec7b004SRick Macklem uiop->uio_iov->iov_len -= size; 8649ec7b004SRick Macklem } 8659ec7b004SRick Macklem } 8669ec7b004SRick Macklem } 8679ec7b004SRick Macklem return (0); 8689ec7b004SRick Macklem } 8699ec7b004SRick Macklem 8709ec7b004SRick Macklem /* 8719ec7b004SRick Macklem * Vnode op for write using bio 8729ec7b004SRick Macklem */ 8739ec7b004SRick Macklem int 8749ec7b004SRick Macklem ncl_write(struct vop_write_args *ap) 8759ec7b004SRick Macklem { 8769ec7b004SRick Macklem int biosize; 8779ec7b004SRick Macklem struct uio *uio = ap->a_uio; 8789ec7b004SRick Macklem struct thread *td = uio->uio_td; 8799ec7b004SRick Macklem struct vnode *vp = ap->a_vp; 8809ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 8819ec7b004SRick Macklem struct ucred *cred = ap->a_cred; 8829ec7b004SRick Macklem int ioflag = ap->a_ioflag; 8839ec7b004SRick Macklem struct buf *bp; 8849ec7b004SRick Macklem struct vattr vattr; 8859ec7b004SRick Macklem struct nfsmount *nmp = VFSTONFS(vp->v_mount); 8869ec7b004SRick Macklem daddr_t lbn; 887cf766161SRick Macklem int bcount, noncontig_write, obcount; 888a87627b2SAlexander Motin int bp_cached, n, on, error = 0, error1, wouldcommit; 889bfb68a9eSKonstantin Belousov size_t orig_resid, local_resid; 890bfb68a9eSKonstantin Belousov off_t orig_size, tmp_off; 8919ec7b004SRick Macklem 892b38f7723SKonstantin Belousov KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode")); 893b38f7723SKonstantin Belousov KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, 894b38f7723SKonstantin Belousov ("ncl_write proc")); 8959ec7b004SRick Macklem if (vp->v_type != VREG) 8969ec7b004SRick Macklem return (EIO); 8979ec7b004SRick Macklem mtx_lock(&np->n_mtx); 8989ec7b004SRick Macklem if (np->n_flag & NWRITEERR) { 8999ec7b004SRick Macklem np->n_flag &= ~NWRITEERR; 9009ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 9019ec7b004SRick Macklem return (np->n_error); 9029ec7b004SRick Macklem } else 9039ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 9049ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 9059ec7b004SRick Macklem if 
        (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
        mtx_unlock(&nmp->nm_mtx);
        (void)ncl_fsinfo(nmp, vp, cred, td);
        mtx_lock(&nmp->nm_mtx);
    }
    if (nmp->nm_wsize == 0)
        (void) newnfs_iosize(nmp);
    mtx_unlock(&nmp->nm_mtx);

    /*
     * Synchronously flush pending buffers if we are in synchronous
     * mode or if we are appending.
     */
    if (ioflag & (IO_APPEND | IO_SYNC)) {
        mtx_lock(&np->n_mtx);
        if (np->n_flag & NMODIFIED) {
            mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
            /*
             * Require non-blocking, synchronous writes to
             * dirty files to inform the program it needs
             * to fsync(2) explicitly.
             */
            if (ioflag & IO_NDELAY)
                return (EAGAIN);
#endif
            np->n_attrstamp = 0;
            KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
            error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag &
                IO_VMIO) != 0 ? V_VMIO : 0), td, 1);
            if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
                error = EBADF;
            if (error != 0)
                return (error);
        } else
            mtx_unlock(&np->n_mtx);
    }

    orig_resid = uio->uio_resid;
    mtx_lock(&np->n_mtx);
    orig_size = np->n_size;
    mtx_unlock(&np->n_mtx);

    /*
     * If IO_APPEND then load uio_offset.  We restart here if we cannot
     * get the append lock.
     */
    if (ioflag & IO_APPEND) {
        np->n_attrstamp = 0;
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
        error = VOP_GETATTR(vp, &vattr, cred);
        if (error)
            return (error);
        mtx_lock(&np->n_mtx);
        uio->uio_offset = np->n_size;
        mtx_unlock(&np->n_mtx);
    }

    if (uio->uio_offset < 0)
        return (EINVAL);
    tmp_off = uio->uio_offset + uio->uio_resid;
    if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
        return (EFBIG);
    if (uio->uio_resid == 0)
        return (0);

    if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
        return nfs_directio_write(vp, uio, cred, ioflag);

    /*
     * Maybe this should be above the vnode op call, but so long as
     * file servers have no limits, I don't think it matters.
     */
    if (vn_rlimit_fsize(vp, uio, td))
        return (EFBIG);

    biosize = vp->v_bufobj.bo_bsize;
    /*
     * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
     * would exceed the local maximum per-file write commit size when
     * combined with those, we must decide whether to flush,
     * go synchronous, or return error.  We don't bother checking
     * IO_UNIT -- we just make all writes atomic anyway, as there's
     * no point optimizing for something that really won't ever happen.
     */
    wouldcommit = 0;
    if (!(ioflag & IO_SYNC)) {
        int nflag;

        mtx_lock(&np->n_mtx);
        nflag = np->n_flag;
        mtx_unlock(&np->n_mtx);
        if (nflag & NMODIFIED) {
            BO_LOCK(&vp->v_bufobj);
            if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
                TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
                    b_bobufs) {
                    if (bp->b_flags & B_NEEDCOMMIT)
                        wouldcommit += bp->b_bcount;
                }
            }
            BO_UNLOCK(&vp->v_bufobj);
        }
    }

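    /*
     * Main write loop: each pass locks the buffer for one logical block
     * (reading it in or extending the file as needed), copies the
     * caller's data into it, and updates the buffer's dirty region
     * before the block is pushed back out.
     */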
    do {
        if (!(ioflag & IO_SYNC)) {
            wouldcommit += biosize;
            if (wouldcommit > nmp->nm_wcommitsize) {
                np->n_attrstamp = 0;
                KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
                error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag &
                    IO_VMIO) != 0 ? V_VMIO : 0), td, 1);
                if (error == 0 &&
                    (vp->v_iflag & VI_DOOMED) != 0)
                    error = EBADF;
                if (error != 0)
                    return (error);
                wouldcommit = biosize;
            }
        }

        NFSINCRGLOBAL(nfsstatsv1.biocache_writes);
        lbn = uio->uio_offset / biosize;
        on = uio->uio_offset - (lbn * biosize);
        n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
        /*
         * Handle direct append and file extension cases, calculate
         * unaligned buffer size.
         */
        mtx_lock(&np->n_mtx);
        if ((np->n_flag & NHASBEENLOCKED) == 0 &&
            (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
            noncontig_write = 1;
        else
            noncontig_write = 0;
        if ((uio->uio_offset == np->n_size ||
            (noncontig_write != 0 &&
            lbn == (np->n_size / biosize) &&
            uio->uio_offset + n > np->n_size)) && n) {
            mtx_unlock(&np->n_mtx);
            /*
             * Get the buffer (in its pre-append state to maintain
             * B_CACHE if it was previously set).  Resize the
             * nfsnode after we have locked the buffer to prevent
             * readers from reading garbage.
             */
            obcount = np->n_size - (lbn * biosize);
            bp = nfs_getcacheblk(vp, lbn, obcount, td);

            if (bp != NULL) {
                long save;

                mtx_lock(&np->n_mtx);
                np->n_size = uio->uio_offset + n;
                np->n_flag |= NMODIFIED;
                vnode_pager_setsize(vp, np->n_size);
                mtx_unlock(&np->n_mtx);

                save = bp->b_flags & B_CACHE;
                bcount = on + n;
                allocbuf(bp, bcount);
                bp->b_flags |= save;
                if (noncontig_write != 0 && on > obcount)
                    vfs_bio_bzero_buf(bp, obcount, on -
                        obcount);
            }
        } else {
            /*
             * Obtain the locked cache block first, and then
             * adjust the file's size as appropriate.
             */
            bcount = on + n;
            if ((off_t)lbn * biosize + bcount < np->n_size) {
                if ((off_t)(lbn + 1) * biosize < np->n_size)
                    bcount = biosize;
                else
                    bcount = np->n_size - (off_t)lbn * biosize;
            }
            mtx_unlock(&np->n_mtx);
            bp = nfs_getcacheblk(vp, lbn, bcount, td);
            mtx_lock(&np->n_mtx);
            if (uio->uio_offset + n > np->n_size) {
                np->n_size = uio->uio_offset + n;
                np->n_flag |= NMODIFIED;
                vnode_pager_setsize(vp, np->n_size);
            }
            mtx_unlock(&np->n_mtx);
        }

        if (!bp) {
            error = newnfs_sigintr(nmp, td);
            if (!error)
                error = EINTR;
            break;
        }

        /*
         * Issue a READ if B_CACHE is not set.  In special-append
         * mode, B_CACHE is based on the buffer prior to the write
         * op and is typically set, avoiding the read.  If a read
         * is required in special append mode, the server will
         * probably send us a short-read since we extended the file
         * on our end, resulting in b_resid == 0 and, thus,
         * B_CACHE getting set.
         *
         * We can also avoid issuing the read if the write covers
         * the entire buffer.  We have to make sure the buffer state
         * is reasonable in this case since we will not be initiating
         * I/O.  See the comments in kern/vfs_bio.c's getblk() for
         * more information.
         *
         * B_CACHE may also be set due to the buffer being cached
         * normally.
11219ec7b004SRick Macklem */ 11229ec7b004SRick Macklem 1123bfb68a9eSKonstantin Belousov bp_cached = 1; 11249ec7b004SRick Macklem if (on == 0 && n == bcount) { 1125bfb68a9eSKonstantin Belousov if ((bp->b_flags & B_CACHE) == 0) 1126bfb68a9eSKonstantin Belousov bp_cached = 0; 11279ec7b004SRick Macklem bp->b_flags |= B_CACHE; 11289ec7b004SRick Macklem bp->b_flags &= ~B_INVAL; 11299ec7b004SRick Macklem bp->b_ioflags &= ~BIO_ERROR; 11309ec7b004SRick Macklem } 11319ec7b004SRick Macklem 11329ec7b004SRick Macklem if ((bp->b_flags & B_CACHE) == 0) { 11339ec7b004SRick Macklem bp->b_iocmd = BIO_READ; 11349ec7b004SRick Macklem vfs_busy_pages(bp, 0); 113567c5c2d2SRick Macklem error = ncl_doio(vp, bp, cred, td, 0); 11369ec7b004SRick Macklem if (error) { 11379ec7b004SRick Macklem brelse(bp); 11389ec7b004SRick Macklem break; 11399ec7b004SRick Macklem } 11409ec7b004SRick Macklem } 11419ec7b004SRick Macklem if (bp->b_wcred == NOCRED) 11429ec7b004SRick Macklem bp->b_wcred = crhold(cred); 11439ec7b004SRick Macklem mtx_lock(&np->n_mtx); 11449ec7b004SRick Macklem np->n_flag |= NMODIFIED; 11459ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 11469ec7b004SRick Macklem 11479ec7b004SRick Macklem /* 11489ec7b004SRick Macklem * If dirtyend exceeds file size, chop it down. This should 11499ec7b004SRick Macklem * not normally occur but there is an append race where it 11509ec7b004SRick Macklem * might occur XXX, so we log it. 11519ec7b004SRick Macklem * 11529ec7b004SRick Macklem * If the chopping creates a reverse-indexed or degenerate 11539ec7b004SRick Macklem * situation with dirtyoff/end, we 0 both of them. 11549ec7b004SRick Macklem */ 11559ec7b004SRick Macklem 11569ec7b004SRick Macklem if (bp->b_dirtyend > bcount) { 1157ad600ac8SKonstantin Belousov printf("NFS append race @%lx:%d\n", 11589ec7b004SRick Macklem (long)bp->b_blkno * DEV_BSIZE, 11599ec7b004SRick Macklem bp->b_dirtyend - bcount); 11609ec7b004SRick Macklem bp->b_dirtyend = bcount; 11619ec7b004SRick Macklem } 11629ec7b004SRick Macklem 11639ec7b004SRick Macklem if (bp->b_dirtyoff >= bp->b_dirtyend) 11649ec7b004SRick Macklem bp->b_dirtyoff = bp->b_dirtyend = 0; 11659ec7b004SRick Macklem 11669ec7b004SRick Macklem /* 11679ec7b004SRick Macklem * If the new write will leave a contiguous dirty 11689ec7b004SRick Macklem * area, just update the b_dirtyoff and b_dirtyend, 11699ec7b004SRick Macklem * otherwise force a write rpc of the old dirty area. 11709ec7b004SRick Macklem * 1171cf766161SRick Macklem * If there has been a file lock applied to this file 1172cf766161SRick Macklem * or vfs.nfs.old_noncontig_writing is set, do the following: 11739ec7b004SRick Macklem * While it is possible to merge discontiguous writes due to 11749ec7b004SRick Macklem * our having a B_CACHE buffer ( and thus valid read data 11759ec7b004SRick Macklem * for the hole), we don't because it could lead to 11769ec7b004SRick Macklem * significant cache coherency problems with multiple clients, 11779ec7b004SRick Macklem * especially if locking is implemented later on. 11789ec7b004SRick Macklem * 1179cf766161SRick Macklem * If vfs.nfs.old_noncontig_writing is not set and there has 1180cf766161SRick Macklem * not been file locking done on this file: 1181cf766161SRick Macklem * Relax coherency a bit for the sake of performance and 1182cf766161SRick Macklem * expand the current dirty region to contain the new 1183cf766161SRick Macklem * write even if it means we mark some non-dirty data as 1184cf766161SRick Macklem * dirty. 
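 * For example, with an existing dirty region of [0, 512) in the
 * buffer and a new write at on == 1024 while noncontig_write == 0,
 * the test below sees on > b_dirtyend and pushes the old dirty data
 * with bwrite() before restarting at "again".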
11859ec7b004SRick Macklem */ 11869ec7b004SRick Macklem 1187cf766161SRick Macklem if (noncontig_write == 0 && bp->b_dirtyend > 0 && 11889ec7b004SRick Macklem (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { 11899ec7b004SRick Macklem if (bwrite(bp) == EINTR) { 11909ec7b004SRick Macklem error = EINTR; 11919ec7b004SRick Macklem break; 11929ec7b004SRick Macklem } 11939ec7b004SRick Macklem goto again; 11949ec7b004SRick Macklem } 11959ec7b004SRick Macklem 1196bfb68a9eSKonstantin Belousov local_resid = uio->uio_resid; 1197ddfc47fdSKonstantin Belousov error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio); 11989ec7b004SRick Macklem 1199bfb68a9eSKonstantin Belousov if (error != 0 && !bp_cached) { 1200bfb68a9eSKonstantin Belousov /* 1201bfb68a9eSKonstantin Belousov * This block has no other content than what 1202bfb68a9eSKonstantin Belousov * possibly was written by the faulty uiomove. 1203bfb68a9eSKonstantin Belousov * Release it, forgetting the data pages, to 1204bfb68a9eSKonstantin Belousov * prevent the leak of uninitialized data to 1205bfb68a9eSKonstantin Belousov * usermode. 1206bfb68a9eSKonstantin Belousov */ 1207bfb68a9eSKonstantin Belousov bp->b_ioflags |= BIO_ERROR; 1208bfb68a9eSKonstantin Belousov brelse(bp); 1209bfb68a9eSKonstantin Belousov uio->uio_offset -= local_resid - uio->uio_resid; 1210bfb68a9eSKonstantin Belousov uio->uio_resid = local_resid; 1211bfb68a9eSKonstantin Belousov break; 1212bfb68a9eSKonstantin Belousov } 1213bfb68a9eSKonstantin Belousov 12149ec7b004SRick Macklem /* 12159ec7b004SRick Macklem * Since this block is being modified, it must be written 12169ec7b004SRick Macklem * again and not just committed. Since write clustering does 12179ec7b004SRick Macklem * not work for the stage 1 data write, only the stage 2 12189ec7b004SRick Macklem * commit rpc, we have to clear B_CLUSTEROK as well. 12199ec7b004SRick Macklem */ 12209ec7b004SRick Macklem bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 12219ec7b004SRick Macklem 1222bfb68a9eSKonstantin Belousov /* 1223bfb68a9eSKonstantin Belousov * Get the partial update on the progress made from 1224a96c9b30SPedro F. Giffuni * uiomove, if an error occurred. 1225bfb68a9eSKonstantin Belousov */ 1226bfb68a9eSKonstantin Belousov if (error != 0) 1227bfb68a9eSKonstantin Belousov n = local_resid - uio->uio_resid; 12289ec7b004SRick Macklem 12299ec7b004SRick Macklem /* 12309ec7b004SRick Macklem * Only update dirtyoff/dirtyend if not a degenerate 12319ec7b004SRick Macklem * condition. 12329ec7b004SRick Macklem */ 1233bfb68a9eSKonstantin Belousov if (n > 0) { 12349ec7b004SRick Macklem if (bp->b_dirtyend > 0) { 12359ec7b004SRick Macklem bp->b_dirtyoff = min(on, bp->b_dirtyoff); 12369ec7b004SRick Macklem bp->b_dirtyend = max((on + n), bp->b_dirtyend); 12379ec7b004SRick Macklem } else { 12389ec7b004SRick Macklem bp->b_dirtyoff = on; 12399ec7b004SRick Macklem bp->b_dirtyend = on + n; 12409ec7b004SRick Macklem } 12411f176894SAlan Cox vfs_bio_set_valid(bp, on, n); 12429ec7b004SRick Macklem } 12439ec7b004SRick Macklem 12449ec7b004SRick Macklem /* 12459ec7b004SRick Macklem * If IO_SYNC do bwrite(). 12469ec7b004SRick Macklem * 12479ec7b004SRick Macklem * IO_INVAL appears to be unused. The idea appears to be 12489ec7b004SRick Macklem * to turn off caching in this case. Very odd.
XXX 12499ec7b004SRick Macklem */ 12509ec7b004SRick Macklem if ((ioflag & IO_SYNC)) { 12519ec7b004SRick Macklem if (ioflag & IO_INVAL) 12529ec7b004SRick Macklem bp->b_flags |= B_NOCACHE; 12539d232eecSKonstantin Belousov error1 = bwrite(bp); 12549d232eecSKonstantin Belousov if (error1 != 0) { 12559d232eecSKonstantin Belousov if (error == 0) 12569d232eecSKonstantin Belousov error = error1; 12579ec7b004SRick Macklem break; 12589d232eecSKonstantin Belousov } 1259b73cd4d3SKonstantin Belousov } else if ((n + on) == biosize || (ioflag & IO_ASYNC) != 0) { 12609ec7b004SRick Macklem bp->b_flags |= B_ASYNC; 12619ec7b004SRick Macklem (void) ncl_writebp(bp, 0, NULL); 12629ec7b004SRick Macklem } else { 12639ec7b004SRick Macklem bdwrite(bp); 12649ec7b004SRick Macklem } 1265bfb68a9eSKonstantin Belousov 1266bfb68a9eSKonstantin Belousov if (error != 0) 1267bfb68a9eSKonstantin Belousov break; 12689ec7b004SRick Macklem } while (uio->uio_resid > 0 && n > 0); 12699ec7b004SRick Macklem 1270bfb68a9eSKonstantin Belousov if (error != 0) { 1271bfb68a9eSKonstantin Belousov if (ioflag & IO_UNIT) { 1272bfb68a9eSKonstantin Belousov VATTR_NULL(&vattr); 1273bfb68a9eSKonstantin Belousov vattr.va_size = orig_size; 1274bfb68a9eSKonstantin Belousov /* IO_SYNC is handled implicitly */ 1275bfb68a9eSKonstantin Belousov (void)VOP_SETATTR(vp, &vattr, cred); 1276bfb68a9eSKonstantin Belousov uio->uio_offset -= orig_resid - uio->uio_resid; 1277bfb68a9eSKonstantin Belousov uio->uio_resid = orig_resid; 1278bfb68a9eSKonstantin Belousov } 1279bfb68a9eSKonstantin Belousov } 1280bfb68a9eSKonstantin Belousov 12819ec7b004SRick Macklem return (error); 12829ec7b004SRick Macklem } 12839ec7b004SRick Macklem 12849ec7b004SRick Macklem /* 12859ec7b004SRick Macklem * Get an nfs cache block. 12869ec7b004SRick Macklem * 12879ec7b004SRick Macklem * Allocate a new one if the block isn't currently in the cache 12889ec7b004SRick Macklem * and return the block marked busy. If the calling process is 12899ec7b004SRick Macklem * interrupted by a signal for an interruptible mount point, return 12909ec7b004SRick Macklem * NULL. 12919ec7b004SRick Macklem * 12929ec7b004SRick Macklem * The caller must carefully deal with the possible B_INVAL state of 12939ec7b004SRick Macklem * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it 12949ec7b004SRick Macklem * indirectly), so synchronous reads can be issued without worrying about 12959ec7b004SRick Macklem * the B_INVAL state. We have to be a little more careful when dealing 12969ec7b004SRick Macklem * with writes (see comments in nfs_write()) when extending a file past 12979ec7b004SRick Macklem * its EOF.
12989ec7b004SRick Macklem */ 12999ec7b004SRick Macklem static struct buf * 13009ec7b004SRick Macklem nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td) 13019ec7b004SRick Macklem { 13029ec7b004SRick Macklem struct buf *bp; 13039ec7b004SRick Macklem struct mount *mp; 13049ec7b004SRick Macklem struct nfsmount *nmp; 13059ec7b004SRick Macklem 13069ec7b004SRick Macklem mp = vp->v_mount; 13079ec7b004SRick Macklem nmp = VFSTONFS(mp); 13089ec7b004SRick Macklem 13099ec7b004SRick Macklem if (nmp->nm_flag & NFSMNT_INT) { 13109ec7b004SRick Macklem sigset_t oldset; 13119ec7b004SRick Macklem 13124a8e2176SRick Macklem newnfs_set_sigmask(td, &oldset); 13133b14c753SJohn Baldwin bp = getblk(vp, bn, size, PCATCH, 0, 0); 13144a8e2176SRick Macklem newnfs_restore_sigmask(td, &oldset); 13159ec7b004SRick Macklem while (bp == NULL) { 13169ec7b004SRick Macklem if (newnfs_sigintr(nmp, td)) 13179ec7b004SRick Macklem return (NULL); 13189ec7b004SRick Macklem bp = getblk(vp, bn, size, 0, 2 * hz, 0); 13199ec7b004SRick Macklem } 13209ec7b004SRick Macklem } else { 13219ec7b004SRick Macklem bp = getblk(vp, bn, size, 0, 0, 0); 13229ec7b004SRick Macklem } 13239ec7b004SRick Macklem 13247f763fc3SRick Macklem if (vp->v_type == VREG) 13257f763fc3SRick Macklem bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE); 13269ec7b004SRick Macklem return (bp); 13279ec7b004SRick Macklem } 13289ec7b004SRick Macklem 13299ec7b004SRick Macklem /* 13309ec7b004SRick Macklem * Flush and invalidate all dirty buffers. If another process is already 13319ec7b004SRick Macklem * doing the flush, just wait for completion. 13329ec7b004SRick Macklem */ 13339ec7b004SRick Macklem int 13349ec7b004SRick Macklem ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg) 13359ec7b004SRick Macklem { 13369ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 13379ec7b004SRick Macklem struct nfsmount *nmp = VFSTONFS(vp->v_mount); 13389ec7b004SRick Macklem int error = 0, slpflag, slptimeo; 13399ec7b004SRick Macklem int old_lock = 0; 13409ec7b004SRick Macklem 13419ec7b004SRick Macklem ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf"); 13429ec7b004SRick Macklem 13439ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_INT) == 0) 13449ec7b004SRick Macklem intrflg = 0; 13459ec7b004SRick Macklem if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) 13469ec7b004SRick Macklem intrflg = 1; 13479ec7b004SRick Macklem if (intrflg) { 13483b14c753SJohn Baldwin slpflag = PCATCH; 13499ec7b004SRick Macklem slptimeo = 2 * hz; 13509ec7b004SRick Macklem } else { 13519ec7b004SRick Macklem slpflag = 0; 13529ec7b004SRick Macklem slptimeo = 0; 13539ec7b004SRick Macklem } 13549ec7b004SRick Macklem 13559ec7b004SRick Macklem old_lock = ncl_upgrade_vnlock(vp); 1356934a3099SRick Macklem if (vp->v_iflag & VI_DOOMED) { 1357934a3099SRick Macklem /* 1358934a3099SRick Macklem * Since vgonel() uses the generic vinvalbuf() to flush 1359934a3099SRick Macklem * dirty buffers and it does not call this function, it 1360934a3099SRick Macklem * is safe to just return OK when VI_DOOMED is set. 1361934a3099SRick Macklem */ 1362934a3099SRick Macklem ncl_downgrade_vnlock(vp, old_lock); 1363934a3099SRick Macklem return (0); 1364934a3099SRick Macklem } 1365934a3099SRick Macklem 13669ec7b004SRick Macklem /* 13679ec7b004SRick Macklem * Now, flush as required. 
13689ec7b004SRick Macklem */ 1369ea525259SKonstantin Belousov if ((flags & (V_SAVE | V_VMIO)) == V_SAVE && 1370ea525259SKonstantin Belousov vp->v_bufobj.bo_object != NULL) { 137189f6b863SAttilio Rao VM_OBJECT_WLOCK(vp->v_bufobj.bo_object); 13729ec7b004SRick Macklem vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC); 137389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object); 13749ec7b004SRick Macklem /* 13759ec7b004SRick Macklem * If the page clean was interrupted, fail the invalidation. 13769ec7b004SRick Macklem * Not doing so, we run the risk of losing dirty pages in the 13779ec7b004SRick Macklem * vinvalbuf() call below. 13789ec7b004SRick Macklem */ 13799ec7b004SRick Macklem if (intrflg && (error = newnfs_sigintr(nmp, td))) 13809ec7b004SRick Macklem goto out; 13819ec7b004SRick Macklem } 13829ec7b004SRick Macklem 13839ec7b004SRick Macklem error = vinvalbuf(vp, flags, slpflag, 0); 13849ec7b004SRick Macklem while (error) { 13859ec7b004SRick Macklem if (intrflg && (error = newnfs_sigintr(nmp, td))) 13869ec7b004SRick Macklem goto out; 13879ec7b004SRick Macklem error = vinvalbuf(vp, flags, 0, slptimeo); 13889ec7b004SRick Macklem } 138956239558SRick Macklem if (NFSHASPNFS(nmp)) { 13901f60bfd8SRick Macklem nfscl_layoutcommit(vp, td); 139156239558SRick Macklem /* 139256239558SRick Macklem * Invalidate the attribute cache, since writes to a DS 139356239558SRick Macklem * won't update the size attribute. 139456239558SRick Macklem */ 139556239558SRick Macklem mtx_lock(&np->n_mtx); 139656239558SRick Macklem np->n_attrstamp = 0; 139756239558SRick Macklem } else 13989ec7b004SRick Macklem mtx_lock(&np->n_mtx); 13999ec7b004SRick Macklem if (np->n_directio_asyncwr == 0) 14009ec7b004SRick Macklem np->n_flag &= ~NMODIFIED; 14019ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 14029ec7b004SRick Macklem out: 14039ec7b004SRick Macklem ncl_downgrade_vnlock(vp, old_lock); 14049ec7b004SRick Macklem return error; 14059ec7b004SRick Macklem } 14069ec7b004SRick Macklem 14079ec7b004SRick Macklem /* 14089ec7b004SRick Macklem * Initiate asynchronous I/O. Return an error if no nfsiods are available. 14099ec7b004SRick Macklem * This is mainly to avoid queueing async I/O requests when the nfsiods 14109ec7b004SRick Macklem * are all hung on a dead server. 14119ec7b004SRick Macklem * 14129ec7b004SRick Macklem * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp 14139ec7b004SRick Macklem * is eventually dequeued by the async daemon, ncl_doio() *will*. 14149ec7b004SRick Macklem */ 14159ec7b004SRick Macklem int 14169ec7b004SRick Macklem ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td) 14179ec7b004SRick Macklem { 14189ec7b004SRick Macklem int iod; 14199ec7b004SRick Macklem int gotiod; 14209ec7b004SRick Macklem int slpflag = 0; 14219ec7b004SRick Macklem int slptimeo = 0; 14229ec7b004SRick Macklem int error, error2; 14239ec7b004SRick Macklem 14249ec7b004SRick Macklem /* 14259ec7b004SRick Macklem * Commits are usually short and sweet so lets save some cpu and 14269ec7b004SRick Macklem * leave the async daemons for more important rpc's (such as reads 14279ec7b004SRick Macklem * and writes). 1428175b3f31SRick Macklem * 1429175b3f31SRick Macklem * Readdirplus RPCs do vget()s to acquire the vnodes for entries 1430175b3f31SRick Macklem * in the directory in order to update attributes. 
This can deadlock 1431175b3f31SRick Macklem * with another thread that is waiting for async I/O to be done by 1432175b3f31SRick Macklem * an nfsiod thread while holding a lock on one of these vnodes. 1433175b3f31SRick Macklem * To avoid this deadlock, don't allow the async nfsiod threads to 1434175b3f31SRick Macklem * perform Readdirplus RPCs. 14359ec7b004SRick Macklem */ 14369ec7b004SRick Macklem mtx_lock(&ncl_iod_mutex); 1437175b3f31SRick Macklem if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) && 1438175b3f31SRick Macklem (nmp->nm_bufqiods > ncl_numasync / 2)) || 1439175b3f31SRick Macklem (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) { 14409ec7b004SRick Macklem mtx_unlock(&ncl_iod_mutex); 14419ec7b004SRick Macklem return(EIO); 14429ec7b004SRick Macklem } 14439ec7b004SRick Macklem again: 14449ec7b004SRick Macklem if (nmp->nm_flag & NFSMNT_INT) 14453b14c753SJohn Baldwin slpflag = PCATCH; 14469ec7b004SRick Macklem gotiod = FALSE; 14479ec7b004SRick Macklem 14489ec7b004SRick Macklem /* 14499ec7b004SRick Macklem * Find a free iod to process this request. 14509ec7b004SRick Macklem */ 14519ec7b004SRick Macklem for (iod = 0; iod < ncl_numasync; iod++) 145280169e41SRick Macklem if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) { 14539ec7b004SRick Macklem gotiod = TRUE; 14549ec7b004SRick Macklem break; 14559ec7b004SRick Macklem } 14569ec7b004SRick Macklem 14579ec7b004SRick Macklem /* 14589ec7b004SRick Macklem * Try to create one if none are free. 14599ec7b004SRick Macklem */ 14607b8c319bSRick Macklem if (!gotiod) 14617b8c319bSRick Macklem ncl_nfsiodnew(); 14627b8c319bSRick Macklem else { 14639ec7b004SRick Macklem /* 14649ec7b004SRick Macklem * Found one, so wake it up and tell it which 14659ec7b004SRick Macklem * mount to process. 14669ec7b004SRick Macklem */ 14679ec7b004SRick Macklem NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n", 14689ec7b004SRick Macklem iod, nmp)); 146980169e41SRick Macklem ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE; 14709ec7b004SRick Macklem ncl_iodmount[iod] = nmp; 14719ec7b004SRick Macklem nmp->nm_bufqiods++; 14729ec7b004SRick Macklem wakeup(&ncl_iodwant[iod]); 14739ec7b004SRick Macklem } 14749ec7b004SRick Macklem 14759ec7b004SRick Macklem /* 14769ec7b004SRick Macklem * If none are free, we may already have an iod working on this mount 14779ec7b004SRick Macklem * point. If so, it will process our request. 14789ec7b004SRick Macklem */ 14799ec7b004SRick Macklem if (!gotiod) { 14809ec7b004SRick Macklem if (nmp->nm_bufqiods > 0) { 14819ec7b004SRick Macklem NFS_DPF(ASYNCIO, 14829ec7b004SRick Macklem ("ncl_asyncio: %d iods are already processing mount %p\n", 14839ec7b004SRick Macklem nmp->nm_bufqiods, nmp)); 14849ec7b004SRick Macklem gotiod = TRUE; 14859ec7b004SRick Macklem } 14869ec7b004SRick Macklem } 14879ec7b004SRick Macklem 14889ec7b004SRick Macklem /* 14899ec7b004SRick Macklem * If we have an iod which can process the request, then queue 14909ec7b004SRick Macklem * the buffer. 14919ec7b004SRick Macklem */ 14929ec7b004SRick Macklem if (gotiod) { 14939ec7b004SRick Macklem /* 14949ec7b004SRick Macklem * Ensure that the queue never grows too large. We still want 14959ec7b004SRick Macklem * to asynchronize so we block rather than return EIO.
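 * The 2 * ncl_numasync bound below allows roughly two queued buffers
 * per nfsiod; once it is reached the caller sleeps in "nfsaio" until
 * an nfsiod drains the queue or the sleep is interrupted.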
14969ec7b004SRick Macklem */ 14979ec7b004SRick Macklem while (nmp->nm_bufqlen >= 2*ncl_numasync) { 14989ec7b004SRick Macklem NFS_DPF(ASYNCIO, 14999ec7b004SRick Macklem ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp)); 15009ec7b004SRick Macklem nmp->nm_bufqwant = TRUE; 15014a8e2176SRick Macklem error = newnfs_msleep(td, &nmp->nm_bufq, 15024a8e2176SRick Macklem &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio", 15034a8e2176SRick Macklem slptimeo); 15049ec7b004SRick Macklem if (error) { 15059ec7b004SRick Macklem error2 = newnfs_sigintr(nmp, td); 15069ec7b004SRick Macklem if (error2) { 15079ec7b004SRick Macklem mtx_unlock(&ncl_iod_mutex); 15089ec7b004SRick Macklem return (error2); 15099ec7b004SRick Macklem } 15103b14c753SJohn Baldwin if (slpflag == PCATCH) { 15119ec7b004SRick Macklem slpflag = 0; 15129ec7b004SRick Macklem slptimeo = 2 * hz; 15139ec7b004SRick Macklem } 15149ec7b004SRick Macklem } 15159ec7b004SRick Macklem /* 15169ec7b004SRick Macklem * We might have lost our iod while sleeping, 1517a96c9b30SPedro F. Giffuni * so check and loop if necessary. 15189ec7b004SRick Macklem */ 15199ec7b004SRick Macklem goto again; 15209ec7b004SRick Macklem } 15219ec7b004SRick Macklem 15229ec7b004SRick Macklem /* We might have lost our nfsiod */ 15239ec7b004SRick Macklem if (nmp->nm_bufqiods == 0) { 15249ec7b004SRick Macklem NFS_DPF(ASYNCIO, 15259ec7b004SRick Macklem ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); 15269ec7b004SRick Macklem goto again; 15279ec7b004SRick Macklem } 15289ec7b004SRick Macklem 15299ec7b004SRick Macklem if (bp->b_iocmd == BIO_READ) { 15309ec7b004SRick Macklem if (bp->b_rcred == NOCRED && cred != NOCRED) 15319ec7b004SRick Macklem bp->b_rcred = crhold(cred); 15329ec7b004SRick Macklem } else { 15339ec7b004SRick Macklem if (bp->b_wcred == NOCRED && cred != NOCRED) 15349ec7b004SRick Macklem bp->b_wcred = crhold(cred); 15359ec7b004SRick Macklem } 15369ec7b004SRick Macklem 15379ec7b004SRick Macklem if (bp->b_flags & B_REMFREE) 15389ec7b004SRick Macklem bremfreef(bp); 15399ec7b004SRick Macklem BUF_KERNPROC(bp); 15409ec7b004SRick Macklem TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist); 15419ec7b004SRick Macklem nmp->nm_bufqlen++; 15429ec7b004SRick Macklem if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) { 15439ec7b004SRick Macklem mtx_lock(&(VTONFS(bp->b_vp))->n_mtx); 15449ec7b004SRick Macklem VTONFS(bp->b_vp)->n_flag |= NMODIFIED; 15459ec7b004SRick Macklem VTONFS(bp->b_vp)->n_directio_asyncwr++; 15469ec7b004SRick Macklem mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx); 15479ec7b004SRick Macklem } 15489ec7b004SRick Macklem mtx_unlock(&ncl_iod_mutex); 15499ec7b004SRick Macklem return (0); 15509ec7b004SRick Macklem } 15519ec7b004SRick Macklem 15529ec7b004SRick Macklem mtx_unlock(&ncl_iod_mutex); 15539ec7b004SRick Macklem 15549ec7b004SRick Macklem /* 15559ec7b004SRick Macklem * All the iods are busy on other mounts, so return EIO to 15569ec7b004SRick Macklem * force the caller to process the i/o synchronously. 
15579ec7b004SRick Macklem */ 15589ec7b004SRick Macklem NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n")); 15599ec7b004SRick Macklem return (EIO); 15609ec7b004SRick Macklem } 15619ec7b004SRick Macklem 15629ec7b004SRick Macklem void 15639ec7b004SRick Macklem ncl_doio_directwrite(struct buf *bp) 15649ec7b004SRick Macklem { 15659ec7b004SRick Macklem int iomode, must_commit; 15669ec7b004SRick Macklem struct uio *uiop = (struct uio *)bp->b_caller1; 15679ec7b004SRick Macklem char *iov_base = uiop->uio_iov->iov_base; 15689ec7b004SRick Macklem 15699ec7b004SRick Macklem iomode = NFSWRITE_FILESYNC; 15709ec7b004SRick Macklem uiop->uio_td = NULL; /* NULL since we're in nfsiod */ 157167c5c2d2SRick Macklem ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0); 15729ec7b004SRick Macklem KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write")); 15739ec7b004SRick Macklem free(iov_base, M_NFSDIRECTIO); 15749ec7b004SRick Macklem free(uiop->uio_iov, M_NFSDIRECTIO); 15759ec7b004SRick Macklem free(uiop, M_NFSDIRECTIO); 15769ec7b004SRick Macklem if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) { 15779ec7b004SRick Macklem struct nfsnode *np = VTONFS(bp->b_vp); 15789ec7b004SRick Macklem mtx_lock(&np->n_mtx); 157956239558SRick Macklem if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) { 158056239558SRick Macklem /* 158156239558SRick Macklem * Invalidate the attribute cache, since writes to a DS 158256239558SRick Macklem * won't update the size attribute. 158356239558SRick Macklem */ 158456239558SRick Macklem np->n_attrstamp = 0; 158556239558SRick Macklem } 15869ec7b004SRick Macklem np->n_directio_asyncwr--; 15879ec7b004SRick Macklem if (np->n_directio_asyncwr == 0) { 15889ec7b004SRick Macklem np->n_flag &= ~NMODIFIED; 15899ec7b004SRick Macklem if ((np->n_flag & NFSYNCWAIT)) { 15909ec7b004SRick Macklem np->n_flag &= ~NFSYNCWAIT; 15919ec7b004SRick Macklem wakeup((caddr_t)&np->n_directio_asyncwr); 15929ec7b004SRick Macklem } 15939ec7b004SRick Macklem } 15949ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 15959ec7b004SRick Macklem } 15969ec7b004SRick Macklem bp->b_vp = NULL; 15979ec7b004SRick Macklem relpbuf(bp, &ncl_pbuf_freecnt); 15989ec7b004SRick Macklem } 15999ec7b004SRick Macklem 16009ec7b004SRick Macklem /* 16019ec7b004SRick Macklem * Do an I/O operation to/from a cache block. This may be called 16029ec7b004SRick Macklem * synchronously or from an nfsiod. 16039ec7b004SRick Macklem */ 16049ec7b004SRick Macklem int 160567c5c2d2SRick Macklem ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td, 160667c5c2d2SRick Macklem int called_from_strategy) 16079ec7b004SRick Macklem { 16089ec7b004SRick Macklem struct uio *uiop; 16099ec7b004SRick Macklem struct nfsnode *np; 16109ec7b004SRick Macklem struct nfsmount *nmp; 16119ec7b004SRick Macklem int error = 0, iomode, must_commit = 0; 16129ec7b004SRick Macklem struct uio uio; 16139ec7b004SRick Macklem struct iovec io; 16149ec7b004SRick Macklem struct proc *p = td ? 
td->td_proc : NULL; 16159ec7b004SRick Macklem uint8_t iocmd; 16169ec7b004SRick Macklem 16179ec7b004SRick Macklem np = VTONFS(vp); 16189ec7b004SRick Macklem nmp = VFSTONFS(vp->v_mount); 16199ec7b004SRick Macklem uiop = &uio; 16209ec7b004SRick Macklem uiop->uio_iov = &io; 16219ec7b004SRick Macklem uiop->uio_iovcnt = 1; 16229ec7b004SRick Macklem uiop->uio_segflg = UIO_SYSSPACE; 16239ec7b004SRick Macklem uiop->uio_td = td; 16249ec7b004SRick Macklem 16259ec7b004SRick Macklem /* 16269ec7b004SRick Macklem * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We 16279ec7b004SRick Macklem * do this here so we do not have to do it in all the code that 16289ec7b004SRick Macklem * calls us. 16299ec7b004SRick Macklem */ 16309ec7b004SRick Macklem bp->b_flags &= ~B_INVAL; 16319ec7b004SRick Macklem bp->b_ioflags &= ~BIO_ERROR; 16329ec7b004SRick Macklem 16339ec7b004SRick Macklem KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp)); 16349ec7b004SRick Macklem iocmd = bp->b_iocmd; 16359ec7b004SRick Macklem if (iocmd == BIO_READ) { 16369ec7b004SRick Macklem io.iov_len = uiop->uio_resid = bp->b_bcount; 16379ec7b004SRick Macklem io.iov_base = bp->b_data; 16389ec7b004SRick Macklem uiop->uio_rw = UIO_READ; 16399ec7b004SRick Macklem 16409ec7b004SRick Macklem switch (vp->v_type) { 16419ec7b004SRick Macklem case VREG: 16429ec7b004SRick Macklem uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; 16431b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.read_bios); 16449ec7b004SRick Macklem error = ncl_readrpc(vp, uiop, cr); 16459ec7b004SRick Macklem 16469ec7b004SRick Macklem if (!error) { 16479ec7b004SRick Macklem if (uiop->uio_resid) { 16489ec7b004SRick Macklem /* 16499ec7b004SRick Macklem * If we had a short read with no error, we must have 16509ec7b004SRick Macklem * hit a file hole. We should zero-fill the remainder. 16519ec7b004SRick Macklem * This can also occur if the server hits the file EOF. 16529ec7b004SRick Macklem * 16539ec7b004SRick Macklem * Holes used to be able to occur due to pending 16549ec7b004SRick Macklem * writes, but that is not possible any longer. 
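 * For example (illustrative numbers), a READ of a 32768 byte buffer
 * that returns only 8192 bytes leaves uio_resid == 24576; nread is
 * then 8192 and the trailing 24576 bytes of the buffer are zeroed
 * below before uio_resid is cleared.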
16559ec7b004SRick Macklem */ 16569ec7b004SRick Macklem int nread = bp->b_bcount - uiop->uio_resid; 1657526d0bd5SKonstantin Belousov ssize_t left = uiop->uio_resid; 16589ec7b004SRick Macklem 16599ec7b004SRick Macklem if (left > 0) 16609ec7b004SRick Macklem bzero((char *)bp->b_data + nread, left); 16619ec7b004SRick Macklem uiop->uio_resid = 0; 16629ec7b004SRick Macklem } 16639ec7b004SRick Macklem } 16649ec7b004SRick Macklem /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */ 16659ec7b004SRick Macklem if (p && (vp->v_vflag & VV_TEXT)) { 16669ec7b004SRick Macklem mtx_lock(&np->n_mtx); 16679ec7b004SRick Macklem if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) { 16689ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 16699ec7b004SRick Macklem PROC_LOCK(p); 16709ec7b004SRick Macklem killproc(p, "text file modification"); 16719ec7b004SRick Macklem PROC_UNLOCK(p); 16729ec7b004SRick Macklem } else 16739ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 16749ec7b004SRick Macklem } 16759ec7b004SRick Macklem break; 16769ec7b004SRick Macklem case VLNK: 16779ec7b004SRick Macklem uiop->uio_offset = (off_t)0; 16781b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.readlink_bios); 16799ec7b004SRick Macklem error = ncl_readlinkrpc(vp, uiop, cr); 16809ec7b004SRick Macklem break; 16819ec7b004SRick Macklem case VDIR: 16821b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.readdir_bios); 16839ec7b004SRick Macklem uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ; 16849ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) { 16859ec7b004SRick Macklem error = ncl_readdirplusrpc(vp, uiop, cr, td); 16869ec7b004SRick Macklem if (error == NFSERR_NOTSUPP) 16879ec7b004SRick Macklem nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 16889ec7b004SRick Macklem } 16899ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 16909ec7b004SRick Macklem error = ncl_readdirrpc(vp, uiop, cr, td); 16919ec7b004SRick Macklem /* 16929ec7b004SRick Macklem * end-of-directory sets B_INVAL but does not generate an 16939ec7b004SRick Macklem * error. 16949ec7b004SRick Macklem */ 16959ec7b004SRick Macklem if (error == 0 && uiop->uio_resid == bp->b_bcount) 16969ec7b004SRick Macklem bp->b_flags |= B_INVAL; 16979ec7b004SRick Macklem break; 16989ec7b004SRick Macklem default: 1699ad600ac8SKonstantin Belousov printf("ncl_doio: type %x unexpected\n", vp->v_type); 17009ec7b004SRick Macklem break; 170174b8d63dSPedro F. 
Giffuni } 17029ec7b004SRick Macklem if (error) { 17039ec7b004SRick Macklem bp->b_ioflags |= BIO_ERROR; 17049ec7b004SRick Macklem bp->b_error = error; 17059ec7b004SRick Macklem } 17069ec7b004SRick Macklem } else { 17079ec7b004SRick Macklem /* 17089ec7b004SRick Macklem * If we only need to commit, try to commit 17099ec7b004SRick Macklem */ 17109ec7b004SRick Macklem if (bp->b_flags & B_NEEDCOMMIT) { 17119ec7b004SRick Macklem int retv; 17129ec7b004SRick Macklem off_t off; 17139ec7b004SRick Macklem 17149ec7b004SRick Macklem off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 17159ec7b004SRick Macklem retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff, 17169ec7b004SRick Macklem bp->b_wcred, td); 17179ec7b004SRick Macklem if (retv == 0) { 17189ec7b004SRick Macklem bp->b_dirtyoff = bp->b_dirtyend = 0; 17199ec7b004SRick Macklem bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 17209ec7b004SRick Macklem bp->b_resid = 0; 17219ec7b004SRick Macklem bufdone(bp); 17229ec7b004SRick Macklem return (0); 17239ec7b004SRick Macklem } 17249ec7b004SRick Macklem if (retv == NFSERR_STALEWRITEVERF) { 17259ec7b004SRick Macklem ncl_clearcommit(vp->v_mount); 17269ec7b004SRick Macklem } 17279ec7b004SRick Macklem } 17289ec7b004SRick Macklem 17299ec7b004SRick Macklem /* 17309ec7b004SRick Macklem * Setup for actual write 17319ec7b004SRick Macklem */ 17329ec7b004SRick Macklem mtx_lock(&np->n_mtx); 17339ec7b004SRick Macklem if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size) 17349ec7b004SRick Macklem bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE; 17359ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 17369ec7b004SRick Macklem 17379ec7b004SRick Macklem if (bp->b_dirtyend > bp->b_dirtyoff) { 17389ec7b004SRick Macklem io.iov_len = uiop->uio_resid = bp->b_dirtyend 17399ec7b004SRick Macklem - bp->b_dirtyoff; 17409ec7b004SRick Macklem uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE 17419ec7b004SRick Macklem + bp->b_dirtyoff; 17429ec7b004SRick Macklem io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; 17439ec7b004SRick Macklem uiop->uio_rw = UIO_WRITE; 17441b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.write_bios); 17459ec7b004SRick Macklem 17469ec7b004SRick Macklem if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC) 17479ec7b004SRick Macklem iomode = NFSWRITE_UNSTABLE; 17489ec7b004SRick Macklem else 17499ec7b004SRick Macklem iomode = NFSWRITE_FILESYNC; 17509ec7b004SRick Macklem 175167c5c2d2SRick Macklem error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit, 175267c5c2d2SRick Macklem called_from_strategy); 17539ec7b004SRick Macklem 17549ec7b004SRick Macklem /* 17559ec7b004SRick Macklem * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try 17569ec7b004SRick Macklem * to cluster the buffers needing commit. This will allow 17579ec7b004SRick Macklem * the system to submit a single commit rpc for the whole 17589ec7b004SRick Macklem * cluster. We can do this even if the buffer is not 100% 17599ec7b004SRick Macklem * dirty (relative to the NFS blocksize), so we optimize the 17609ec7b004SRick Macklem * append-to-file-case. 17619ec7b004SRick Macklem * 17629ec7b004SRick Macklem * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be 17639ec7b004SRick Macklem * cleared because write clustering only works for commit 17649ec7b004SRick Macklem * rpc's, not for the data portion of the write). 
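 * Concretely: an UNSTABLE write whose dirty range covers the whole
 * buffer (b_dirtyoff == 0, b_dirtyend == b_bcount, even when b_bcount
 * is smaller than biosize as in the append case) leaves both
 * B_NEEDCOMMIT and B_CLUSTEROK set below, so a later commit RPC can
 * cover a run of such buffers in a single call.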
17659ec7b004SRick Macklem */ 17669ec7b004SRick Macklem 17679ec7b004SRick Macklem if (!error && iomode == NFSWRITE_UNSTABLE) { 17689ec7b004SRick Macklem bp->b_flags |= B_NEEDCOMMIT; 17699ec7b004SRick Macklem if (bp->b_dirtyoff == 0 17709ec7b004SRick Macklem && bp->b_dirtyend == bp->b_bcount) 17719ec7b004SRick Macklem bp->b_flags |= B_CLUSTEROK; 17729ec7b004SRick Macklem } else { 17739ec7b004SRick Macklem bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 17749ec7b004SRick Macklem } 17759ec7b004SRick Macklem 17769ec7b004SRick Macklem /* 17779ec7b004SRick Macklem * For an interrupted write, the buffer is still valid 17789ec7b004SRick Macklem * and the write hasn't been pushed to the server yet, 17799ec7b004SRick Macklem * so we can't set BIO_ERROR and report the interruption 17809ec7b004SRick Macklem * by setting B_EINTR. For the B_ASYNC case, B_EINTR 17819ec7b004SRick Macklem * is not relevant, so the rpc attempt is essentially 17829ec7b004SRick Macklem * a noop. For the case of a V3 write rpc not being 17839ec7b004SRick Macklem * committed to stable storage, the block is still 17849ec7b004SRick Macklem * dirty and requires either a commit rpc or another 17859ec7b004SRick Macklem * write rpc with iomode == NFSV3WRITE_FILESYNC before 17869ec7b004SRick Macklem * the block is reused. This is indicated by setting 17879ec7b004SRick Macklem * the B_DELWRI and B_NEEDCOMMIT flags. 17889ec7b004SRick Macklem * 178967c5c2d2SRick Macklem * EIO is returned by ncl_writerpc() to indicate a recoverable 179067c5c2d2SRick Macklem * write error and is handled as above, except that 179167c5c2d2SRick Macklem * B_EINTR isn't set. One cause of this is a stale stateid 179267c5c2d2SRick Macklem * error for the RPC that indicates recovery is required, 179367c5c2d2SRick Macklem * when called with called_from_strategy != 0. 179467c5c2d2SRick Macklem * 17959ec7b004SRick Macklem * If the buffer is marked B_PAGING, it does not reside on 17969ec7b004SRick Macklem * the vp's paging queues so we cannot call bdirty(). The 17979ec7b004SRick Macklem * bp in this case is not an NFS cache block so we should 17989ec7b004SRick Macklem * be safe. XXX 17999ec7b004SRick Macklem * 18009ec7b004SRick Macklem * The logic below breaks up errors into recoverable and 18019ec7b004SRick Macklem * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE 18029ec7b004SRick Macklem * and keep the buffer around for potential write retries. 18039ec7b004SRick Macklem * For the latter (e.g. ESTALE), we toss the buffer away (B_INVAL) 18049ec7b004SRick Macklem * and save the error in the nfsnode. This is less than ideal 18059ec7b004SRick Macklem * but necessary. Keeping such buffers around could potentially 18069ec7b004SRick Macklem * cause buffer exhaustion eventually (they can never be written 18079ec7b004SRick Macklem * out, so they will constantly be re-dirtied). It also causes 18089ec7b004SRick Macklem * all sorts of vfs panics. For non-recoverable write errors, 18099ec7b004SRick Macklem * also invalidate the attrcache, so we'll be forced to go over 18109ec7b004SRick Macklem * the wire for this object, returning an error to user on next 18119ec7b004SRick Macklem * call (most of the time).
18129ec7b004SRick Macklem */ 18139ec7b004SRick Macklem if (error == EINTR || error == EIO || error == ETIMEDOUT 18149ec7b004SRick Macklem || (!error && (bp->b_flags & B_NEEDCOMMIT))) { 18159ec7b004SRick Macklem bp->b_flags &= ~(B_INVAL|B_NOCACHE); 18169ec7b004SRick Macklem if ((bp->b_flags & B_PAGING) == 0) { 18179ec7b004SRick Macklem bdirty(bp); 18189ec7b004SRick Macklem bp->b_flags &= ~B_DONE; 18199ec7b004SRick Macklem } 182067c5c2d2SRick Macklem if ((error == EINTR || error == ETIMEDOUT) && 182167c5c2d2SRick Macklem (bp->b_flags & B_ASYNC) == 0) 18229ec7b004SRick Macklem bp->b_flags |= B_EINTR; 18239ec7b004SRick Macklem } else { 18249ec7b004SRick Macklem if (error) { 18259ec7b004SRick Macklem bp->b_ioflags |= BIO_ERROR; 18269ec7b004SRick Macklem bp->b_flags |= B_INVAL; 18279ec7b004SRick Macklem bp->b_error = np->n_error = error; 18289ec7b004SRick Macklem mtx_lock(&np->n_mtx); 18299ec7b004SRick Macklem np->n_flag |= NWRITEERR; 18309ec7b004SRick Macklem np->n_attrstamp = 0; 18318f0e65c9SRick Macklem KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 18329ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 18339ec7b004SRick Macklem } 18349ec7b004SRick Macklem bp->b_dirtyoff = bp->b_dirtyend = 0; 18359ec7b004SRick Macklem } 18369ec7b004SRick Macklem } else { 18379ec7b004SRick Macklem bp->b_resid = 0; 18389ec7b004SRick Macklem bufdone(bp); 18399ec7b004SRick Macklem return (0); 18409ec7b004SRick Macklem } 18419ec7b004SRick Macklem } 18429ec7b004SRick Macklem bp->b_resid = uiop->uio_resid; 18439ec7b004SRick Macklem if (must_commit) 18449ec7b004SRick Macklem ncl_clearcommit(vp->v_mount); 18459ec7b004SRick Macklem bufdone(bp); 18469ec7b004SRick Macklem return (error); 18479ec7b004SRick Macklem } 18489ec7b004SRick Macklem 18499ec7b004SRick Macklem /* 18509ec7b004SRick Macklem * Used to aid in handling ftruncate() operations on the NFS client side. 18519ec7b004SRick Macklem * Truncation creates a number of special problems for NFS. We have to 18529ec7b004SRick Macklem * throw away VM pages and buffer cache buffers that are beyond EOF, and 18539ec7b004SRick Macklem * we have to properly handle VM pages or (potentially dirty) buffers 18549ec7b004SRick Macklem * that straddle the truncation point. 18559ec7b004SRick Macklem */ 18569ec7b004SRick Macklem 18579ec7b004SRick Macklem int 18589ec7b004SRick Macklem ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize) 18599ec7b004SRick Macklem { 18609ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 18619ec7b004SRick Macklem u_quad_t tsize; 18627f763fc3SRick Macklem int biosize = vp->v_bufobj.bo_bsize; 18639ec7b004SRick Macklem int error = 0; 18649ec7b004SRick Macklem 18659ec7b004SRick Macklem mtx_lock(&np->n_mtx); 18669ec7b004SRick Macklem tsize = np->n_size; 18679ec7b004SRick Macklem np->n_size = nsize; 18689ec7b004SRick Macklem mtx_unlock(&np->n_mtx); 18699ec7b004SRick Macklem 18709ec7b004SRick Macklem if (nsize < tsize) { 18719ec7b004SRick Macklem struct buf *bp; 18729ec7b004SRick Macklem daddr_t lbn; 18739ec7b004SRick Macklem int bufsize; 18749ec7b004SRick Macklem 18759ec7b004SRick Macklem /* 18769ec7b004SRick Macklem * vtruncbuf() doesn't get the buffer overlapping the 18779ec7b004SRick Macklem * truncation point. We may have a B_DELWRI and/or B_CACHE 18789ec7b004SRick Macklem * buffer that now needs to be truncated. 
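 * For example (illustrative numbers), truncating from 40000 to 33000
 * bytes with a 32768 byte biosize: vtruncbuf() drops the buffers wholly
 * beyond the new EOF, lbn becomes 1, and the straddling buffer is
 * re-fetched below with bufsize 33000 - 32768 = 232 so that stale bytes
 * past the truncation point are not left behind.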
18799ec7b004SRick Macklem */ 1880c52fd858SEdward Tomasz Napierala error = vtruncbuf(vp, cred, nsize, biosize); 18819ec7b004SRick Macklem lbn = nsize / biosize; 188296ecfd98SEd Maste bufsize = nsize - (lbn * biosize); 18839ec7b004SRick Macklem bp = nfs_getcacheblk(vp, lbn, bufsize, td); 18849ec7b004SRick Macklem if (!bp) 18859ec7b004SRick Macklem return EINTR; 18869ec7b004SRick Macklem if (bp->b_dirtyoff > bp->b_bcount) 18879ec7b004SRick Macklem bp->b_dirtyoff = bp->b_bcount; 18889ec7b004SRick Macklem if (bp->b_dirtyend > bp->b_bcount) 18899ec7b004SRick Macklem bp->b_dirtyend = bp->b_bcount; 18909ec7b004SRick Macklem bp->b_flags |= B_RELBUF; /* don't leave garbage around */ 18919ec7b004SRick Macklem brelse(bp); 18929ec7b004SRick Macklem } else { 18939ec7b004SRick Macklem vnode_pager_setsize(vp, nsize); 18949ec7b004SRick Macklem } 18959ec7b004SRick Macklem return(error); 18969ec7b004SRick Macklem } 18979ec7b004SRick Macklem 1898
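/*
 * Illustrative sketch only (not part of nfs_bio.c): the block arithmetic
 * used by the buffered write loop above and by ncl_meta_setsize(), redone
 * as a tiny standalone C program.  The biosize, offset and resid values
 * below are made-up examples; in the kernel, biosize comes from
 * vp->v_bufobj.bo_bsize and the other two from the struct uio.
 */
#include <stdio.h>

int
main(void)
{
	long biosize = 32768;	/* assumed mount block size */
	long offset = 40000;	/* assumed byte offset of the write */
	long resid = 10000;	/* assumed bytes left to write */
	long lbn, on, n;

	lbn = offset / biosize;		/* logical block number: 1 */
	on = offset - lbn * biosize;	/* offset within that block: 7232 */
	n = biosize - on;		/* room left in the block: 25536 */
	if (n > resid)
		n = resid;		/* clamp to the caller's residual: 10000 */
	printf("lbn=%ld on=%ld n=%ld\n", lbn, on, n);
	return (0);
}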