19ec7b004SRick Macklem /*- 251369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause 351369649SPedro F. Giffuni * 49ec7b004SRick Macklem * Copyright (c) 1989, 1993 59ec7b004SRick Macklem * The Regents of the University of California. All rights reserved. 69ec7b004SRick Macklem * 79ec7b004SRick Macklem * This code is derived from software contributed to Berkeley by 89ec7b004SRick Macklem * Rick Macklem at The University of Guelph. 99ec7b004SRick Macklem * 109ec7b004SRick Macklem * Redistribution and use in source and binary forms, with or without 119ec7b004SRick Macklem * modification, are permitted provided that the following conditions 129ec7b004SRick Macklem * are met: 139ec7b004SRick Macklem * 1. Redistributions of source code must retain the above copyright 149ec7b004SRick Macklem * notice, this list of conditions and the following disclaimer. 159ec7b004SRick Macklem * 2. Redistributions in binary form must reproduce the above copyright 169ec7b004SRick Macklem * notice, this list of conditions and the following disclaimer in the 179ec7b004SRick Macklem * documentation and/or other materials provided with the distribution. 18fbbd9655SWarner Losh * 3. Neither the name of the University nor the names of its contributors 199ec7b004SRick Macklem * may be used to endorse or promote products derived from this software 209ec7b004SRick Macklem * without specific prior written permission. 219ec7b004SRick Macklem * 229ec7b004SRick Macklem * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 239ec7b004SRick Macklem * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 249ec7b004SRick Macklem * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 259ec7b004SRick Macklem * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 269ec7b004SRick Macklem * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 279ec7b004SRick Macklem * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 289ec7b004SRick Macklem * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 299ec7b004SRick Macklem * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 309ec7b004SRick Macklem * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 319ec7b004SRick Macklem * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 329ec7b004SRick Macklem * SUCH DAMAGE. 339ec7b004SRick Macklem */ 349ec7b004SRick Macklem 359ec7b004SRick Macklem #include <sys/param.h> 369ec7b004SRick Macklem #include <sys/systm.h> 379ec7b004SRick Macklem #include <sys/bio.h> 389ec7b004SRick Macklem #include <sys/buf.h> 399ec7b004SRick Macklem #include <sys/kernel.h> 409ec7b004SRick Macklem #include <sys/mount.h> 4189f6b863SAttilio Rao #include <sys/rwlock.h> 429ec7b004SRick Macklem #include <sys/vmmeter.h> 439ec7b004SRick Macklem #include <sys/vnode.h> 449ec7b004SRick Macklem 459ec7b004SRick Macklem #include <vm/vm.h> 461c771f92SKonstantin Belousov #include <vm/vm_param.h> 479ec7b004SRick Macklem #include <vm/vm_extern.h> 489ec7b004SRick Macklem #include <vm/vm_page.h> 499ec7b004SRick Macklem #include <vm/vm_object.h> 509ec7b004SRick Macklem #include <vm/vm_pager.h> 519ec7b004SRick Macklem #include <vm/vnode_pager.h> 529ec7b004SRick Macklem 539ec7b004SRick Macklem #include <fs/nfs/nfsport.h> 549ec7b004SRick Macklem #include <fs/nfsclient/nfsmount.h> 559ec7b004SRick Macklem #include <fs/nfsclient/nfs.h> 569ec7b004SRick Macklem #include <fs/nfsclient/nfsnode.h> 578f0e65c9SRick Macklem #include <fs/nfsclient/nfs_kdtrace.h> 589ec7b004SRick Macklem 599ec7b004SRick Macklem extern int newnfs_directio_allow_mmap; 601b819cf2SRick Macklem extern struct nfsstatsv1 nfsstatsv1; 619ec7b004SRick Macklem extern struct mtx ncl_iod_mutex; 629ec7b004SRick Macklem extern int ncl_numasync; 637b8c319bSRick Macklem extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON]; 647b8c319bSRick Macklem extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON]; 659ec7b004SRick Macklem extern int newnfs_directio_enable; 66a53373faSKonstantin Belousov extern int nfs_keep_dirty_on_error; 679ec7b004SRick Macklem 68756a5412SGleb Smirnoff uma_zone_t ncl_pbuf_zone; 699ec7b004SRick Macklem 709ec7b004SRick Macklem static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, 719ec7b004SRick Macklem struct thread *td); 729ec7b004SRick Macklem static int nfs_directio_write(struct vnode *vp, struct uio *uiop, 739ec7b004SRick Macklem struct ucred *cred, int ioflag); 749ec7b004SRick Macklem 759ec7b004SRick Macklem /* 769ec7b004SRick Macklem * Vnode op for VM getpages. 779ec7b004SRick Macklem */ 78753a007fSKonstantin Belousov SYSCTL_DECL(_vfs_nfs); 79753a007fSKonstantin Belousov static int use_buf_pager = 1; 80753a007fSKonstantin Belousov SYSCTL_INT(_vfs_nfs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, 81753a007fSKonstantin Belousov &use_buf_pager, 0, 82753a007fSKonstantin Belousov "Use buffer pager instead of direct readrpc call"); 83753a007fSKonstantin Belousov 84753a007fSKonstantin Belousov static daddr_t 85753a007fSKonstantin Belousov ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off) 86753a007fSKonstantin Belousov { 87753a007fSKonstantin Belousov 88753a007fSKonstantin Belousov return (off / vp->v_bufobj.bo_bsize); 89753a007fSKonstantin Belousov } 90753a007fSKonstantin Belousov 91753a007fSKonstantin Belousov static int 92197a4f29SKonstantin Belousov ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz) 93753a007fSKonstantin Belousov { 94753a007fSKonstantin Belousov struct nfsnode *np; 95753a007fSKonstantin Belousov u_quad_t nsize; 96753a007fSKonstantin Belousov int biosize, bcount; 97753a007fSKonstantin Belousov 98753a007fSKonstantin Belousov np = VTONFS(vp); 995d85e12fSRick Macklem NFSLOCKNODE(np); 100753a007fSKonstantin Belousov nsize = np->n_size; 1015d85e12fSRick Macklem NFSUNLOCKNODE(np); 102753a007fSKonstantin Belousov 103753a007fSKonstantin Belousov biosize = vp->v_bufobj.bo_bsize; 104753a007fSKonstantin Belousov bcount = biosize; 105753a007fSKonstantin Belousov if ((off_t)lbn * biosize >= nsize) 106753a007fSKonstantin Belousov bcount = 0; 107753a007fSKonstantin Belousov else if ((off_t)(lbn + 1) * biosize > nsize) 108753a007fSKonstantin Belousov bcount = nsize - (off_t)lbn * biosize; 109197a4f29SKonstantin Belousov *sz = bcount; 110197a4f29SKonstantin Belousov return (0); 111753a007fSKonstantin Belousov } 112753a007fSKonstantin Belousov 1139ec7b004SRick Macklem int 1149ec7b004SRick Macklem ncl_getpages(struct vop_getpages_args *ap) 1159ec7b004SRick Macklem { 1169ec7b004SRick Macklem int i, error, nextoff, size, toff, count, npages; 1179ec7b004SRick Macklem struct uio uio; 1189ec7b004SRick Macklem struct iovec iov; 1199ec7b004SRick Macklem vm_offset_t kva; 1209ec7b004SRick Macklem struct buf *bp; 1219ec7b004SRick Macklem struct vnode *vp; 1229ec7b004SRick Macklem struct thread *td; 1239ec7b004SRick Macklem struct ucred *cred; 1249ec7b004SRick Macklem struct nfsmount *nmp; 1259ec7b004SRick Macklem vm_object_t object; 1269ec7b004SRick Macklem vm_page_t *pages; 1279ec7b004SRick Macklem struct nfsnode *np; 1289ec7b004SRick Macklem 1299ec7b004SRick Macklem vp = ap->a_vp; 1309ec7b004SRick Macklem np = VTONFS(vp); 131fc2c3afeSKonstantin Belousov td = curthread; 132fc2c3afeSKonstantin Belousov cred = curthread->td_ucred; 1339ec7b004SRick Macklem nmp = VFSTONFS(vp->v_mount); 1349ec7b004SRick Macklem pages = ap->a_m; 135f17f88d3SGleb Smirnoff npages = ap->a_count; 1369ec7b004SRick Macklem 1379ec7b004SRick Macklem if ((object = vp->v_object) == NULL) { 138ad600ac8SKonstantin Belousov printf("ncl_getpages: called with non-merged cache vnode\n"); 13957a7e732SAlan Cox return (VM_PAGER_ERROR); 1409ec7b004SRick Macklem } 1419ec7b004SRick Macklem 1429ec7b004SRick Macklem if (newnfs_directio_enable && !newnfs_directio_allow_mmap) { 1435d85e12fSRick Macklem NFSLOCKNODE(np); 1449ec7b004SRick Macklem if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { 1455d85e12fSRick Macklem NFSUNLOCKNODE(np); 146ad600ac8SKonstantin Belousov printf("ncl_getpages: called on non-cacheable vnode\n"); 14757a7e732SAlan Cox return (VM_PAGER_ERROR); 1489ec7b004SRick Macklem } else 1495d85e12fSRick Macklem NFSUNLOCKNODE(np); 1509ec7b004SRick Macklem } 1519ec7b004SRick Macklem 1529ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 1539ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 1549ec7b004SRick Macklem (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 1559ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 1569ec7b004SRick Macklem /* We'll never get here for v4, because we always have fsinfo */ 1579ec7b004SRick Macklem (void)ncl_fsinfo(nmp, vp, cred, td); 1589ec7b004SRick Macklem } else 1599ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 1609ec7b004SRick Macklem 161753a007fSKonstantin Belousov if (use_buf_pager) 162753a007fSKonstantin Belousov return (vfs_bio_getpages(vp, pages, npages, ap->a_rbehind, 163753a007fSKonstantin Belousov ap->a_rahead, ncl_gbp_getblkno, ncl_gbp_getblksz)); 164753a007fSKonstantin Belousov 1659ec7b004SRick Macklem /* 1669ec7b004SRick Macklem * If the requested page is partially valid, just return it and 1679ec7b004SRick Macklem * allow the pager to zero-out the blanks. Partially valid pages 1689ec7b004SRick Macklem * can only occur at the file EOF. 169b0cd2017SGleb Smirnoff * 170b0cd2017SGleb Smirnoff * XXXGL: is that true for NFS, where short read can occur??? 1719ec7b004SRick Macklem */ 172b0cd2017SGleb Smirnoff VM_OBJECT_WLOCK(object); 1730012f373SJeff Roberson if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0) 174f17f88d3SGleb Smirnoff goto out; 175b0cd2017SGleb Smirnoff VM_OBJECT_WUNLOCK(object); 1769ec7b004SRick Macklem 1779ec7b004SRick Macklem /* 1789ec7b004SRick Macklem * We use only the kva address for the buffer, but this is extremely 179a96c9b30SPedro F. Giffuni * convenient and fast. 1809ec7b004SRick Macklem */ 181756a5412SGleb Smirnoff bp = uma_zalloc(ncl_pbuf_zone, M_WAITOK); 1829ec7b004SRick Macklem 1839ec7b004SRick Macklem kva = (vm_offset_t) bp->b_data; 1849ec7b004SRick Macklem pmap_qenter(kva, pages, npages); 18583c9dea1SGleb Smirnoff VM_CNT_INC(v_vnodein); 18683c9dea1SGleb Smirnoff VM_CNT_ADD(v_vnodepgsin, npages); 1879ec7b004SRick Macklem 188f17f88d3SGleb Smirnoff count = npages << PAGE_SHIFT; 1899ec7b004SRick Macklem iov.iov_base = (caddr_t) kva; 1909ec7b004SRick Macklem iov.iov_len = count; 1919ec7b004SRick Macklem uio.uio_iov = &iov; 1929ec7b004SRick Macklem uio.uio_iovcnt = 1; 1939ec7b004SRick Macklem uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); 1949ec7b004SRick Macklem uio.uio_resid = count; 1959ec7b004SRick Macklem uio.uio_segflg = UIO_SYSSPACE; 1969ec7b004SRick Macklem uio.uio_rw = UIO_READ; 1979ec7b004SRick Macklem uio.uio_td = td; 1989ec7b004SRick Macklem 1999ec7b004SRick Macklem error = ncl_readrpc(vp, &uio, cred); 2009ec7b004SRick Macklem pmap_qremove(kva, npages); 2019ec7b004SRick Macklem 202756a5412SGleb Smirnoff uma_zfree(ncl_pbuf_zone, bp); 2039ec7b004SRick Macklem 2049ec7b004SRick Macklem if (error && (uio.uio_resid == count)) { 205ad600ac8SKonstantin Belousov printf("ncl_getpages: error %d\n", error); 20657a7e732SAlan Cox return (VM_PAGER_ERROR); 2079ec7b004SRick Macklem } 2089ec7b004SRick Macklem 2099ec7b004SRick Macklem /* 2109ec7b004SRick Macklem * Calculate the number of bytes read and validate only that number 2119ec7b004SRick Macklem * of bytes. Note that due to pending writes, size may be 0. This 2129ec7b004SRick Macklem * does not mean that the remaining data is invalid! 2139ec7b004SRick Macklem */ 2149ec7b004SRick Macklem 2159ec7b004SRick Macklem size = count - uio.uio_resid; 21689f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 2179ec7b004SRick Macklem for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { 2189ec7b004SRick Macklem vm_page_t m; 2199ec7b004SRick Macklem nextoff = toff + PAGE_SIZE; 2209ec7b004SRick Macklem m = pages[i]; 2219ec7b004SRick Macklem 2229ec7b004SRick Macklem if (nextoff <= size) { 2239ec7b004SRick Macklem /* 2249ec7b004SRick Macklem * Read operation filled an entire page 2259ec7b004SRick Macklem */ 2260012f373SJeff Roberson vm_page_valid(m); 227a770e183SRick Macklem KASSERT(m->dirty == 0, 228a770e183SRick Macklem ("nfs_getpages: page %p is dirty", m)); 2299ec7b004SRick Macklem } else if (size > toff) { 2309ec7b004SRick Macklem /* 2319ec7b004SRick Macklem * Read operation filled a partial page. 2329ec7b004SRick Macklem */ 2330012f373SJeff Roberson vm_page_invalid(m); 234dc874f98SKonstantin Belousov vm_page_set_valid_range(m, 0, size - toff); 2353933ec4dSAlan Cox KASSERT(m->dirty == 0, 23672d1bbbaSRick Macklem ("nfs_getpages: page %p is dirty", m)); 2379ec7b004SRick Macklem } else { 2389ec7b004SRick Macklem /* 239b6c00483SKonstantin Belousov * Read operation was short. If no error 240a96c9b30SPedro F. Giffuni * occurred we may have hit a zero-fill 241b6c00483SKonstantin Belousov * section. We leave valid set to 0, and page 242b6c00483SKonstantin Belousov * is freed by vm_page_readahead_finish() if 243b6c00483SKonstantin Belousov * its index is not equal to requested, or 244b6c00483SKonstantin Belousov * page is zeroed and set valid by 245b6c00483SKonstantin Belousov * vm_pager_get_pages() for requested page. 2469ec7b004SRick Macklem */ 2479ec7b004SRick Macklem ; 2489ec7b004SRick Macklem } 24903679e23SAlan Cox } 250f17f88d3SGleb Smirnoff out: 25189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 252f17f88d3SGleb Smirnoff if (ap->a_rbehind) 253f17f88d3SGleb Smirnoff *ap->a_rbehind = 0; 254f17f88d3SGleb Smirnoff if (ap->a_rahead) 255f17f88d3SGleb Smirnoff *ap->a_rahead = 0; 256f17f88d3SGleb Smirnoff return (VM_PAGER_OK); 2579ec7b004SRick Macklem } 2589ec7b004SRick Macklem 2599ec7b004SRick Macklem /* 2609ec7b004SRick Macklem * Vnode op for VM putpages. 2619ec7b004SRick Macklem */ 2629ec7b004SRick Macklem int 2639ec7b004SRick Macklem ncl_putpages(struct vop_putpages_args *ap) 2649ec7b004SRick Macklem { 2659ec7b004SRick Macklem struct uio uio; 2669ec7b004SRick Macklem struct iovec iov; 267ea525259SKonstantin Belousov int i, error, npages, count; 2689ec7b004SRick Macklem off_t offset; 2699ec7b004SRick Macklem int *rtvals; 2709ec7b004SRick Macklem struct vnode *vp; 2719ec7b004SRick Macklem struct thread *td; 2729ec7b004SRick Macklem struct ucred *cred; 2739ec7b004SRick Macklem struct nfsmount *nmp; 2749ec7b004SRick Macklem struct nfsnode *np; 2759ec7b004SRick Macklem vm_page_t *pages; 2769ec7b004SRick Macklem 2779ec7b004SRick Macklem vp = ap->a_vp; 2789ec7b004SRick Macklem np = VTONFS(vp); 2799ec7b004SRick Macklem td = curthread; /* XXX */ 2807af1242aSRick Macklem /* Set the cred to n_writecred for the write rpcs. */ 2817af1242aSRick Macklem if (np->n_writecred != NULL) 2827af1242aSRick Macklem cred = crhold(np->n_writecred); 2837af1242aSRick Macklem else 2847af1242aSRick Macklem cred = crhold(curthread->td_ucred); /* XXX */ 2859ec7b004SRick Macklem nmp = VFSTONFS(vp->v_mount); 2869ec7b004SRick Macklem pages = ap->a_m; 2879ec7b004SRick Macklem count = ap->a_count; 2889ec7b004SRick Macklem rtvals = ap->a_rtvals; 2899ec7b004SRick Macklem npages = btoc(count); 2909ec7b004SRick Macklem offset = IDX_TO_OFF(pages[0]->pindex); 2919ec7b004SRick Macklem 2929ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 2939ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 2949ec7b004SRick Macklem (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 2959ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 2969ec7b004SRick Macklem (void)ncl_fsinfo(nmp, vp, cred, td); 2979ec7b004SRick Macklem } else 2989ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 2999ec7b004SRick Macklem 3005d85e12fSRick Macklem NFSLOCKNODE(np); 3019ec7b004SRick Macklem if (newnfs_directio_enable && !newnfs_directio_allow_mmap && 3029ec7b004SRick Macklem (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { 3035d85e12fSRick Macklem NFSUNLOCKNODE(np); 304ad600ac8SKonstantin Belousov printf("ncl_putpages: called on noncache-able vnode\n"); 3055d85e12fSRick Macklem NFSLOCKNODE(np); 3069ec7b004SRick Macklem } 3079ec7b004SRick Macklem /* 3089ec7b004SRick Macklem * When putting pages, do not extend file past EOF. 3099ec7b004SRick Macklem */ 3109ec7b004SRick Macklem if (offset + count > np->n_size) { 3119ec7b004SRick Macklem count = np->n_size - offset; 3129ec7b004SRick Macklem if (count < 0) 3139ec7b004SRick Macklem count = 0; 3149ec7b004SRick Macklem } 3155d85e12fSRick Macklem NFSUNLOCKNODE(np); 3169ec7b004SRick Macklem 317cc2c2622SKonstantin Belousov for (i = 0; i < npages; i++) 318cc2c2622SKonstantin Belousov rtvals[i] = VM_PAGER_ERROR; 319cc2c2622SKonstantin Belousov 32083c9dea1SGleb Smirnoff VM_CNT_INC(v_vnodeout); 32183c9dea1SGleb Smirnoff VM_CNT_ADD(v_vnodepgsout, count); 3229ec7b004SRick Macklem 323ea525259SKonstantin Belousov iov.iov_base = unmapped_buf; 3249ec7b004SRick Macklem iov.iov_len = count; 3259ec7b004SRick Macklem uio.uio_iov = &iov; 3269ec7b004SRick Macklem uio.uio_iovcnt = 1; 3279ec7b004SRick Macklem uio.uio_offset = offset; 3289ec7b004SRick Macklem uio.uio_resid = count; 329ea525259SKonstantin Belousov uio.uio_segflg = UIO_NOCOPY; 3309ec7b004SRick Macklem uio.uio_rw = UIO_WRITE; 3319ec7b004SRick Macklem uio.uio_td = td; 3329ec7b004SRick Macklem 333ea525259SKonstantin Belousov error = VOP_WRITE(vp, &uio, vnode_pager_putpages_ioflags(ap->a_sync), 334ea525259SKonstantin Belousov cred); 3357af1242aSRick Macklem crfree(cred); 3369ec7b004SRick Macklem 337555b7bb4SKonstantin Belousov if (error == 0 || !nfs_keep_dirty_on_error) { 338555b7bb4SKonstantin Belousov vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid, 339555b7bb4SKonstantin Belousov np->n_size - offset, npages * PAGE_SIZE); 340555b7bb4SKonstantin Belousov } 341ea525259SKonstantin Belousov return (rtvals[0]); 3429ec7b004SRick Macklem } 3439ec7b004SRick Macklem 3449ec7b004SRick Macklem /* 3459ec7b004SRick Macklem * For nfs, cache consistency can only be maintained approximately. 3469ec7b004SRick Macklem * Although RFC1094 does not specify the criteria, the following is 3479ec7b004SRick Macklem * believed to be compatible with the reference port. 3489ec7b004SRick Macklem * For nfs: 3499ec7b004SRick Macklem * If the file's modify time on the server has changed since the 3509ec7b004SRick Macklem * last read rpc or you have written to the file, 3519ec7b004SRick Macklem * you may have lost data cache consistency with the 3529ec7b004SRick Macklem * server, so flush all of the file's data out of the cache. 3539ec7b004SRick Macklem * Then force a getattr rpc to ensure that you have up to date 3549ec7b004SRick Macklem * attributes. 3559ec7b004SRick Macklem * NB: This implies that cache data can be read when up to 3569ec7b004SRick Macklem * NFS_ATTRTIMEO seconds out of date. If you find that you need current 3579ec7b004SRick Macklem * attributes this could be forced by setting n_attrstamp to 0 before 3589ec7b004SRick Macklem * the VOP_GETATTR() call. 3599ec7b004SRick Macklem */ 3609ec7b004SRick Macklem static inline int 3619ec7b004SRick Macklem nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred) 3629ec7b004SRick Macklem { 3639ec7b004SRick Macklem int error = 0; 3649ec7b004SRick Macklem struct vattr vattr; 3659ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 366e5cffdd3SKonstantin Belousov bool old_lock; 3679ec7b004SRick Macklem 3689ec7b004SRick Macklem /* 369*ff4480baSKonstantin Belousov * Ensure the exclusive access to the node before checking 370e5cffdd3SKonstantin Belousov * whether the cache is consistent. 3719ec7b004SRick Macklem */ 372e5cffdd3SKonstantin Belousov old_lock = ncl_excl_start(vp); 3735d85e12fSRick Macklem NFSLOCKNODE(np); 3749ec7b004SRick Macklem if (np->n_flag & NMODIFIED) { 3755d85e12fSRick Macklem NFSUNLOCKNODE(np); 3769ec7b004SRick Macklem if (vp->v_type != VREG) { 3779ec7b004SRick Macklem if (vp->v_type != VDIR) 3789ec7b004SRick Macklem panic("nfs: bioread, not dir"); 3799ec7b004SRick Macklem ncl_invaldir(vp); 380e5cffdd3SKonstantin Belousov error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1); 38148fe9263SKonstantin Belousov if (error != 0) 3829ec7b004SRick Macklem goto out; 3839ec7b004SRick Macklem } 3849ec7b004SRick Macklem np->n_attrstamp = 0; 3858f0e65c9SRick Macklem KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3869ec7b004SRick Macklem error = VOP_GETATTR(vp, &vattr, cred); 3879ec7b004SRick Macklem if (error) 3889ec7b004SRick Macklem goto out; 3895d85e12fSRick Macklem NFSLOCKNODE(np); 3909ec7b004SRick Macklem np->n_mtime = vattr.va_mtime; 3915d85e12fSRick Macklem NFSUNLOCKNODE(np); 3929ec7b004SRick Macklem } else { 3935d85e12fSRick Macklem NFSUNLOCKNODE(np); 3949ec7b004SRick Macklem error = VOP_GETATTR(vp, &vattr, cred); 3959ec7b004SRick Macklem if (error) 396e5cffdd3SKonstantin Belousov goto out; 3975d85e12fSRick Macklem NFSLOCKNODE(np); 3989ec7b004SRick Macklem if ((np->n_flag & NSIZECHANGED) 3999ec7b004SRick Macklem || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) { 4005d85e12fSRick Macklem NFSUNLOCKNODE(np); 4019ec7b004SRick Macklem if (vp->v_type == VDIR) 4029ec7b004SRick Macklem ncl_invaldir(vp); 403e5cffdd3SKonstantin Belousov error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1); 40448fe9263SKonstantin Belousov if (error != 0) 4059ec7b004SRick Macklem goto out; 4065d85e12fSRick Macklem NFSLOCKNODE(np); 4079ec7b004SRick Macklem np->n_mtime = vattr.va_mtime; 4089ec7b004SRick Macklem np->n_flag &= ~NSIZECHANGED; 4099ec7b004SRick Macklem } 4105d85e12fSRick Macklem NFSUNLOCKNODE(np); 4119ec7b004SRick Macklem } 4129ec7b004SRick Macklem out: 413e5cffdd3SKonstantin Belousov ncl_excl_finish(vp, old_lock); 41448fe9263SKonstantin Belousov return (error); 4159ec7b004SRick Macklem } 4169ec7b004SRick Macklem 417503f72a8SKonstantin Belousov static bool 418503f72a8SKonstantin Belousov ncl_bioread_dora(struct vnode *vp) 419503f72a8SKonstantin Belousov { 420503f72a8SKonstantin Belousov vm_object_t obj; 421503f72a8SKonstantin Belousov 422503f72a8SKonstantin Belousov obj = vp->v_object; 423503f72a8SKonstantin Belousov if (obj == NULL) 424503f72a8SKonstantin Belousov return (true); 425503f72a8SKonstantin Belousov return (!vm_object_mightbedirty(vp->v_object) && 426503f72a8SKonstantin Belousov vp->v_object->un_pager.vnp.writemappings == 0); 427503f72a8SKonstantin Belousov } 428503f72a8SKonstantin Belousov 4299ec7b004SRick Macklem /* 4309ec7b004SRick Macklem * Vnode op for read using bio 4319ec7b004SRick Macklem */ 4329ec7b004SRick Macklem int 4339ec7b004SRick Macklem ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) 4349ec7b004SRick Macklem { 4359ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 4369ec7b004SRick Macklem struct buf *bp, *rabp; 4379ec7b004SRick Macklem struct thread *td; 4389ec7b004SRick Macklem struct nfsmount *nmp = VFSTONFS(vp->v_mount); 4399ec7b004SRick Macklem daddr_t lbn, rabn; 4400ff51c98SKonstantin Belousov int biosize, bcount, error, i, n, nra, on, save2, seqcount; 441b29b9bcbSRick Macklem off_t tmp_off; 4429ec7b004SRick Macklem 443b38f7723SKonstantin Belousov KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode")); 4449ec7b004SRick Macklem if (uio->uio_resid == 0) 4459ec7b004SRick Macklem return (0); 4469ec7b004SRick Macklem if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */ 4479ec7b004SRick Macklem return (EINVAL); 4489ec7b004SRick Macklem td = uio->uio_td; 4499ec7b004SRick Macklem 4509ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 4519ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 4529ec7b004SRick Macklem (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 4539ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 4549ec7b004SRick Macklem (void)ncl_fsinfo(nmp, vp, cred, td); 4559ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 4569ec7b004SRick Macklem } 4579ec7b004SRick Macklem if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0) 4589ec7b004SRick Macklem (void) newnfs_iosize(nmp); 4599ec7b004SRick Macklem 460b29b9bcbSRick Macklem tmp_off = uio->uio_offset + uio->uio_resid; 4619ec7b004SRick Macklem if (vp->v_type != VDIR && 462b29b9bcbSRick Macklem (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) { 463b29b9bcbSRick Macklem mtx_unlock(&nmp->nm_mtx); 4649ec7b004SRick Macklem return (EFBIG); 465b29b9bcbSRick Macklem } 466b29b9bcbSRick Macklem mtx_unlock(&nmp->nm_mtx); 4679ec7b004SRick Macklem 4689ec7b004SRick Macklem if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) 4699ec7b004SRick Macklem /* No caching/ no readaheads. Just read data into the user buffer */ 4709ec7b004SRick Macklem return ncl_readrpc(vp, uio, cred); 4719ec7b004SRick Macklem 4720ff51c98SKonstantin Belousov n = 0; 4730ff51c98SKonstantin Belousov on = 0; 4747f763fc3SRick Macklem biosize = vp->v_bufobj.bo_bsize; 4759ec7b004SRick Macklem seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); 4769ec7b004SRick Macklem 4779ec7b004SRick Macklem error = nfs_bioread_check_cons(vp, td, cred); 4789ec7b004SRick Macklem if (error) 4799ec7b004SRick Macklem return error; 4809ec7b004SRick Macklem 4810ff51c98SKonstantin Belousov save2 = curthread_pflags2_set(TDP2_SBPAGES); 4829ec7b004SRick Macklem do { 4839ec7b004SRick Macklem u_quad_t nsize; 4849ec7b004SRick Macklem 4855d85e12fSRick Macklem NFSLOCKNODE(np); 4869ec7b004SRick Macklem nsize = np->n_size; 4875d85e12fSRick Macklem NFSUNLOCKNODE(np); 4889ec7b004SRick Macklem 4899ec7b004SRick Macklem switch (vp->v_type) { 4909ec7b004SRick Macklem case VREG: 4911b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.biocache_reads); 4929ec7b004SRick Macklem lbn = uio->uio_offset / biosize; 49396ecfd98SEd Maste on = uio->uio_offset - (lbn * biosize); 4949ec7b004SRick Macklem 4959ec7b004SRick Macklem /* 49670dc6b2cSKonstantin Belousov * Start the read ahead(s), as required. Do not do 49770dc6b2cSKonstantin Belousov * read-ahead if there are writeable mappings, since 49870dc6b2cSKonstantin Belousov * unlocked read by nfsiod could obliterate changes 49970dc6b2cSKonstantin Belousov * done by userspace. 5009ec7b004SRick Macklem */ 501503f72a8SKonstantin Belousov if (nmp->nm_readahead > 0 && ncl_bioread_dora(vp)) { 5029ec7b004SRick Macklem for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && 5039ec7b004SRick Macklem (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { 5049ec7b004SRick Macklem rabn = lbn + 1 + nra; 5059ec7b004SRick Macklem if (incore(&vp->v_bufobj, rabn) == NULL) { 5069ec7b004SRick Macklem rabp = nfs_getcacheblk(vp, rabn, biosize, td); 5079ec7b004SRick Macklem if (!rabp) { 5089ec7b004SRick Macklem error = newnfs_sigintr(nmp, td); 5090ff51c98SKonstantin Belousov if (error == 0) 5100ff51c98SKonstantin Belousov error = EINTR; 5110ff51c98SKonstantin Belousov goto out; 5129ec7b004SRick Macklem } 5139ec7b004SRick Macklem if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { 5149ec7b004SRick Macklem rabp->b_flags |= B_ASYNC; 5159ec7b004SRick Macklem rabp->b_iocmd = BIO_READ; 5169ec7b004SRick Macklem vfs_busy_pages(rabp, 0); 5179ec7b004SRick Macklem if (ncl_asyncio(nmp, rabp, cred, td)) { 5189ec7b004SRick Macklem rabp->b_flags |= B_INVAL; 5199ec7b004SRick Macklem rabp->b_ioflags |= BIO_ERROR; 5209ec7b004SRick Macklem vfs_unbusy_pages(rabp); 5219ec7b004SRick Macklem brelse(rabp); 5229ec7b004SRick Macklem break; 5239ec7b004SRick Macklem } 5249ec7b004SRick Macklem } else { 5259ec7b004SRick Macklem brelse(rabp); 5269ec7b004SRick Macklem } 5279ec7b004SRick Macklem } 5289ec7b004SRick Macklem } 5299ec7b004SRick Macklem } 5309ec7b004SRick Macklem 5319ec7b004SRick Macklem /* Note that bcount is *not* DEV_BSIZE aligned. */ 5329ec7b004SRick Macklem bcount = biosize; 5339ec7b004SRick Macklem if ((off_t)lbn * biosize >= nsize) { 5349ec7b004SRick Macklem bcount = 0; 5359ec7b004SRick Macklem } else if ((off_t)(lbn + 1) * biosize > nsize) { 5369ec7b004SRick Macklem bcount = nsize - (off_t)lbn * biosize; 5379ec7b004SRick Macklem } 5389ec7b004SRick Macklem bp = nfs_getcacheblk(vp, lbn, bcount, td); 5399ec7b004SRick Macklem 5409ec7b004SRick Macklem if (!bp) { 5419ec7b004SRick Macklem error = newnfs_sigintr(nmp, td); 5420ff51c98SKonstantin Belousov if (error == 0) 5430ff51c98SKonstantin Belousov error = EINTR; 5440ff51c98SKonstantin Belousov goto out; 5459ec7b004SRick Macklem } 5469ec7b004SRick Macklem 5479ec7b004SRick Macklem /* 5489ec7b004SRick Macklem * If B_CACHE is not set, we must issue the read. If this 5499ec7b004SRick Macklem * fails, we return an error. 5509ec7b004SRick Macklem */ 5519ec7b004SRick Macklem 5529ec7b004SRick Macklem if ((bp->b_flags & B_CACHE) == 0) { 5539ec7b004SRick Macklem bp->b_iocmd = BIO_READ; 5549ec7b004SRick Macklem vfs_busy_pages(bp, 0); 55567c5c2d2SRick Macklem error = ncl_doio(vp, bp, cred, td, 0); 5569ec7b004SRick Macklem if (error) { 5579ec7b004SRick Macklem brelse(bp); 5580ff51c98SKonstantin Belousov goto out; 5599ec7b004SRick Macklem } 5609ec7b004SRick Macklem } 5619ec7b004SRick Macklem 5629ec7b004SRick Macklem /* 5639ec7b004SRick Macklem * on is the offset into the current bp. Figure out how many 5649ec7b004SRick Macklem * bytes we can copy out of the bp. Note that bcount is 5659ec7b004SRick Macklem * NOT DEV_BSIZE aligned. 5669ec7b004SRick Macklem * 5679ec7b004SRick Macklem * Then figure out how many bytes we can copy into the uio. 5689ec7b004SRick Macklem */ 5699ec7b004SRick Macklem 5709ec7b004SRick Macklem n = 0; 5719ec7b004SRick Macklem if (on < bcount) 572526d0bd5SKonstantin Belousov n = MIN((unsigned)(bcount - on), uio->uio_resid); 5739ec7b004SRick Macklem break; 5749ec7b004SRick Macklem case VLNK: 5751b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks); 5769ec7b004SRick Macklem bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td); 5779ec7b004SRick Macklem if (!bp) { 5789ec7b004SRick Macklem error = newnfs_sigintr(nmp, td); 5790ff51c98SKonstantin Belousov if (error == 0) 5800ff51c98SKonstantin Belousov error = EINTR; 5810ff51c98SKonstantin Belousov goto out; 5829ec7b004SRick Macklem } 5839ec7b004SRick Macklem if ((bp->b_flags & B_CACHE) == 0) { 5849ec7b004SRick Macklem bp->b_iocmd = BIO_READ; 5859ec7b004SRick Macklem vfs_busy_pages(bp, 0); 58667c5c2d2SRick Macklem error = ncl_doio(vp, bp, cred, td, 0); 5879ec7b004SRick Macklem if (error) { 5889ec7b004SRick Macklem bp->b_ioflags |= BIO_ERROR; 5899ec7b004SRick Macklem brelse(bp); 5900ff51c98SKonstantin Belousov goto out; 5919ec7b004SRick Macklem } 5929ec7b004SRick Macklem } 593526d0bd5SKonstantin Belousov n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); 5949ec7b004SRick Macklem on = 0; 5959ec7b004SRick Macklem break; 5969ec7b004SRick Macklem case VDIR: 5971b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs); 59815bed8c4SRick Macklem NFSLOCKNODE(np); 5999ec7b004SRick Macklem if (np->n_direofoffset 6009ec7b004SRick Macklem && uio->uio_offset >= np->n_direofoffset) { 60115bed8c4SRick Macklem NFSUNLOCKNODE(np); 6020ff51c98SKonstantin Belousov error = 0; 6030ff51c98SKonstantin Belousov goto out; 6049ec7b004SRick Macklem } 60515bed8c4SRick Macklem NFSUNLOCKNODE(np); 6069ec7b004SRick Macklem lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ; 6079ec7b004SRick Macklem on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); 6089ec7b004SRick Macklem bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td); 6099ec7b004SRick Macklem if (!bp) { 6109ec7b004SRick Macklem error = newnfs_sigintr(nmp, td); 6110ff51c98SKonstantin Belousov if (error == 0) 6120ff51c98SKonstantin Belousov error = EINTR; 6130ff51c98SKonstantin Belousov goto out; 6149ec7b004SRick Macklem } 6159ec7b004SRick Macklem if ((bp->b_flags & B_CACHE) == 0) { 6169ec7b004SRick Macklem bp->b_iocmd = BIO_READ; 6179ec7b004SRick Macklem vfs_busy_pages(bp, 0); 61867c5c2d2SRick Macklem error = ncl_doio(vp, bp, cred, td, 0); 6199ec7b004SRick Macklem if (error) { 6209ec7b004SRick Macklem brelse(bp); 6219ec7b004SRick Macklem } 6229ec7b004SRick Macklem while (error == NFSERR_BAD_COOKIE) { 6239ec7b004SRick Macklem ncl_invaldir(vp); 6249ec7b004SRick Macklem error = ncl_vinvalbuf(vp, 0, td, 1); 62548fe9263SKonstantin Belousov 6269ec7b004SRick Macklem /* 6279ec7b004SRick Macklem * Yuck! The directory has been modified on the 6289ec7b004SRick Macklem * server. The only way to get the block is by 6299ec7b004SRick Macklem * reading from the beginning to get all the 6309ec7b004SRick Macklem * offset cookies. 6319ec7b004SRick Macklem * 6329ec7b004SRick Macklem * Leave the last bp intact unless there is an error. 6339ec7b004SRick Macklem * Loop back up to the while if the error is another 6349ec7b004SRick Macklem * NFSERR_BAD_COOKIE (double yuch!). 6359ec7b004SRick Macklem */ 6369ec7b004SRick Macklem for (i = 0; i <= lbn && !error; i++) { 63715bed8c4SRick Macklem NFSLOCKNODE(np); 6389ec7b004SRick Macklem if (np->n_direofoffset 6390ff51c98SKonstantin Belousov && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) { 64015bed8c4SRick Macklem NFSUNLOCKNODE(np); 6410ff51c98SKonstantin Belousov error = 0; 6420ff51c98SKonstantin Belousov goto out; 6430ff51c98SKonstantin Belousov } 64415bed8c4SRick Macklem NFSUNLOCKNODE(np); 6459ec7b004SRick Macklem bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td); 6469ec7b004SRick Macklem if (!bp) { 6479ec7b004SRick Macklem error = newnfs_sigintr(nmp, td); 6480ff51c98SKonstantin Belousov if (error == 0) 6490ff51c98SKonstantin Belousov error = EINTR; 6500ff51c98SKonstantin Belousov goto out; 6519ec7b004SRick Macklem } 6529ec7b004SRick Macklem if ((bp->b_flags & B_CACHE) == 0) { 6539ec7b004SRick Macklem bp->b_iocmd = BIO_READ; 6549ec7b004SRick Macklem vfs_busy_pages(bp, 0); 65567c5c2d2SRick Macklem error = ncl_doio(vp, bp, cred, td, 0); 6569ec7b004SRick Macklem /* 6579ec7b004SRick Macklem * no error + B_INVAL == directory EOF, 6589ec7b004SRick Macklem * use the block. 6599ec7b004SRick Macklem */ 6609ec7b004SRick Macklem if (error == 0 && (bp->b_flags & B_INVAL)) 6619ec7b004SRick Macklem break; 6629ec7b004SRick Macklem } 6639ec7b004SRick Macklem /* 6649ec7b004SRick Macklem * An error will throw away the block and the 6659ec7b004SRick Macklem * for loop will break out. If no error and this 6669ec7b004SRick Macklem * is not the block we want, we throw away the 6679ec7b004SRick Macklem * block and go for the next one via the for loop. 6689ec7b004SRick Macklem */ 6699ec7b004SRick Macklem if (error || i < lbn) 6709ec7b004SRick Macklem brelse(bp); 6719ec7b004SRick Macklem } 6729ec7b004SRick Macklem } 6739ec7b004SRick Macklem /* 6749ec7b004SRick Macklem * The above while is repeated if we hit another cookie 6759ec7b004SRick Macklem * error. If we hit an error and it wasn't a cookie error, 6769ec7b004SRick Macklem * we give up. 6779ec7b004SRick Macklem */ 6789ec7b004SRick Macklem if (error) 6790ff51c98SKonstantin Belousov goto out; 6809ec7b004SRick Macklem } 6819ec7b004SRick Macklem 6829ec7b004SRick Macklem /* 6839ec7b004SRick Macklem * Unlike VREG files, whos buffer size ( bp->b_bcount ) is 6849ec7b004SRick Macklem * chopped for the EOF condition, we cannot tell how large 6859ec7b004SRick Macklem * NFS directories are going to be until we hit EOF. So 6869ec7b004SRick Macklem * an NFS directory buffer is *not* chopped to its EOF. Now, 6879ec7b004SRick Macklem * it just so happens that b_resid will effectively chop it 6889ec7b004SRick Macklem * to EOF. *BUT* this information is lost if the buffer goes 6899ec7b004SRick Macklem * away and is reconstituted into a B_CACHE state ( due to 6909ec7b004SRick Macklem * being VMIO ) later. So we keep track of the directory eof 6919ec7b004SRick Macklem * in np->n_direofoffset and chop it off as an extra step 6929ec7b004SRick Macklem * right here. 6939ec7b004SRick Macklem */ 694fbe96559SRick Macklem NFSLOCKNODE(np); 6959ec7b004SRick Macklem n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); 6969ec7b004SRick Macklem if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset) 6979ec7b004SRick Macklem n = np->n_direofoffset - uio->uio_offset; 69815bed8c4SRick Macklem NFSUNLOCKNODE(np); 6999ec7b004SRick Macklem break; 7009ec7b004SRick Macklem default: 701ad600ac8SKonstantin Belousov printf(" ncl_bioread: type %x unexpected\n", vp->v_type); 7029ec7b004SRick Macklem bp = NULL; 7039ec7b004SRick Macklem break; 70474b8d63dSPedro F. Giffuni } 7059ec7b004SRick Macklem 7069ec7b004SRick Macklem if (n > 0) { 707ddfc47fdSKonstantin Belousov error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio); 7089ec7b004SRick Macklem } 7099ec7b004SRick Macklem if (vp->v_type == VLNK) 7109ec7b004SRick Macklem n = 0; 7119ec7b004SRick Macklem if (bp != NULL) 7129ec7b004SRick Macklem brelse(bp); 7139ec7b004SRick Macklem } while (error == 0 && uio->uio_resid > 0 && n > 0); 7140ff51c98SKonstantin Belousov out: 7150ff51c98SKonstantin Belousov curthread_pflags2_restore(save2); 7160ff51c98SKonstantin Belousov if ((curthread->td_pflags2 & TDP2_SBPAGES) == 0) { 7170ff51c98SKonstantin Belousov NFSLOCKNODE(np); 7180ff51c98SKonstantin Belousov ncl_pager_setsize(vp, NULL); 7190ff51c98SKonstantin Belousov } 7209ec7b004SRick Macklem return (error); 7219ec7b004SRick Macklem } 7229ec7b004SRick Macklem 7239ec7b004SRick Macklem /* 7249ec7b004SRick Macklem * The NFS write path cannot handle iovecs with len > 1. So we need to 7259ec7b004SRick Macklem * break up iovecs accordingly (restricting them to wsize). 7269ec7b004SRick Macklem * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf). 7279ec7b004SRick Macklem * For the ASYNC case, 2 copies are needed. The first a copy from the 7289ec7b004SRick Macklem * user buffer to a staging buffer and then a second copy from the staging 7299ec7b004SRick Macklem * buffer to mbufs. This can be optimized by copying from the user buffer 7309ec7b004SRick Macklem * directly into mbufs and passing the chain down, but that requires a 7319ec7b004SRick Macklem * fair amount of re-working of the relevant codepaths (and can be done 7329ec7b004SRick Macklem * later). 7339ec7b004SRick Macklem */ 7349ec7b004SRick Macklem static int 735c45d934fSRick Macklem nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred, 736c45d934fSRick Macklem int ioflag) 7379ec7b004SRick Macklem { 73803a39a17SRick Macklem struct uio uio; 73903a39a17SRick Macklem struct iovec iov; 7409ec7b004SRick Macklem struct nfsmount *nmp = VFSTONFS(vp->v_mount); 7419ec7b004SRick Macklem struct thread *td = uiop->uio_td; 74203a39a17SRick Macklem int error, iomode, must_commit, size, wsize; 7439ec7b004SRick Macklem 74403a39a17SRick Macklem KASSERT((ioflag & IO_SYNC) != 0, ("nfs_directio_write: not sync")); 7459ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 7469ec7b004SRick Macklem wsize = nmp->nm_wsize; 7479ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 7489ec7b004SRick Macklem while (uiop->uio_resid > 0) { 749526d0bd5SKonstantin Belousov size = MIN(uiop->uio_resid, wsize); 750526d0bd5SKonstantin Belousov size = MIN(uiop->uio_iov->iov_len, size); 7519ec7b004SRick Macklem iov.iov_base = uiop->uio_iov->iov_base; 7529ec7b004SRick Macklem iov.iov_len = size; 7539ec7b004SRick Macklem uio.uio_iov = &iov; 7549ec7b004SRick Macklem uio.uio_iovcnt = 1; 7559ec7b004SRick Macklem uio.uio_offset = uiop->uio_offset; 7569ec7b004SRick Macklem uio.uio_resid = size; 757c1970a7eSRick Macklem uio.uio_segflg = uiop->uio_segflg; 7589ec7b004SRick Macklem uio.uio_rw = UIO_WRITE; 7599ec7b004SRick Macklem uio.uio_td = td; 7609ec7b004SRick Macklem iomode = NFSWRITE_FILESYNC; 761ead50c94SRick Macklem /* 762ead50c94SRick Macklem * When doing direct I/O we do not care if the 763ead50c94SRick Macklem * server's write verifier has changed, but we 764ead50c94SRick Macklem * do not want to update the verifier if it has 765ead50c94SRick Macklem * changed, since that hides the change from 766ead50c94SRick Macklem * writes being done through the buffer cache. 767ead50c94SRick Macklem * By passing must_commit in set to two, the code 768ead50c94SRick Macklem * in nfsrpc_writerpc() will not update the 769ead50c94SRick Macklem * verifier on the mount point. 770ead50c94SRick Macklem */ 771ead50c94SRick Macklem must_commit = 2; 7729ec7b004SRick Macklem error = ncl_writerpc(vp, &uio, cred, &iomode, 7735218d82cSRick Macklem &must_commit, 0, ioflag); 77403a39a17SRick Macklem KASSERT(must_commit == 2, 775ead50c94SRick Macklem ("ncl_directio_write: Updated write verifier")); 77603a39a17SRick Macklem if (error != 0) 7779ec7b004SRick Macklem return (error); 778ead50c94SRick Macklem if (iomode != NFSWRITE_FILESYNC) 779ead50c94SRick Macklem printf("nfs_directio_write: Broken server " 780ead50c94SRick Macklem "did not reply FILE_SYNC\n"); 7819ec7b004SRick Macklem uiop->uio_offset += size; 7829ec7b004SRick Macklem uiop->uio_resid -= size; 7839ec7b004SRick Macklem if (uiop->uio_iov->iov_len <= size) { 7849ec7b004SRick Macklem uiop->uio_iovcnt--; 7859ec7b004SRick Macklem uiop->uio_iov++; 7869ec7b004SRick Macklem } else { 7879ec7b004SRick Macklem uiop->uio_iov->iov_base = 7889ec7b004SRick Macklem (char *)uiop->uio_iov->iov_base + size; 7899ec7b004SRick Macklem uiop->uio_iov->iov_len -= size; 7909ec7b004SRick Macklem } 7919ec7b004SRick Macklem } 7929ec7b004SRick Macklem return (0); 7939ec7b004SRick Macklem } 7949ec7b004SRick Macklem 7959ec7b004SRick Macklem /* 7969ec7b004SRick Macklem * Vnode op for write using bio 7979ec7b004SRick Macklem */ 7989ec7b004SRick Macklem int 7999ec7b004SRick Macklem ncl_write(struct vop_write_args *ap) 8009ec7b004SRick Macklem { 8019ec7b004SRick Macklem int biosize; 8029ec7b004SRick Macklem struct uio *uio = ap->a_uio; 8039ec7b004SRick Macklem struct thread *td = uio->uio_td; 8049ec7b004SRick Macklem struct vnode *vp = ap->a_vp; 8059ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 8069ec7b004SRick Macklem struct ucred *cred = ap->a_cred; 8079ec7b004SRick Macklem int ioflag = ap->a_ioflag; 8089ec7b004SRick Macklem struct buf *bp; 8099ec7b004SRick Macklem struct vattr vattr; 8109ec7b004SRick Macklem struct nfsmount *nmp = VFSTONFS(vp->v_mount); 8119ec7b004SRick Macklem daddr_t lbn; 812cf766161SRick Macklem int bcount, noncontig_write, obcount; 813aa8c1f8dSKonstantin Belousov int bp_cached, n, on, error = 0, error1, save2, wouldcommit; 814bfb68a9eSKonstantin Belousov size_t orig_resid, local_resid; 815bfb68a9eSKonstantin Belousov off_t orig_size, tmp_off; 81650dcff08SRick Macklem struct timespec ts; 8179ec7b004SRick Macklem 818b38f7723SKonstantin Belousov KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode")); 819b38f7723SKonstantin Belousov KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, 820b38f7723SKonstantin Belousov ("ncl_write proc")); 8219ec7b004SRick Macklem if (vp->v_type != VREG) 8229ec7b004SRick Macklem return (EIO); 8235d85e12fSRick Macklem NFSLOCKNODE(np); 8249ec7b004SRick Macklem if (np->n_flag & NWRITEERR) { 8259ec7b004SRick Macklem np->n_flag &= ~NWRITEERR; 8265d85e12fSRick Macklem NFSUNLOCKNODE(np); 8279ec7b004SRick Macklem return (np->n_error); 8289ec7b004SRick Macklem } else 8295d85e12fSRick Macklem NFSUNLOCKNODE(np); 8309ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 8319ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 8329ec7b004SRick Macklem (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 8339ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 8349ec7b004SRick Macklem (void)ncl_fsinfo(nmp, vp, cred, td); 8359ec7b004SRick Macklem mtx_lock(&nmp->nm_mtx); 8369ec7b004SRick Macklem } 8379ec7b004SRick Macklem if (nmp->nm_wsize == 0) 8389ec7b004SRick Macklem (void) newnfs_iosize(nmp); 8399ec7b004SRick Macklem mtx_unlock(&nmp->nm_mtx); 8409ec7b004SRick Macklem 8419ec7b004SRick Macklem /* 8429ec7b004SRick Macklem * Synchronously flush pending buffers if we are in synchronous 8439ec7b004SRick Macklem * mode or if we are appending. 8449ec7b004SRick Macklem */ 845e4df1036SRick Macklem if ((ioflag & IO_APPEND) || ((ioflag & IO_SYNC) && (np->n_flag & 846e4df1036SRick Macklem NMODIFIED))) { 8479ec7b004SRick Macklem /* 848e4df1036SRick Macklem * For the case where IO_APPEND is being done using a 849e4df1036SRick Macklem * direct output (to the NFS server) RPC and 850e4df1036SRick Macklem * newnfs_directio_enable is 0, all buffer cache buffers, 851e4df1036SRick Macklem * including ones not modified, must be invalidated. 852e4df1036SRick Macklem * This ensures that stale data is not read out of the 853e4df1036SRick Macklem * buffer cache. The call also invalidates all mapped 854e4df1036SRick Macklem * pages and, since the exclusive lock is held on the vnode, 855e4df1036SRick Macklem * new pages cannot be faulted in. 856e4df1036SRick Macklem * 857e4df1036SRick Macklem * For the case where newnfs_directio_enable is set 858e4df1036SRick Macklem * (which is not the default), it is not obvious that 859e4df1036SRick Macklem * stale data should be left in the buffer cache, but 860e4df1036SRick Macklem * the code has been this way for over a decade without 861e4df1036SRick Macklem * complaints. Note that, unlike doing IO_APPEND via 862e4df1036SRick Macklem * a direct write RPC when newnfs_directio_enable is not set, 863e4df1036SRick Macklem * when newnfs_directio_enable is set, reading is done via 864e4df1036SRick Macklem * direct to NFS server RPCs as well. 8659ec7b004SRick Macklem */ 8669ec7b004SRick Macklem np->n_attrstamp = 0; 8678f0e65c9SRick Macklem KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 86848fe9263SKonstantin Belousov error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag & 86948fe9263SKonstantin Belousov IO_VMIO) != 0 ? V_VMIO : 0), td, 1); 87048fe9263SKonstantin Belousov if (error != 0) 8719ec7b004SRick Macklem return (error); 8729ec7b004SRick Macklem } 8739ec7b004SRick Macklem 874bfb68a9eSKonstantin Belousov orig_resid = uio->uio_resid; 8755d85e12fSRick Macklem NFSLOCKNODE(np); 876bfb68a9eSKonstantin Belousov orig_size = np->n_size; 8775d85e12fSRick Macklem NFSUNLOCKNODE(np); 878bfb68a9eSKonstantin Belousov 8799ec7b004SRick Macklem /* 8809ec7b004SRick Macklem * If IO_APPEND then load uio_offset. We restart here if we cannot 8819ec7b004SRick Macklem * get the append lock. 8829ec7b004SRick Macklem */ 8839ec7b004SRick Macklem if (ioflag & IO_APPEND) { 8845218d82cSRick Macklem /* 8855218d82cSRick Macklem * For NFSv4, the AppendWrite will Verify the size against 8865218d82cSRick Macklem * the file's size on the server. If not the same, the 8875218d82cSRick Macklem * write will then be retried, using the file size returned 8885218d82cSRick Macklem * by the AppendWrite. However, for NFSv2 and NFSv3, the 8895218d82cSRick Macklem * size must be acquired here via a Getattr RPC. 8905218d82cSRick Macklem * The AppendWrite is not done for a pNFS mount. 8915218d82cSRick Macklem */ 8925218d82cSRick Macklem if (!NFSHASNFSV4(nmp) || NFSHASPNFS(nmp)) { 8939ec7b004SRick Macklem np->n_attrstamp = 0; 8948f0e65c9SRick Macklem KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 8959ec7b004SRick Macklem error = VOP_GETATTR(vp, &vattr, cred); 8969ec7b004SRick Macklem if (error) 8979ec7b004SRick Macklem return (error); 8985218d82cSRick Macklem } 8995d85e12fSRick Macklem NFSLOCKNODE(np); 9009ec7b004SRick Macklem uio->uio_offset = np->n_size; 9015d85e12fSRick Macklem NFSUNLOCKNODE(np); 9029ec7b004SRick Macklem } 9039ec7b004SRick Macklem 9049ec7b004SRick Macklem if (uio->uio_offset < 0) 9059ec7b004SRick Macklem return (EINVAL); 906b29b9bcbSRick Macklem tmp_off = uio->uio_offset + uio->uio_resid; 90724e2bcc0SRick Macklem if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset) 9089ec7b004SRick Macklem return (EFBIG); 9099ec7b004SRick Macklem if (uio->uio_resid == 0) 9109ec7b004SRick Macklem return (0); 9119ec7b004SRick Macklem 912b70042adSRick Macklem /* 913e4df1036SRick Macklem * Do IO_APPEND writing via a synchronous direct write. 914e4df1036SRick Macklem * This can result in a significant performance improvement. 915b70042adSRick Macklem */ 916e4df1036SRick Macklem if ((newnfs_directio_enable && (ioflag & IO_DIRECT)) || 917e4df1036SRick Macklem (ioflag & IO_APPEND)) { 918e4df1036SRick Macklem /* 919e4df1036SRick Macklem * Direct writes to the server must be done NFSWRITE_FILESYNC, 920e4df1036SRick Macklem * because the write data is not cached and, therefore, the 921e4df1036SRick Macklem * write cannot be redone after a server reboot. 922e4df1036SRick Macklem * Set IO_SYNC to make this happen. 923e4df1036SRick Macklem */ 924867c27c2SRick Macklem ioflag |= IO_SYNC; 925e4df1036SRick Macklem return (nfs_directio_write(vp, uio, cred, ioflag)); 926867c27c2SRick Macklem } 9279ec7b004SRick Macklem 9289ec7b004SRick Macklem /* 9299ec7b004SRick Macklem * Maybe this should be above the vnode op call, but so long as 9309ec7b004SRick Macklem * file servers have no limits, i don't think it matters 9319ec7b004SRick Macklem */ 932cc65a412SKonstantin Belousov error = vn_rlimit_fsize(vp, uio, td); 933cc65a412SKonstantin Belousov if (error != 0) 934cc65a412SKonstantin Belousov return (error); 9359ec7b004SRick Macklem 936aa8c1f8dSKonstantin Belousov save2 = curthread_pflags2_set(TDP2_SBPAGES); 9377f763fc3SRick Macklem biosize = vp->v_bufobj.bo_bsize; 9389ec7b004SRick Macklem /* 9399ec7b004SRick Macklem * Find all of this file's B_NEEDCOMMIT buffers. If our writes 9409ec7b004SRick Macklem * would exceed the local maximum per-file write commit size when 9419ec7b004SRick Macklem * combined with those, we must decide whether to flush, 9429ec7b004SRick Macklem * go synchronous, or return error. We don't bother checking 9439ec7b004SRick Macklem * IO_UNIT -- we just make all writes atomic anyway, as there's 9449ec7b004SRick Macklem * no point optimizing for something that really won't ever happen. 9459ec7b004SRick Macklem */ 946a87627b2SAlexander Motin wouldcommit = 0; 9479ec7b004SRick Macklem if (!(ioflag & IO_SYNC)) { 9489ec7b004SRick Macklem int nflag; 9499ec7b004SRick Macklem 9505d85e12fSRick Macklem NFSLOCKNODE(np); 9519ec7b004SRick Macklem nflag = np->n_flag; 9525d85e12fSRick Macklem NFSUNLOCKNODE(np); 953a87627b2SAlexander Motin if (nflag & NMODIFIED) { 9549ec7b004SRick Macklem BO_LOCK(&vp->v_bufobj); 9559ec7b004SRick Macklem if (vp->v_bufobj.bo_dirty.bv_cnt != 0) { 9569ec7b004SRick Macklem TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, 9579ec7b004SRick Macklem b_bobufs) { 9589ec7b004SRick Macklem if (bp->b_flags & B_NEEDCOMMIT) 9599ec7b004SRick Macklem wouldcommit += bp->b_bcount; 9609ec7b004SRick Macklem } 9619ec7b004SRick Macklem } 9629ec7b004SRick Macklem BO_UNLOCK(&vp->v_bufobj); 9639ec7b004SRick Macklem } 9649ec7b004SRick Macklem } 9659ec7b004SRick Macklem 9669ec7b004SRick Macklem do { 967a87627b2SAlexander Motin if (!(ioflag & IO_SYNC)) { 968a87627b2SAlexander Motin wouldcommit += biosize; 969a87627b2SAlexander Motin if (wouldcommit > nmp->nm_wcommitsize) { 970a87627b2SAlexander Motin np->n_attrstamp = 0; 971a87627b2SAlexander Motin KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 97248fe9263SKonstantin Belousov error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag & 97348fe9263SKonstantin Belousov IO_VMIO) != 0 ? V_VMIO : 0), td, 1); 97448fe9263SKonstantin Belousov if (error != 0) 975aa8c1f8dSKonstantin Belousov goto out; 976a87627b2SAlexander Motin wouldcommit = biosize; 977a87627b2SAlexander Motin } 978a87627b2SAlexander Motin } 979a87627b2SAlexander Motin 9801b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.biocache_writes); 9819ec7b004SRick Macklem lbn = uio->uio_offset / biosize; 98296ecfd98SEd Maste on = uio->uio_offset - (lbn * biosize); 983526d0bd5SKonstantin Belousov n = MIN((unsigned)(biosize - on), uio->uio_resid); 9849ec7b004SRick Macklem again: 9859ec7b004SRick Macklem /* 9869ec7b004SRick Macklem * Handle direct append and file extension cases, calculate 9879ec7b004SRick Macklem * unaligned buffer size. 9889ec7b004SRick Macklem */ 9895d85e12fSRick Macklem NFSLOCKNODE(np); 990cf766161SRick Macklem if ((np->n_flag & NHASBEENLOCKED) == 0 && 991cf766161SRick Macklem (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0) 992cf766161SRick Macklem noncontig_write = 1; 993cf766161SRick Macklem else 994cf766161SRick Macklem noncontig_write = 0; 995cf766161SRick Macklem if ((uio->uio_offset == np->n_size || 996cf766161SRick Macklem (noncontig_write != 0 && 997cf766161SRick Macklem lbn == (np->n_size / biosize) && 998cf766161SRick Macklem uio->uio_offset + n > np->n_size)) && n) { 9995d85e12fSRick Macklem NFSUNLOCKNODE(np); 10009ec7b004SRick Macklem /* 10019ec7b004SRick Macklem * Get the buffer (in its pre-append state to maintain 10029ec7b004SRick Macklem * B_CACHE if it was previously set). Resize the 10039ec7b004SRick Macklem * nfsnode after we have locked the buffer to prevent 10049ec7b004SRick Macklem * readers from reading garbage. 10059ec7b004SRick Macklem */ 1006cf766161SRick Macklem obcount = np->n_size - (lbn * biosize); 1007cf766161SRick Macklem bp = nfs_getcacheblk(vp, lbn, obcount, td); 10089ec7b004SRick Macklem 10099ec7b004SRick Macklem if (bp != NULL) { 10109ec7b004SRick Macklem long save; 10119ec7b004SRick Macklem 10125d85e12fSRick Macklem NFSLOCKNODE(np); 10139ec7b004SRick Macklem np->n_size = uio->uio_offset + n; 10149ec7b004SRick Macklem np->n_flag |= NMODIFIED; 1015aa8c1f8dSKonstantin Belousov np->n_flag &= ~NVNSETSZSKIP; 10169ec7b004SRick Macklem vnode_pager_setsize(vp, np->n_size); 10175d85e12fSRick Macklem NFSUNLOCKNODE(np); 10189ec7b004SRick Macklem 10199ec7b004SRick Macklem save = bp->b_flags & B_CACHE; 1020cf766161SRick Macklem bcount = on + n; 10219ec7b004SRick Macklem allocbuf(bp, bcount); 10229ec7b004SRick Macklem bp->b_flags |= save; 1023cf766161SRick Macklem if (noncontig_write != 0 && on > obcount) 1024cf766161SRick Macklem vfs_bio_bzero_buf(bp, obcount, on - 1025cf766161SRick Macklem obcount); 10269ec7b004SRick Macklem } 10279ec7b004SRick Macklem } else { 10289ec7b004SRick Macklem /* 10299ec7b004SRick Macklem * Obtain the locked cache block first, and then 10309ec7b004SRick Macklem * adjust the file's size as appropriate. 10319ec7b004SRick Macklem */ 10329ec7b004SRick Macklem bcount = on + n; 10339ec7b004SRick Macklem if ((off_t)lbn * biosize + bcount < np->n_size) { 10349ec7b004SRick Macklem if ((off_t)(lbn + 1) * biosize < np->n_size) 10359ec7b004SRick Macklem bcount = biosize; 10369ec7b004SRick Macklem else 10379ec7b004SRick Macklem bcount = np->n_size - (off_t)lbn * biosize; 10389ec7b004SRick Macklem } 10395d85e12fSRick Macklem NFSUNLOCKNODE(np); 10409ec7b004SRick Macklem bp = nfs_getcacheblk(vp, lbn, bcount, td); 10415d85e12fSRick Macklem NFSLOCKNODE(np); 10429ec7b004SRick Macklem if (uio->uio_offset + n > np->n_size) { 10439ec7b004SRick Macklem np->n_size = uio->uio_offset + n; 10449ec7b004SRick Macklem np->n_flag |= NMODIFIED; 1045aa8c1f8dSKonstantin Belousov np->n_flag &= ~NVNSETSZSKIP; 10469ec7b004SRick Macklem vnode_pager_setsize(vp, np->n_size); 10479ec7b004SRick Macklem } 10485d85e12fSRick Macklem NFSUNLOCKNODE(np); 10499ec7b004SRick Macklem } 10509ec7b004SRick Macklem 10519ec7b004SRick Macklem if (!bp) { 10529ec7b004SRick Macklem error = newnfs_sigintr(nmp, td); 10539ec7b004SRick Macklem if (!error) 10549ec7b004SRick Macklem error = EINTR; 10559ec7b004SRick Macklem break; 10569ec7b004SRick Macklem } 10579ec7b004SRick Macklem 10589ec7b004SRick Macklem /* 10599ec7b004SRick Macklem * Issue a READ if B_CACHE is not set. In special-append 10609ec7b004SRick Macklem * mode, B_CACHE is based on the buffer prior to the write 10619ec7b004SRick Macklem * op and is typically set, avoiding the read. If a read 10629ec7b004SRick Macklem * is required in special append mode, the server will 10639ec7b004SRick Macklem * probably send us a short-read since we extended the file 10649ec7b004SRick Macklem * on our end, resulting in b_resid == 0 and, thusly, 10659ec7b004SRick Macklem * B_CACHE getting set. 10669ec7b004SRick Macklem * 10679ec7b004SRick Macklem * We can also avoid issuing the read if the write covers 10689ec7b004SRick Macklem * the entire buffer. We have to make sure the buffer state 10699ec7b004SRick Macklem * is reasonable in this case since we will not be initiating 10709ec7b004SRick Macklem * I/O. See the comments in kern/vfs_bio.c's getblk() for 10719ec7b004SRick Macklem * more information. 10729ec7b004SRick Macklem * 10739ec7b004SRick Macklem * B_CACHE may also be set due to the buffer being cached 10749ec7b004SRick Macklem * normally. 10759ec7b004SRick Macklem */ 10769ec7b004SRick Macklem 1077bfb68a9eSKonstantin Belousov bp_cached = 1; 10789ec7b004SRick Macklem if (on == 0 && n == bcount) { 1079bfb68a9eSKonstantin Belousov if ((bp->b_flags & B_CACHE) == 0) 1080bfb68a9eSKonstantin Belousov bp_cached = 0; 10819ec7b004SRick Macklem bp->b_flags |= B_CACHE; 10829ec7b004SRick Macklem bp->b_flags &= ~B_INVAL; 10839ec7b004SRick Macklem bp->b_ioflags &= ~BIO_ERROR; 10849ec7b004SRick Macklem } 10859ec7b004SRick Macklem 10869ec7b004SRick Macklem if ((bp->b_flags & B_CACHE) == 0) { 10879ec7b004SRick Macklem bp->b_iocmd = BIO_READ; 10889ec7b004SRick Macklem vfs_busy_pages(bp, 0); 108967c5c2d2SRick Macklem error = ncl_doio(vp, bp, cred, td, 0); 10909ec7b004SRick Macklem if (error) { 10919ec7b004SRick Macklem brelse(bp); 10929ec7b004SRick Macklem break; 10939ec7b004SRick Macklem } 10949ec7b004SRick Macklem } 10959ec7b004SRick Macklem if (bp->b_wcred == NOCRED) 10969ec7b004SRick Macklem bp->b_wcred = crhold(cred); 10975d85e12fSRick Macklem NFSLOCKNODE(np); 10989ec7b004SRick Macklem np->n_flag |= NMODIFIED; 10995d85e12fSRick Macklem NFSUNLOCKNODE(np); 11009ec7b004SRick Macklem 11019ec7b004SRick Macklem /* 11029ec7b004SRick Macklem * If dirtyend exceeds file size, chop it down. This should 11039ec7b004SRick Macklem * not normally occur but there is an append race where it 11049ec7b004SRick Macklem * might occur XXX, so we log it. 11059ec7b004SRick Macklem * 11069ec7b004SRick Macklem * If the chopping creates a reverse-indexed or degenerate 11079ec7b004SRick Macklem * situation with dirtyoff/end, we 0 both of them. 11089ec7b004SRick Macklem */ 11099ec7b004SRick Macklem 11109ec7b004SRick Macklem if (bp->b_dirtyend > bcount) { 1111ad600ac8SKonstantin Belousov printf("NFS append race @%lx:%d\n", 11129ec7b004SRick Macklem (long)bp->b_blkno * DEV_BSIZE, 11139ec7b004SRick Macklem bp->b_dirtyend - bcount); 11149ec7b004SRick Macklem bp->b_dirtyend = bcount; 11159ec7b004SRick Macklem } 11169ec7b004SRick Macklem 11179ec7b004SRick Macklem if (bp->b_dirtyoff >= bp->b_dirtyend) 11189ec7b004SRick Macklem bp->b_dirtyoff = bp->b_dirtyend = 0; 11199ec7b004SRick Macklem 11209ec7b004SRick Macklem /* 11219ec7b004SRick Macklem * If the new write will leave a contiguous dirty 11229ec7b004SRick Macklem * area, just update the b_dirtyoff and b_dirtyend, 11239ec7b004SRick Macklem * otherwise force a write rpc of the old dirty area. 11249ec7b004SRick Macklem * 1125cf766161SRick Macklem * If there has been a file lock applied to this file 1126cf766161SRick Macklem * or vfs.nfs.old_noncontig_writing is set, do the following: 11279ec7b004SRick Macklem * While it is possible to merge discontiguous writes due to 11289ec7b004SRick Macklem * our having a B_CACHE buffer ( and thus valid read data 11299ec7b004SRick Macklem * for the hole), we don't because it could lead to 11309ec7b004SRick Macklem * significant cache coherency problems with multiple clients, 11319ec7b004SRick Macklem * especially if locking is implemented later on. 11329ec7b004SRick Macklem * 1133cf766161SRick Macklem * If vfs.nfs.old_noncontig_writing is not set and there has 1134cf766161SRick Macklem * not been file locking done on this file: 1135cf766161SRick Macklem * Relax coherency a bit for the sake of performance and 1136cf766161SRick Macklem * expand the current dirty region to contain the new 1137cf766161SRick Macklem * write even if it means we mark some non-dirty data as 1138cf766161SRick Macklem * dirty. 11399ec7b004SRick Macklem */ 11409ec7b004SRick Macklem 1141cf766161SRick Macklem if (noncontig_write == 0 && bp->b_dirtyend > 0 && 11429ec7b004SRick Macklem (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { 11439ec7b004SRick Macklem if (bwrite(bp) == EINTR) { 11449ec7b004SRick Macklem error = EINTR; 11459ec7b004SRick Macklem break; 11469ec7b004SRick Macklem } 11479ec7b004SRick Macklem goto again; 11489ec7b004SRick Macklem } 11499ec7b004SRick Macklem 1150bfb68a9eSKonstantin Belousov local_resid = uio->uio_resid; 1151ddfc47fdSKonstantin Belousov error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio); 11529ec7b004SRick Macklem 1153bfb68a9eSKonstantin Belousov if (error != 0 && !bp_cached) { 1154bfb68a9eSKonstantin Belousov /* 1155bfb68a9eSKonstantin Belousov * This block has no other content then what 1156bfb68a9eSKonstantin Belousov * possibly was written by the faulty uiomove. 1157bfb68a9eSKonstantin Belousov * Release it, forgetting the data pages, to 1158bfb68a9eSKonstantin Belousov * prevent the leak of uninitialized data to 1159bfb68a9eSKonstantin Belousov * usermode. 1160bfb68a9eSKonstantin Belousov */ 1161bfb68a9eSKonstantin Belousov bp->b_ioflags |= BIO_ERROR; 1162bfb68a9eSKonstantin Belousov brelse(bp); 1163bfb68a9eSKonstantin Belousov uio->uio_offset -= local_resid - uio->uio_resid; 1164bfb68a9eSKonstantin Belousov uio->uio_resid = local_resid; 1165bfb68a9eSKonstantin Belousov break; 1166bfb68a9eSKonstantin Belousov } 1167bfb68a9eSKonstantin Belousov 11689ec7b004SRick Macklem /* 11699ec7b004SRick Macklem * Since this block is being modified, it must be written 11709ec7b004SRick Macklem * again and not just committed. Since write clustering does 11719ec7b004SRick Macklem * not work for the stage 1 data write, only the stage 2 11729ec7b004SRick Macklem * commit rpc, we have to clear B_CLUSTEROK as well. 11739ec7b004SRick Macklem */ 11749ec7b004SRick Macklem bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 11759ec7b004SRick Macklem 1176bfb68a9eSKonstantin Belousov /* 1177bfb68a9eSKonstantin Belousov * Get the partial update on the progress made from 1178a96c9b30SPedro F. Giffuni * uiomove, if an error occurred. 1179bfb68a9eSKonstantin Belousov */ 1180bfb68a9eSKonstantin Belousov if (error != 0) 1181bfb68a9eSKonstantin Belousov n = local_resid - uio->uio_resid; 11829ec7b004SRick Macklem 11839ec7b004SRick Macklem /* 11849ec7b004SRick Macklem * Only update dirtyoff/dirtyend if not a degenerate 11859ec7b004SRick Macklem * condition. 11869ec7b004SRick Macklem */ 1187bfb68a9eSKonstantin Belousov if (n > 0) { 11889ec7b004SRick Macklem if (bp->b_dirtyend > 0) { 11899ec7b004SRick Macklem bp->b_dirtyoff = min(on, bp->b_dirtyoff); 11909ec7b004SRick Macklem bp->b_dirtyend = max((on + n), bp->b_dirtyend); 11919ec7b004SRick Macklem } else { 11929ec7b004SRick Macklem bp->b_dirtyoff = on; 11939ec7b004SRick Macklem bp->b_dirtyend = on + n; 11949ec7b004SRick Macklem } 11951f176894SAlan Cox vfs_bio_set_valid(bp, on, n); 11969ec7b004SRick Macklem } 11979ec7b004SRick Macklem 11989ec7b004SRick Macklem /* 11999ec7b004SRick Macklem * If IO_SYNC do bwrite(). 12009ec7b004SRick Macklem * 12019ec7b004SRick Macklem * IO_INVAL appears to be unused. The idea appears to be 12029ec7b004SRick Macklem * to turn off caching in this case. Very odd. XXX 12039ec7b004SRick Macklem */ 12049ec7b004SRick Macklem if ((ioflag & IO_SYNC)) { 12059ec7b004SRick Macklem if (ioflag & IO_INVAL) 12069ec7b004SRick Macklem bp->b_flags |= B_NOCACHE; 12079d232eecSKonstantin Belousov error1 = bwrite(bp); 12089d232eecSKonstantin Belousov if (error1 != 0) { 12099d232eecSKonstantin Belousov if (error == 0) 12109d232eecSKonstantin Belousov error = error1; 12119ec7b004SRick Macklem break; 12129d232eecSKonstantin Belousov } 1213b73cd4d3SKonstantin Belousov } else if ((n + on) == biosize || (ioflag & IO_ASYNC) != 0) { 12149ec7b004SRick Macklem bp->b_flags |= B_ASYNC; 1215656d2e83SKonstantin Belousov (void) bwrite(bp); 12169ec7b004SRick Macklem } else { 12179ec7b004SRick Macklem bdwrite(bp); 12189ec7b004SRick Macklem } 1219bfb68a9eSKonstantin Belousov 1220bfb68a9eSKonstantin Belousov if (error != 0) 1221bfb68a9eSKonstantin Belousov break; 12229ec7b004SRick Macklem } while (uio->uio_resid > 0 && n > 0); 12239ec7b004SRick Macklem 122450dcff08SRick Macklem if (error == 0) { 122550dcff08SRick Macklem nanouptime(&ts); 122650dcff08SRick Macklem NFSLOCKNODE(np); 122750dcff08SRick Macklem np->n_localmodtime = ts; 122850dcff08SRick Macklem NFSUNLOCKNODE(np); 122950dcff08SRick Macklem } else { 1230bfb68a9eSKonstantin Belousov if (ioflag & IO_UNIT) { 1231bfb68a9eSKonstantin Belousov VATTR_NULL(&vattr); 1232bfb68a9eSKonstantin Belousov vattr.va_size = orig_size; 1233bfb68a9eSKonstantin Belousov /* IO_SYNC is handled implicitely */ 1234bfb68a9eSKonstantin Belousov (void)VOP_SETATTR(vp, &vattr, cred); 1235bfb68a9eSKonstantin Belousov uio->uio_offset -= orig_resid - uio->uio_resid; 1236bfb68a9eSKonstantin Belousov uio->uio_resid = orig_resid; 1237bfb68a9eSKonstantin Belousov } 1238bfb68a9eSKonstantin Belousov } 1239bfb68a9eSKonstantin Belousov 1240aa8c1f8dSKonstantin Belousov out: 1241aa8c1f8dSKonstantin Belousov curthread_pflags2_restore(save2); 12429ec7b004SRick Macklem return (error); 12439ec7b004SRick Macklem } 12449ec7b004SRick Macklem 12459ec7b004SRick Macklem /* 12469ec7b004SRick Macklem * Get an nfs cache block. 12479ec7b004SRick Macklem * 12489ec7b004SRick Macklem * Allocate a new one if the block isn't currently in the cache 12499ec7b004SRick Macklem * and return the block marked busy. If the calling process is 12509ec7b004SRick Macklem * interrupted by a signal for an interruptible mount point, return 12519ec7b004SRick Macklem * NULL. 12529ec7b004SRick Macklem * 12539ec7b004SRick Macklem * The caller must carefully deal with the possible B_INVAL state of 12549ec7b004SRick Macklem * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it 12559ec7b004SRick Macklem * indirectly), so synchronous reads can be issued without worrying about 12569ec7b004SRick Macklem * the B_INVAL state. We have to be a little more careful when dealing 12579ec7b004SRick Macklem * with writes (see comments in nfs_write()) when extending a file past 12589ec7b004SRick Macklem * its EOF. 12599ec7b004SRick Macklem */ 12609ec7b004SRick Macklem static struct buf * 12619ec7b004SRick Macklem nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td) 12629ec7b004SRick Macklem { 12639ec7b004SRick Macklem struct buf *bp; 12649ec7b004SRick Macklem struct mount *mp; 12659ec7b004SRick Macklem struct nfsmount *nmp; 12669ec7b004SRick Macklem 12679ec7b004SRick Macklem mp = vp->v_mount; 12689ec7b004SRick Macklem nmp = VFSTONFS(mp); 12699ec7b004SRick Macklem 12709ec7b004SRick Macklem if (nmp->nm_flag & NFSMNT_INT) { 12719ec7b004SRick Macklem sigset_t oldset; 12729ec7b004SRick Macklem 12734a8e2176SRick Macklem newnfs_set_sigmask(td, &oldset); 12743b14c753SJohn Baldwin bp = getblk(vp, bn, size, PCATCH, 0, 0); 12754a8e2176SRick Macklem newnfs_restore_sigmask(td, &oldset); 12769ec7b004SRick Macklem while (bp == NULL) { 12779ec7b004SRick Macklem if (newnfs_sigintr(nmp, td)) 12789ec7b004SRick Macklem return (NULL); 12799ec7b004SRick Macklem bp = getblk(vp, bn, size, 0, 2 * hz, 0); 12809ec7b004SRick Macklem } 12819ec7b004SRick Macklem } else { 12829ec7b004SRick Macklem bp = getblk(vp, bn, size, 0, 0, 0); 12839ec7b004SRick Macklem } 12849ec7b004SRick Macklem 12857f763fc3SRick Macklem if (vp->v_type == VREG) 12867f763fc3SRick Macklem bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE); 12879ec7b004SRick Macklem return (bp); 12889ec7b004SRick Macklem } 12899ec7b004SRick Macklem 12909ec7b004SRick Macklem /* 12919ec7b004SRick Macklem * Flush and invalidate all dirty buffers. If another process is already 12929ec7b004SRick Macklem * doing the flush, just wait for completion. 12939ec7b004SRick Macklem */ 12949ec7b004SRick Macklem int 12959ec7b004SRick Macklem ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg) 12969ec7b004SRick Macklem { 12979ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 12989ec7b004SRick Macklem struct nfsmount *nmp = VFSTONFS(vp->v_mount); 12999ec7b004SRick Macklem int error = 0, slpflag, slptimeo; 1300e5cffdd3SKonstantin Belousov bool old_lock; 130150dcff08SRick Macklem struct timespec ts; 13029ec7b004SRick Macklem 13039ec7b004SRick Macklem ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf"); 13049ec7b004SRick Macklem 13059ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_INT) == 0) 13069ec7b004SRick Macklem intrflg = 0; 130716f300faSRick Macklem if (NFSCL_FORCEDISM(nmp->nm_mountp)) 13089ec7b004SRick Macklem intrflg = 1; 13099ec7b004SRick Macklem if (intrflg) { 13103b14c753SJohn Baldwin slpflag = PCATCH; 13119ec7b004SRick Macklem slptimeo = 2 * hz; 13129ec7b004SRick Macklem } else { 13139ec7b004SRick Macklem slpflag = 0; 13149ec7b004SRick Macklem slptimeo = 0; 13159ec7b004SRick Macklem } 13169ec7b004SRick Macklem 1317e5cffdd3SKonstantin Belousov old_lock = ncl_excl_start(vp); 1318e5cffdd3SKonstantin Belousov if (old_lock) 1319e5cffdd3SKonstantin Belousov flags |= V_ALLOWCLEAN; 1320934a3099SRick Macklem 13219ec7b004SRick Macklem /* 13229ec7b004SRick Macklem * Now, flush as required. 13239ec7b004SRick Macklem */ 1324b068bb09SKonstantin Belousov if ((flags & (V_SAVE | V_VMIO)) == V_SAVE) { 1325b068bb09SKonstantin Belousov vnode_pager_clean_sync(vp); 1326b068bb09SKonstantin Belousov 13279ec7b004SRick Macklem /* 13289ec7b004SRick Macklem * If the page clean was interrupted, fail the invalidation. 13299ec7b004SRick Macklem * Not doing so, we run the risk of losing dirty pages in the 13309ec7b004SRick Macklem * vinvalbuf() call below. 13319ec7b004SRick Macklem */ 13329ec7b004SRick Macklem if (intrflg && (error = newnfs_sigintr(nmp, td))) 13339ec7b004SRick Macklem goto out; 13349ec7b004SRick Macklem } 13359ec7b004SRick Macklem 13369ec7b004SRick Macklem error = vinvalbuf(vp, flags, slpflag, 0); 13379ec7b004SRick Macklem while (error) { 13389ec7b004SRick Macklem if (intrflg && (error = newnfs_sigintr(nmp, td))) 13399ec7b004SRick Macklem goto out; 13409ec7b004SRick Macklem error = vinvalbuf(vp, flags, 0, slptimeo); 13419ec7b004SRick Macklem } 134256239558SRick Macklem if (NFSHASPNFS(nmp)) { 13431f60bfd8SRick Macklem nfscl_layoutcommit(vp, td); 134450dcff08SRick Macklem nanouptime(&ts); 134556239558SRick Macklem /* 134656239558SRick Macklem * Invalidate the attribute cache, since writes to a DS 134756239558SRick Macklem * won't update the size attribute. 134856239558SRick Macklem */ 13495d85e12fSRick Macklem NFSLOCKNODE(np); 135056239558SRick Macklem np->n_attrstamp = 0; 135150dcff08SRick Macklem } else { 135250dcff08SRick Macklem nanouptime(&ts); 13535d85e12fSRick Macklem NFSLOCKNODE(np); 135450dcff08SRick Macklem } 135503a39a17SRick Macklem if ((np->n_flag & NMODIFIED) != 0) { 135650dcff08SRick Macklem np->n_localmodtime = ts; 13579ec7b004SRick Macklem np->n_flag &= ~NMODIFIED; 135850dcff08SRick Macklem } 13595d85e12fSRick Macklem NFSUNLOCKNODE(np); 13609ec7b004SRick Macklem out: 1361e5cffdd3SKonstantin Belousov ncl_excl_finish(vp, old_lock); 13629ec7b004SRick Macklem return error; 13639ec7b004SRick Macklem } 13649ec7b004SRick Macklem 13659ec7b004SRick Macklem /* 13669ec7b004SRick Macklem * Initiate asynchronous I/O. Return an error if no nfsiods are available. 13679ec7b004SRick Macklem * This is mainly to avoid queueing async I/O requests when the nfsiods 13689ec7b004SRick Macklem * are all hung on a dead server. 13699ec7b004SRick Macklem * 13709ec7b004SRick Macklem * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp 13719ec7b004SRick Macklem * is eventually dequeued by the async daemon, ncl_doio() *will*. 13729ec7b004SRick Macklem */ 13739ec7b004SRick Macklem int 13749ec7b004SRick Macklem ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td) 13759ec7b004SRick Macklem { 13769ec7b004SRick Macklem int iod; 13779ec7b004SRick Macklem int gotiod; 13789ec7b004SRick Macklem int slpflag = 0; 13799ec7b004SRick Macklem int slptimeo = 0; 13809ec7b004SRick Macklem int error, error2; 13819ec7b004SRick Macklem 13829ec7b004SRick Macklem /* 13839ec7b004SRick Macklem * Commits are usually short and sweet so lets save some cpu and 13849ec7b004SRick Macklem * leave the async daemons for more important rpc's (such as reads 13859ec7b004SRick Macklem * and writes). 1386175b3f31SRick Macklem * 1387175b3f31SRick Macklem * Readdirplus RPCs do vget()s to acquire the vnodes for entries 1388175b3f31SRick Macklem * in the directory in order to update attributes. This can deadlock 1389175b3f31SRick Macklem * with another thread that is waiting for async I/O to be done by 1390175b3f31SRick Macklem * an nfsiod thread while holding a lock on one of these vnodes. 1391175b3f31SRick Macklem * To avoid this deadlock, don't allow the async nfsiod threads to 1392175b3f31SRick Macklem * perform Readdirplus RPCs. 13939ec7b004SRick Macklem */ 1394b662b41eSRick Macklem NFSLOCKIOD(); 1395175b3f31SRick Macklem if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) && 1396175b3f31SRick Macklem (nmp->nm_bufqiods > ncl_numasync / 2)) || 1397175b3f31SRick Macklem (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) { 1398b662b41eSRick Macklem NFSUNLOCKIOD(); 13999ec7b004SRick Macklem return(EIO); 14009ec7b004SRick Macklem } 14019ec7b004SRick Macklem again: 14029ec7b004SRick Macklem if (nmp->nm_flag & NFSMNT_INT) 14033b14c753SJohn Baldwin slpflag = PCATCH; 14049ec7b004SRick Macklem gotiod = FALSE; 14059ec7b004SRick Macklem 14069ec7b004SRick Macklem /* 14079ec7b004SRick Macklem * Find a free iod to process this request. 14089ec7b004SRick Macklem */ 14099ec7b004SRick Macklem for (iod = 0; iod < ncl_numasync; iod++) 141080169e41SRick Macklem if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) { 14119ec7b004SRick Macklem gotiod = TRUE; 14129ec7b004SRick Macklem break; 14139ec7b004SRick Macklem } 14149ec7b004SRick Macklem 14159ec7b004SRick Macklem /* 14169ec7b004SRick Macklem * Try to create one if none are free. 14179ec7b004SRick Macklem */ 14187b8c319bSRick Macklem if (!gotiod) 14197b8c319bSRick Macklem ncl_nfsiodnew(); 14207b8c319bSRick Macklem else { 14219ec7b004SRick Macklem /* 14229ec7b004SRick Macklem * Found one, so wake it up and tell it which 14239ec7b004SRick Macklem * mount to process. 14249ec7b004SRick Macklem */ 14259ec7b004SRick Macklem NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n", 14269ec7b004SRick Macklem iod, nmp)); 142780169e41SRick Macklem ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE; 14289ec7b004SRick Macklem ncl_iodmount[iod] = nmp; 14299ec7b004SRick Macklem nmp->nm_bufqiods++; 14309ec7b004SRick Macklem wakeup(&ncl_iodwant[iod]); 14319ec7b004SRick Macklem } 14329ec7b004SRick Macklem 14339ec7b004SRick Macklem /* 14349ec7b004SRick Macklem * If none are free, we may already have an iod working on this mount 14359ec7b004SRick Macklem * point. If so, it will process our request. 14369ec7b004SRick Macklem */ 14379ec7b004SRick Macklem if (!gotiod) { 14389ec7b004SRick Macklem if (nmp->nm_bufqiods > 0) { 14399ec7b004SRick Macklem NFS_DPF(ASYNCIO, 14409ec7b004SRick Macklem ("ncl_asyncio: %d iods are already processing mount %p\n", 14419ec7b004SRick Macklem nmp->nm_bufqiods, nmp)); 14429ec7b004SRick Macklem gotiod = TRUE; 14439ec7b004SRick Macklem } 14449ec7b004SRick Macklem } 14459ec7b004SRick Macklem 14469ec7b004SRick Macklem /* 14479ec7b004SRick Macklem * If we have an iod which can process the request, then queue 14489ec7b004SRick Macklem * the buffer. 14499ec7b004SRick Macklem */ 14509ec7b004SRick Macklem if (gotiod) { 14519ec7b004SRick Macklem /* 14529ec7b004SRick Macklem * Ensure that the queue never grows too large. We still want 14539ec7b004SRick Macklem * to asynchronize so we block rather then return EIO. 14549ec7b004SRick Macklem */ 14559ec7b004SRick Macklem while (nmp->nm_bufqlen >= 2*ncl_numasync) { 14569ec7b004SRick Macklem NFS_DPF(ASYNCIO, 14579ec7b004SRick Macklem ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp)); 14589ec7b004SRick Macklem nmp->nm_bufqwant = TRUE; 14594a8e2176SRick Macklem error = newnfs_msleep(td, &nmp->nm_bufq, 14604a8e2176SRick Macklem &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio", 14614a8e2176SRick Macklem slptimeo); 14629ec7b004SRick Macklem if (error) { 14639ec7b004SRick Macklem error2 = newnfs_sigintr(nmp, td); 14649ec7b004SRick Macklem if (error2) { 1465b662b41eSRick Macklem NFSUNLOCKIOD(); 14669ec7b004SRick Macklem return (error2); 14679ec7b004SRick Macklem } 14683b14c753SJohn Baldwin if (slpflag == PCATCH) { 14699ec7b004SRick Macklem slpflag = 0; 14709ec7b004SRick Macklem slptimeo = 2 * hz; 14719ec7b004SRick Macklem } 14729ec7b004SRick Macklem } 14739ec7b004SRick Macklem /* 14749ec7b004SRick Macklem * We might have lost our iod while sleeping, 1475a96c9b30SPedro F. Giffuni * so check and loop if necessary. 14769ec7b004SRick Macklem */ 14779ec7b004SRick Macklem goto again; 14789ec7b004SRick Macklem } 14799ec7b004SRick Macklem 14809ec7b004SRick Macklem /* We might have lost our nfsiod */ 14819ec7b004SRick Macklem if (nmp->nm_bufqiods == 0) { 14829ec7b004SRick Macklem NFS_DPF(ASYNCIO, 14839ec7b004SRick Macklem ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); 14849ec7b004SRick Macklem goto again; 14859ec7b004SRick Macklem } 14869ec7b004SRick Macklem 14879ec7b004SRick Macklem if (bp->b_iocmd == BIO_READ) { 14889ec7b004SRick Macklem if (bp->b_rcred == NOCRED && cred != NOCRED) 14899ec7b004SRick Macklem bp->b_rcred = crhold(cred); 14909ec7b004SRick Macklem } else { 14919ec7b004SRick Macklem if (bp->b_wcred == NOCRED && cred != NOCRED) 14929ec7b004SRick Macklem bp->b_wcred = crhold(cred); 14939ec7b004SRick Macklem } 14949ec7b004SRick Macklem 14959ec7b004SRick Macklem if (bp->b_flags & B_REMFREE) 14969ec7b004SRick Macklem bremfreef(bp); 14979ec7b004SRick Macklem BUF_KERNPROC(bp); 14989ec7b004SRick Macklem TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist); 14999ec7b004SRick Macklem nmp->nm_bufqlen++; 150003a39a17SRick Macklem KASSERT((bp->b_flags & B_DIRECT) == 0, 150103a39a17SRick Macklem ("ncl_asyncio: B_DIRECT set")); 1502b662b41eSRick Macklem NFSUNLOCKIOD(); 15039ec7b004SRick Macklem return (0); 15049ec7b004SRick Macklem } 15059ec7b004SRick Macklem 1506b662b41eSRick Macklem NFSUNLOCKIOD(); 15079ec7b004SRick Macklem 15089ec7b004SRick Macklem /* 15099ec7b004SRick Macklem * All the iods are busy on other mounts, so return EIO to 15109ec7b004SRick Macklem * force the caller to process the i/o synchronously. 15119ec7b004SRick Macklem */ 15129ec7b004SRick Macklem NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n")); 15139ec7b004SRick Macklem return (EIO); 15149ec7b004SRick Macklem } 15159ec7b004SRick Macklem 15169ec7b004SRick Macklem /* 15179ec7b004SRick Macklem * Do an I/O operation to/from a cache block. This may be called 15189ec7b004SRick Macklem * synchronously or from an nfsiod. 15199ec7b004SRick Macklem */ 15209ec7b004SRick Macklem int 152167c5c2d2SRick Macklem ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td, 152267c5c2d2SRick Macklem int called_from_strategy) 15239ec7b004SRick Macklem { 15249ec7b004SRick Macklem struct uio *uiop; 15259ec7b004SRick Macklem struct nfsnode *np; 15269ec7b004SRick Macklem struct nfsmount *nmp; 15279ec7b004SRick Macklem int error = 0, iomode, must_commit = 0; 15289ec7b004SRick Macklem struct uio uio; 15299ec7b004SRick Macklem struct iovec io; 15309ec7b004SRick Macklem struct proc *p = td ? td->td_proc : NULL; 15319ec7b004SRick Macklem uint8_t iocmd; 15329ec7b004SRick Macklem 15339ec7b004SRick Macklem np = VTONFS(vp); 15349ec7b004SRick Macklem nmp = VFSTONFS(vp->v_mount); 15359ec7b004SRick Macklem uiop = &uio; 15369ec7b004SRick Macklem uiop->uio_iov = &io; 15379ec7b004SRick Macklem uiop->uio_iovcnt = 1; 15389ec7b004SRick Macklem uiop->uio_segflg = UIO_SYSSPACE; 15399ec7b004SRick Macklem uiop->uio_td = td; 15409ec7b004SRick Macklem 15419ec7b004SRick Macklem /* 15429ec7b004SRick Macklem * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We 15439ec7b004SRick Macklem * do this here so we do not have to do it in all the code that 15449ec7b004SRick Macklem * calls us. 15459ec7b004SRick Macklem */ 15469ec7b004SRick Macklem bp->b_flags &= ~B_INVAL; 15479ec7b004SRick Macklem bp->b_ioflags &= ~BIO_ERROR; 15489ec7b004SRick Macklem 15499ec7b004SRick Macklem KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp)); 15509ec7b004SRick Macklem iocmd = bp->b_iocmd; 15519ec7b004SRick Macklem if (iocmd == BIO_READ) { 15529ec7b004SRick Macklem io.iov_len = uiop->uio_resid = bp->b_bcount; 15539ec7b004SRick Macklem io.iov_base = bp->b_data; 15549ec7b004SRick Macklem uiop->uio_rw = UIO_READ; 15559ec7b004SRick Macklem 15569ec7b004SRick Macklem switch (vp->v_type) { 15579ec7b004SRick Macklem case VREG: 15589ec7b004SRick Macklem uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; 15591b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.read_bios); 15609ec7b004SRick Macklem error = ncl_readrpc(vp, uiop, cr); 15619ec7b004SRick Macklem 15629ec7b004SRick Macklem if (!error) { 15639ec7b004SRick Macklem if (uiop->uio_resid) { 15649ec7b004SRick Macklem /* 15659ec7b004SRick Macklem * If we had a short read with no error, we must have 15669ec7b004SRick Macklem * hit a file hole. We should zero-fill the remainder. 15679ec7b004SRick Macklem * This can also occur if the server hits the file EOF. 15689ec7b004SRick Macklem * 15699ec7b004SRick Macklem * Holes used to be able to occur due to pending 15709ec7b004SRick Macklem * writes, but that is not possible any longer. 15719ec7b004SRick Macklem */ 15729ec7b004SRick Macklem int nread = bp->b_bcount - uiop->uio_resid; 1573526d0bd5SKonstantin Belousov ssize_t left = uiop->uio_resid; 15749ec7b004SRick Macklem 15759ec7b004SRick Macklem if (left > 0) 15769ec7b004SRick Macklem bzero((char *)bp->b_data + nread, left); 15779ec7b004SRick Macklem uiop->uio_resid = 0; 15789ec7b004SRick Macklem } 15799ec7b004SRick Macklem } 15809ec7b004SRick Macklem /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */ 158178022527SKonstantin Belousov if (p && vp->v_writecount <= -1) { 15825d85e12fSRick Macklem NFSLOCKNODE(np); 15839ec7b004SRick Macklem if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) { 15845d85e12fSRick Macklem NFSUNLOCKNODE(np); 15859ec7b004SRick Macklem PROC_LOCK(p); 15869ec7b004SRick Macklem killproc(p, "text file modification"); 15879ec7b004SRick Macklem PROC_UNLOCK(p); 15889ec7b004SRick Macklem } else 15895d85e12fSRick Macklem NFSUNLOCKNODE(np); 15909ec7b004SRick Macklem } 15919ec7b004SRick Macklem break; 15929ec7b004SRick Macklem case VLNK: 15939ec7b004SRick Macklem uiop->uio_offset = (off_t)0; 15941b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.readlink_bios); 15959ec7b004SRick Macklem error = ncl_readlinkrpc(vp, uiop, cr); 15969ec7b004SRick Macklem break; 15979ec7b004SRick Macklem case VDIR: 15981b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.readdir_bios); 15999ec7b004SRick Macklem uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ; 16009ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) { 16019ec7b004SRick Macklem error = ncl_readdirplusrpc(vp, uiop, cr, td); 16029ec7b004SRick Macklem if (error == NFSERR_NOTSUPP) 16039ec7b004SRick Macklem nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 16049ec7b004SRick Macklem } 16059ec7b004SRick Macklem if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 16069ec7b004SRick Macklem error = ncl_readdirrpc(vp, uiop, cr, td); 16079ec7b004SRick Macklem /* 16089ec7b004SRick Macklem * end-of-directory sets B_INVAL but does not generate an 16099ec7b004SRick Macklem * error. 16109ec7b004SRick Macklem */ 16119ec7b004SRick Macklem if (error == 0 && uiop->uio_resid == bp->b_bcount) 16129ec7b004SRick Macklem bp->b_flags |= B_INVAL; 16139ec7b004SRick Macklem break; 16149ec7b004SRick Macklem default: 1615ad600ac8SKonstantin Belousov printf("ncl_doio: type %x unexpected\n", vp->v_type); 16169ec7b004SRick Macklem break; 161774b8d63dSPedro F. Giffuni } 16189ec7b004SRick Macklem if (error) { 16199ec7b004SRick Macklem bp->b_ioflags |= BIO_ERROR; 16209ec7b004SRick Macklem bp->b_error = error; 16219ec7b004SRick Macklem } 16229ec7b004SRick Macklem } else { 16239ec7b004SRick Macklem /* 16249ec7b004SRick Macklem * If we only need to commit, try to commit 16259ec7b004SRick Macklem */ 16269ec7b004SRick Macklem if (bp->b_flags & B_NEEDCOMMIT) { 16279ec7b004SRick Macklem int retv; 16289ec7b004SRick Macklem off_t off; 16299ec7b004SRick Macklem 16309ec7b004SRick Macklem off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 16319ec7b004SRick Macklem retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff, 16329ec7b004SRick Macklem bp->b_wcred, td); 16336b677534SRick Macklem if (NFSCL_FORCEDISM(vp->v_mount) || retv == 0) { 16349ec7b004SRick Macklem bp->b_dirtyoff = bp->b_dirtyend = 0; 16359ec7b004SRick Macklem bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 16369ec7b004SRick Macklem bp->b_resid = 0; 16379ec7b004SRick Macklem bufdone(bp); 16389ec7b004SRick Macklem return (0); 16399ec7b004SRick Macklem } 16409ec7b004SRick Macklem if (retv == NFSERR_STALEWRITEVERF) { 16419ec7b004SRick Macklem ncl_clearcommit(vp->v_mount); 16429ec7b004SRick Macklem } 16439ec7b004SRick Macklem } 16449ec7b004SRick Macklem 16459ec7b004SRick Macklem /* 16469ec7b004SRick Macklem * Setup for actual write 16479ec7b004SRick Macklem */ 16485d85e12fSRick Macklem NFSLOCKNODE(np); 16499ec7b004SRick Macklem if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size) 16509ec7b004SRick Macklem bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE; 16515d85e12fSRick Macklem NFSUNLOCKNODE(np); 16529ec7b004SRick Macklem 16539ec7b004SRick Macklem if (bp->b_dirtyend > bp->b_dirtyoff) { 16549ec7b004SRick Macklem io.iov_len = uiop->uio_resid = bp->b_dirtyend 16559ec7b004SRick Macklem - bp->b_dirtyoff; 16569ec7b004SRick Macklem uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE 16579ec7b004SRick Macklem + bp->b_dirtyoff; 16589ec7b004SRick Macklem io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; 16599ec7b004SRick Macklem uiop->uio_rw = UIO_WRITE; 16601b819cf2SRick Macklem NFSINCRGLOBAL(nfsstatsv1.write_bios); 16619ec7b004SRick Macklem 16629ec7b004SRick Macklem if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC) 16639ec7b004SRick Macklem iomode = NFSWRITE_UNSTABLE; 16649ec7b004SRick Macklem else 16659ec7b004SRick Macklem iomode = NFSWRITE_FILESYNC; 16669ec7b004SRick Macklem 166767c5c2d2SRick Macklem error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit, 16685218d82cSRick Macklem called_from_strategy, 0); 16699ec7b004SRick Macklem 16709ec7b004SRick Macklem /* 16719ec7b004SRick Macklem * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try 16729ec7b004SRick Macklem * to cluster the buffers needing commit. This will allow 16739ec7b004SRick Macklem * the system to submit a single commit rpc for the whole 16749ec7b004SRick Macklem * cluster. We can do this even if the buffer is not 100% 16759ec7b004SRick Macklem * dirty (relative to the NFS blocksize), so we optimize the 16769ec7b004SRick Macklem * append-to-file-case. 16779ec7b004SRick Macklem * 16789ec7b004SRick Macklem * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be 16799ec7b004SRick Macklem * cleared because write clustering only works for commit 16809ec7b004SRick Macklem * rpc's, not for the data portion of the write). 16819ec7b004SRick Macklem */ 16829ec7b004SRick Macklem 16839ec7b004SRick Macklem if (!error && iomode == NFSWRITE_UNSTABLE) { 16849ec7b004SRick Macklem bp->b_flags |= B_NEEDCOMMIT; 16859ec7b004SRick Macklem if (bp->b_dirtyoff == 0 16869ec7b004SRick Macklem && bp->b_dirtyend == bp->b_bcount) 16879ec7b004SRick Macklem bp->b_flags |= B_CLUSTEROK; 16889ec7b004SRick Macklem } else { 16899ec7b004SRick Macklem bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 16909ec7b004SRick Macklem } 16919ec7b004SRick Macklem 16929ec7b004SRick Macklem /* 16939ec7b004SRick Macklem * For an interrupted write, the buffer is still valid 16949ec7b004SRick Macklem * and the write hasn't been pushed to the server yet, 16959ec7b004SRick Macklem * so we can't set BIO_ERROR and report the interruption 16969ec7b004SRick Macklem * by setting B_EINTR. For the B_ASYNC case, B_EINTR 16979ec7b004SRick Macklem * is not relevant, so the rpc attempt is essentially 16989ec7b004SRick Macklem * a noop. For the case of a V3 write rpc not being 16999ec7b004SRick Macklem * committed to stable storage, the block is still 17009ec7b004SRick Macklem * dirty and requires either a commit rpc or another 17019ec7b004SRick Macklem * write rpc with iomode == NFSV3WRITE_FILESYNC before 17029ec7b004SRick Macklem * the block is reused. This is indicated by setting 17039ec7b004SRick Macklem * the B_DELWRI and B_NEEDCOMMIT flags. 17049ec7b004SRick Macklem * 170567c5c2d2SRick Macklem * EIO is returned by ncl_writerpc() to indicate a recoverable 170667c5c2d2SRick Macklem * write error and is handled as above, except that 170767c5c2d2SRick Macklem * B_EINTR isn't set. One cause of this is a stale stateid 170867c5c2d2SRick Macklem * error for the RPC that indicates recovery is required, 170967c5c2d2SRick Macklem * when called with called_from_strategy != 0. 171067c5c2d2SRick Macklem * 17119ec7b004SRick Macklem * If the buffer is marked B_PAGING, it does not reside on 17129ec7b004SRick Macklem * the vp's paging queues so we cannot call bdirty(). The 17139ec7b004SRick Macklem * bp in this case is not an NFS cache block so we should 17149ec7b004SRick Macklem * be safe. XXX 17159ec7b004SRick Macklem * 17169ec7b004SRick Macklem * The logic below breaks up errors into recoverable and 17179ec7b004SRick Macklem * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE 17189ec7b004SRick Macklem * and keep the buffer around for potential write retries. 17199ec7b004SRick Macklem * For the latter (eg ESTALE), we toss the buffer away (B_INVAL) 17209ec7b004SRick Macklem * and save the error in the nfsnode. This is less than ideal 17219ec7b004SRick Macklem * but necessary. Keeping such buffers around could potentially 17229ec7b004SRick Macklem * cause buffer exhaustion eventually (they can never be written 17239ec7b004SRick Macklem * out, so will get constantly be re-dirtied). It also causes 17249ec7b004SRick Macklem * all sorts of vfs panics. For non-recoverable write errors, 17259ec7b004SRick Macklem * also invalidate the attrcache, so we'll be forced to go over 17269ec7b004SRick Macklem * the wire for this object, returning an error to user on next 17279ec7b004SRick Macklem * call (most of the time). 17289ec7b004SRick Macklem */ 17299ec7b004SRick Macklem if (error == EINTR || error == EIO || error == ETIMEDOUT 17309ec7b004SRick Macklem || (!error && (bp->b_flags & B_NEEDCOMMIT))) { 17319ec7b004SRick Macklem bp->b_flags &= ~(B_INVAL|B_NOCACHE); 17329ec7b004SRick Macklem if ((bp->b_flags & B_PAGING) == 0) { 17339ec7b004SRick Macklem bdirty(bp); 17349ec7b004SRick Macklem bp->b_flags &= ~B_DONE; 17359ec7b004SRick Macklem } 173667c5c2d2SRick Macklem if ((error == EINTR || error == ETIMEDOUT) && 173767c5c2d2SRick Macklem (bp->b_flags & B_ASYNC) == 0) 17389ec7b004SRick Macklem bp->b_flags |= B_EINTR; 17399ec7b004SRick Macklem } else { 17409ec7b004SRick Macklem if (error) { 17419ec7b004SRick Macklem bp->b_ioflags |= BIO_ERROR; 17429ec7b004SRick Macklem bp->b_flags |= B_INVAL; 17439ec7b004SRick Macklem bp->b_error = np->n_error = error; 17445d85e12fSRick Macklem NFSLOCKNODE(np); 17459ec7b004SRick Macklem np->n_flag |= NWRITEERR; 17469ec7b004SRick Macklem np->n_attrstamp = 0; 17478f0e65c9SRick Macklem KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 17485d85e12fSRick Macklem NFSUNLOCKNODE(np); 17499ec7b004SRick Macklem } 17509ec7b004SRick Macklem bp->b_dirtyoff = bp->b_dirtyend = 0; 17519ec7b004SRick Macklem } 17529ec7b004SRick Macklem } else { 17539ec7b004SRick Macklem bp->b_resid = 0; 17549ec7b004SRick Macklem bufdone(bp); 17559ec7b004SRick Macklem return (0); 17569ec7b004SRick Macklem } 17579ec7b004SRick Macklem } 17589ec7b004SRick Macklem bp->b_resid = uiop->uio_resid; 1759ead50c94SRick Macklem if (must_commit == 1) 17609ec7b004SRick Macklem ncl_clearcommit(vp->v_mount); 17619ec7b004SRick Macklem bufdone(bp); 17629ec7b004SRick Macklem return (error); 17639ec7b004SRick Macklem } 17649ec7b004SRick Macklem 17659ec7b004SRick Macklem /* 17669ec7b004SRick Macklem * Used to aid in handling ftruncate() operations on the NFS client side. 17679ec7b004SRick Macklem * Truncation creates a number of special problems for NFS. We have to 17689ec7b004SRick Macklem * throw away VM pages and buffer cache buffers that are beyond EOF, and 17699ec7b004SRick Macklem * we have to properly handle VM pages or (potentially dirty) buffers 17709ec7b004SRick Macklem * that straddle the truncation point. 17719ec7b004SRick Macklem */ 17729ec7b004SRick Macklem 17739ec7b004SRick Macklem int 177465417f5eSAlan Somers ncl_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize) 17759ec7b004SRick Macklem { 17769ec7b004SRick Macklem struct nfsnode *np = VTONFS(vp); 17779ec7b004SRick Macklem u_quad_t tsize; 17787f763fc3SRick Macklem int biosize = vp->v_bufobj.bo_bsize; 17799ec7b004SRick Macklem int error = 0; 17809ec7b004SRick Macklem 17815d85e12fSRick Macklem NFSLOCKNODE(np); 17829ec7b004SRick Macklem tsize = np->n_size; 17839ec7b004SRick Macklem np->n_size = nsize; 17845d85e12fSRick Macklem NFSUNLOCKNODE(np); 17859ec7b004SRick Macklem 17869ec7b004SRick Macklem if (nsize < tsize) { 17879ec7b004SRick Macklem struct buf *bp; 17889ec7b004SRick Macklem daddr_t lbn; 17899ec7b004SRick Macklem int bufsize; 17909ec7b004SRick Macklem 17919ec7b004SRick Macklem /* 17929ec7b004SRick Macklem * vtruncbuf() doesn't get the buffer overlapping the 17939ec7b004SRick Macklem * truncation point. We may have a B_DELWRI and/or B_CACHE 17949ec7b004SRick Macklem * buffer that now needs to be truncated. 17959ec7b004SRick Macklem */ 179665417f5eSAlan Somers error = vtruncbuf(vp, nsize, biosize); 17979ec7b004SRick Macklem lbn = nsize / biosize; 179896ecfd98SEd Maste bufsize = nsize - (lbn * biosize); 17999ec7b004SRick Macklem bp = nfs_getcacheblk(vp, lbn, bufsize, td); 18009ec7b004SRick Macklem if (!bp) 18019ec7b004SRick Macklem return EINTR; 18029ec7b004SRick Macklem if (bp->b_dirtyoff > bp->b_bcount) 18039ec7b004SRick Macklem bp->b_dirtyoff = bp->b_bcount; 18049ec7b004SRick Macklem if (bp->b_dirtyend > bp->b_bcount) 18059ec7b004SRick Macklem bp->b_dirtyend = bp->b_bcount; 18069ec7b004SRick Macklem bp->b_flags |= B_RELBUF; /* don't leave garbage around */ 18079ec7b004SRick Macklem brelse(bp); 18089ec7b004SRick Macklem } else { 18099ec7b004SRick Macklem vnode_pager_setsize(vp, nsize); 18109ec7b004SRick Macklem } 18119ec7b004SRick Macklem return(error); 18129ec7b004SRick Macklem } 1813