/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

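	/*
	 * Example (illustrative figures only): with PAGE_SIZE == 4096 and
	 * count == 16384, a read that transfers only 10240 bytes leaves
	 * size == 10240 below.  Pages 0 and 1 are then marked fully valid,
	 * page 2 is partially validated (bytes 0-2047) via
	 * vm_page_set_valid_range(), and page 3 is left invalid, which is
	 * the possible zero-fill region past EOF mentioned above.
	 */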
	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED) {
					vm_page_lock(m);
					vm_page_activate(m);
					vm_page_unlock(m);
				} else {
					vm_page_lock(m);
					vm_page_deactivate(m);
					vm_page_unlock(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
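/*
 * Roughly, the check below does the following (sketch only):
 *
 *	if (np->n_flag & NMODIFIED)
 *		flush the cached data, refetch the attributes and record
 *		the new mtime;
 *	else if (the server's mtime differs from np->n_mtime or
 *	    NSIZECHANGED is set)
 *		flush the cached data and record the new mtime;
 *	else
 *		keep using the cached data.
 */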
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/ no readaheads. Just read data into the user buffer */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize;
				    nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) != NULL)
						continue;
					rabp = nfs_getcacheblk(vp, rabn, biosize, td);
					if (!rabp) {
						error = newnfs_sigintr(nmp, td);
						return (error ? error : EINTR);
					}
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
							break;
						}
					} else {
						brelse(rabp);
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If
			 * this fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out
			 * how many bytes we can copy out of the bp.  Note
			 * that bcount is NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the
			 * uio.
			 */
			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified
					 * on the server.  The only way to get
					 * the block is by reading from the
					 * beginning to get all the offset
					 * cookies.
					 *
					 * Leave the last bp intact unless there
					 * is an error.  Loop back up to the
					 * while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL ==
							 * directory EOF, use
							 * the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the
						 * block and the for loop will
						 * break out.  If no error and
						 * this is not the block we
						 * want, we throw away the
						 * block and go for the next
						 * one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit
				 * another cookie error.  If we hit an error
				 * and it wasn't a cookie error, we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have
			 *  the directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount )
			 * is chopped for the EOF condition, we cannot tell how
			 * large NFS directories are going to be until we hit
			 * EOF.  So an NFS directory buffer is *not* chopped to
			 * its EOF.  Now, it just so happens that b_resid will
			 * effectively chop it to EOF.  *BUT* this information
			 * is lost if the buffer goes away and is reconstituted
			 * into a B_CACHE state ( due to being VMIO ) later.
			 * So we keep track of the directory eof in
			 * np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
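			/*
			 * Example (illustrative figures only, assuming
			 * NFS_DIRBLKSIZ is 8192): for a cached directory block
			 * with b_resid == 2048 and a read starting at
			 * on == 1024, the lmin() below yields at most
			 * 8192 - 2048 - 1024 == 5120 bytes, and n is further
			 * clamped so the copy never runs past n_direofoffset.
			 */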
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset &&
			    n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		};

		if (n > 0) {
			error = uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed. The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
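/*
 * Example (illustrative figures only): a 128k write with nm_wsize == 32k is
 * peeled off below as four 32k chunks, each sent as its own write RPC.  In
 * the sync case each chunk is a FILESYNC write RPC; in the async case each
 * chunk is first copied into a malloc'd staging buffer and handed to an
 * nfsiod via ncl_asyncio(), falling back to the sync path on failure.
 */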
static int
nfs_directio_write(vp, uiop, cred, ioflag)
	struct vnode *vp;
	struct uio *uiop;
	struct ucred *cred;
	int ioflag;
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int bp_cached, n, on, error = 0;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
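	/*
	 * In other words (sketch of the logic below): wouldcommit is the sum
	 * of b_bcount over this vnode's dirty B_NEEDCOMMIT buffers plus the
	 * bytes this write would add.  If even the new write alone exceeds
	 * nm_wcommitsize, the write is converted to IO_SYNC; if the combined
	 * total exceeds it, we flush and restart, so uncommitted buffers
	 * cannot grow without bound and deadlock the buffer cache.
	 */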
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */
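		/*
		 * Note (sketch of the logic below): "the write covers the
		 * entire buffer" is the (on == 0 && n == bcount) case.  Every
		 * byte of the buffer is about to be overwritten, so B_CACHE
		 * can be set without reading from the server; bp_cached
		 * remembers whether the data was really cached so that a
		 * failed uiomove() does not expose uninitialized data.
		 */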
		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

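		/*
		 * The new range [on, on + n) neither overlaps nor abuts the
		 * existing dirty region, so push the old dirty data out with
		 * a synchronous bwrite() and retry with a clean buffer.
		 */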
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what was
			 * possibly written by the failed uiomove.  Release
			 * it, forgetting the data pages, to prevent the
			 * leak of uninitialized data to usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * If an error occurred while copying, account only for the
		 * progress that uiomove actually made.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
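		/*
		 * Three ways to push the data out: a synchronous bwrite()
		 * when IO_SYNC is requested, an asynchronous write when the
		 * write reaches the end of the block, or a delayed write for
		 * a partial block.
		 */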
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error != 0) {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = NFS_PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
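	/*
	 * With V_SAVE, synchronously clean the vnode's VM object first so
	 * that dirty pages are written back before the buffers are
	 * invalidated below.
	 */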
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	mtx_lock(&ncl_iod_mutex);
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
		mtx_unlock(&ncl_iod_mutex);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == NFS_PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL;	/* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);
		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(newnfsstats.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					ssize_t left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && (vp->v_vflag & VV_TEXT)) {
				mtx_lock(&np->n_mtx);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					mtx_unlock(&np->n_mtx);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					mtx_unlock(&np->n_mtx);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(newnfsstats.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend - bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		mtx_lock(&np->n_mtx);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		mtx_unlock(&np->n_mtx);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(newnfsstats.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */

			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set.  One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 *
			 * The logic below breaks up errors into recoverable and
			 * unrecoverable.  For the former, we clear B_INVAL|B_NOCACHE
			 * and keep the buffer around for potential write retries.
			 * For the latter (e.g., ESTALE), we toss the buffer away (B_INVAL)
			 * and save the error in the nfsnode.  This is less than ideal
			 * but necessary.  Keeping such buffers around could potentially
			 * cause buffer exhaustion eventually (they can never be written
			 * out, so they will constantly be re-dirtied).  It also causes
			 * all sorts of vfs panics.  For non-recoverable write errors,
			 * also invalidate the attrcache, so we'll be forced to go over
			 * the wire for this object, returning an error to user on next
			 * call (most of the time).
			 */
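			/*
			 * Recoverable cases: the write was interrupted, timed
			 * out, returned EIO, or succeeded as an unstable
			 * write that still needs a commit; keep the buffer
			 * dirty so it can be retried or committed later.
			 */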
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					mtx_lock(&np->n_mtx);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
					mtx_unlock(&np->n_mtx);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
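		/*
		 * lbn is the block that now contains the new EOF and
		 * bufsize is the number of valid bytes remaining in
		 * that block.
		 */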
		error = vtruncbuf(vp, cred, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return EINTR;
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;	/* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}