/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);
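/*
 * The transfers below borrow a pbuf for its kernel virtual address only.
 * A minimal usage sketch (illustrative, matching the calls used in this
 * file):
 *
 *	bp = getpbuf(&ncl_pbuf_freecnt);
 *	pmap_qenter((vm_offset_t)bp->b_data, pages, npages);
 *	... build a uio over that mapping and perform the RPC ...
 *	pmap_qremove((vm_offset_t)bp->b_data, npages);
 *	relpbuf(bp, &ncl_pbuf_freecnt);
 */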
/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	VM_OBJECT_WUNLOCK(object);
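	/*
	 * Illustrative arithmetic for the partially-valid case above
	 * (example values, not from this file): with PAGE_SIZE 4096 and a
	 * 10000 byte file, the page at index 2 can be valid only for bytes
	 * 0..1807 (10000 - 2 * 4096); the pager zero-fills the rest of
	 * that page.
	 */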
	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */
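	/*
	 * Worked example (illustrative, assuming 4 KB pages): for
	 * count = 16384 (four pages), an RPC that returns with
	 * uio.uio_resid = 4096 gives size = 12288, so pages 0-2 are marked
	 * fully valid below and page 3 is left to the short-read handling.
	 */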
	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_WUNLOCK(object);
	return (0);
}
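/*
 * A note on write stability for the putpages path below (summary of the
 * code, no new behavior): a VM_PAGER_PUT_SYNC request is sent as an
 * NFSWRITE_FILESYNC RPC, while everything else goes out as
 * NFSWRITE_UNSTABLE and may require a later commit; the write RPCs use
 * np->n_writecred when one has been cached.
 */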
/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return (rtvals[0]);
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date.  If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
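/*
 * For example (illustrative, per the note above), a caller that must see
 * attributes that are current right now can force a fresh GETATTR RPC:
 *
 *	np->n_attrstamp = 0;
 *	error = VOP_GETATTR(vp, &vattr, cred);
 */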
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}
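/*
 * Lock-pairing note for the helper above (descriptive only): the
 * ncl_upgrade_vnlock()/ncl_downgrade_vnlock() calls are always paired, so
 * every exit path, including the VI_DOOMED case, restores the caller's
 * original lock state before returning.
 */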
/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching / no readaheads.  Just read data into the user buffer. */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
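	/*
	 * Illustrative arithmetic (example values): with biosize = 32768
	 * and BKVASIZE = 16384, a sequential-access hint of 2 encoded in
	 * ioflag gives seqcount = 4, i.e. up to four blocks of read ahead
	 * below.
	 */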
	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return (error);

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset - (lbn * biosize);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */
			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
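			/*
			 * Worked example of the chop (illustrative values,
			 * assuming NFS_DIRBLKSIZ is 8192): with b_resid = 0
			 * and on = 512 the copy below is at most 7680 bytes,
			 * and with n_direofoffset = 20480 at uio_offset =
			 * 16384 it is further clamped to 4096.
			 */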
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed: the first is a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
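/*
 * Note (descriptive): this path is reached from ncl_write() only when
 * newnfs_directio_enable is set and the caller passed IO_DIRECT for a
 * regular file (e.g. a file opened with O_DIRECT), so buffered and
 * direct writes never mix within a single request.
 */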
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}
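/*
 * Chunking arithmetic for nfs_directio_write() (illustrative example):
 * with wsize = 32768, a single 100000 byte iovec goes out as four RPCs
 * of 32768, 32768, 32768 and 1696 bytes, since each pass takes
 * MIN(uio_resid, wsize), further limited by the current iov_len.
 */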
/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int bp_cached, n, on, error = 0, error1;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
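	/*
	 * Example of the bookkeeping below (illustrative values): with
	 * nm_wcommitsize = 1048576 and 786432 bytes of B_NEEDCOMMIT
	 * buffers already pending, an additional 524288 byte write gives
	 * wouldcommit = 1310720 > nm_wcommitsize, so we flush and restart.
	 */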
	if (!(ioflag & IO_SYNC)) {
		int nflag, needrestart = 0;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;

			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
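		/*
		 * Example (illustrative values): with b_dirtyoff = 0 and
		 * b_dirtyend = 512, a new write at on = 1024, n = 512 is
		 * discontiguous (1024 > 512), so the old dirty area is
		 * pushed out with bwrite() first and we retry from "again".
		 */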
	 */

	if (bp->b_dirtyend > 0 &&
	    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
		if (bwrite(bp) == EINTR) {
			error = EINTR;
			break;
		}
		goto again;
	}

	local_resid = uio->uio_resid;
	error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

	if (error != 0 && !bp_cached) {
		/*
		 * This block has no other content than what
		 * was possibly written by the faulty uiomove.
		 * Release it, forgetting the data pages, to
		 * prevent the leak of uninitialized data to
		 * usermode.
		 */
		bp->b_ioflags |= BIO_ERROR;
		brelse(bp);
		uio->uio_offset -= local_resid - uio->uio_resid;
		uio->uio_resid = local_resid;
		break;
	}

	/*
	 * Since this block is being modified, it must be written
	 * again and not just committed.  Since write clustering does
	 * not work for the stage 1 data write, only the stage 2
	 * commit rpc, we have to clear B_CLUSTEROK as well.
	 */
	bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

	/*
	 * Get the partial update on the progress made from
	 * uiomove, if an error occurred.
	 */
	if (error != 0)
		n = local_resid - uio->uio_resid;

	/*
	 * Only update dirtyoff/dirtyend if not a degenerate
	 * condition.
	 */
	if (n > 0) {
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
		vfs_bio_set_valid(bp, on, n);
	}

	/*
	 * If IO_SYNC do bwrite().
	 *
	 * IO_INVAL appears to be unused.  The idea appears to be
	 * to turn off caching in this case.  Very odd.
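	 *
	 * The write dispatch below is, in outline:
	 *
	 *	IO_SYNC			synchronous bwrite()
	 *	on + n == biosize	full block; async ncl_writebp()
	 *	otherwise		delayed write via bdwrite()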
	 * XXX
	 */
	if ((ioflag & IO_SYNC)) {
		if (ioflag & IO_INVAL)
			bp->b_flags |= B_NOCACHE;
		error1 = bwrite(bp);
		if (error1 != 0) {
			if (error == 0)
				error = error1;
			break;
		}
	} else if ((n + on) == biosize) {
		bp->b_flags |= B_ASYNC;
		(void) ncl_writebp(bp, 0, NULL);
	} else {
		bdwrite(bp);
	}

	if (error != 0)
		break;
} while (uio->uio_resid > 0 && n > 0);

if (error != 0) {
	if (ioflag & IO_UNIT) {
		VATTR_NULL(&vattr);
		vattr.va_size = orig_size;
		/* IO_SYNC is handled implicitly */
		(void)VOP_SETATTR(vp, &vattr, cred);
		uio->uio_offset -= orig_resid - uio->uio_resid;
		uio->uio_resid = orig_resid;
	}
}

return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
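 *
 * A typical caller therefore looks like this sketch (compare
 * ncl_meta_setsize() below):
 *
 *	bp = nfs_getcacheblk(vp, lbn, bufsize, td);
 *	if (bp == NULL)
 *		return (EINTR);		(interrupted on an intr mount)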
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		intrflg = 1;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
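	 *
	 * In outline: for V_SAVE, push dirty pages synchronously through
	 * the VM object first (vm_object_page_clean() with OBJPC_SYNC),
	 * then call vinvalbuf(), retrying while it fails and no signal
	 * is pending.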
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	if (NFSHASPNFS(nmp))
		nfscl_layoutcommit(vp, td);
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred,
    struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	mtx_lock(&ncl_iod_mutex);
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
		mtx_unlock(&ncl_iod_mutex);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
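	 *
	 * The handoff protocol with the iod, in outline:
	 *
	 *	ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;	claim it
	 *	ncl_iodmount[iod] = nmp;			assign the mount
	 *	wakeup(&ncl_iodwant[iod]);			kick the iod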
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
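			 *
			 * This is the usual msleep() retry pattern: once
			 * newnfs_msleep() returns, the iod claimed above
			 * may be gone, so jump back to "again" and redo
			 * the scan from scratch rather than assume
			 * anything survived the sleep.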
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
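	 *
	 * A caller is then expected to fall back to synchronous I/O,
	 * roughly as in this sketch (the actual fallback lives in the
	 * strategy routine):
	 *
	 *	if (ncl_asyncio(nmp, bp, NOCRED, td) != 0)
	 *		error = ncl_doio(bp->b_vp, bp, cr, td, 1);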
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL;	/* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);

		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * Clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(newnfsstats.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					ssize_t left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && (vp->v_vflag & VV_TEXT)) {
				mtx_lock(&np->n_mtx);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					mtx_unlock(&np->n_mtx);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					mtx_unlock(&np->n_mtx);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(newnfsstats.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
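			 *
			 * That is, a READDIR that returns no entries at all
			 * (uiop->uio_resid still equal to b_bcount) marks the
			 * buffer B_INVAL rather than caching it as valid data.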
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit.
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend - bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for the actual write.
		 */
		mtx_lock(&np->n_mtx);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		mtx_unlock(&np->n_mtx);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(newnfsstats.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file case.
			 *
			 * (When clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write.)
			 */

			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set.  One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 *
			 * The logic below breaks up errors into recoverable and
			 * unrecoverable.  For the former, we clear B_INVAL|B_NOCACHE
			 * and keep the buffer around for potential write retries.
			 * For the latter (e.g., ESTALE), we toss the buffer away
			 * (B_INVAL) and save the error in the nfsnode.  This is less
			 * than ideal but necessary.  Keeping such buffers around
			 * could potentially cause buffer exhaustion eventually (they
			 * can never be written out, so they will constantly be
			 * re-dirtied).  It also causes all sorts of vfs panics.
			 * For non-recoverable write errors, also invalidate the
			 * attrcache, so we'll be forced to go over the wire for this
			 * object, returning an error to user on next call (most of
			 * the time).
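			 *
			 * Summarized:
			 *
			 *	EINTR, ETIMEDOUT, EIO	recoverable: redirty via
			 *				bdirty(), keep the buffer
			 *	other (e.g., ESTALE)	unrecoverable: B_INVAL
			 *				the buffer, latch NWRITEERR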
			 */
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL | B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					mtx_lock(&np->n_mtx);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
					mtx_unlock(&np->n_mtx);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td,
    u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
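		 *
		 * For example, with biosize 8192 and nsize 10000:
		 *
		 *	lbn = 10000 / 8192 = 1
		 *	bufsize = 10000 - 1 * 8192 = 1808
		 *
		 * so the block straddling the new EOF is re-obtained at
		 * 1808 bytes and its dirty range is clipped to b_bcount
		 * below.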
		 */
		error = vtruncbuf(vp, cred, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize - (lbn * biosize);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;	/* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}