/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2019 Joyent, Inc.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/filio.h>
#include <sys/atomic.h>

#include <sys/fssnap_if.h>
#include <sys/fs/ufs_fs.h>
#include <sys/fs/ufs_lockfs.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/fs/ufs_quota.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/dirent.h>		/* must be AFTER <sys/fs/fsdir.h>! */
#include <sys/errno.h>

#include <sys/filio.h>		/* _FIOIO */

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kmem.h>
#include <vm/rm.h>
#include <sys/swap.h>
#include <sys/epm.h>

#include <fs/fs_subr.h>

static void *ufs_directio_zero_buf;
static int ufs_directio_zero_len = 8192;

int ufs_directio_enabled = 1;	/* feature is enabled */

/*
 * for kstats reader
 */
struct ufs_directio_kstats {
    kstat_named_t   logical_reads;
    kstat_named_t   phys_reads;
    kstat_named_t   hole_reads;
    kstat_named_t   nread;
    kstat_named_t   logical_writes;
    kstat_named_t   phys_writes;
    kstat_named_t   nwritten;
    kstat_named_t   nflushes;
} ufs_directio_kstats = {
    { "logical_reads",	KSTAT_DATA_UINT64 },
    { "phys_reads",	KSTAT_DATA_UINT64 },
    { "hole_reads",	KSTAT_DATA_UINT64 },
    { "nread",		KSTAT_DATA_UINT64 },
    { "logical_writes",	KSTAT_DATA_UINT64 },
    { "phys_writes",	KSTAT_DATA_UINT64 },
    { "nwritten",	KSTAT_DATA_UINT64 },
    { "nflushes",	KSTAT_DATA_UINT64 },
};

kstat_t *ufs_directio_kstatsp;

/*
 * use kmem_cache_create for direct-physio buffers. This has shown
 * a better cache distribution compared to buffers on the
 * stack.
 * It also avoids semaphore construction/destruction
 * per request.
 */
struct directio_buf {
    struct directio_buf *next;
    char	*addr;
    size_t	nbytes;
    struct buf	buf;
};
static struct kmem_cache *directio_buf_cache;


/* ARGSUSED */
static int
directio_buf_constructor(void *dbp, void *cdrarg, int kmflags)
{
    bioinit((struct buf *)&((struct directio_buf *)dbp)->buf);
    return (0);
}

/* ARGSUSED */
static void
directio_buf_destructor(void *dbp, void *cdrarg)
{
    biofini((struct buf *)&((struct directio_buf *)dbp)->buf);
}

void
directio_bufs_init(void)
{
    directio_buf_cache = kmem_cache_create("directio_buf_cache",
        sizeof (struct directio_buf), 0,
        directio_buf_constructor, directio_buf_destructor,
        NULL, NULL, NULL, 0);
}

void
ufs_directio_init(void)
{
    /*
     * kstats
     */
    ufs_directio_kstatsp = kstat_create("ufs", 0,
        "directio", "ufs", KSTAT_TYPE_NAMED,
        sizeof (ufs_directio_kstats) / sizeof (kstat_named_t),
        KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE);
    if (ufs_directio_kstatsp) {
        ufs_directio_kstatsp->ks_data = (void *)&ufs_directio_kstats;
        kstat_install(ufs_directio_kstatsp);
    }
    /*
     * kzero is broken so we have to use a private buf of zeroes
     */
    ufs_directio_zero_buf = kmem_zalloc(ufs_directio_zero_len, KM_SLEEP);
    directio_bufs_init();
}

/*
 * Wait for the first direct IO operation to finish
 */
static int
directio_wait_one(struct directio_buf *dbp, long *bytes_iop)
{
    buf_t	*bp;
    int		error;

    /*
     * Wait for IO to finish
     */
    bp = &dbp->buf;
    error = biowait(bp);

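    /*
     * Note: biowait() sleeps until the I/O issued by directio_start()
     * is marked done and returns any error recorded on the buf.
     */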
    /*
     * bytes_io will be used to figure out a resid
     * for the caller. The resid is approximated by reporting
     * the bytes following the first failed IO as the residual.
     *
     * I am cautious about using b_resid because I
     * am not sure how well the disk drivers maintain it.
     */
    if (error)
        if (bp->b_resid)
            *bytes_iop = bp->b_bcount - bp->b_resid;
        else
            *bytes_iop = 0;
    else
        *bytes_iop += bp->b_bcount;
    /*
     * Release direct IO resources
     */
    bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_SHADOW);
    kmem_cache_free(directio_buf_cache, dbp);
    return (error);
}

/*
 * Wait for all of the direct IO operations to finish
 */

static int
directio_wait(struct directio_buf *tail, long *bytes_iop)
{
    int		error = 0, newerror;
    struct directio_buf	*dbp;

    /*
     * The linked list of directio buf structures is maintained
     * in reverse order (tail->last request->penultimate request->...)
     */
    while ((dbp = tail) != NULL) {
        tail = dbp->next;
        newerror = directio_wait_one(dbp, bytes_iop);
        if (error == 0)
            error = newerror;
    }
    return (error);
}
/*
 * Initiate direct IO request
 */
static void
directio_start(struct ufsvfs *ufsvfsp, struct inode *ip, size_t nbytes,
    offset_t offset, char *addr, enum seg_rw rw, struct proc *procp,
    struct directio_buf **tailp, page_t **pplist)
{
    buf_t *bp;
    struct directio_buf *dbp;

    /*
     * Allocate a directio buf header
     *	Note - list is maintained in reverse order.
     *	directio_wait_one() depends on this fact when
     *	adjusting the ``bytes_io'' param. bytes_io
     *	is used to compute a residual in the case of error.
     */
    dbp = kmem_cache_alloc(directio_buf_cache, KM_SLEEP);
    dbp->next = *tailp;
    *tailp = dbp;

    /*
     * Initialize buf header
     */
    dbp->addr = addr;
    dbp->nbytes = nbytes;
    bp = &dbp->buf;
    bp->b_edev = ip->i_dev;
    bp->b_lblkno = btodt(offset);
    bp->b_bcount = nbytes;
    bp->b_un.b_addr = addr;
    bp->b_proc = procp;
    bp->b_file = ip->i_vnode;

    /*
     * Note that S_WRITE implies B_READ and vice versa: a read(2)
     * will B_READ data from the filesystem and S_WRITE it into
     * the user's buffer; a write(2) will S_READ data from the
     * user's buffer and B_WRITE it to the filesystem.
     */
    if (rw == S_WRITE) {
        bp->b_flags = B_BUSY | B_PHYS | B_READ;
        ufs_directio_kstats.phys_reads.value.ui64++;
        ufs_directio_kstats.nread.value.ui64 += nbytes;
    } else {
        bp->b_flags = B_BUSY | B_PHYS | B_WRITE;
        ufs_directio_kstats.phys_writes.value.ui64++;
        ufs_directio_kstats.nwritten.value.ui64 += nbytes;
    }
    bp->b_shadow = pplist;
    if (pplist != NULL)
        bp->b_flags |= B_SHADOW;

    /*
     * Issue I/O request.
     */
    ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
    if (ufsvfsp->vfs_snapshot)
        fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
    else
        (void) bdev_strategy(bp);

    if (rw == S_WRITE)
        lwp_stat_update(LWP_STAT_OUBLK, 1);
    else
        lwp_stat_update(LWP_STAT_INBLK, 1);

}

uint32_t	ufs_shared_writes;	/* writes done w/ lock shared */
uint32_t	ufs_cur_writes;		/* # concurrent writes */
uint32_t	ufs_maxcur_writes;	/* high water concurrent writes */
uint32_t	ufs_posix_hits;		/* writes done /w lock excl. */

/*
 * Force POSIX synchronous data integrity on all writes for testing.
 */
uint32_t	ufs_force_posix_sdi = 0;

/*
 * Direct Write
 */

int
ufs_directio_write(struct inode *ip, uio_t *arg_uio, int ioflag, int rewrite,
    cred_t *cr, int *statusp)
{
    long		resid, bytes_written;
    u_offset_t		size, uoff;
    uio_t		*uio = arg_uio;
    rlim64_t		limit = uio->uio_llimit;
    int			on, n, error, newerror, len, has_holes;
    daddr_t		bn;
    size_t		nbytes;
    struct fs		*fs;
    vnode_t		*vp;
    iovec_t		*iov;
    struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
    struct proc		*procp;
    struct as		*as;
    struct directio_buf	*tail;
    int			exclusive, ncur, bmap_peek;
    uio_t		copy_uio;
    iovec_t		copy_iov;
    char		*copy_base;
    long		copy_resid;

    /*
     * assume that directio isn't possible (normal case)
     */
    *statusp = DIRECTIO_FAILURE;

    /*
     * Don't go direct
     */
    if (ufs_directio_enabled == 0)
        return (0);

    /*
     * mapped file; never mind
     */
    if (ip->i_mapcnt)
        return (0);

    /*
     * CAN WE DO DIRECT IO?
     */
    uoff = uio->uio_loffset;
    resid = uio->uio_resid;

    /*
     * beyond limit
     */
    if (uoff + resid > limit)
        return (0);

    /*
     * must be sector aligned
     */
    if ((uoff & (u_offset_t)(DEV_BSIZE - 1)) || (resid & (DEV_BSIZE - 1)))
        return (0);

    /*
     * SHOULD WE DO DIRECT IO?
     */
    size = ip->i_size;
    has_holes = -1;

    /*
     * only on regular files; no metadata
     */
    if (((ip->i_mode & IFMT) != IFREG) || ip->i_ufsvfs->vfs_qinod == ip)
        return (0);

    /*
     * Synchronous, allocating writes run very slow in Direct-Mode
     *	XXX - can be fixed with bmap_write changes for large writes!!!
     *	XXX - can be fixed for updates to "almost-full" files
     *	XXX - WARNING - system hangs if bmap_write() has to
     *			allocate lots of pages since pageout
     *			suspends on locked inode
     */
    if (!rewrite && (ip->i_flag & ISYNC)) {
        if ((uoff + resid) > size)
            return (0);
        has_holes = bmap_has_holes(ip);
        if (has_holes)
            return (0);
    }

    /*
     * Each iovec must be short aligned and sector aligned.  If
     * one is not, then kmem_alloc a new buffer and copy all of
     * the smaller buffers into the new buffer.  This new
     * buffer will be short aligned and sector aligned.
     */
    iov = uio->uio_iov;
    nbytes = uio->uio_iovcnt;
    while (nbytes--) {
        if (((uint_t)iov->iov_len & (DEV_BSIZE - 1)) != 0 ||
            (intptr_t)(iov->iov_base) & 1) {
            copy_resid = uio->uio_resid;
            copy_base = kmem_alloc(copy_resid, KM_NOSLEEP);
            if (copy_base == NULL)
                return (0);
            copy_iov.iov_base = copy_base;
            copy_iov.iov_len = copy_resid;
            copy_uio.uio_iov = &copy_iov;
            copy_uio.uio_iovcnt = 1;
            copy_uio.uio_segflg = UIO_SYSSPACE;
            copy_uio.uio_extflg = UIO_COPY_DEFAULT;
            copy_uio.uio_loffset = uio->uio_loffset;
            copy_uio.uio_resid = uio->uio_resid;
            copy_uio.uio_llimit = uio->uio_llimit;
            error = uiomove(copy_base, copy_resid, UIO_WRITE, uio);
            if (error) {
                kmem_free(copy_base, copy_resid);
                return (0);
            }
            uio = &copy_uio;
            break;
        }
        iov++;
    }

    /*
     * From here on down, all error exits must go to errout and
     * not simply
     * return a 0.
     */

    /*
     * DIRECTIO
     */

    fs = ip->i_fs;

    /*
     * POSIX check. If attempting a concurrent re-write, make sure
     * that this will be a single request to the driver to meet
     * POSIX synchronous data integrity requirements.
     */
    bmap_peek = 0;
    if (rewrite && ((ioflag & FDSYNC) || ufs_force_posix_sdi)) {
        int upgrade = 0;

        /* check easy conditions first */
        if (uio->uio_iovcnt != 1 || resid > ufsvfsp->vfs_ioclustsz) {
            upgrade = 1;
        } else {
            /* now look for contiguous allocation */
            len = (ssize_t)blkroundup(fs, resid);
            error = bmap_read(ip, uoff, &bn, &len);
            if (error || bn == UFS_HOLE || len == 0)
                goto errout;
            /* save a call to bmap_read later */
            bmap_peek = 1;
            if (len < resid)
                upgrade = 1;
        }
        if (upgrade) {
            rw_exit(&ip->i_contents);
            rw_enter(&ip->i_contents, RW_WRITER);
            ufs_posix_hits++;
        }
    }


    /*
     * allocate space
     */

    /*
     * If attempting a re-write, there is no allocation to do.
     * bmap_write would trip an ASSERT if i_contents is held shared.
     */
    if (rewrite)
        goto skip_alloc;

    do {
        on = (int)blkoff(fs, uoff);
        n = (int)MIN(fs->fs_bsize - on, resid);
        if ((uoff + n) > ip->i_size) {
            error = bmap_write(ip, uoff, (int)(on + n),
                (int)(uoff & (offset_t)MAXBOFFSET) == 0,
                NULL, cr);
            /* Caller is responsible for updating i_seq if needed */
            if (error)
                break;
            ip->i_size = uoff + n;
            ip->i_flag |= IATTCHG;
        } else if (n == MAXBSIZE) {
            error = bmap_write(ip, uoff, (int)(on + n),
                BI_ALLOC_ONLY, NULL, cr);
            /* Caller is responsible for updating i_seq if needed */
        } else {
            if (has_holes < 0)
                has_holes = bmap_has_holes(ip);
            if (has_holes) {
                uint_t	blk_size;
                u_offset_t offset;

                offset = uoff & (offset_t)fs->fs_bmask;
                blk_size = (int)blksize(fs, ip,
                    (daddr_t)lblkno(fs, offset));
                error = bmap_write(ip, uoff, blk_size,
                    BI_NORMAL, NULL, cr);
                /*
                 * Caller is responsible for updating
                 * i_seq if needed
                 */
            } else
                error = 0;
        }
        if (error)
            break;
        uoff += n;
        resid -= n;
        /*
         * if file has grown larger than 2GB, set flag
         * in superblock if not already set
         */
        if ((ip->i_size > MAXOFF32_T) &&
            !(fs->fs_flags & FSLARGEFILES)) {
            ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
            mutex_enter(&ufsvfsp->vfs_lock);
            fs->fs_flags |= FSLARGEFILES;
            ufs_sbwrite(ufsvfsp);
            mutex_exit(&ufsvfsp->vfs_lock);
        }
    } while (resid);

    if (error) {
        /*
         * restore original state
         */
        if (resid) {
            if (size == ip->i_size)
                goto errout;
            (void) ufs_itrunc(ip, size, 0, cr);
        }
        /*
         * try non-directio path
         */
        goto errout;
    }
skip_alloc:
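    /*
     * At this point any blocks the write needs have been allocated (or
     * this is a re-write of existing blocks), so what remains is to
     * invalidate cached pages and issue the raw transfers.
     */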

    /*
     * get rid of cached pages
     */
    vp = ITOV(ip);
    exclusive = rw_write_held(&ip->i_contents);
    if (vn_has_cached_data(vp)) {
        if (!exclusive) {
            /*
             * Still holding i_rwlock, so no allocations
             * can happen after dropping contents.
             */
            rw_exit(&ip->i_contents);
            rw_enter(&ip->i_contents, RW_WRITER);
        }
        (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
            B_INVAL, cr, NULL);
        if (vn_has_cached_data(vp))
            goto errout;
        if (!exclusive)
            rw_downgrade(&ip->i_contents);
        ufs_directio_kstats.nflushes.value.ui64++;
    }

    /*
     * Direct Writes
     */

    if (!exclusive) {
        ufs_shared_writes++;
        ncur = atomic_inc_32_nv(&ufs_cur_writes);
        if (ncur > ufs_maxcur_writes)
            ufs_maxcur_writes = ncur;
    }

    /*
     * proc and as are for VM operations in directio_start()
     */
    if (uio->uio_segflg == UIO_USERSPACE) {
        procp = ttoproc(curthread);
        as = procp->p_as;
    } else {
        procp = NULL;
        as = &kas;
    }
    *statusp = DIRECTIO_SUCCESS;
    error = 0;
    newerror = 0;
    resid = uio->uio_resid;
    bytes_written = 0;
    ufs_directio_kstats.logical_writes.value.ui64++;
    while (error == 0 && newerror == 0 && resid && uio->uio_iovcnt) {
        size_t pglck_len, pglck_size;
        caddr_t pglck_base;
        page_t **pplist, **spplist;

        tail = NULL;

        /*
         * Adjust number of bytes
         */
        iov = uio->uio_iov;
        pglck_len = (size_t)MIN(iov->iov_len, resid);
        pglck_base = iov->iov_base;
        if (pglck_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            continue;
        }

        /*
         * Try to lock down the largest chunk of pages possible.
         */
        pglck_len = (size_t)MIN(pglck_len, ufsvfsp->vfs_ioclustsz);
        error = as_pagelock(as, &pplist, pglck_base, pglck_len, S_READ);

        if (error)
            break;

        pglck_size = pglck_len;
        while (pglck_len) {

            nbytes = pglck_len;
            uoff = uio->uio_loffset;

            if (!bmap_peek) {

                /*
                 * Re-adjust number of bytes to contiguous
                 * range. May have already called bmap_read
                 * in the case of a concurrent rewrite.
                 */
                len = (ssize_t)blkroundup(fs, nbytes);
                error = bmap_read(ip, uoff, &bn, &len);
                if (error)
                    break;
                if (bn == UFS_HOLE || len == 0)
                    break;
            }
            nbytes = (size_t)MIN(nbytes, len);
            bmap_peek = 0;

            /*
             * Get the pagelist pointer for this offset to be
             * passed to directio_start.
             */

            if (pplist != NULL)
                spplist = pplist +
                    btop((uintptr_t)iov->iov_base -
                    ((uintptr_t)pglck_base & PAGEMASK));
            else
                spplist = NULL;

            /*
             * Kick off the direct write requests
             */
            directio_start(ufsvfsp, ip, nbytes, ldbtob(bn),
                iov->iov_base, S_READ, procp, &tail, spplist);

            /*
             * Adjust pointers and counters
             */
            iov->iov_len -= nbytes;
            iov->iov_base += nbytes;
            uio->uio_loffset += nbytes;
            resid -= nbytes;
            pglck_len -= nbytes;
        }

        /*
         * Wait for outstanding requests
         */
        newerror = directio_wait(tail, &bytes_written);

        /*
         * Release VM resources
         */
        as_pageunlock(as, pplist, pglck_base, pglck_size, S_READ);

    }

    if (!exclusive) {
        atomic_dec_32(&ufs_cur_writes);
        /*
         * If this write was done shared,
         * readers may have pulled in unmodified pages.
         * Get rid of these potentially stale pages.
         */
        if (vn_has_cached_data(vp)) {
            rw_exit(&ip->i_contents);
            rw_enter(&ip->i_contents, RW_WRITER);
            (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
                B_INVAL, cr, NULL);
            ufs_directio_kstats.nflushes.value.ui64++;
            rw_downgrade(&ip->i_contents);
        }
    }

    /*
     * If error, adjust resid to begin at the first
     * un-writable byte.
     */
    if (error == 0)
        error = newerror;
    if (error)
        resid = uio->uio_resid - bytes_written;
    arg_uio->uio_resid = resid;

    if (!rewrite) {
        ip->i_flag |= IUPD | ICHG;
        /* Caller will update i_seq */
        TRANS_INODE(ip->i_ufsvfs, ip);
    }
    /*
     * If there is a residual; adjust the EOF if necessary
     */
    if (resid) {
        if (size != ip->i_size) {
            if (uio->uio_loffset > size)
                size = uio->uio_loffset;
            (void) ufs_itrunc(ip, size, 0, cr);
        }
    }

    if (uio == &copy_uio)
        kmem_free(copy_base, copy_resid);

    return (error);

errout:
    if (uio == &copy_uio)
        kmem_free(copy_base, copy_resid);

    return (0);
}
/*
 * Direct read of a hole
 */
static int
directio_hole(struct uio *uio, size_t nbytes)
{
    int		error = 0, nzero;
    uio_t	phys_uio;
    iovec_t	phys_iov;

    ufs_directio_kstats.hole_reads.value.ui64++;
    ufs_directio_kstats.nread.value.ui64 += nbytes;

    phys_iov.iov_base = uio->uio_iov->iov_base;
    phys_iov.iov_len = nbytes;

    phys_uio.uio_iov = &phys_iov;
    phys_uio.uio_iovcnt = 1;
    phys_uio.uio_resid = phys_iov.iov_len;
    phys_uio.uio_segflg = uio->uio_segflg;
    phys_uio.uio_extflg = uio->uio_extflg;
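    /*
     * Satisfy the hole portion of the read by copying out of the
     * preallocated zero buffer, ufs_directio_zero_len bytes at a time,
     * until the requested range has been filled.
     */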
    while (error == 0 && phys_uio.uio_resid) {
        nzero = (int)MIN(phys_iov.iov_len, ufs_directio_zero_len);
        error = uiomove(ufs_directio_zero_buf, nzero, UIO_READ,
            &phys_uio);
    }
    return (error);
}

/*
 * Direct Read
 */
int
ufs_directio_read(struct inode *ip, uio_t *uio, cred_t *cr, int *statusp)
{
    ssize_t		resid, bytes_read;
    u_offset_t		size, uoff;
    int			error, newerror, len;
    size_t		nbytes;
    struct fs		*fs;
    vnode_t		*vp;
    daddr_t		bn;
    iovec_t		*iov;
    struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
    struct proc		*procp;
    struct as		*as;
    struct directio_buf	*tail;

    /*
     * assume that directio isn't possible (normal case)
     */
    *statusp = DIRECTIO_FAILURE;

    /*
     * Don't go direct
     */
    if (ufs_directio_enabled == 0)
        return (0);

    /*
     * mapped file; never mind
     */
    if (ip->i_mapcnt)
        return (0);

    /*
     * CAN WE DO DIRECT IO?
     */
    /*
     * must be sector aligned
     */
    uoff = uio->uio_loffset;
    resid = uio->uio_resid;
    if ((uoff & (u_offset_t)(DEV_BSIZE - 1)) || (resid & (DEV_BSIZE - 1)))
        return (0);
    /*
     * must be short aligned and sector aligned
     */
    iov = uio->uio_iov;
    nbytes = uio->uio_iovcnt;
    while (nbytes--) {
        if (((size_t)iov->iov_len & (DEV_BSIZE - 1)) != 0)
            return (0);
        if ((intptr_t)(iov++->iov_base) & 1)
            return (0);
    }

    /*
     * DIRECTIO
     */
    fs = ip->i_fs;

    /*
     * don't read past EOF
     */
    size = ip->i_size;

    /*
     * The file offset is past EOF so bail out here; we don't want
     * to update uio_resid and make it look like we read something.
     * We say that direct I/O was a success to avoid having rdip()
     * go through the same "read past EOF logic".
     */
    if (uoff >= size) {
        *statusp = DIRECTIO_SUCCESS;
        return (0);
    }

    /*
     * The read would extend past EOF so make it smaller.
     */
    if ((uoff + resid) > size) {
        resid = size - uoff;
        /*
         * recheck sector alignment
         */
        if (resid & (DEV_BSIZE - 1))
            return (0);
    }

    /*
     * At this point, we know there is some real work to do.
     */
    ASSERT(resid);

    /*
     * get rid of cached pages
     */
    vp = ITOV(ip);
    if (vn_has_cached_data(vp)) {
        rw_exit(&ip->i_contents);
        rw_enter(&ip->i_contents, RW_WRITER);
        (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
            B_INVAL, cr, NULL);
        if (vn_has_cached_data(vp))
            return (0);
        rw_downgrade(&ip->i_contents);
        ufs_directio_kstats.nflushes.value.ui64++;
    }
    /*
     * Direct Reads
     */

    /*
     * proc and as are for VM operations in directio_start()
     */
    if (uio->uio_segflg == UIO_USERSPACE) {
        procp = ttoproc(curthread);
        as = procp->p_as;
    } else {
        procp = NULL;
        as = &kas;
    }

    *statusp = DIRECTIO_SUCCESS;
    error = 0;
    newerror = 0;
    bytes_read = 0;
    ufs_directio_kstats.logical_reads.value.ui64++;
    while (error == 0 && newerror == 0 && resid && uio->uio_iovcnt) {
        size_t pglck_len, pglck_size;
        caddr_t pglck_base;
        page_t **pplist, **spplist;

        tail = NULL;

        /*
         * Adjust number of bytes
         */
        iov = uio->uio_iov;
        pglck_len = (size_t)MIN(iov->iov_len, resid);
        pglck_base = iov->iov_base;
        if (pglck_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            continue;
        }

        /*
         * Try to lock down the largest chunk of pages possible.
         */
        pglck_len = (size_t)MIN(pglck_len, ufsvfsp->vfs_ioclustsz);
        error = as_pagelock(as, &pplist, pglck_base,
            pglck_len, S_WRITE);

        if (error)
            break;

        pglck_size = pglck_len;
        while (pglck_len) {

            nbytes = pglck_len;
            uoff = uio->uio_loffset;

            /*
             * Re-adjust number of bytes to contiguous range
             */
            len = (ssize_t)blkroundup(fs, nbytes);
            error = bmap_read(ip, uoff, &bn, &len);
            if (error)
                break;

            if (bn == UFS_HOLE) {
                nbytes = (size_t)MIN(fs->fs_bsize -
                    (long)blkoff(fs, uoff), nbytes);
                error = directio_hole(uio, nbytes);
                /*
                 * Hole reads are not added to the list
                 * processed by directio_wait() below so
                 * account for bytes read here.
                 */
                if (!error)
                    bytes_read += nbytes;
            } else {
                nbytes = (size_t)MIN(nbytes, len);

                /*
                 * Get the pagelist pointer for this offset
                 * to be passed to directio_start.
                 */
                if (pplist != NULL)
                    spplist = pplist +
                        btop((uintptr_t)iov->iov_base -
                        ((uintptr_t)pglck_base & PAGEMASK));
                else
                    spplist = NULL;

                /*
                 * Kick off the direct read requests
                 */
                directio_start(ufsvfsp, ip, nbytes,
                    ldbtob(bn), iov->iov_base,
                    S_WRITE, procp, &tail, spplist);
            }

            if (error)
                break;

            /*
             * Adjust pointers and counters
             */
            iov->iov_len -= nbytes;
            iov->iov_base += nbytes;
            uio->uio_loffset += nbytes;
            resid -= nbytes;
            pglck_len -= nbytes;
        }

        /*
         * Wait for outstanding requests
         */
        newerror = directio_wait(tail, &bytes_read);
        /*
         * Release VM resources
         */
        as_pageunlock(as, pplist, pglck_base, pglck_size, S_WRITE);

    }

    /*
     * If error, adjust resid to begin at the first
     * un-read byte.
     */
    if (error == 0)
        error = newerror;
    uio->uio_resid -= bytes_read;
    return (error);
}
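
/*
 * Illustrative sketch (not part of this file's code): an application
 * typically opts into this path with directio(3C) on an open UFS file,
 * for example:
 *
 *	int fd = open("/ufs/file", O_RDWR);
 *	if (fd >= 0 && directio(fd, DIRECTIO_ON) == 0) {
 *		// Properly aligned, unmapped read(2)/write(2) requests may
 *		// then be handled by ufs_directio_read()/ufs_directio_write().
 *	}
 *
 * The alignment and mapping restrictions checked above still apply;
 * requests that fail them return DIRECTIO_FAILURE and silently fall
 * back to the normal cached path.
 */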