/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
 * All rights reserved.
 */

/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/dirent.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/swap.h>
#include <sys/errno.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/pathconf.h>
#include <sys/utsname.h>
#include <sys/dnlc.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/policy.h>
#include <sys/sdt.h>

#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/rnode.h>
#include <nfs/nfs_acl.h>
#include <nfs/lm.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <vm/seg_vn.h>

#include <fs/fs_subr.h>

#include <sys/ddi.h>

static int nfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
    cred_t *);
static int nfswrite(vnode_t *, caddr_t, uint_t, int, cred_t *);
static int nfsread(vnode_t *, caddr_t, uint_t, int, size_t *, cred_t *);
static int nfssetattr(vnode_t *, struct vattr *, int, cred_t *);
static int nfslookup_dnlc(vnode_t *, char *, vnode_t **, cred_t *);
static int nfslookup_otw(vnode_t *, char *, vnode_t **, cred_t *, int);
static int nfsrename(vnode_t *, char *, vnode_t *, char *, cred_t *,
    caller_context_t *);
static int nfsreaddir(vnode_t *, rddir_cache *, cred_t *);
static int nfs_bio(struct buf *, cred_t *);
static int nfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
    page_t *[], size_t, struct seg *, caddr_t,
    enum seg_rw, cred_t *);
static void nfs_readahead(vnode_t *, u_offset_t, caddr_t, struct seg *,
    cred_t *);
static int nfs_sync_putapage(vnode_t *, page_t *, u_offset_t, size_t,
    int, cred_t *);
static int nfs_sync_pageio(vnode_t *, page_t *, u_offset_t, size_t,
    int, cred_t *);
static void nfs_delmap_callback(struct as *, void *, uint_t);

/*
 * Error flags used to pass information about certain special errors
 * which need to be handled specially.
 */
#define	NFS_EOF		-98

/*
 * These are the vnode ops routines which implement the vnode interface to
 * the networked file system.  These routines just take their parameters,
 * make them look networkish by putting the right info into interface structs,
 * and then call the appropriate remote routine(s) to do the work.
 *
 * Note on directory name lookup caching:  If we detect a stale fhandle,
 * we purge the directory cache relative to that vnode.  This way, the
 * user won't get burned by the cache repeatedly.  See <nfs/rnode.h> for
 * more details on rnode locking.
 */

static int nfs_open(vnode_t **, int, cred_t *, caller_context_t *);
static int nfs_close(vnode_t *, int, int, offset_t, cred_t *,
    caller_context_t *);
static int nfs_read(vnode_t *, struct uio *, int, cred_t *,
    caller_context_t *);
static int nfs_write(vnode_t *, struct uio *, int, cred_t *,
    caller_context_t *);
static int nfs_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
    caller_context_t *);
static int nfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
    caller_context_t *);
static int nfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
    caller_context_t *);
static int nfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
static int nfs_accessx(void *, int, cred_t *);
static int nfs_readlink(vnode_t *, struct uio *, cred_t *,
    caller_context_t *);
static int nfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
static void nfs_inactive(vnode_t *, cred_t *, caller_context_t *);
static int nfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
    int, vnode_t *, cred_t *, caller_context_t *,
    int *, pathname_t *);
static int nfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
    int, vnode_t **, cred_t *, int, caller_context_t *,
    vsecattr_t *);
static int nfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
    int);
static int nfs_link(vnode_t *, vnode_t *, char *, cred_t *,
    caller_context_t *, int);
static int nfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
    caller_context_t *, int);
static int nfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
    cred_t *, caller_context_t *, int, vsecattr_t *);
static int nfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
    caller_context_t *, int);
static int nfs_symlink(vnode_t *, char *, struct vattr *, char *,
    cred_t *, caller_context_t *, int);
static int nfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
    caller_context_t *, int);
static int nfs_fid(vnode_t *, fid_t *, caller_context_t *);
static int nfs_rwlock(vnode_t *, int, caller_context_t *);
static void nfs_rwunlock(vnode_t *, int, caller_context_t *);
static int nfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
static int nfs_getpage(vnode_t *, offset_t, size_t, uint_t *,
    page_t *[], size_t, struct seg *, caddr_t,
    enum seg_rw, cred_t *, caller_context_t *);
static int nfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
    caller_context_t *);
static int nfs_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
    uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int nfs_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
    uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int nfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
    struct flk_callback *, cred_t *, caller_context_t *);
static int nfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
    cred_t *, caller_context_t *);
static int nfs_realvp(vnode_t *, vnode_t **, caller_context_t *);
static int nfs_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
    uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
static int nfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
    caller_context_t *);
static int nfs_pageio(vnode_t *, page_t *, u_offset_t, size_t, int,
    cred_t *, caller_context_t *);
static int nfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
    caller_context_t *);
static int nfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
    caller_context_t *);
static int nfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
    caller_context_t *);

struct vnodeops *nfs_vnodeops;

const fs_operation_def_t nfs_vnodeops_template[] = {
    VOPNAME_OPEN,		{ .vop_open = nfs_open },
    VOPNAME_CLOSE,		{ .vop_close = nfs_close },
    VOPNAME_READ,		{ .vop_read = nfs_read },
    VOPNAME_WRITE,		{ .vop_write = nfs_write },
    VOPNAME_IOCTL,		{ .vop_ioctl = nfs_ioctl },
    VOPNAME_GETATTR,		{ .vop_getattr = nfs_getattr },
    VOPNAME_SETATTR,		{ .vop_setattr = nfs_setattr },
    VOPNAME_ACCESS,		{ .vop_access = nfs_access },
    VOPNAME_LOOKUP,		{ .vop_lookup = nfs_lookup },
    VOPNAME_CREATE,		{ .vop_create = nfs_create },
    VOPNAME_REMOVE,		{ .vop_remove = nfs_remove },
    VOPNAME_LINK,		{ .vop_link = nfs_link },
    VOPNAME_RENAME,		{ .vop_rename = nfs_rename },
    VOPNAME_MKDIR,		{ .vop_mkdir = nfs_mkdir },
    VOPNAME_RMDIR,		{ .vop_rmdir = nfs_rmdir },
    VOPNAME_READDIR,		{ .vop_readdir = nfs_readdir },
    VOPNAME_SYMLINK,		{ .vop_symlink = nfs_symlink },
    VOPNAME_READLINK,		{ .vop_readlink = nfs_readlink },
    VOPNAME_FSYNC,		{ .vop_fsync = nfs_fsync },
    VOPNAME_INACTIVE,		{ .vop_inactive = nfs_inactive },
    VOPNAME_FID,		{ .vop_fid = nfs_fid },
    VOPNAME_RWLOCK,		{ .vop_rwlock = nfs_rwlock },
    VOPNAME_RWUNLOCK,		{ .vop_rwunlock = nfs_rwunlock },
    VOPNAME_SEEK,		{ .vop_seek = nfs_seek },
    VOPNAME_FRLOCK,		{ .vop_frlock = nfs_frlock },
    VOPNAME_SPACE,		{ .vop_space = nfs_space },
    VOPNAME_REALVP,		{ .vop_realvp = nfs_realvp },
    VOPNAME_GETPAGE,		{ .vop_getpage = nfs_getpage },
    VOPNAME_PUTPAGE,		{ .vop_putpage = nfs_putpage },
    VOPNAME_MAP,		{ .vop_map = nfs_map },
    VOPNAME_ADDMAP,		{ .vop_addmap = nfs_addmap },
    VOPNAME_DELMAP,		{ .vop_delmap = nfs_delmap },
    VOPNAME_DUMP,		{ .vop_dump = nfs_dump },
    VOPNAME_PATHCONF,		{ .vop_pathconf = nfs_pathconf },
    VOPNAME_PAGEIO,		{ .vop_pageio = nfs_pageio },
    VOPNAME_SETSECATTR,		{ .vop_setsecattr = nfs_setsecattr },
    VOPNAME_GETSECATTR,		{ .vop_getsecattr = nfs_getsecattr },
    VOPNAME_SHRLOCK,		{ .vop_shrlock = nfs_shrlock },
    VOPNAME_VNEVENT,		{ .vop_vnevent = fs_vnevent_support },
    NULL,			NULL
};
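
/*
 * Note: nfs_vnodeops above starts out NULL; it is expected to be filled
 * in when this template is registered with vn_make_ops() during NFS
 * client initialization (that registration lives outside this file).
 */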

/*
 * XXX:  This is referenced in modstubs.s
 */
struct vnodeops *
nfs_getvnodeops(void)
{
    return (nfs_vnodeops);
}

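/*
 * nfs_open: remember the credentials used to open the file (they may be
 * needed later for asynchronous I/O on the rnode) and, per the
 * close-to-open consistency rules spelled out in the comment below,
 * decide whether the cached attributes and data need to be revalidated.
 */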
/* ARGSUSED */
static int
nfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
    int error;
    struct vattr va;
    rnode_t *rp;
    vnode_t *vp;

    vp = *vpp;
    rp = VTOR(vp);
    if (nfs_zone() != VTOMI(vp)->mi_zone)
        return (EIO);
    mutex_enter(&rp->r_statelock);
    if (rp->r_cred == NULL) {
        crhold(cr);
        rp->r_cred = cr;
    }
    mutex_exit(&rp->r_statelock);

    /*
     * If there is no cached data or if close-to-open
     * consistency checking is turned off, we can avoid
     * the over the wire getattr.  Otherwise, if the
     * file system is mounted readonly, then just verify
     * the caches are up to date using the normal mechanism.
     * Else, if the file is not mmap'd, then just mark
     * the attributes as timed out.  They will be refreshed
     * and the caches validated prior to being used.
     * Else, the file system is mounted writeable so
     * force an over the wire GETATTR in order to ensure
     * that all cached data is valid.
     */
    if (vp->v_count > 1 ||
        ((vn_has_cached_data(vp) || HAVE_RDDIR_CACHE(rp)) &&
        !(VTOMI(vp)->mi_flags & MI_NOCTO))) {
        if (vn_is_readonly(vp))
            error = nfs_validate_caches(vp, cr);
        else if (rp->r_mapcnt == 0 && vp->v_count == 1) {
            PURGE_ATTRCACHE(vp);
            error = 0;
        } else {
            va.va_mask = AT_ALL;
            error = nfs_getattr_otw(vp, &va, cr);
        }
    } else
        error = 0;

    return (error);
}

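/*
 * nfs_close: release any record locks and share reservations held by
 * this process, flush (synchronously, or asynchronously for "nocto"
 * mounts) dirty pages on the last close for writing, and report any
 * error saved on the rnode from earlier asynchronous writes.
 */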
/* ARGSUSED */
static int
nfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
    rnode_t *rp;
    int error;
    struct vattr va;

    /*
     * zone_enter(2) prevents processes from changing zones with NFS files
     * open; if we happen to get here from the wrong zone we can't do
     * anything over the wire.
     */
    if (VTOMI(vp)->mi_zone != nfs_zone()) {
        /*
         * We could attempt to clean up locks, except we're sure
         * that the current process didn't acquire any locks on
         * the file: any attempt to lock a file belonging to another
         * zone will fail, and one can't lock an NFS file and then
         * change zones, as that fails too.
         *
         * Returning an error here is the sane thing to do.  A
         * subsequent call to VN_RELE() which translates to a
         * nfs_inactive() will clean up state: if the zone of the
         * vnode's origin is still alive and kicking, an async worker
         * thread will handle the request (from the correct zone), and
         * everything (minus the final nfs_getattr_otw() call) should
         * be OK.  If the zone is going away nfs_async_inactive() will
         * throw away cached pages inline.
         */
        return (EIO);
    }

    /*
     * If we are using local locking for this filesystem, then
     * release all of the SYSV style record locks.  Otherwise,
     * we are doing network locking and we need to release all
     * of the network locks.  All of the locks held by this
     * process on this file are released no matter what the
     * incoming reference count is.
     */
    if (VTOMI(vp)->mi_flags & MI_LLOCK) {
        cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
        cleanshares(vp, ttoproc(curthread)->p_pid);
    } else
        nfs_lockrelease(vp, flag, offset, cr);

    if (count > 1)
        return (0);

    /*
     * If the file has been `unlinked', then purge the
     * DNLC so that this vnode will get recycled quicker
     * and the .nfs* file on the server will get removed.
     */
    rp = VTOR(vp);
    if (rp->r_unldvp != NULL)
        dnlc_purge_vp(vp);

    /*
     * If the file was open for write and there are pages,
     * then if the file system was mounted using the "no-close-
     *      to-open" semantics, then start an asynchronous flush
     *      of all of the pages in the file.
     * else the file system was not mounted using the "no-close-
     *      to-open" semantics, then do a synchronous flush and
     *      commit of all of the dirty and uncommitted pages.
     *
     * The asynchronous flush of the pages in the "nocto" path
     * mostly just associates a cred pointer with the rnode so
     * writes which happen later will have a better chance of
     * working.  It also starts the data being written to the
     * server, but without unnecessarily delaying the application.
     */
    if ((flag & FWRITE) && vn_has_cached_data(vp)) {
        if ((VTOMI(vp)->mi_flags & MI_NOCTO)) {
            error = nfs_putpage(vp, (offset_t)0, 0, B_ASYNC,
                cr, ct);
            if (error == EAGAIN)
                error = 0;
        } else
            error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
        if (!error) {
            mutex_enter(&rp->r_statelock);
            error = rp->r_error;
            rp->r_error = 0;
            mutex_exit(&rp->r_statelock);
        }
    } else {
        mutex_enter(&rp->r_statelock);
        error = rp->r_error;
        rp->r_error = 0;
        mutex_exit(&rp->r_statelock);
    }

    /*
     * If RWRITEATTR is set, then issue an over the wire GETATTR to
     * refresh the attribute cache with a set of attributes which
     * weren't returned from a WRITE.  This will enable the close-
     * to-open processing to work.
     */
    if (rp->r_flags & RWRITEATTR)
        (void) nfs_getattr_otw(vp, &va, cr);

    return (error);
}

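/*
 * nfs_read: if caching is disabled or direct I/O is in effect (and the
 * file is not mapped), read straight from the server via nfsread().
 * Otherwise satisfy the read through the page cache, a MAXBSIZE window
 * at a time, using vpm_data_copy() or segmap mappings.
 */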
/* ARGSUSED */
static int
nfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
    rnode_t *rp;
    u_offset_t off;
    offset_t diff;
    int on;
    size_t n;
    caddr_t base;
    uint_t flags;
    int error;
    mntinfo_t *mi;

    rp = VTOR(vp);
    mi = VTOMI(vp);

    if (nfs_zone() != mi->mi_zone)
        return (EIO);

    ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER));

    if (vp->v_type != VREG)
        return (EISDIR);

    if (uiop->uio_resid == 0)
        return (0);

    if (uiop->uio_loffset > MAXOFF32_T)
        return (EFBIG);

    if (uiop->uio_loffset < 0 ||
        uiop->uio_loffset + uiop->uio_resid > MAXOFF32_T)
        return (EINVAL);

    /*
     * Bypass VM if caching has been disabled (e.g., locking) or if
     * using client-side direct I/O and the file is not mmap'd and
     * there are no cached pages.
     */
    if ((vp->v_flag & VNOCACHE) ||
        (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
        rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
        !vn_has_cached_data(vp))) {
        size_t bufsize;
        size_t resid = 0;

        /*
         * Let's try to do the read in as large a chunk as we can
         * (Filesystem (NFS client) bsize if possible/needed).
         * For V3, this is 32K and for V2, this is 8K.
         */
        bufsize = MIN(uiop->uio_resid, VTOMI(vp)->mi_curread);
        base = kmem_alloc(bufsize, KM_SLEEP);
        do {
            n = MIN(uiop->uio_resid, bufsize);
            error = nfsread(vp, base, uiop->uio_offset, n,
                &resid, cr);
            if (!error) {
                n -= resid;
                error = uiomove(base, n, UIO_READ, uiop);
            }
        } while (!error && uiop->uio_resid > 0 && n > 0);
        kmem_free(base, bufsize);
        return (error);
    }

    error = 0;

    do {
        off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
        on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
        n = MIN(MAXBSIZE - on, uiop->uio_resid);

        error = nfs_validate_caches(vp, cr);
        if (error)
            break;

        mutex_enter(&rp->r_statelock);
        while (rp->r_flags & RINCACHEPURGE) {
            if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                mutex_exit(&rp->r_statelock);
                return (EINTR);
            }
        }
        diff = rp->r_size - uiop->uio_loffset;
        mutex_exit(&rp->r_statelock);
        if (diff <= 0)
            break;
        if (diff < n)
            n = (size_t)diff;

        if (vpm_enable) {
            /*
             * Copy data.
             */
            error = vpm_data_copy(vp, off + on, n, uiop,
                1, NULL, 0, S_READ);
        } else {
            base = segmap_getmapflt(segkmap, vp, off + on, n,
                1, S_READ);
            error = uiomove(base + on, n, UIO_READ, uiop);
        }

        if (!error) {
            /*
             * If read a whole block or read to eof,
             * won't need this buffer again soon.
             */
            mutex_enter(&rp->r_statelock);
            if (n + on == MAXBSIZE ||
                uiop->uio_loffset == rp->r_size)
                flags = SM_DONTNEED;
            else
                flags = 0;
            mutex_exit(&rp->r_statelock);
            if (vpm_enable) {
                error = vpm_sync_pages(vp, off, n, flags);
            } else {
                error = segmap_release(segkmap, base, flags);
            }
        } else {
            if (vpm_enable) {
                (void) vpm_sync_pages(vp, off, n, 0);
            } else {
                (void) segmap_release(segkmap, base, 0);
            }
        }
    } while (!error && uiop->uio_resid > 0);

    return (error);
}

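/*
 * nfs_write: handle O_APPEND serialization and the 32-bit offset and
 * file-size rlimit checks, then either write directly to the server
 * (direct I/O path) or dirty pages through segmap/vpm and let them be
 * flushed, synchronously when FSYNC/FDSYNC or out-of-space conditions
 * require it.
 */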
/* ARGSUSED */
static int
nfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
    rnode_t *rp;
    u_offset_t off;
    caddr_t base;
    uint_t flags;
    int remainder;
    size_t n;
    int on;
    int error;
    int resid;
    offset_t offset;
    rlim_t limit;
    mntinfo_t *mi;

    rp = VTOR(vp);

    mi = VTOMI(vp);
    if (nfs_zone() != mi->mi_zone)
        return (EIO);
    if (vp->v_type != VREG)
        return (EISDIR);

    if (uiop->uio_resid == 0)
        return (0);

    if (ioflag & FAPPEND) {
        struct vattr va;

        /*
         * Must serialize if appending.
         */
        if (nfs_rw_lock_held(&rp->r_rwlock, RW_READER)) {
            nfs_rw_exit(&rp->r_rwlock);
            if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER,
                INTR(vp)))
                return (EINTR);
        }

        va.va_mask = AT_SIZE;
        error = nfsgetattr(vp, &va, cr);
        if (error)
            return (error);
        uiop->uio_loffset = va.va_size;
    }

    if (uiop->uio_loffset > MAXOFF32_T)
        return (EFBIG);

    offset = uiop->uio_loffset + uiop->uio_resid;

    if (uiop->uio_loffset < 0 || offset > MAXOFF32_T)
        return (EINVAL);

    if (uiop->uio_llimit > (rlim64_t)MAXOFF32_T) {
        limit = MAXOFF32_T;
    } else {
        limit = (rlim_t)uiop->uio_llimit;
    }

    /*
     * Check to make sure that the process will not exceed
     * its limit on file size.  It is okay to write up to
     * the limit, but not beyond.  Thus, the write which
     * reaches the limit will be short and the next write
     * will return an error.
     */
    remainder = 0;
    if (offset > limit) {
        remainder = offset - limit;
        uiop->uio_resid = limit - uiop->uio_offset;
        if (uiop->uio_resid <= 0) {
            proc_t *p = ttoproc(curthread);

            uiop->uio_resid += remainder;
            mutex_enter(&p->p_lock);
            (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
                p->p_rctls, p, RCA_UNSAFE_SIGINFO);
            mutex_exit(&p->p_lock);
            return (EFBIG);
        }
    }

    if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp)))
        return (EINTR);

    /*
     * Bypass VM if caching has been disabled (e.g., locking) or if
     * using client-side direct I/O and the file is not mmap'd and
     * there are no cached pages.
     */
    if ((vp->v_flag & VNOCACHE) ||
        (((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO)) &&
        rp->r_mapcnt == 0 && rp->r_inmap == 0 &&
        !vn_has_cached_data(vp))) {
        size_t bufsize;
        int count;
        uint_t org_offset;

nfs_fwrite:
        if (rp->r_flags & RSTALE) {
            resid = uiop->uio_resid;
            offset = uiop->uio_loffset;
            error = rp->r_error;
            /*
             * A close may have cleared r_error, if so,
             * propagate ESTALE error return properly
             */
            if (error == 0)
                error = ESTALE;
            goto bottom;
        }
        bufsize = MIN(uiop->uio_resid, mi->mi_curwrite);
        base = kmem_alloc(bufsize, KM_SLEEP);
        do {
            resid = uiop->uio_resid;
            offset = uiop->uio_loffset;
            count = MIN(uiop->uio_resid, bufsize);
            org_offset = uiop->uio_offset;
            error = uiomove(base, count, UIO_WRITE, uiop);
            if (!error) {
                error = nfswrite(vp, base, org_offset,
                    count, cr);
            }
        } while (!error && uiop->uio_resid > 0);
        kmem_free(base, bufsize);
        goto bottom;
    }

    do {
        off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
        on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
        n = MIN(MAXBSIZE - on, uiop->uio_resid);

        resid = uiop->uio_resid;
        offset = uiop->uio_loffset;

        if (rp->r_flags & RSTALE) {
            error = rp->r_error;
            /*
             * A close may have cleared r_error, if so,
             * propagate ESTALE error return properly
             */
            if (error == 0)
                error = ESTALE;
            break;
        }

        /*
         * Don't create dirty pages faster than they
         * can be cleaned so that the system doesn't
         * get imbalanced.  If the async queue is
         * maxed out, then wait for it to drain before
         * creating more dirty pages.  Also, wait for
         * any threads doing pagewalks in the vop_getattr
         * entry points so that they don't block for
         * long periods.
         */
        mutex_enter(&rp->r_statelock);
        while ((mi->mi_max_threads != 0 &&
            rp->r_awcount > 2 * mi->mi_max_threads) ||
            rp->r_gcount > 0) {
            if (INTR(vp)) {
                klwp_t *lwp = ttolwp(curthread);

                if (lwp != NULL)
                    lwp->lwp_nostop++;
                if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                    mutex_exit(&rp->r_statelock);
                    if (lwp != NULL)
                        lwp->lwp_nostop--;
                    error = EINTR;
                    goto bottom;
                }
                if (lwp != NULL)
                    lwp->lwp_nostop--;
            } else
                cv_wait(&rp->r_cv, &rp->r_statelock);
        }
        mutex_exit(&rp->r_statelock);

        /*
         * Touch the page and fault it in if it is not in core
         * before segmap_getmapflt or vpm_data_copy can lock it.
         * This is to avoid the deadlock if the buffer is mapped
         * to the same file through mmap which we want to write.
         */
        uio_prefaultpages((long)n, uiop);

        if (vpm_enable) {
            /*
             * It will use kpm mappings, so no need to
             * pass an address.
             */
            error = writerp(rp, NULL, n, uiop, 0);
        } else {
            if (segmap_kpm) {
                int pon = uiop->uio_loffset & PAGEOFFSET;
                size_t pn = MIN(PAGESIZE - pon,
                    uiop->uio_resid);
                int pagecreate;

                mutex_enter(&rp->r_statelock);
                pagecreate = (pon == 0) && (pn == PAGESIZE ||
                    uiop->uio_loffset + pn >= rp->r_size);
                mutex_exit(&rp->r_statelock);

                base = segmap_getmapflt(segkmap, vp, off + on,
                    pn, !pagecreate, S_WRITE);

                error = writerp(rp, base + pon, n, uiop,
                    pagecreate);

            } else {
                base = segmap_getmapflt(segkmap, vp, off + on,
                    n, 0, S_READ);
                error = writerp(rp, base + on, n, uiop, 0);
            }
        }

        if (!error) {
            if (mi->mi_flags & MI_NOAC)
                flags = SM_WRITE;
            else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
                /*
                 * Have written a whole block.
                 * Start an asynchronous write
                 * and mark the buffer to
                 * indicate that it won't be
                 * needed again soon.
                 */
                flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
            } else
                flags = 0;
            if ((ioflag & (FSYNC|FDSYNC)) ||
                (rp->r_flags & ROUTOFSPACE)) {
                flags &= ~SM_ASYNC;
                flags |= SM_WRITE;
            }
            if (vpm_enable) {
                error = vpm_sync_pages(vp, off, n, flags);
            } else {
                error = segmap_release(segkmap, base, flags);
            }
        } else {
            if (vpm_enable) {
                (void) vpm_sync_pages(vp, off, n, 0);
            } else {
                (void) segmap_release(segkmap, base, 0);
            }
            /*
             * In the event that we got an access error while
             * faulting in a page for a write-only file just
             * force a write.
             */
            if (error == EACCES)
                goto nfs_fwrite;
        }
    } while (!error && uiop->uio_resid > 0);

bottom:
    if (error) {
        uiop->uio_resid = resid + remainder;
        uiop->uio_loffset = offset;
    } else
        uiop->uio_resid += remainder;

    nfs_rw_exit(&rp->r_lkserlock);

    return (error);
}

/*
 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
 */
static int
nfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
    int flags, cred_t *cr)
{
    struct buf *bp;
    int error;

    ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
    bp = pageio_setup(pp, len, vp, flags);
    ASSERT(bp != NULL);

    /*
     * pageio_setup should have set b_addr to 0.  This
     * is correct since we want to do I/O on a page
     * boundary.  bp_mapin will use this addr to calculate
     * an offset, and then set b_addr to the kernel virtual
     * address it allocated for us.
     */
    ASSERT(bp->b_un.b_addr == 0);

    bp->b_edev = 0;
    bp->b_dev = 0;
    bp->b_lblkno = lbtodb(off);
    bp->b_file = vp;
    bp->b_offset = (offset_t)off;
    bp_mapin(bp);

    error = nfs_bio(bp, cr);

    bp_mapout(bp);
    pageio_done(bp);

    return (error);
}

/*
 * Write to file.  Writes to remote server in largest size
 * chunks that the server can handle.  Write is synchronous.
 */
static int
nfswrite(vnode_t *vp, caddr_t base, uint_t offset, int count, cred_t *cr)
{
    rnode_t *rp;
    mntinfo_t *mi;
    struct nfswriteargs wa;
    struct nfsattrstat ns;
    int error;
    int tsize;
    int douprintf;

    douprintf = 1;

    rp = VTOR(vp);
    mi = VTOMI(vp);

    ASSERT(nfs_zone() == mi->mi_zone);

    wa.wa_args = &wa.wa_args_buf;
    wa.wa_fhandle = *VTOFH(vp);

    do {
        tsize = MIN(mi->mi_curwrite, count);
        wa.wa_data = base;
        wa.wa_begoff = offset;
        wa.wa_totcount = tsize;
        wa.wa_count = tsize;
        wa.wa_offset = offset;

        if (mi->mi_io_kstats) {
            mutex_enter(&mi->mi_lock);
            kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
            mutex_exit(&mi->mi_lock);
        }
        wa.wa_mblk = NULL;
        do {
            error = rfs2call(mi, RFS_WRITE,
                xdr_writeargs, (caddr_t)&wa,
                xdr_attrstat, (caddr_t)&ns, cr,
                &douprintf, &ns.ns_status, 0, NULL);
        } while (error == ENFS_TRYAGAIN);
        if (mi->mi_io_kstats) {
            mutex_enter(&mi->mi_lock);
            kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
            mutex_exit(&mi->mi_lock);
        }

        if (!error) {
            error = geterrno(ns.ns_status);
            /*
             * Can't check for stale fhandle and purge caches
             * here because pages are held by nfs_getpage.
             * Just mark the attribute cache as timed out
             * and set RWRITEATTR to indicate that the file
             * was modified with a WRITE operation.
             */
            if (!error) {
                count -= tsize;
                base += tsize;
                offset += tsize;
                if (mi->mi_io_kstats) {
                    mutex_enter(&mi->mi_lock);
                    KSTAT_IO_PTR(mi->mi_io_kstats)->
                        writes++;
                    KSTAT_IO_PTR(mi->mi_io_kstats)->
                        nwritten += tsize;
                    mutex_exit(&mi->mi_lock);
                }
                lwp_stat_update(LWP_STAT_OUBLK, 1);
                mutex_enter(&rp->r_statelock);
                PURGE_ATTRCACHE_LOCKED(rp);
                rp->r_flags |= RWRITEATTR;
                mutex_exit(&rp->r_statelock);
            }
        }
    } while (!error && count);

    return (error);
}

/*
 * Read from a file.  Reads data in largest chunks our interface can handle.
 */
static int
nfsread(vnode_t *vp, caddr_t base, uint_t offset,
    int count, size_t *residp, cred_t *cr)
{
    mntinfo_t *mi;
    struct nfsreadargs ra;
    struct nfsrdresult rr;
    int tsize;
    int error;
    int douprintf;
    failinfo_t fi;
    rnode_t *rp;
    struct vattr va;
    hrtime_t t;

    rp = VTOR(vp);
    mi = VTOMI(vp);

    ASSERT(nfs_zone() == mi->mi_zone);

    douprintf = 1;

    ra.ra_fhandle = *VTOFH(vp);

    fi.vp = vp;
    fi.fhp = (caddr_t)&ra.ra_fhandle;
    fi.copyproc = nfscopyfh;
    fi.lookupproc = nfslookup;
    fi.xattrdirproc = acl_getxattrdir2;

    do {
        if (mi->mi_io_kstats) {
            mutex_enter(&mi->mi_lock);
            kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
            mutex_exit(&mi->mi_lock);
        }

        do {
            tsize = MIN(mi->mi_curread, count);
            rr.rr_data = base;
            ra.ra_offset = offset;
            ra.ra_totcount = tsize;
            ra.ra_count = tsize;
            ra.ra_data = base;
            t = gethrtime();
            error = rfs2call(mi, RFS_READ,
                xdr_readargs, (caddr_t)&ra,
                xdr_rdresult, (caddr_t)&rr, cr,
                &douprintf, &rr.rr_status, 0, &fi);
        } while (error == ENFS_TRYAGAIN);

        if (mi->mi_io_kstats) {
            mutex_enter(&mi->mi_lock);
            kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
            mutex_exit(&mi->mi_lock);
        }

        if (!error) {
            error = geterrno(rr.rr_status);
            if (!error) {
                count -= rr.rr_count;
                base += rr.rr_count;
                offset += rr.rr_count;
                if (mi->mi_io_kstats) {
                    mutex_enter(&mi->mi_lock);
                    KSTAT_IO_PTR(mi->mi_io_kstats)->reads++;
                    KSTAT_IO_PTR(mi->mi_io_kstats)->nread +=
                        rr.rr_count;
                    mutex_exit(&mi->mi_lock);
                }
                lwp_stat_update(LWP_STAT_INBLK, 1);
            }
        }
    } while (!error && count && rr.rr_count == tsize);

    *residp = count;

    if (!error) {
        /*
         * Since no error occurred, we have the current
         * attributes and we need to do a cache check and then
         * potentially update the cached attributes.  We can't
         * use the normal attribute check and cache mechanisms
         * because they might cause a cache flush which would
         * deadlock.  Instead, we just check the cache to see
         * if the attributes have changed.  If they have, then we
         * just mark the attributes as out of date.  The next
         * time that the attributes are checked, they will be
         * out of date, new attributes will be fetched, and
         * the page cache will be flushed.  If the attributes
         * weren't changed, then we just update the cached
         * attributes with these attributes.
         */
        /*
         * If NFS_ACL is supported on the server, then the
         * attributes returned by server may have minimal
         * permissions sometimes denying access to users having
         * proper access.  To get the proper attributes, mark
         * the attributes as expired so that they will be
         * regotten via the NFS_ACL GETATTR2 procedure.
         */
        error = nattr_to_vattr(vp, &rr.rr_attr, &va);
        mutex_enter(&rp->r_statelock);
        if (error || !CACHE_VALID(rp, va.va_mtime, va.va_size) ||
            (mi->mi_flags & MI_ACL)) {
            mutex_exit(&rp->r_statelock);
            PURGE_ATTRCACHE(vp);
        } else {
            if (rp->r_mtime <= t) {
                nfs_attrcache_va(vp, &va);
            }
            mutex_exit(&rp->r_statelock);
        }
    }

    return (error);
}

/* ARGSUSED */
static int
nfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp,
    caller_context_t *ct)
{

    if (nfs_zone() != VTOMI(vp)->mi_zone)
        return (EIO);
    switch (cmd) {
    case _FIODIRECTIO:
        return (nfs_directio(vp, (int)arg, cr));
    default:
        return (ENOTTY);
    }
}

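/*
 * nfs_getattr: serve "hint" requests for size/fsid/rdev straight from
 * the cached rnode attributes, flush dirty pages first when an accurate
 * mtime is wanted, and otherwise fall through to nfsgetattr(), which
 * handles attribute cache validation.
 */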
10987c478bd9Sstevel@tonic-gate */
10997c478bd9Sstevel@tonic-gate rp = VTOR(vp);
11007c478bd9Sstevel@tonic-gate if (flags & ATTR_HINT) {
11017c478bd9Sstevel@tonic-gate if (vap->va_mask ==
11027c478bd9Sstevel@tonic-gate (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
11037c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock);
11047c478bd9Sstevel@tonic-gate if (vap->va_mask & AT_SIZE)
11057c478bd9Sstevel@tonic-gate vap->va_size = rp->r_size;
11067c478bd9Sstevel@tonic-gate if (vap->va_mask & AT_FSID)
11077c478bd9Sstevel@tonic-gate vap->va_fsid = rp->r_attr.va_fsid;
11087c478bd9Sstevel@tonic-gate if (vap->va_mask & AT_RDEV)
11097c478bd9Sstevel@tonic-gate vap->va_rdev = rp->r_attr.va_rdev;
11107c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock);
11117c478bd9Sstevel@tonic-gate return (0);
11127c478bd9Sstevel@tonic-gate }
11137c478bd9Sstevel@tonic-gate }
11147c478bd9Sstevel@tonic-gate 
11157c478bd9Sstevel@tonic-gate /*
11167c478bd9Sstevel@tonic-gate * Only need to flush pages if asking for the mtime
11177c478bd9Sstevel@tonic-gate * and if there are any dirty pages or any outstanding
11187c478bd9Sstevel@tonic-gate * asynchronous (write) requests for this file.
11197c478bd9Sstevel@tonic-gate */
11207c478bd9Sstevel@tonic-gate if (vap->va_mask & AT_MTIME) {
11217c478bd9Sstevel@tonic-gate if (vn_has_cached_data(vp) &&
11227c478bd9Sstevel@tonic-gate ((rp->r_flags & RDIRTY) || rp->r_awcount > 0)) {
11237c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock);
11247c478bd9Sstevel@tonic-gate rp->r_gcount++;
11257c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock);
1126da6c28aaSamw error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
11277c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock);
11287c478bd9Sstevel@tonic-gate if (error && (error == ENOSPC || error == EDQUOT)) {
11297c478bd9Sstevel@tonic-gate if (!rp->r_error)
11307c478bd9Sstevel@tonic-gate rp->r_error = error;
11317c478bd9Sstevel@tonic-gate }
11327c478bd9Sstevel@tonic-gate if (--rp->r_gcount == 0)
11337c478bd9Sstevel@tonic-gate cv_broadcast(&rp->r_cv);
11347c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock);
11357c478bd9Sstevel@tonic-gate }
11367c478bd9Sstevel@tonic-gate }
11377c478bd9Sstevel@tonic-gate 
11387c478bd9Sstevel@tonic-gate return (nfsgetattr(vp, vap, cr));
11397c478bd9Sstevel@tonic-gate }
11407c478bd9Sstevel@tonic-gate 
11417c478bd9Sstevel@tonic-gate /*ARGSUSED4*/
11427c478bd9Sstevel@tonic-gate static int
11437c478bd9Sstevel@tonic-gate nfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
11447c478bd9Sstevel@tonic-gate caller_context_t *ct)
11457c478bd9Sstevel@tonic-gate {
11467c478bd9Sstevel@tonic-gate int error;
11477c478bd9Sstevel@tonic-gate uint_t mask;
11487c478bd9Sstevel@tonic-gate struct vattr va;
11497c478bd9Sstevel@tonic-gate 
11507c478bd9Sstevel@tonic-gate mask = vap->va_mask;
11517c478bd9Sstevel@tonic-gate 
11527c478bd9Sstevel@tonic-gate if (mask & AT_NOSET)
11537c478bd9Sstevel@tonic-gate return (EINVAL);
11547c478bd9Sstevel@tonic-gate 
11557c478bd9Sstevel@tonic-gate if ((mask & AT_SIZE) &&
11567c478bd9Sstevel@tonic-gate vap->va_type == VREG &&
11577c478bd9Sstevel@tonic-gate vap->va_size > MAXOFF32_T)
11587c478bd9Sstevel@tonic-gate return (EFBIG);
11597c478bd9Sstevel@tonic-gate 
1160108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone)
11617c478bd9Sstevel@tonic-gate return (EIO);
11627c478bd9Sstevel@tonic-gate 
11637c478bd9Sstevel@tonic-gate va.va_mask = AT_UID | AT_MODE;
11647c478bd9Sstevel@tonic-gate 
11657c478bd9Sstevel@tonic-gate error = nfsgetattr(vp, &va, cr);
11667c478bd9Sstevel@tonic-gate if
(error) 11677c478bd9Sstevel@tonic-gate return (error); 11687c478bd9Sstevel@tonic-gate 11697c478bd9Sstevel@tonic-gate error = secpolicy_vnode_setattr(cr, vp, vap, &va, flags, nfs_accessx, 11707c478bd9Sstevel@tonic-gate vp); 11717c478bd9Sstevel@tonic-gate 11727c478bd9Sstevel@tonic-gate if (error) 11737c478bd9Sstevel@tonic-gate return (error); 11747c478bd9Sstevel@tonic-gate 117572102e74SBryan Cantrill error = nfssetattr(vp, vap, flags, cr); 117672102e74SBryan Cantrill 117772102e74SBryan Cantrill if (error == 0 && (mask & AT_SIZE) && vap->va_size == 0) 117872102e74SBryan Cantrill vnevent_truncate(vp, ct); 117972102e74SBryan Cantrill 118072102e74SBryan Cantrill return (error); 11817c478bd9Sstevel@tonic-gate } 11827c478bd9Sstevel@tonic-gate 11837c478bd9Sstevel@tonic-gate static int 11847c478bd9Sstevel@tonic-gate nfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr) 11857c478bd9Sstevel@tonic-gate { 11867c478bd9Sstevel@tonic-gate int error; 11877c478bd9Sstevel@tonic-gate uint_t mask; 11887c478bd9Sstevel@tonic-gate struct nfssaargs args; 11897c478bd9Sstevel@tonic-gate struct nfsattrstat ns; 11907c478bd9Sstevel@tonic-gate int douprintf; 11917c478bd9Sstevel@tonic-gate rnode_t *rp; 11927c478bd9Sstevel@tonic-gate struct vattr va; 11937c478bd9Sstevel@tonic-gate mode_t omode; 11947c478bd9Sstevel@tonic-gate mntinfo_t *mi; 11957c478bd9Sstevel@tonic-gate vsecattr_t *vsp; 11967c478bd9Sstevel@tonic-gate hrtime_t t; 11977c478bd9Sstevel@tonic-gate 11987c478bd9Sstevel@tonic-gate mask = vap->va_mask; 11997c478bd9Sstevel@tonic-gate 1200108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(vp)->mi_zone); 12017c478bd9Sstevel@tonic-gate 12027c478bd9Sstevel@tonic-gate rp = VTOR(vp); 12037c478bd9Sstevel@tonic-gate 12047c478bd9Sstevel@tonic-gate /* 12057c478bd9Sstevel@tonic-gate * Only need to flush pages if there are any pages and 12067c478bd9Sstevel@tonic-gate * if the file is marked as dirty in some fashion. The 12077c478bd9Sstevel@tonic-gate * file must be flushed so that we can accurately 12087c478bd9Sstevel@tonic-gate * determine the size of the file and the cached data 12097c478bd9Sstevel@tonic-gate * after the SETATTR returns. A file is considered to 12107c478bd9Sstevel@tonic-gate * be dirty if it is either marked with RDIRTY, has 12117c478bd9Sstevel@tonic-gate * outstanding i/o's active, or is mmap'd. In this 12127c478bd9Sstevel@tonic-gate * last case, we can't tell whether there are dirty 12137c478bd9Sstevel@tonic-gate * pages, so we flush just to be sure. 12147c478bd9Sstevel@tonic-gate */ 12157c478bd9Sstevel@tonic-gate if (vn_has_cached_data(vp) && 12167c478bd9Sstevel@tonic-gate ((rp->r_flags & RDIRTY) || 12177c478bd9Sstevel@tonic-gate rp->r_count > 0 || 12187c478bd9Sstevel@tonic-gate rp->r_mapcnt > 0)) { 12197c478bd9Sstevel@tonic-gate ASSERT(vp->v_type != VCHR); 1220da6c28aaSamw error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL); 12217c478bd9Sstevel@tonic-gate if (error && (error == ENOSPC || error == EDQUOT)) { 12227c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 12237c478bd9Sstevel@tonic-gate if (!rp->r_error) 12247c478bd9Sstevel@tonic-gate rp->r_error = error; 12257c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 12267c478bd9Sstevel@tonic-gate } 12277c478bd9Sstevel@tonic-gate } 12287c478bd9Sstevel@tonic-gate 12297c478bd9Sstevel@tonic-gate /* 12307c478bd9Sstevel@tonic-gate * If the system call was utime(2) or utimes(2) and the 12317c478bd9Sstevel@tonic-gate * application did not specify the times, then set the 12327c478bd9Sstevel@tonic-gate * mtime nanosecond field to 1 billion. 
This will get 12337c478bd9Sstevel@tonic-gate * translated from 1 billion nanoseconds to 1 million 12347c478bd9Sstevel@tonic-gate * microseconds in the over the wire request. The 12357c478bd9Sstevel@tonic-gate * server will use 1 million in the microsecond field 12367c478bd9Sstevel@tonic-gate * to tell whether both the mtime and atime should be 12377c478bd9Sstevel@tonic-gate * set to the server's current time. 12387c478bd9Sstevel@tonic-gate * 12397c478bd9Sstevel@tonic-gate * This is an overload of the protocol and should be 12407c478bd9Sstevel@tonic-gate * documented in the NFS Version 2 protocol specification. 12417c478bd9Sstevel@tonic-gate */ 12427c478bd9Sstevel@tonic-gate if ((mask & AT_MTIME) && !(flags & ATTR_UTIME)) { 12437c478bd9Sstevel@tonic-gate vap->va_mtime.tv_nsec = 1000000000; 12447c478bd9Sstevel@tonic-gate if (NFS_TIME_T_OK(vap->va_mtime.tv_sec) && 12457c478bd9Sstevel@tonic-gate NFS_TIME_T_OK(vap->va_atime.tv_sec)) { 12467c478bd9Sstevel@tonic-gate error = vattr_to_sattr(vap, &args.saa_sa); 12477c478bd9Sstevel@tonic-gate } else { 12487c478bd9Sstevel@tonic-gate /* 12497c478bd9Sstevel@tonic-gate * Use server times. vap time values will not be used. 12507c478bd9Sstevel@tonic-gate * To ensure no time overflow, make sure vap has 12517c478bd9Sstevel@tonic-gate * valid values, but retain the original values. 12527c478bd9Sstevel@tonic-gate */ 12537c478bd9Sstevel@tonic-gate timestruc_t mtime = vap->va_mtime; 12547c478bd9Sstevel@tonic-gate timestruc_t atime = vap->va_atime; 12557c478bd9Sstevel@tonic-gate time_t now; 12567c478bd9Sstevel@tonic-gate 12577c478bd9Sstevel@tonic-gate now = gethrestime_sec(); 12587c478bd9Sstevel@tonic-gate if (NFS_TIME_T_OK(now)) { 12597c478bd9Sstevel@tonic-gate /* Just in case server does not know of this */ 12607c478bd9Sstevel@tonic-gate vap->va_mtime.tv_sec = now; 12617c478bd9Sstevel@tonic-gate vap->va_atime.tv_sec = now; 12627c478bd9Sstevel@tonic-gate } else { 12637c478bd9Sstevel@tonic-gate vap->va_mtime.tv_sec = 0; 12647c478bd9Sstevel@tonic-gate vap->va_atime.tv_sec = 0; 12657c478bd9Sstevel@tonic-gate } 12667c478bd9Sstevel@tonic-gate error = vattr_to_sattr(vap, &args.saa_sa); 12677c478bd9Sstevel@tonic-gate /* set vap times back on */ 12687c478bd9Sstevel@tonic-gate vap->va_mtime = mtime; 12697c478bd9Sstevel@tonic-gate vap->va_atime = atime; 12707c478bd9Sstevel@tonic-gate } 12717c478bd9Sstevel@tonic-gate } else { 12727c478bd9Sstevel@tonic-gate /* Either do not set times or use the client specified times */ 12737c478bd9Sstevel@tonic-gate error = vattr_to_sattr(vap, &args.saa_sa); 12747c478bd9Sstevel@tonic-gate } 12757c478bd9Sstevel@tonic-gate if (error) { 12767c478bd9Sstevel@tonic-gate /* req time field(s) overflow - return immediately */ 12777c478bd9Sstevel@tonic-gate return (error); 12787c478bd9Sstevel@tonic-gate } 12797c478bd9Sstevel@tonic-gate args.saa_fh = *VTOFH(vp); 12807c478bd9Sstevel@tonic-gate 12817c478bd9Sstevel@tonic-gate va.va_mask = AT_MODE; 12827c478bd9Sstevel@tonic-gate error = nfsgetattr(vp, &va, cr); 12837c478bd9Sstevel@tonic-gate if (error) 12847c478bd9Sstevel@tonic-gate return (error); 12857c478bd9Sstevel@tonic-gate omode = va.va_mode; 12867c478bd9Sstevel@tonic-gate 12877c478bd9Sstevel@tonic-gate mi = VTOMI(vp); 12887c478bd9Sstevel@tonic-gate 12897c478bd9Sstevel@tonic-gate douprintf = 1; 12907c478bd9Sstevel@tonic-gate 12917c478bd9Sstevel@tonic-gate t = gethrtime(); 12927c478bd9Sstevel@tonic-gate 12937c478bd9Sstevel@tonic-gate error = rfs2call(mi, RFS_SETATTR, 12947c478bd9Sstevel@tonic-gate xdr_saargs, (caddr_t)&args, 
12957c478bd9Sstevel@tonic-gate xdr_attrstat, (caddr_t)&ns, cr, 12967c478bd9Sstevel@tonic-gate &douprintf, &ns.ns_status, 0, NULL); 12977c478bd9Sstevel@tonic-gate 12987c478bd9Sstevel@tonic-gate /* 12997c478bd9Sstevel@tonic-gate * Purge the access cache and ACL cache if changing either the 13007c478bd9Sstevel@tonic-gate * owner of the file, the group owner, or the mode. These may 13017c478bd9Sstevel@tonic-gate * change the access permissions of the file, so purge old 13027c478bd9Sstevel@tonic-gate * information and start over again. 13037c478bd9Sstevel@tonic-gate */ 13047c478bd9Sstevel@tonic-gate if ((mask & (AT_UID | AT_GID | AT_MODE)) && (mi->mi_flags & MI_ACL)) { 13057c478bd9Sstevel@tonic-gate (void) nfs_access_purge_rp(rp); 13067c478bd9Sstevel@tonic-gate if (rp->r_secattr != NULL) { 13077c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 13087c478bd9Sstevel@tonic-gate vsp = rp->r_secattr; 13097c478bd9Sstevel@tonic-gate rp->r_secattr = NULL; 13107c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 13117c478bd9Sstevel@tonic-gate if (vsp != NULL) 13127c478bd9Sstevel@tonic-gate nfs_acl_free(vsp); 13137c478bd9Sstevel@tonic-gate } 13147c478bd9Sstevel@tonic-gate } 13157c478bd9Sstevel@tonic-gate 13167c478bd9Sstevel@tonic-gate if (!error) { 13177c478bd9Sstevel@tonic-gate error = geterrno(ns.ns_status); 13187c478bd9Sstevel@tonic-gate if (!error) { 13197c478bd9Sstevel@tonic-gate /* 13207c478bd9Sstevel@tonic-gate * If changing the size of the file, invalidate 13217c478bd9Sstevel@tonic-gate * any local cached data which is no longer part 13227c478bd9Sstevel@tonic-gate * of the file. We also possibly invalidate the 13237c478bd9Sstevel@tonic-gate * last page in the file. We could use 13247c478bd9Sstevel@tonic-gate * pvn_vpzero(), but this would mark the page as 13257c478bd9Sstevel@tonic-gate * modified and require it to be written back to 13267c478bd9Sstevel@tonic-gate * the server for no particularly good reason. 13277c478bd9Sstevel@tonic-gate * This way, if we access it, then we bring it 13287c478bd9Sstevel@tonic-gate * back in. A read should be cheaper than a 13297c478bd9Sstevel@tonic-gate * write. 13307c478bd9Sstevel@tonic-gate */ 13317c478bd9Sstevel@tonic-gate if (mask & AT_SIZE) { 13327c478bd9Sstevel@tonic-gate nfs_invalidate_pages(vp, 13337c478bd9Sstevel@tonic-gate (vap->va_size & PAGEMASK), cr); 13347c478bd9Sstevel@tonic-gate } 13357c478bd9Sstevel@tonic-gate (void) nfs_cache_fattr(vp, &ns.ns_attr, &va, t, cr); 13367c478bd9Sstevel@tonic-gate /* 13377c478bd9Sstevel@tonic-gate * If NFS_ACL is supported on the server, then the 13387c478bd9Sstevel@tonic-gate * attributes returned by server may have minimal 13397c478bd9Sstevel@tonic-gate * permissions sometimes denying access to users having 13407c478bd9Sstevel@tonic-gate * proper access. To get the proper attributes, mark 13417c478bd9Sstevel@tonic-gate * the attributes as expired so that they will be 13427c478bd9Sstevel@tonic-gate * regotten via the NFS_ACL GETATTR2 procedure. 13437c478bd9Sstevel@tonic-gate */ 13447c478bd9Sstevel@tonic-gate if (mi->mi_flags & MI_ACL) { 13457c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(vp); 13467c478bd9Sstevel@tonic-gate } 13477c478bd9Sstevel@tonic-gate /* 13487c478bd9Sstevel@tonic-gate * This next check attempts to deal with NFS 13497c478bd9Sstevel@tonic-gate * servers which can not handle increasing 13507c478bd9Sstevel@tonic-gate * the size of the file via setattr. 
Most 13517c478bd9Sstevel@tonic-gate * of these servers do not return an error, 13527c478bd9Sstevel@tonic-gate * but do not change the size of the file. 13537c478bd9Sstevel@tonic-gate * Hence, this check and then attempt to set 13547c478bd9Sstevel@tonic-gate * the file size by writing 1 byte at the 13557c478bd9Sstevel@tonic-gate * offset of the end of the file that we need. 13567c478bd9Sstevel@tonic-gate */ 13577c478bd9Sstevel@tonic-gate if ((mask & AT_SIZE) && 13587c478bd9Sstevel@tonic-gate ns.ns_attr.na_size < (uint32_t)vap->va_size) { 13597c478bd9Sstevel@tonic-gate char zb = '\0'; 13607c478bd9Sstevel@tonic-gate 13617c478bd9Sstevel@tonic-gate error = nfswrite(vp, &zb, 13627c478bd9Sstevel@tonic-gate vap->va_size - sizeof (zb), 13637c478bd9Sstevel@tonic-gate sizeof (zb), cr); 13647c478bd9Sstevel@tonic-gate } 13657c478bd9Sstevel@tonic-gate /* 13667c478bd9Sstevel@tonic-gate * Some servers will change the mode to clear the setuid 13677c478bd9Sstevel@tonic-gate * and setgid bits when changing the uid or gid. The 13687c478bd9Sstevel@tonic-gate * client needs to compensate appropriately. 13697c478bd9Sstevel@tonic-gate */ 13707c478bd9Sstevel@tonic-gate if (mask & (AT_UID | AT_GID)) { 13717c478bd9Sstevel@tonic-gate int terror; 13727c478bd9Sstevel@tonic-gate 13737c478bd9Sstevel@tonic-gate va.va_mask = AT_MODE; 13747c478bd9Sstevel@tonic-gate terror = nfsgetattr(vp, &va, cr); 13757c478bd9Sstevel@tonic-gate if (!terror && 13767c478bd9Sstevel@tonic-gate (((mask & AT_MODE) && 13777c478bd9Sstevel@tonic-gate va.va_mode != vap->va_mode) || 13787c478bd9Sstevel@tonic-gate (!(mask & AT_MODE) && 13797c478bd9Sstevel@tonic-gate va.va_mode != omode))) { 13807c478bd9Sstevel@tonic-gate va.va_mask = AT_MODE; 13817c478bd9Sstevel@tonic-gate if (mask & AT_MODE) 13827c478bd9Sstevel@tonic-gate va.va_mode = vap->va_mode; 13837c478bd9Sstevel@tonic-gate else 13847c478bd9Sstevel@tonic-gate va.va_mode = omode; 13857c478bd9Sstevel@tonic-gate (void) nfssetattr(vp, &va, 0, cr); 13867c478bd9Sstevel@tonic-gate } 13877c478bd9Sstevel@tonic-gate } 13887c478bd9Sstevel@tonic-gate } else { 13897c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(vp); 13907c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, vp, cr); 13917c478bd9Sstevel@tonic-gate } 13927c478bd9Sstevel@tonic-gate } else { 13937c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(vp); 13947c478bd9Sstevel@tonic-gate } 13957c478bd9Sstevel@tonic-gate 13967c478bd9Sstevel@tonic-gate return (error); 13977c478bd9Sstevel@tonic-gate } 13987c478bd9Sstevel@tonic-gate 13997c478bd9Sstevel@tonic-gate static int 14007c478bd9Sstevel@tonic-gate nfs_accessx(void *vp, int mode, cred_t *cr) 14017c478bd9Sstevel@tonic-gate { 1402108322fbScarlsonj ASSERT(nfs_zone() == VTOMI((vnode_t *)vp)->mi_zone); 1403da6c28aaSamw return (nfs_access(vp, mode, 0, cr, NULL)); 14047c478bd9Sstevel@tonic-gate } 14057c478bd9Sstevel@tonic-gate 1406da6c28aaSamw /* ARGSUSED */ 14077c478bd9Sstevel@tonic-gate static int 1408da6c28aaSamw nfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct) 14097c478bd9Sstevel@tonic-gate { 14107c478bd9Sstevel@tonic-gate struct vattr va; 14117c478bd9Sstevel@tonic-gate int error; 14127c478bd9Sstevel@tonic-gate mntinfo_t *mi; 14137c478bd9Sstevel@tonic-gate int shift = 0; 14147c478bd9Sstevel@tonic-gate 14157c478bd9Sstevel@tonic-gate mi = VTOMI(vp); 14167c478bd9Sstevel@tonic-gate 1417108322fbScarlsonj if (nfs_zone() != mi->mi_zone) 14187c478bd9Sstevel@tonic-gate return (EIO); 14197c478bd9Sstevel@tonic-gate if (mi->mi_flags & MI_ACL) { 14207c478bd9Sstevel@tonic-gate error = acl_access2(vp, mode, 
flags, cr); 14217c478bd9Sstevel@tonic-gate if (mi->mi_flags & MI_ACL) 14227c478bd9Sstevel@tonic-gate return (error); 14237c478bd9Sstevel@tonic-gate } 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate va.va_mask = AT_MODE | AT_UID | AT_GID; 14267c478bd9Sstevel@tonic-gate error = nfsgetattr(vp, &va, cr); 14277c478bd9Sstevel@tonic-gate if (error) 14287c478bd9Sstevel@tonic-gate return (error); 14297c478bd9Sstevel@tonic-gate 14307c478bd9Sstevel@tonic-gate /* 14317c478bd9Sstevel@tonic-gate * Disallow write attempts on read-only 14327c478bd9Sstevel@tonic-gate * file systems, unless the file is a 14337c478bd9Sstevel@tonic-gate * device node. 14347c478bd9Sstevel@tonic-gate */ 14357c478bd9Sstevel@tonic-gate if ((mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp)) 14367c478bd9Sstevel@tonic-gate return (EROFS); 14377c478bd9Sstevel@tonic-gate 14387c478bd9Sstevel@tonic-gate /* 14397c478bd9Sstevel@tonic-gate * Disallow attempts to access mandatory lock files. 14407c478bd9Sstevel@tonic-gate */ 14417c478bd9Sstevel@tonic-gate if ((mode & (VWRITE | VREAD | VEXEC)) && 14427c478bd9Sstevel@tonic-gate MANDLOCK(vp, va.va_mode)) 14437c478bd9Sstevel@tonic-gate return (EACCES); 14447c478bd9Sstevel@tonic-gate 14457c478bd9Sstevel@tonic-gate /* 14467c478bd9Sstevel@tonic-gate * Access check is based on only 14477c478bd9Sstevel@tonic-gate * one of owner, group, public. 14487c478bd9Sstevel@tonic-gate * If not owner, then check group. 14497c478bd9Sstevel@tonic-gate * If not a member of the group, 14507c478bd9Sstevel@tonic-gate * then check public access. 14517c478bd9Sstevel@tonic-gate */ 14527c478bd9Sstevel@tonic-gate if (crgetuid(cr) != va.va_uid) { 14537c478bd9Sstevel@tonic-gate shift += 3; 14547c478bd9Sstevel@tonic-gate if (!groupmember(va.va_gid, cr)) 14557c478bd9Sstevel@tonic-gate shift += 3; 14567c478bd9Sstevel@tonic-gate } 14577c478bd9Sstevel@tonic-gate 1458134a1f4eSCasper H.S. Dik return (secpolicy_vnode_access2(cr, vp, va.va_uid, 1459134a1f4eSCasper H.S. Dik va.va_mode << shift, mode)); 14607c478bd9Sstevel@tonic-gate } 14617c478bd9Sstevel@tonic-gate 14627c478bd9Sstevel@tonic-gate static int nfs_do_symlink_cache = 1; 14637c478bd9Sstevel@tonic-gate 1464da6c28aaSamw /* ARGSUSED */ 14657c478bd9Sstevel@tonic-gate static int 1466da6c28aaSamw nfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct) 14677c478bd9Sstevel@tonic-gate { 14687c478bd9Sstevel@tonic-gate int error; 14697c478bd9Sstevel@tonic-gate struct nfsrdlnres rl; 14707c478bd9Sstevel@tonic-gate rnode_t *rp; 14717c478bd9Sstevel@tonic-gate int douprintf; 14727c478bd9Sstevel@tonic-gate failinfo_t fi; 14737c478bd9Sstevel@tonic-gate 14747c478bd9Sstevel@tonic-gate /* 14757c478bd9Sstevel@tonic-gate * We want to be consistent with UFS semantics so we will return 14767c478bd9Sstevel@tonic-gate * EINVAL instead of ENXIO. This violates the XNFS spec and 14777c478bd9Sstevel@tonic-gate * the RFC 1094, which are wrong any way. BUGID 1138002. 
14787c478bd9Sstevel@tonic-gate */ 14797c478bd9Sstevel@tonic-gate if (vp->v_type != VLNK) 14807c478bd9Sstevel@tonic-gate return (EINVAL); 14817c478bd9Sstevel@tonic-gate 1482108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) 14837c478bd9Sstevel@tonic-gate return (EIO); 14847c478bd9Sstevel@tonic-gate 14857c478bd9Sstevel@tonic-gate rp = VTOR(vp); 14867c478bd9Sstevel@tonic-gate if (nfs_do_symlink_cache && rp->r_symlink.contents != NULL) { 14877c478bd9Sstevel@tonic-gate error = nfs_validate_caches(vp, cr); 14887c478bd9Sstevel@tonic-gate if (error) 14897c478bd9Sstevel@tonic-gate return (error); 14907c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 14917c478bd9Sstevel@tonic-gate if (rp->r_symlink.contents != NULL) { 14927c478bd9Sstevel@tonic-gate error = uiomove(rp->r_symlink.contents, 14937c478bd9Sstevel@tonic-gate rp->r_symlink.len, UIO_READ, uiop); 14947c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 14957c478bd9Sstevel@tonic-gate return (error); 14967c478bd9Sstevel@tonic-gate } 14977c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 14987c478bd9Sstevel@tonic-gate } 14997c478bd9Sstevel@tonic-gate 15007c478bd9Sstevel@tonic-gate 15017c478bd9Sstevel@tonic-gate rl.rl_data = kmem_alloc(NFS_MAXPATHLEN, KM_SLEEP); 15027c478bd9Sstevel@tonic-gate 15037c478bd9Sstevel@tonic-gate fi.vp = vp; 15047c478bd9Sstevel@tonic-gate fi.fhp = NULL; /* no need to update, filehandle not copied */ 15057c478bd9Sstevel@tonic-gate fi.copyproc = nfscopyfh; 15067c478bd9Sstevel@tonic-gate fi.lookupproc = nfslookup; 15077c478bd9Sstevel@tonic-gate fi.xattrdirproc = acl_getxattrdir2; 15087c478bd9Sstevel@tonic-gate 15097c478bd9Sstevel@tonic-gate douprintf = 1; 15107c478bd9Sstevel@tonic-gate 15117c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(vp), RFS_READLINK, 15120a701b1eSRobert Gordon xdr_readlink, (caddr_t)VTOFH(vp), 15137c478bd9Sstevel@tonic-gate xdr_rdlnres, (caddr_t)&rl, cr, 15147c478bd9Sstevel@tonic-gate &douprintf, &rl.rl_status, 0, &fi); 15157c478bd9Sstevel@tonic-gate 15167c478bd9Sstevel@tonic-gate if (error) { 15177c478bd9Sstevel@tonic-gate 15187c478bd9Sstevel@tonic-gate kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN); 15197c478bd9Sstevel@tonic-gate return (error); 15207c478bd9Sstevel@tonic-gate } 15217c478bd9Sstevel@tonic-gate 15227c478bd9Sstevel@tonic-gate error = geterrno(rl.rl_status); 15237c478bd9Sstevel@tonic-gate if (!error) { 15247c478bd9Sstevel@tonic-gate error = uiomove(rl.rl_data, (int)rl.rl_count, UIO_READ, uiop); 15257c478bd9Sstevel@tonic-gate if (nfs_do_symlink_cache && rp->r_symlink.contents == NULL) { 15267c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 15277c478bd9Sstevel@tonic-gate if (rp->r_symlink.contents == NULL) { 15287c478bd9Sstevel@tonic-gate rp->r_symlink.contents = rl.rl_data; 15297c478bd9Sstevel@tonic-gate rp->r_symlink.len = (int)rl.rl_count; 15307c478bd9Sstevel@tonic-gate rp->r_symlink.size = NFS_MAXPATHLEN; 15317c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 15327c478bd9Sstevel@tonic-gate } else { 15337c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 15347c478bd9Sstevel@tonic-gate 15357c478bd9Sstevel@tonic-gate kmem_free((void *)rl.rl_data, 15367c478bd9Sstevel@tonic-gate NFS_MAXPATHLEN); 15377c478bd9Sstevel@tonic-gate } 15387c478bd9Sstevel@tonic-gate } else { 15397c478bd9Sstevel@tonic-gate 15407c478bd9Sstevel@tonic-gate kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN); 15417c478bd9Sstevel@tonic-gate } 15427c478bd9Sstevel@tonic-gate } else { 15437c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, vp, cr); 15447c478bd9Sstevel@tonic-gate 
15457c478bd9Sstevel@tonic-gate kmem_free((void *)rl.rl_data, NFS_MAXPATHLEN); 15467c478bd9Sstevel@tonic-gate } 15477c478bd9Sstevel@tonic-gate 15487c478bd9Sstevel@tonic-gate /* 15497c478bd9Sstevel@tonic-gate * Conform to UFS semantics (see comment above) 15507c478bd9Sstevel@tonic-gate */ 15517c478bd9Sstevel@tonic-gate return (error == ENXIO ? EINVAL : error); 15527c478bd9Sstevel@tonic-gate } 15537c478bd9Sstevel@tonic-gate 15547c478bd9Sstevel@tonic-gate /* 15557c478bd9Sstevel@tonic-gate * Flush local dirty pages to stable storage on the server. 15567c478bd9Sstevel@tonic-gate * 15577c478bd9Sstevel@tonic-gate * If FNODSYNC is specified, then there is nothing to do because 15587c478bd9Sstevel@tonic-gate * metadata changes are not cached on the client before being 15597c478bd9Sstevel@tonic-gate * sent to the server. 15607c478bd9Sstevel@tonic-gate */ 1561da6c28aaSamw /* ARGSUSED */ 15627c478bd9Sstevel@tonic-gate static int 1563da6c28aaSamw nfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 15647c478bd9Sstevel@tonic-gate { 15657c478bd9Sstevel@tonic-gate int error; 15667c478bd9Sstevel@tonic-gate 15677c478bd9Sstevel@tonic-gate if ((syncflag & FNODSYNC) || IS_SWAPVP(vp)) 15687c478bd9Sstevel@tonic-gate return (0); 15697c478bd9Sstevel@tonic-gate 1570108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) 15717c478bd9Sstevel@tonic-gate return (EIO); 15727c478bd9Sstevel@tonic-gate 1573da6c28aaSamw error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct); 15747c478bd9Sstevel@tonic-gate if (!error) 15757c478bd9Sstevel@tonic-gate error = VTOR(vp)->r_error; 15767c478bd9Sstevel@tonic-gate return (error); 15777c478bd9Sstevel@tonic-gate } 15787c478bd9Sstevel@tonic-gate 15797c478bd9Sstevel@tonic-gate 15807c478bd9Sstevel@tonic-gate /* 15817c478bd9Sstevel@tonic-gate * Weirdness: if the file was removed or the target of a rename 15827c478bd9Sstevel@tonic-gate * operation while it was open, it got renamed instead. Here we 15837c478bd9Sstevel@tonic-gate * remove the renamed file. 15847c478bd9Sstevel@tonic-gate */ 1585da6c28aaSamw /* ARGSUSED */ 15867c478bd9Sstevel@tonic-gate static void 1587da6c28aaSamw nfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 15887c478bd9Sstevel@tonic-gate { 15897c478bd9Sstevel@tonic-gate rnode_t *rp; 15907c478bd9Sstevel@tonic-gate 15917c478bd9Sstevel@tonic-gate ASSERT(vp != DNLC_NO_VNODE); 15927c478bd9Sstevel@tonic-gate 15937c478bd9Sstevel@tonic-gate /* 15947c478bd9Sstevel@tonic-gate * If this is coming from the wrong zone, we let someone in the right 15957c478bd9Sstevel@tonic-gate * zone take care of it asynchronously. We can get here due to 15967c478bd9Sstevel@tonic-gate * VN_RELE() being called from pageout() or fsflush(). This call may 15977c478bd9Sstevel@tonic-gate * potentially turn into an expensive no-op if, for instance, v_count 15987c478bd9Sstevel@tonic-gate * gets incremented in the meantime, but it's still correct. 
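 * The hand-off is nfs_async_inactive(vp, cr, nfs_inactive) below,
 * which queues the vnode to the mount's async worker threads; those
 * threads run in the mount's zone and simply call back into
 * nfs_inactive() from the correct zone context.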
15997c478bd9Sstevel@tonic-gate */
1600108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) {
16017c478bd9Sstevel@tonic-gate nfs_async_inactive(vp, cr, nfs_inactive);
16027c478bd9Sstevel@tonic-gate return;
16037c478bd9Sstevel@tonic-gate }
16047c478bd9Sstevel@tonic-gate 
16057c478bd9Sstevel@tonic-gate rp = VTOR(vp);
16067c478bd9Sstevel@tonic-gate redo:
16077c478bd9Sstevel@tonic-gate if (rp->r_unldvp != NULL) {
16087c478bd9Sstevel@tonic-gate /*
16097c478bd9Sstevel@tonic-gate * Save the vnode pointer for the directory where the
16107c478bd9Sstevel@tonic-gate * unlinked-open file got renamed, then set it to NULL
16117c478bd9Sstevel@tonic-gate * to prevent another thread from getting here before
16127c478bd9Sstevel@tonic-gate * we're done with the remove. While we have the
16137c478bd9Sstevel@tonic-gate * statelock, make local copies of the pertinent rnode
16147c478bd9Sstevel@tonic-gate * fields. If we didn't do this in an atomic way, the
16157c478bd9Sstevel@tonic-gate * unl* fields could become inconsistent with respect
16167c478bd9Sstevel@tonic-gate * to each other due to a race condition between this
16177c478bd9Sstevel@tonic-gate * code and nfs_remove(). See bug report 1034328.
16187c478bd9Sstevel@tonic-gate */
16197c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock);
16207c478bd9Sstevel@tonic-gate if (rp->r_unldvp != NULL) {
16217c478bd9Sstevel@tonic-gate vnode_t *unldvp;
16227c478bd9Sstevel@tonic-gate char *unlname;
16237c478bd9Sstevel@tonic-gate cred_t *unlcred;
16247c478bd9Sstevel@tonic-gate struct nfsdiropargs da;
16257c478bd9Sstevel@tonic-gate enum nfsstat status;
16267c478bd9Sstevel@tonic-gate int douprintf;
16277c478bd9Sstevel@tonic-gate int error;
16287c478bd9Sstevel@tonic-gate 
16297c478bd9Sstevel@tonic-gate unldvp = rp->r_unldvp;
16307c478bd9Sstevel@tonic-gate rp->r_unldvp = NULL;
16317c478bd9Sstevel@tonic-gate unlname = rp->r_unlname;
16327c478bd9Sstevel@tonic-gate rp->r_unlname = NULL;
16337c478bd9Sstevel@tonic-gate unlcred = rp->r_unlcred;
16347c478bd9Sstevel@tonic-gate rp->r_unlcred = NULL;
16357c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock);
16367c478bd9Sstevel@tonic-gate 
16377c478bd9Sstevel@tonic-gate /*
16387c478bd9Sstevel@tonic-gate * If there are any dirty pages left, then flush
16397c478bd9Sstevel@tonic-gate * them. This is unfortunate because they just
16407c478bd9Sstevel@tonic-gate * may get thrown away during the remove operation,
16417c478bd9Sstevel@tonic-gate * but we have to do this for correctness.
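 * Flushing here also means that any cached writes reach the server
 * while the silly-renamed file still exists; once the RFS_REMOVE
 * below succeeds, a later flush would most likely just fail with
 * ESTALE.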
16427c478bd9Sstevel@tonic-gate */ 16437c478bd9Sstevel@tonic-gate if (vn_has_cached_data(vp) && 16447c478bd9Sstevel@tonic-gate ((rp->r_flags & RDIRTY) || rp->r_count > 0)) { 16457c478bd9Sstevel@tonic-gate ASSERT(vp->v_type != VCHR); 1646da6c28aaSamw error = nfs_putpage(vp, (offset_t)0, 0, 0, 1647da6c28aaSamw cr, ct); 16487c478bd9Sstevel@tonic-gate if (error) { 16497c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 16507c478bd9Sstevel@tonic-gate if (!rp->r_error) 16517c478bd9Sstevel@tonic-gate rp->r_error = error; 16527c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 16537c478bd9Sstevel@tonic-gate } 16547c478bd9Sstevel@tonic-gate } 16557c478bd9Sstevel@tonic-gate 16567c478bd9Sstevel@tonic-gate /* 16577c478bd9Sstevel@tonic-gate * Do the remove operation on the renamed file 16587c478bd9Sstevel@tonic-gate */ 16597c478bd9Sstevel@tonic-gate setdiropargs(&da, unlname, unldvp); 16607c478bd9Sstevel@tonic-gate 16617c478bd9Sstevel@tonic-gate douprintf = 1; 16627c478bd9Sstevel@tonic-gate 16637c478bd9Sstevel@tonic-gate (void) rfs2call(VTOMI(unldvp), RFS_REMOVE, 16647c478bd9Sstevel@tonic-gate xdr_diropargs, (caddr_t)&da, 16657c478bd9Sstevel@tonic-gate xdr_enum, (caddr_t)&status, unlcred, 16667c478bd9Sstevel@tonic-gate &douprintf, &status, 0, NULL); 16677c478bd9Sstevel@tonic-gate 16687c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(VTOR(unldvp))) 16697c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(unldvp); 16707c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(unldvp); 16717c478bd9Sstevel@tonic-gate 16727c478bd9Sstevel@tonic-gate /* 16737c478bd9Sstevel@tonic-gate * Release stuff held for the remove 16747c478bd9Sstevel@tonic-gate */ 16757c478bd9Sstevel@tonic-gate VN_RELE(unldvp); 16767c478bd9Sstevel@tonic-gate kmem_free(unlname, MAXNAMELEN); 16777c478bd9Sstevel@tonic-gate crfree(unlcred); 16787c478bd9Sstevel@tonic-gate goto redo; 16797c478bd9Sstevel@tonic-gate } 16807c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 16817c478bd9Sstevel@tonic-gate } 16827c478bd9Sstevel@tonic-gate 16837c478bd9Sstevel@tonic-gate rp_addfree(rp, cr); 16847c478bd9Sstevel@tonic-gate } 16857c478bd9Sstevel@tonic-gate 16867c478bd9Sstevel@tonic-gate /* 16877c478bd9Sstevel@tonic-gate * Remote file system operations having to do with directory manipulation. 16887c478bd9Sstevel@tonic-gate */ 16897c478bd9Sstevel@tonic-gate 1690da6c28aaSamw /* ARGSUSED */ 16917c478bd9Sstevel@tonic-gate static int 16927c478bd9Sstevel@tonic-gate nfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 1693da6c28aaSamw int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, 1694da6c28aaSamw int *direntflags, pathname_t *realpnp) 16957c478bd9Sstevel@tonic-gate { 16967c478bd9Sstevel@tonic-gate int error; 16977c478bd9Sstevel@tonic-gate vnode_t *vp; 16987c478bd9Sstevel@tonic-gate vnode_t *avp = NULL; 16997c478bd9Sstevel@tonic-gate rnode_t *drp; 17007c478bd9Sstevel@tonic-gate 1701108322fbScarlsonj if (nfs_zone() != VTOMI(dvp)->mi_zone) 17027c478bd9Sstevel@tonic-gate return (EPERM); 17037c478bd9Sstevel@tonic-gate 17047c478bd9Sstevel@tonic-gate drp = VTOR(dvp); 17057c478bd9Sstevel@tonic-gate 17067c478bd9Sstevel@tonic-gate /* 17077c478bd9Sstevel@tonic-gate * Are we looking up extended attributes? If so, "dvp" is 17087c478bd9Sstevel@tonic-gate * the file or directory for which we want attributes, and 17097c478bd9Sstevel@tonic-gate * we need a lookup of the hidden attribute directory 17107c478bd9Sstevel@tonic-gate * before we lookup the rest of the path. 
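 * The hidden directory is looked up under the name XATTR_DIR_NAME,
 * either from the DNLC or over the wire via acl_getxattrdir2(), and
 * the remainder of the lookup is then done relative to that
 * directory. A user-level attropen(3C) call on this file would
 * typically arrive here with LOOKUP_XATTR set.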
17117c478bd9Sstevel@tonic-gate */ 17127c478bd9Sstevel@tonic-gate if (flags & LOOKUP_XATTR) { 17137c478bd9Sstevel@tonic-gate bool_t cflag = ((flags & CREATE_XATTR_DIR) != 0); 17147c478bd9Sstevel@tonic-gate mntinfo_t *mi; 17157c478bd9Sstevel@tonic-gate 17167c478bd9Sstevel@tonic-gate mi = VTOMI(dvp); 17177c478bd9Sstevel@tonic-gate if (!(mi->mi_flags & MI_EXTATTR)) 17187c478bd9Sstevel@tonic-gate return (EINVAL); 17197c478bd9Sstevel@tonic-gate 17207c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) 17217c478bd9Sstevel@tonic-gate return (EINTR); 17227c478bd9Sstevel@tonic-gate 17237c478bd9Sstevel@tonic-gate (void) nfslookup_dnlc(dvp, XATTR_DIR_NAME, &avp, cr); 17247c478bd9Sstevel@tonic-gate if (avp == NULL) 17257c478bd9Sstevel@tonic-gate error = acl_getxattrdir2(dvp, &avp, cflag, cr, 0); 17267c478bd9Sstevel@tonic-gate else 17277c478bd9Sstevel@tonic-gate error = 0; 17287c478bd9Sstevel@tonic-gate 17297c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 17307c478bd9Sstevel@tonic-gate 17317c478bd9Sstevel@tonic-gate if (error) { 17327c478bd9Sstevel@tonic-gate if (mi->mi_flags & MI_EXTATTR) 17337c478bd9Sstevel@tonic-gate return (error); 17347c478bd9Sstevel@tonic-gate return (EINVAL); 17357c478bd9Sstevel@tonic-gate } 17367c478bd9Sstevel@tonic-gate dvp = avp; 17377c478bd9Sstevel@tonic-gate drp = VTOR(dvp); 17387c478bd9Sstevel@tonic-gate } 17397c478bd9Sstevel@tonic-gate 17407c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR(dvp))) { 17417c478bd9Sstevel@tonic-gate error = EINTR; 17427c478bd9Sstevel@tonic-gate goto out; 17437c478bd9Sstevel@tonic-gate } 17447c478bd9Sstevel@tonic-gate 17457c478bd9Sstevel@tonic-gate error = nfslookup(dvp, nm, vpp, pnp, flags, rdir, cr, 0); 17467c478bd9Sstevel@tonic-gate 17477c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 17487c478bd9Sstevel@tonic-gate 17497c478bd9Sstevel@tonic-gate /* 17507c478bd9Sstevel@tonic-gate * If vnode is a device, create special vnode. 
17517c478bd9Sstevel@tonic-gate */ 17527c478bd9Sstevel@tonic-gate if (!error && IS_DEVVP(*vpp)) { 17537c478bd9Sstevel@tonic-gate vp = *vpp; 17547c478bd9Sstevel@tonic-gate *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr); 17557c478bd9Sstevel@tonic-gate VN_RELE(vp); 17567c478bd9Sstevel@tonic-gate } 17577c478bd9Sstevel@tonic-gate 17587c478bd9Sstevel@tonic-gate out: 17597c478bd9Sstevel@tonic-gate if (avp != NULL) 17607c478bd9Sstevel@tonic-gate VN_RELE(avp); 17617c478bd9Sstevel@tonic-gate 17627c478bd9Sstevel@tonic-gate return (error); 17637c478bd9Sstevel@tonic-gate } 17647c478bd9Sstevel@tonic-gate 17657c478bd9Sstevel@tonic-gate static int nfs_lookup_neg_cache = 1; 17667c478bd9Sstevel@tonic-gate 17677c478bd9Sstevel@tonic-gate #ifdef DEBUG 17687c478bd9Sstevel@tonic-gate static int nfs_lookup_dnlc_hits = 0; 17697c478bd9Sstevel@tonic-gate static int nfs_lookup_dnlc_misses = 0; 17707c478bd9Sstevel@tonic-gate static int nfs_lookup_dnlc_neg_hits = 0; 17717c478bd9Sstevel@tonic-gate static int nfs_lookup_dnlc_disappears = 0; 17727c478bd9Sstevel@tonic-gate static int nfs_lookup_dnlc_lookups = 0; 17737c478bd9Sstevel@tonic-gate #endif 17747c478bd9Sstevel@tonic-gate 17757c478bd9Sstevel@tonic-gate /* ARGSUSED */ 17767c478bd9Sstevel@tonic-gate int 17777c478bd9Sstevel@tonic-gate nfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 17787c478bd9Sstevel@tonic-gate int flags, vnode_t *rdir, cred_t *cr, int rfscall_flags) 17797c478bd9Sstevel@tonic-gate { 17807c478bd9Sstevel@tonic-gate int error; 17817c478bd9Sstevel@tonic-gate 1782108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone); 17837c478bd9Sstevel@tonic-gate 17847c478bd9Sstevel@tonic-gate /* 17857c478bd9Sstevel@tonic-gate * If lookup is for "", just return dvp. Don't need 17867c478bd9Sstevel@tonic-gate * to send it over the wire, look it up in the dnlc, 17877c478bd9Sstevel@tonic-gate * or perform any access checks. 17887c478bd9Sstevel@tonic-gate */ 17897c478bd9Sstevel@tonic-gate if (*nm == '\0') { 17907c478bd9Sstevel@tonic-gate VN_HOLD(dvp); 17917c478bd9Sstevel@tonic-gate *vpp = dvp; 17927c478bd9Sstevel@tonic-gate return (0); 17937c478bd9Sstevel@tonic-gate } 17947c478bd9Sstevel@tonic-gate 17957c478bd9Sstevel@tonic-gate /* 17967c478bd9Sstevel@tonic-gate * Can't do lookups in non-directories. 17977c478bd9Sstevel@tonic-gate */ 17987c478bd9Sstevel@tonic-gate if (dvp->v_type != VDIR) 17997c478bd9Sstevel@tonic-gate return (ENOTDIR); 18007c478bd9Sstevel@tonic-gate 18017c478bd9Sstevel@tonic-gate /* 18027c478bd9Sstevel@tonic-gate * If we're called with RFSCALL_SOFT, it's important that 18037c478bd9Sstevel@tonic-gate * the only rfscall is one we make directly; if we permit 18047c478bd9Sstevel@tonic-gate * an access call because we're looking up "." or validating 18057c478bd9Sstevel@tonic-gate * a dnlc hit, we'll deadlock because that rfscall will not 18067c478bd9Sstevel@tonic-gate * have the RFSCALL_SOFT set. 18077c478bd9Sstevel@tonic-gate */ 18087c478bd9Sstevel@tonic-gate if (rfscall_flags & RFSCALL_SOFT) 18097c478bd9Sstevel@tonic-gate goto callit; 18107c478bd9Sstevel@tonic-gate 18117c478bd9Sstevel@tonic-gate /* 18127c478bd9Sstevel@tonic-gate * If lookup is for ".", just return dvp. Don't need 18137c478bd9Sstevel@tonic-gate * to send it over the wire or look it up in the dnlc, 18147c478bd9Sstevel@tonic-gate * just need to check access. 
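 * (That access check is the nfs_access(dvp, VEXEC, ...) call below;
 * it may itself go over the wire, which is why the RFSCALL_SOFT
 * case above bypasses this path entirely.)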
18157c478bd9Sstevel@tonic-gate */ 18167c478bd9Sstevel@tonic-gate if (strcmp(nm, ".") == 0) { 1817da6c28aaSamw error = nfs_access(dvp, VEXEC, 0, cr, NULL); 18187c478bd9Sstevel@tonic-gate if (error) 18197c478bd9Sstevel@tonic-gate return (error); 18207c478bd9Sstevel@tonic-gate VN_HOLD(dvp); 18217c478bd9Sstevel@tonic-gate *vpp = dvp; 18227c478bd9Sstevel@tonic-gate return (0); 18237c478bd9Sstevel@tonic-gate } 18247c478bd9Sstevel@tonic-gate 18257c478bd9Sstevel@tonic-gate /* 18267c478bd9Sstevel@tonic-gate * Lookup this name in the DNLC. If there was a valid entry, 18277c478bd9Sstevel@tonic-gate * then return the results of the lookup. 18287c478bd9Sstevel@tonic-gate */ 18297c478bd9Sstevel@tonic-gate error = nfslookup_dnlc(dvp, nm, vpp, cr); 18307c478bd9Sstevel@tonic-gate if (error || *vpp != NULL) 18317c478bd9Sstevel@tonic-gate return (error); 18327c478bd9Sstevel@tonic-gate 18337c478bd9Sstevel@tonic-gate callit: 18347c478bd9Sstevel@tonic-gate error = nfslookup_otw(dvp, nm, vpp, cr, rfscall_flags); 18357c478bd9Sstevel@tonic-gate 18367c478bd9Sstevel@tonic-gate return (error); 18377c478bd9Sstevel@tonic-gate } 18387c478bd9Sstevel@tonic-gate 18397c478bd9Sstevel@tonic-gate static int 18407c478bd9Sstevel@tonic-gate nfslookup_dnlc(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr) 18417c478bd9Sstevel@tonic-gate { 18427c478bd9Sstevel@tonic-gate int error; 18437c478bd9Sstevel@tonic-gate vnode_t *vp; 18447c478bd9Sstevel@tonic-gate 18457c478bd9Sstevel@tonic-gate ASSERT(*nm != '\0'); 1846108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone); 18477c478bd9Sstevel@tonic-gate 18487c478bd9Sstevel@tonic-gate /* 18497c478bd9Sstevel@tonic-gate * Lookup this name in the DNLC. If successful, then validate 18507c478bd9Sstevel@tonic-gate * the caches and then recheck the DNLC. The DNLC is rechecked 18517c478bd9Sstevel@tonic-gate * just in case this entry got invalidated during the call 18527c478bd9Sstevel@tonic-gate * to nfs_validate_caches. 18537c478bd9Sstevel@tonic-gate * 18547c478bd9Sstevel@tonic-gate * An assumption is being made that it is safe to say that a 18557c478bd9Sstevel@tonic-gate * file exists which may not on the server. Any operations to 18567c478bd9Sstevel@tonic-gate * the server will fail with ESTALE. 
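 * In other words, a positive DNLC entry may name a file that has
 * already been removed on the server; the first over-the-wire use
 * of the stale handle returns ESTALE, at which point the caches are
 * purged (see PURGE_STALE_FH()) and the name drops out of the DNLC.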
18577c478bd9Sstevel@tonic-gate */ 18587c478bd9Sstevel@tonic-gate #ifdef DEBUG 18597c478bd9Sstevel@tonic-gate nfs_lookup_dnlc_lookups++; 18607c478bd9Sstevel@tonic-gate #endif 18617c478bd9Sstevel@tonic-gate vp = dnlc_lookup(dvp, nm); 18627c478bd9Sstevel@tonic-gate if (vp != NULL) { 18637c478bd9Sstevel@tonic-gate VN_RELE(vp); 18647c478bd9Sstevel@tonic-gate if (vp == DNLC_NO_VNODE && !vn_is_readonly(dvp)) { 18657c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(dvp); 18667c478bd9Sstevel@tonic-gate } 18677c478bd9Sstevel@tonic-gate error = nfs_validate_caches(dvp, cr); 18687c478bd9Sstevel@tonic-gate if (error) 18697c478bd9Sstevel@tonic-gate return (error); 18707c478bd9Sstevel@tonic-gate vp = dnlc_lookup(dvp, nm); 18717c478bd9Sstevel@tonic-gate if (vp != NULL) { 1872da6c28aaSamw error = nfs_access(dvp, VEXEC, 0, cr, NULL); 18737c478bd9Sstevel@tonic-gate if (error) { 18747c478bd9Sstevel@tonic-gate VN_RELE(vp); 18757c478bd9Sstevel@tonic-gate return (error); 18767c478bd9Sstevel@tonic-gate } 18777c478bd9Sstevel@tonic-gate if (vp == DNLC_NO_VNODE) { 18787c478bd9Sstevel@tonic-gate VN_RELE(vp); 18797c478bd9Sstevel@tonic-gate #ifdef DEBUG 18807c478bd9Sstevel@tonic-gate nfs_lookup_dnlc_neg_hits++; 18817c478bd9Sstevel@tonic-gate #endif 18827c478bd9Sstevel@tonic-gate return (ENOENT); 18837c478bd9Sstevel@tonic-gate } 18847c478bd9Sstevel@tonic-gate *vpp = vp; 18857c478bd9Sstevel@tonic-gate #ifdef DEBUG 18867c478bd9Sstevel@tonic-gate nfs_lookup_dnlc_hits++; 18877c478bd9Sstevel@tonic-gate #endif 18887c478bd9Sstevel@tonic-gate return (0); 18897c478bd9Sstevel@tonic-gate } 18907c478bd9Sstevel@tonic-gate #ifdef DEBUG 18917c478bd9Sstevel@tonic-gate nfs_lookup_dnlc_disappears++; 18927c478bd9Sstevel@tonic-gate #endif 18937c478bd9Sstevel@tonic-gate } 18947c478bd9Sstevel@tonic-gate #ifdef DEBUG 18957c478bd9Sstevel@tonic-gate else 18967c478bd9Sstevel@tonic-gate nfs_lookup_dnlc_misses++; 18977c478bd9Sstevel@tonic-gate #endif 18987c478bd9Sstevel@tonic-gate 18997c478bd9Sstevel@tonic-gate *vpp = NULL; 19007c478bd9Sstevel@tonic-gate 19017c478bd9Sstevel@tonic-gate return (0); 19027c478bd9Sstevel@tonic-gate } 19037c478bd9Sstevel@tonic-gate 19047c478bd9Sstevel@tonic-gate static int 19057c478bd9Sstevel@tonic-gate nfslookup_otw(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr, 19067c478bd9Sstevel@tonic-gate int rfscall_flags) 19077c478bd9Sstevel@tonic-gate { 19087c478bd9Sstevel@tonic-gate int error; 19097c478bd9Sstevel@tonic-gate struct nfsdiropargs da; 19107c478bd9Sstevel@tonic-gate struct nfsdiropres dr; 19117c478bd9Sstevel@tonic-gate int douprintf; 19127c478bd9Sstevel@tonic-gate failinfo_t fi; 19137c478bd9Sstevel@tonic-gate hrtime_t t; 19147c478bd9Sstevel@tonic-gate 19157c478bd9Sstevel@tonic-gate ASSERT(*nm != '\0'); 19167c478bd9Sstevel@tonic-gate ASSERT(dvp->v_type == VDIR); 1917108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(dvp)->mi_zone); 19187c478bd9Sstevel@tonic-gate 19197c478bd9Sstevel@tonic-gate setdiropargs(&da, nm, dvp); 19207c478bd9Sstevel@tonic-gate 19217c478bd9Sstevel@tonic-gate fi.vp = dvp; 19227c478bd9Sstevel@tonic-gate fi.fhp = NULL; /* no need to update, filehandle not copied */ 19237c478bd9Sstevel@tonic-gate fi.copyproc = nfscopyfh; 19247c478bd9Sstevel@tonic-gate fi.lookupproc = nfslookup; 19257c478bd9Sstevel@tonic-gate fi.xattrdirproc = acl_getxattrdir2; 19267c478bd9Sstevel@tonic-gate 19277c478bd9Sstevel@tonic-gate douprintf = 1; 19287c478bd9Sstevel@tonic-gate 19297c478bd9Sstevel@tonic-gate t = gethrtime(); 19307c478bd9Sstevel@tonic-gate 19317c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(dvp), RFS_LOOKUP, 
19327c478bd9Sstevel@tonic-gate xdr_diropargs, (caddr_t)&da, 19337c478bd9Sstevel@tonic-gate xdr_diropres, (caddr_t)&dr, cr, 19347c478bd9Sstevel@tonic-gate &douprintf, &dr.dr_status, rfscall_flags, &fi); 19357c478bd9Sstevel@tonic-gate 19367c478bd9Sstevel@tonic-gate if (!error) { 19377c478bd9Sstevel@tonic-gate error = geterrno(dr.dr_status); 19387c478bd9Sstevel@tonic-gate if (!error) { 19397c478bd9Sstevel@tonic-gate *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr, 19407c478bd9Sstevel@tonic-gate dvp->v_vfsp, t, cr, VTOR(dvp)->r_path, nm); 19417c478bd9Sstevel@tonic-gate /* 19427c478bd9Sstevel@tonic-gate * If NFS_ACL is supported on the server, then the 19437c478bd9Sstevel@tonic-gate * attributes returned by server may have minimal 19447c478bd9Sstevel@tonic-gate * permissions sometimes denying access to users having 19457c478bd9Sstevel@tonic-gate * proper access. To get the proper attributes, mark 19467c478bd9Sstevel@tonic-gate * the attributes as expired so that they will be 19477c478bd9Sstevel@tonic-gate * regotten via the NFS_ACL GETATTR2 procedure. 19487c478bd9Sstevel@tonic-gate */ 19497c478bd9Sstevel@tonic-gate if (VTOMI(*vpp)->mi_flags & MI_ACL) { 19507c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(*vpp); 19517c478bd9Sstevel@tonic-gate } 19527c478bd9Sstevel@tonic-gate if (!(rfscall_flags & RFSCALL_SOFT)) 19537c478bd9Sstevel@tonic-gate dnlc_update(dvp, nm, *vpp); 19547c478bd9Sstevel@tonic-gate } else { 19557c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, dvp, cr); 19567c478bd9Sstevel@tonic-gate if (error == ENOENT && nfs_lookup_neg_cache) 19577c478bd9Sstevel@tonic-gate dnlc_enter(dvp, nm, DNLC_NO_VNODE); 19587c478bd9Sstevel@tonic-gate } 19597c478bd9Sstevel@tonic-gate } 19607c478bd9Sstevel@tonic-gate 19617c478bd9Sstevel@tonic-gate return (error); 19627c478bd9Sstevel@tonic-gate } 19637c478bd9Sstevel@tonic-gate 19647c478bd9Sstevel@tonic-gate /* ARGSUSED */ 19657c478bd9Sstevel@tonic-gate static int 19667c478bd9Sstevel@tonic-gate nfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive, 1967da6c28aaSamw int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct, 1968da6c28aaSamw vsecattr_t *vsecp) 19697c478bd9Sstevel@tonic-gate { 19707c478bd9Sstevel@tonic-gate int error; 19717c478bd9Sstevel@tonic-gate struct nfscreatargs args; 19727c478bd9Sstevel@tonic-gate struct nfsdiropres dr; 19737c478bd9Sstevel@tonic-gate int douprintf; 19747c478bd9Sstevel@tonic-gate vnode_t *vp; 19757c478bd9Sstevel@tonic-gate rnode_t *rp; 19767c478bd9Sstevel@tonic-gate struct vattr vattr; 19777c478bd9Sstevel@tonic-gate rnode_t *drp; 19787c478bd9Sstevel@tonic-gate vnode_t *tempvp; 19797c478bd9Sstevel@tonic-gate hrtime_t t; 19807c478bd9Sstevel@tonic-gate 19817c478bd9Sstevel@tonic-gate drp = VTOR(dvp); 19827c478bd9Sstevel@tonic-gate 1983108322fbScarlsonj if (nfs_zone() != VTOMI(dvp)->mi_zone) 19847c478bd9Sstevel@tonic-gate return (EPERM); 19857c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp))) 19867c478bd9Sstevel@tonic-gate return (EINTR); 19877c478bd9Sstevel@tonic-gate 19887c478bd9Sstevel@tonic-gate /* 19897c478bd9Sstevel@tonic-gate * We make a copy of the attributes because the caller does not 19907c478bd9Sstevel@tonic-gate * expect us to change what va points to. 19917c478bd9Sstevel@tonic-gate */ 19927c478bd9Sstevel@tonic-gate vattr = *va; 19937c478bd9Sstevel@tonic-gate 19947c478bd9Sstevel@tonic-gate /* 19957c478bd9Sstevel@tonic-gate * If the pathname is "", just use dvp. 
Don't need 19967c478bd9Sstevel@tonic-gate * to send it over the wire, look it up in the dnlc, 19977c478bd9Sstevel@tonic-gate * or perform any access checks. 19987c478bd9Sstevel@tonic-gate */ 19997c478bd9Sstevel@tonic-gate if (*nm == '\0') { 20007c478bd9Sstevel@tonic-gate error = 0; 20017c478bd9Sstevel@tonic-gate VN_HOLD(dvp); 20027c478bd9Sstevel@tonic-gate vp = dvp; 20037c478bd9Sstevel@tonic-gate /* 20047c478bd9Sstevel@tonic-gate * If the pathname is ".", just use dvp. Don't need 20057c478bd9Sstevel@tonic-gate * to send it over the wire or look it up in the dnlc, 20067c478bd9Sstevel@tonic-gate * just need to check access. 20077c478bd9Sstevel@tonic-gate */ 20087c478bd9Sstevel@tonic-gate } else if (strcmp(nm, ".") == 0) { 2009da6c28aaSamw error = nfs_access(dvp, VEXEC, 0, cr, ct); 20107c478bd9Sstevel@tonic-gate if (error) { 20117c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 20127c478bd9Sstevel@tonic-gate return (error); 20137c478bd9Sstevel@tonic-gate } 20147c478bd9Sstevel@tonic-gate VN_HOLD(dvp); 20157c478bd9Sstevel@tonic-gate vp = dvp; 20167c478bd9Sstevel@tonic-gate /* 20177c478bd9Sstevel@tonic-gate * We need to go over the wire, just to be sure whether the 20187c478bd9Sstevel@tonic-gate * file exists or not. Using the DNLC can be dangerous in 20197c478bd9Sstevel@tonic-gate * this case when making a decision regarding existence. 20207c478bd9Sstevel@tonic-gate */ 20217c478bd9Sstevel@tonic-gate } else { 20227c478bd9Sstevel@tonic-gate error = nfslookup_otw(dvp, nm, &vp, cr, 0); 20237c478bd9Sstevel@tonic-gate } 20247c478bd9Sstevel@tonic-gate if (!error) { 20257c478bd9Sstevel@tonic-gate if (exclusive == EXCL) 20267c478bd9Sstevel@tonic-gate error = EEXIST; 20277c478bd9Sstevel@tonic-gate else if (vp->v_type == VDIR && (mode & VWRITE)) 20287c478bd9Sstevel@tonic-gate error = EISDIR; 20297c478bd9Sstevel@tonic-gate else { 20307c478bd9Sstevel@tonic-gate /* 20317c478bd9Sstevel@tonic-gate * If vnode is a device, create special vnode. 20327c478bd9Sstevel@tonic-gate */ 20337c478bd9Sstevel@tonic-gate if (IS_DEVVP(vp)) { 20347c478bd9Sstevel@tonic-gate tempvp = vp; 20357c478bd9Sstevel@tonic-gate vp = specvp(vp, vp->v_rdev, vp->v_type, cr); 20367c478bd9Sstevel@tonic-gate VN_RELE(tempvp); 20377c478bd9Sstevel@tonic-gate } 2038da6c28aaSamw if (!(error = VOP_ACCESS(vp, mode, 0, cr, ct))) { 20397c478bd9Sstevel@tonic-gate if ((vattr.va_mask & AT_SIZE) && 20407c478bd9Sstevel@tonic-gate vp->v_type == VREG) { 20417c478bd9Sstevel@tonic-gate vattr.va_mask = AT_SIZE; 20427c478bd9Sstevel@tonic-gate error = nfssetattr(vp, &vattr, 0, cr); 204372102e74SBryan Cantrill 204472102e74SBryan Cantrill if (!error) { 204572102e74SBryan Cantrill /* 204672102e74SBryan Cantrill * Existing file was truncated; 204772102e74SBryan Cantrill * emit a create event. 
204872102e74SBryan Cantrill */ 204972102e74SBryan Cantrill vnevent_create(vp, ct); 205072102e74SBryan Cantrill } 20517c478bd9Sstevel@tonic-gate } 20527c478bd9Sstevel@tonic-gate } 20537c478bd9Sstevel@tonic-gate } 20547c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 20557c478bd9Sstevel@tonic-gate if (error) { 20567c478bd9Sstevel@tonic-gate VN_RELE(vp); 2057df2381bfSpraks } else { 20587c478bd9Sstevel@tonic-gate *vpp = vp; 2059df2381bfSpraks } 20607c478bd9Sstevel@tonic-gate return (error); 20617c478bd9Sstevel@tonic-gate } 20627c478bd9Sstevel@tonic-gate 20637c478bd9Sstevel@tonic-gate ASSERT(vattr.va_mask & AT_TYPE); 20647c478bd9Sstevel@tonic-gate if (vattr.va_type == VREG) { 20657c478bd9Sstevel@tonic-gate ASSERT(vattr.va_mask & AT_MODE); 20667c478bd9Sstevel@tonic-gate if (MANDMODE(vattr.va_mode)) { 20677c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 20687c478bd9Sstevel@tonic-gate return (EACCES); 20697c478bd9Sstevel@tonic-gate } 20707c478bd9Sstevel@tonic-gate } 20717c478bd9Sstevel@tonic-gate 20727c478bd9Sstevel@tonic-gate dnlc_remove(dvp, nm); 20737c478bd9Sstevel@tonic-gate 20747c478bd9Sstevel@tonic-gate setdiropargs(&args.ca_da, nm, dvp); 20757c478bd9Sstevel@tonic-gate 20767c478bd9Sstevel@tonic-gate /* 20777c478bd9Sstevel@tonic-gate * Decide what the group-id of the created file should be. 20787c478bd9Sstevel@tonic-gate * Set it in attribute list as advisory...then do a setattr 20797c478bd9Sstevel@tonic-gate * if the server didn't get it right the first time. 20807c478bd9Sstevel@tonic-gate */ 20817c478bd9Sstevel@tonic-gate error = setdirgid(dvp, &vattr.va_gid, cr); 20827c478bd9Sstevel@tonic-gate if (error) { 20837c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 20847c478bd9Sstevel@tonic-gate return (error); 20857c478bd9Sstevel@tonic-gate } 20867c478bd9Sstevel@tonic-gate vattr.va_mask |= AT_GID; 20877c478bd9Sstevel@tonic-gate 20887c478bd9Sstevel@tonic-gate /* 20897c478bd9Sstevel@tonic-gate * This is a completely gross hack to make mknod 20907c478bd9Sstevel@tonic-gate * work over the wire until we can wack the protocol 20917c478bd9Sstevel@tonic-gate */ 20927c478bd9Sstevel@tonic-gate #define IFCHR 0020000 /* character special */ 20937c478bd9Sstevel@tonic-gate #define IFBLK 0060000 /* block special */ 20947c478bd9Sstevel@tonic-gate #define IFSOCK 0140000 /* socket */ 20957c478bd9Sstevel@tonic-gate 20967c478bd9Sstevel@tonic-gate /* 20977c478bd9Sstevel@tonic-gate * dev_t is uint_t in 5.x and short in 4.x. Both 4.x 20987c478bd9Sstevel@tonic-gate * supports 8 bit majors. 5.x supports 14 bit majors. 5.x supports 18 20997c478bd9Sstevel@tonic-gate * bits in the minor number where 4.x supports 8 bits. If the 5.x 21007c478bd9Sstevel@tonic-gate * minor/major numbers <= 8 bits long, compress the device 21017c478bd9Sstevel@tonic-gate * number before sending it. Otherwise, the 4.x server will not 21027c478bd9Sstevel@tonic-gate * create the device with the correct device number and nothing can be 21037c478bd9Sstevel@tonic-gate * done about this. 
21047c478bd9Sstevel@tonic-gate */ 21057c478bd9Sstevel@tonic-gate if (vattr.va_type == VCHR || vattr.va_type == VBLK) { 21067c478bd9Sstevel@tonic-gate dev_t d = vattr.va_rdev; 21077c478bd9Sstevel@tonic-gate dev32_t dev32; 21087c478bd9Sstevel@tonic-gate 21097c478bd9Sstevel@tonic-gate if (vattr.va_type == VCHR) 21107c478bd9Sstevel@tonic-gate vattr.va_mode |= IFCHR; 21117c478bd9Sstevel@tonic-gate else 21127c478bd9Sstevel@tonic-gate vattr.va_mode |= IFBLK; 21137c478bd9Sstevel@tonic-gate 21147c478bd9Sstevel@tonic-gate (void) cmpldev(&dev32, d); 21157c478bd9Sstevel@tonic-gate if (dev32 & ~((SO4_MAXMAJ << L_BITSMINOR32) | SO4_MAXMIN)) 21167c478bd9Sstevel@tonic-gate vattr.va_size = (u_offset_t)dev32; 21177c478bd9Sstevel@tonic-gate else 21187c478bd9Sstevel@tonic-gate vattr.va_size = (u_offset_t)nfsv2_cmpdev(d); 21197c478bd9Sstevel@tonic-gate 21207c478bd9Sstevel@tonic-gate vattr.va_mask |= AT_MODE|AT_SIZE; 21217c478bd9Sstevel@tonic-gate } else if (vattr.va_type == VFIFO) { 21227c478bd9Sstevel@tonic-gate vattr.va_mode |= IFCHR; /* xtra kludge for namedpipe */ 21237c478bd9Sstevel@tonic-gate vattr.va_size = (u_offset_t)NFS_FIFO_DEV; /* blech */ 21247c478bd9Sstevel@tonic-gate vattr.va_mask |= AT_MODE|AT_SIZE; 21257c478bd9Sstevel@tonic-gate } else if (vattr.va_type == VSOCK) { 21267c478bd9Sstevel@tonic-gate vattr.va_mode |= IFSOCK; 21277c478bd9Sstevel@tonic-gate /* 21287c478bd9Sstevel@tonic-gate * To avoid triggering bugs in the servers set AT_SIZE 21297c478bd9Sstevel@tonic-gate * (all other RFS_CREATE calls set this). 21307c478bd9Sstevel@tonic-gate */ 21317c478bd9Sstevel@tonic-gate vattr.va_size = 0; 21327c478bd9Sstevel@tonic-gate vattr.va_mask |= AT_MODE|AT_SIZE; 21337c478bd9Sstevel@tonic-gate } 21347c478bd9Sstevel@tonic-gate 21357c478bd9Sstevel@tonic-gate args.ca_sa = &args.ca_sa_buf; 21367c478bd9Sstevel@tonic-gate error = vattr_to_sattr(&vattr, args.ca_sa); 21377c478bd9Sstevel@tonic-gate if (error) { 21387c478bd9Sstevel@tonic-gate /* req time field(s) overflow - return immediately */ 21397c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 21407c478bd9Sstevel@tonic-gate return (error); 21417c478bd9Sstevel@tonic-gate } 21427c478bd9Sstevel@tonic-gate 21437c478bd9Sstevel@tonic-gate douprintf = 1; 21447c478bd9Sstevel@tonic-gate 21457c478bd9Sstevel@tonic-gate t = gethrtime(); 21467c478bd9Sstevel@tonic-gate 21477c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(dvp), RFS_CREATE, 21487c478bd9Sstevel@tonic-gate xdr_creatargs, (caddr_t)&args, 21497c478bd9Sstevel@tonic-gate xdr_diropres, (caddr_t)&dr, cr, 21507c478bd9Sstevel@tonic-gate &douprintf, &dr.dr_status, 0, NULL); 21517c478bd9Sstevel@tonic-gate 21527c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(dvp); /* mod time changed */ 21537c478bd9Sstevel@tonic-gate 21547c478bd9Sstevel@tonic-gate if (!error) { 21557c478bd9Sstevel@tonic-gate error = geterrno(dr.dr_status); 21567c478bd9Sstevel@tonic-gate if (!error) { 21577c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(drp)) 21587c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(dvp); 21597c478bd9Sstevel@tonic-gate vp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr, 21607c478bd9Sstevel@tonic-gate dvp->v_vfsp, t, cr, NULL, NULL); 21617c478bd9Sstevel@tonic-gate /* 21627c478bd9Sstevel@tonic-gate * If NFS_ACL is supported on the server, then the 21637c478bd9Sstevel@tonic-gate * attributes returned by server may have minimal 21647c478bd9Sstevel@tonic-gate * permissions sometimes denying access to users having 21657c478bd9Sstevel@tonic-gate * proper access. 
To get the proper attributes, mark 21667c478bd9Sstevel@tonic-gate * the attributes as expired so that they will be 21677c478bd9Sstevel@tonic-gate * regotten via the NFS_ACL GETATTR2 procedure. 21687c478bd9Sstevel@tonic-gate */ 21697c478bd9Sstevel@tonic-gate if (VTOMI(vp)->mi_flags & MI_ACL) { 21707c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(vp); 21717c478bd9Sstevel@tonic-gate } 21727c478bd9Sstevel@tonic-gate dnlc_update(dvp, nm, vp); 21737c478bd9Sstevel@tonic-gate rp = VTOR(vp); 21747c478bd9Sstevel@tonic-gate if (vattr.va_size == 0) { 21757c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 21767c478bd9Sstevel@tonic-gate rp->r_size = 0; 21777c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 21787c478bd9Sstevel@tonic-gate if (vn_has_cached_data(vp)) { 21797c478bd9Sstevel@tonic-gate ASSERT(vp->v_type != VCHR); 21807c478bd9Sstevel@tonic-gate nfs_invalidate_pages(vp, 21817c478bd9Sstevel@tonic-gate (u_offset_t)0, cr); 21827c478bd9Sstevel@tonic-gate } 21837c478bd9Sstevel@tonic-gate } 21847c478bd9Sstevel@tonic-gate 21857c478bd9Sstevel@tonic-gate /* 21867c478bd9Sstevel@tonic-gate * Make sure the gid was set correctly. 21877c478bd9Sstevel@tonic-gate * If not, try to set it (but don't lose 21887c478bd9Sstevel@tonic-gate * any sleep over it). 21897c478bd9Sstevel@tonic-gate */ 21907c478bd9Sstevel@tonic-gate if (vattr.va_gid != rp->r_attr.va_gid) { 21917c478bd9Sstevel@tonic-gate vattr.va_mask = AT_GID; 21927c478bd9Sstevel@tonic-gate (void) nfssetattr(vp, &vattr, 0, cr); 21937c478bd9Sstevel@tonic-gate } 21947c478bd9Sstevel@tonic-gate 21957c478bd9Sstevel@tonic-gate /* 21967c478bd9Sstevel@tonic-gate * If vnode is a device create special vnode 21977c478bd9Sstevel@tonic-gate */ 21987c478bd9Sstevel@tonic-gate if (IS_DEVVP(vp)) { 21997c478bd9Sstevel@tonic-gate *vpp = specvp(vp, vp->v_rdev, vp->v_type, cr); 22007c478bd9Sstevel@tonic-gate VN_RELE(vp); 22017c478bd9Sstevel@tonic-gate } else 22027c478bd9Sstevel@tonic-gate *vpp = vp; 22037c478bd9Sstevel@tonic-gate } else { 22047c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, dvp, cr); 22057c478bd9Sstevel@tonic-gate } 22067c478bd9Sstevel@tonic-gate } 22077c478bd9Sstevel@tonic-gate 22087c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 22097c478bd9Sstevel@tonic-gate 22107c478bd9Sstevel@tonic-gate return (error); 22117c478bd9Sstevel@tonic-gate } 22127c478bd9Sstevel@tonic-gate 22137c478bd9Sstevel@tonic-gate /* 22147c478bd9Sstevel@tonic-gate * Weirdness: if the vnode to be removed is open 22157c478bd9Sstevel@tonic-gate * we rename it instead of removing it and nfs_inactive 22167c478bd9Sstevel@tonic-gate * will remove the new name. 
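 * Concretely: nfs_remove() renames the still-open file to a
 * temporary name obtained from newname() and records the directory,
 * name and credentials in r_unldvp/r_unlname/r_unlcred; when the
 * last hold on the vnode goes away, nfs_inactive() above sends the
 * RFS_REMOVE for that temporary name.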
22177c478bd9Sstevel@tonic-gate  */
2218da6c28aaSamw /* ARGSUSED */
22197c478bd9Sstevel@tonic-gate static int
2220da6c28aaSamw nfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, int flags)
22217c478bd9Sstevel@tonic-gate {
22227c478bd9Sstevel@tonic-gate 	int error;
22237c478bd9Sstevel@tonic-gate 	struct nfsdiropargs da;
22247c478bd9Sstevel@tonic-gate 	enum nfsstat status;
22257c478bd9Sstevel@tonic-gate 	vnode_t *vp;
22267c478bd9Sstevel@tonic-gate 	char *tmpname;
22277c478bd9Sstevel@tonic-gate 	int douprintf;
22287c478bd9Sstevel@tonic-gate 	rnode_t *rp;
22297c478bd9Sstevel@tonic-gate 	rnode_t *drp;
22307c478bd9Sstevel@tonic-gate 
2231108322fbScarlsonj 	if (nfs_zone() != VTOMI(dvp)->mi_zone)
22327c478bd9Sstevel@tonic-gate 		return (EPERM);
22337c478bd9Sstevel@tonic-gate 	drp = VTOR(dvp);
22347c478bd9Sstevel@tonic-gate 	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
22357c478bd9Sstevel@tonic-gate 		return (EINTR);
22367c478bd9Sstevel@tonic-gate 
22377c478bd9Sstevel@tonic-gate 	error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
22387c478bd9Sstevel@tonic-gate 	if (error) {
22397c478bd9Sstevel@tonic-gate 		nfs_rw_exit(&drp->r_rwlock);
22407c478bd9Sstevel@tonic-gate 		return (error);
22417c478bd9Sstevel@tonic-gate 	}
22427c478bd9Sstevel@tonic-gate 
22437c478bd9Sstevel@tonic-gate 	if (vp->v_type == VDIR && secpolicy_fs_linkdir(cr, dvp->v_vfsp)) {
22447c478bd9Sstevel@tonic-gate 		VN_RELE(vp);
22457c478bd9Sstevel@tonic-gate 		nfs_rw_exit(&drp->r_rwlock);
22467c478bd9Sstevel@tonic-gate 		return (EPERM);
22477c478bd9Sstevel@tonic-gate 	}
22487c478bd9Sstevel@tonic-gate 
22497c478bd9Sstevel@tonic-gate 	/*
22507c478bd9Sstevel@tonic-gate 	 * First just remove the entry from the name cache, as it
22517c478bd9Sstevel@tonic-gate 	 * is most likely the only entry for this vp.
22527c478bd9Sstevel@tonic-gate 	 */
22537c478bd9Sstevel@tonic-gate 	dnlc_remove(dvp, nm);
22547c478bd9Sstevel@tonic-gate 
22557c478bd9Sstevel@tonic-gate 	/*
22567c478bd9Sstevel@tonic-gate 	 * If the file has a v_count > 1 then there may be more than one
22577c478bd9Sstevel@tonic-gate 	 * entry in the name cache due to multiple links or an open file,
22587c478bd9Sstevel@tonic-gate 	 * but we don't have the real reference count so flush all
22597c478bd9Sstevel@tonic-gate 	 * possible entries.
22607c478bd9Sstevel@tonic-gate */ 22617c478bd9Sstevel@tonic-gate if (vp->v_count > 1) 22627c478bd9Sstevel@tonic-gate dnlc_purge_vp(vp); 22637c478bd9Sstevel@tonic-gate 22647c478bd9Sstevel@tonic-gate /* 22657c478bd9Sstevel@tonic-gate * Now we have the real reference count on the vnode 22667c478bd9Sstevel@tonic-gate */ 22677c478bd9Sstevel@tonic-gate rp = VTOR(vp); 22687c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 22697c478bd9Sstevel@tonic-gate if (vp->v_count > 1 && 22707c478bd9Sstevel@tonic-gate (rp->r_unldvp == NULL || strcmp(nm, rp->r_unlname) == 0)) { 22717c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 22727c478bd9Sstevel@tonic-gate tmpname = newname(); 2273da6c28aaSamw error = nfsrename(dvp, nm, dvp, tmpname, cr, ct); 22747c478bd9Sstevel@tonic-gate if (error) 22757c478bd9Sstevel@tonic-gate kmem_free(tmpname, MAXNAMELEN); 22767c478bd9Sstevel@tonic-gate else { 22777c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 22787c478bd9Sstevel@tonic-gate if (rp->r_unldvp == NULL) { 22797c478bd9Sstevel@tonic-gate VN_HOLD(dvp); 22807c478bd9Sstevel@tonic-gate rp->r_unldvp = dvp; 22817c478bd9Sstevel@tonic-gate if (rp->r_unlcred != NULL) 22827c478bd9Sstevel@tonic-gate crfree(rp->r_unlcred); 22837c478bd9Sstevel@tonic-gate crhold(cr); 22847c478bd9Sstevel@tonic-gate rp->r_unlcred = cr; 22857c478bd9Sstevel@tonic-gate rp->r_unlname = tmpname; 22867c478bd9Sstevel@tonic-gate } else { 22877c478bd9Sstevel@tonic-gate kmem_free(rp->r_unlname, MAXNAMELEN); 22887c478bd9Sstevel@tonic-gate rp->r_unlname = tmpname; 22897c478bd9Sstevel@tonic-gate } 22907c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 22917c478bd9Sstevel@tonic-gate } 22927c478bd9Sstevel@tonic-gate } else { 22937c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 22947c478bd9Sstevel@tonic-gate /* 22957c478bd9Sstevel@tonic-gate * We need to flush any dirty pages which happen to 22967c478bd9Sstevel@tonic-gate * be hanging around before removing the file. This 22977c478bd9Sstevel@tonic-gate * shouldn't happen very often and mostly on file 22987c478bd9Sstevel@tonic-gate * systems mounted "nocto". 22997c478bd9Sstevel@tonic-gate */ 23007c478bd9Sstevel@tonic-gate if (vn_has_cached_data(vp) && 23017c478bd9Sstevel@tonic-gate ((rp->r_flags & RDIRTY) || rp->r_count > 0)) { 2302da6c28aaSamw error = nfs_putpage(vp, (offset_t)0, 0, 0, cr, ct); 23037c478bd9Sstevel@tonic-gate if (error && (error == ENOSPC || error == EDQUOT)) { 23047c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 23057c478bd9Sstevel@tonic-gate if (!rp->r_error) 23067c478bd9Sstevel@tonic-gate rp->r_error = error; 23077c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 23087c478bd9Sstevel@tonic-gate } 23097c478bd9Sstevel@tonic-gate } 23107c478bd9Sstevel@tonic-gate 23117c478bd9Sstevel@tonic-gate setdiropargs(&da, nm, dvp); 23127c478bd9Sstevel@tonic-gate 23137c478bd9Sstevel@tonic-gate douprintf = 1; 23147c478bd9Sstevel@tonic-gate 23157c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(dvp), RFS_REMOVE, 23167c478bd9Sstevel@tonic-gate xdr_diropargs, (caddr_t)&da, 23177c478bd9Sstevel@tonic-gate xdr_enum, (caddr_t)&status, cr, 23187c478bd9Sstevel@tonic-gate &douprintf, &status, 0, NULL); 23197c478bd9Sstevel@tonic-gate 23207c478bd9Sstevel@tonic-gate /* 23217c478bd9Sstevel@tonic-gate * The xattr dir may be gone after last attr is removed, 23227c478bd9Sstevel@tonic-gate * so flush it from dnlc. 
23237c478bd9Sstevel@tonic-gate */ 23247c478bd9Sstevel@tonic-gate if (dvp->v_flag & V_XATTRDIR) 23257c478bd9Sstevel@tonic-gate dnlc_purge_vp(dvp); 23267c478bd9Sstevel@tonic-gate 23277c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(dvp); /* mod time changed */ 23287c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(vp); /* link count changed */ 23297c478bd9Sstevel@tonic-gate 23307c478bd9Sstevel@tonic-gate if (!error) { 23317c478bd9Sstevel@tonic-gate error = geterrno(status); 23327c478bd9Sstevel@tonic-gate if (!error) { 23337c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(drp)) 23347c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(dvp); 23357c478bd9Sstevel@tonic-gate } else { 23367c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, dvp, cr); 23377c478bd9Sstevel@tonic-gate } 23387c478bd9Sstevel@tonic-gate } 23397c478bd9Sstevel@tonic-gate } 23407c478bd9Sstevel@tonic-gate 2341df2381bfSpraks if (error == 0) { 2342da6c28aaSamw vnevent_remove(vp, dvp, nm, ct); 2343df2381bfSpraks } 23447c478bd9Sstevel@tonic-gate VN_RELE(vp); 23457c478bd9Sstevel@tonic-gate 23467c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 23477c478bd9Sstevel@tonic-gate 23487c478bd9Sstevel@tonic-gate return (error); 23497c478bd9Sstevel@tonic-gate } 23507c478bd9Sstevel@tonic-gate 2351da6c28aaSamw /* ARGSUSED */ 23527c478bd9Sstevel@tonic-gate static int 2353da6c28aaSamw nfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr, 2354da6c28aaSamw caller_context_t *ct, int flags) 23557c478bd9Sstevel@tonic-gate { 23567c478bd9Sstevel@tonic-gate int error; 23577c478bd9Sstevel@tonic-gate struct nfslinkargs args; 23587c478bd9Sstevel@tonic-gate enum nfsstat status; 23597c478bd9Sstevel@tonic-gate vnode_t *realvp; 23607c478bd9Sstevel@tonic-gate int douprintf; 23617c478bd9Sstevel@tonic-gate rnode_t *tdrp; 23627c478bd9Sstevel@tonic-gate 2363108322fbScarlsonj if (nfs_zone() != VTOMI(tdvp)->mi_zone) 23647c478bd9Sstevel@tonic-gate return (EPERM); 2365da6c28aaSamw if (VOP_REALVP(svp, &realvp, ct) == 0) 23667c478bd9Sstevel@tonic-gate svp = realvp; 23677c478bd9Sstevel@tonic-gate 23687c478bd9Sstevel@tonic-gate args.la_from = VTOFH(svp); 23697c478bd9Sstevel@tonic-gate setdiropargs(&args.la_to, tnm, tdvp); 23707c478bd9Sstevel@tonic-gate 23717c478bd9Sstevel@tonic-gate tdrp = VTOR(tdvp); 23727c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&tdrp->r_rwlock, RW_WRITER, INTR(tdvp))) 23737c478bd9Sstevel@tonic-gate return (EINTR); 23747c478bd9Sstevel@tonic-gate 23757c478bd9Sstevel@tonic-gate dnlc_remove(tdvp, tnm); 23767c478bd9Sstevel@tonic-gate 23777c478bd9Sstevel@tonic-gate douprintf = 1; 23787c478bd9Sstevel@tonic-gate 23797c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(svp), RFS_LINK, 23807c478bd9Sstevel@tonic-gate xdr_linkargs, (caddr_t)&args, 23817c478bd9Sstevel@tonic-gate xdr_enum, (caddr_t)&status, cr, 23827c478bd9Sstevel@tonic-gate &douprintf, &status, 0, NULL); 23837c478bd9Sstevel@tonic-gate 23847c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(tdvp); /* mod time changed */ 23857c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(svp); /* link count changed */ 23867c478bd9Sstevel@tonic-gate 23877c478bd9Sstevel@tonic-gate if (!error) { 23887c478bd9Sstevel@tonic-gate error = geterrno(status); 23897c478bd9Sstevel@tonic-gate if (!error) { 23907c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(tdrp)) 23917c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(tdvp); 23927c478bd9Sstevel@tonic-gate } 23937c478bd9Sstevel@tonic-gate } 23947c478bd9Sstevel@tonic-gate 23957c478bd9Sstevel@tonic-gate nfs_rw_exit(&tdrp->r_rwlock); 23967c478bd9Sstevel@tonic-gate 2397df2381bfSpraks if (!error) { 
2398df2381bfSpraks /* 2399df2381bfSpraks * Notify the source file of this link operation. 2400df2381bfSpraks */ 2401da6c28aaSamw vnevent_link(svp, ct); 2402df2381bfSpraks } 24037c478bd9Sstevel@tonic-gate return (error); 24047c478bd9Sstevel@tonic-gate } 24057c478bd9Sstevel@tonic-gate 2406da6c28aaSamw /* ARGSUSED */ 24077c478bd9Sstevel@tonic-gate static int 2408da6c28aaSamw nfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr, 2409da6c28aaSamw caller_context_t *ct, int flags) 24107c478bd9Sstevel@tonic-gate { 24117c478bd9Sstevel@tonic-gate vnode_t *realvp; 24127c478bd9Sstevel@tonic-gate 2413108322fbScarlsonj if (nfs_zone() != VTOMI(odvp)->mi_zone) 24147c478bd9Sstevel@tonic-gate return (EPERM); 2415da6c28aaSamw if (VOP_REALVP(ndvp, &realvp, ct) == 0) 24167c478bd9Sstevel@tonic-gate ndvp = realvp; 24177c478bd9Sstevel@tonic-gate 2418da6c28aaSamw return (nfsrename(odvp, onm, ndvp, nnm, cr, ct)); 24197c478bd9Sstevel@tonic-gate } 24207c478bd9Sstevel@tonic-gate 24217c478bd9Sstevel@tonic-gate /* 24227c478bd9Sstevel@tonic-gate * nfsrename does the real work of renaming in NFS Version 2. 24237c478bd9Sstevel@tonic-gate */ 24247c478bd9Sstevel@tonic-gate static int 2425da6c28aaSamw nfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr, 2426da6c28aaSamw caller_context_t *ct) 24277c478bd9Sstevel@tonic-gate { 24287c478bd9Sstevel@tonic-gate int error; 24297c478bd9Sstevel@tonic-gate enum nfsstat status; 24307c478bd9Sstevel@tonic-gate struct nfsrnmargs args; 24317c478bd9Sstevel@tonic-gate int douprintf; 2432df2381bfSpraks vnode_t *nvp = NULL; 24337c478bd9Sstevel@tonic-gate vnode_t *ovp = NULL; 24347c478bd9Sstevel@tonic-gate char *tmpname; 24357c478bd9Sstevel@tonic-gate rnode_t *rp; 24367c478bd9Sstevel@tonic-gate rnode_t *odrp; 24377c478bd9Sstevel@tonic-gate rnode_t *ndrp; 24387c478bd9Sstevel@tonic-gate 2439108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(odvp)->mi_zone); 24407c478bd9Sstevel@tonic-gate if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 || 24417c478bd9Sstevel@tonic-gate strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0) 24427c478bd9Sstevel@tonic-gate return (EINVAL); 24437c478bd9Sstevel@tonic-gate 24447c478bd9Sstevel@tonic-gate odrp = VTOR(odvp); 24457c478bd9Sstevel@tonic-gate ndrp = VTOR(ndvp); 24467c478bd9Sstevel@tonic-gate if ((intptr_t)odrp < (intptr_t)ndrp) { 24477c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) 24487c478bd9Sstevel@tonic-gate return (EINTR); 24497c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) { 24507c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 24517c478bd9Sstevel@tonic-gate return (EINTR); 24527c478bd9Sstevel@tonic-gate } 24537c478bd9Sstevel@tonic-gate } else { 24547c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&ndrp->r_rwlock, RW_WRITER, INTR(ndvp))) 24557c478bd9Sstevel@tonic-gate return (EINTR); 24567c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&odrp->r_rwlock, RW_WRITER, INTR(odvp))) { 24577c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 24587c478bd9Sstevel@tonic-gate return (EINTR); 24597c478bd9Sstevel@tonic-gate } 24607c478bd9Sstevel@tonic-gate } 24617c478bd9Sstevel@tonic-gate 24627c478bd9Sstevel@tonic-gate /* 24637c478bd9Sstevel@tonic-gate * Lookup the target file. If it exists, it needs to be 24647c478bd9Sstevel@tonic-gate * checked to see whether it is a mount point and whether 24657c478bd9Sstevel@tonic-gate * it is active (open). 
24667c478bd9Sstevel@tonic-gate */ 24677c478bd9Sstevel@tonic-gate error = nfslookup(ndvp, nnm, &nvp, NULL, 0, NULL, cr, 0); 24687c478bd9Sstevel@tonic-gate if (!error) { 24697c478bd9Sstevel@tonic-gate /* 24707c478bd9Sstevel@tonic-gate * If this file has been mounted on, then just 24717c478bd9Sstevel@tonic-gate * return busy because renaming to it would remove 24727c478bd9Sstevel@tonic-gate * the mounted file system from the name space. 24737c478bd9Sstevel@tonic-gate */ 24747c478bd9Sstevel@tonic-gate if (vn_mountedvfs(nvp) != NULL) { 24757c478bd9Sstevel@tonic-gate VN_RELE(nvp); 24767c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 24777c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 24787c478bd9Sstevel@tonic-gate return (EBUSY); 24797c478bd9Sstevel@tonic-gate } 24807c478bd9Sstevel@tonic-gate 24817c478bd9Sstevel@tonic-gate /* 24827c478bd9Sstevel@tonic-gate * Purge the name cache of all references to this vnode 24837c478bd9Sstevel@tonic-gate * so that we can check the reference count to infer 24847c478bd9Sstevel@tonic-gate * whether it is active or not. 24857c478bd9Sstevel@tonic-gate */ 24867c478bd9Sstevel@tonic-gate /* 24877c478bd9Sstevel@tonic-gate * First just remove the entry from the name cache, as it 24887c478bd9Sstevel@tonic-gate * is most likely the only entry for this vp. 24897c478bd9Sstevel@tonic-gate */ 24907c478bd9Sstevel@tonic-gate dnlc_remove(ndvp, nnm); 24917c478bd9Sstevel@tonic-gate /* 24927c478bd9Sstevel@tonic-gate * If the file has a v_count > 1 then there may be more 24937c478bd9Sstevel@tonic-gate * than one entry in the name cache due multiple links 24947c478bd9Sstevel@tonic-gate * or an open file, but we don't have the real reference 24957c478bd9Sstevel@tonic-gate * count so flush all possible entries. 24967c478bd9Sstevel@tonic-gate */ 24977c478bd9Sstevel@tonic-gate if (nvp->v_count > 1) 24987c478bd9Sstevel@tonic-gate dnlc_purge_vp(nvp); 24997c478bd9Sstevel@tonic-gate 25007c478bd9Sstevel@tonic-gate /* 25017c478bd9Sstevel@tonic-gate * If the vnode is active and is not a directory, 25027c478bd9Sstevel@tonic-gate * arrange to rename it to a 25037c478bd9Sstevel@tonic-gate * temporary file so that it will continue to be 25047c478bd9Sstevel@tonic-gate * accessible. This implements the "unlink-open-file" 25057c478bd9Sstevel@tonic-gate * semantics for the target of a rename operation. 25067c478bd9Sstevel@tonic-gate * Before doing this though, make sure that the 25077c478bd9Sstevel@tonic-gate * source and target files are not already the same. 25087c478bd9Sstevel@tonic-gate */ 25097c478bd9Sstevel@tonic-gate if (nvp->v_count > 1 && nvp->v_type != VDIR) { 25107c478bd9Sstevel@tonic-gate /* 25117c478bd9Sstevel@tonic-gate * Lookup the source name. 25127c478bd9Sstevel@tonic-gate */ 25137c478bd9Sstevel@tonic-gate error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL, 25147c478bd9Sstevel@tonic-gate cr, 0); 25157c478bd9Sstevel@tonic-gate 25167c478bd9Sstevel@tonic-gate /* 25177c478bd9Sstevel@tonic-gate * The source name *should* already exist. 25187c478bd9Sstevel@tonic-gate */ 25197c478bd9Sstevel@tonic-gate if (error) { 25207c478bd9Sstevel@tonic-gate VN_RELE(nvp); 25217c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 25227c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 25237c478bd9Sstevel@tonic-gate return (error); 25247c478bd9Sstevel@tonic-gate } 25257c478bd9Sstevel@tonic-gate 25267c478bd9Sstevel@tonic-gate /* 25277c478bd9Sstevel@tonic-gate * Compare the two vnodes. 
If they are the same, 25287c478bd9Sstevel@tonic-gate * just release all held vnodes and return success. 25297c478bd9Sstevel@tonic-gate */ 25307c478bd9Sstevel@tonic-gate if (ovp == nvp) { 25317c478bd9Sstevel@tonic-gate VN_RELE(ovp); 25327c478bd9Sstevel@tonic-gate VN_RELE(nvp); 25337c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 25347c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 25357c478bd9Sstevel@tonic-gate return (0); 25367c478bd9Sstevel@tonic-gate } 25377c478bd9Sstevel@tonic-gate 25387c478bd9Sstevel@tonic-gate /* 25397c478bd9Sstevel@tonic-gate * Can't mix and match directories and non- 25407c478bd9Sstevel@tonic-gate * directories in rename operations. We already 25417c478bd9Sstevel@tonic-gate * know that the target is not a directory. If 25427c478bd9Sstevel@tonic-gate * the source is a directory, return an error. 25437c478bd9Sstevel@tonic-gate */ 25447c478bd9Sstevel@tonic-gate if (ovp->v_type == VDIR) { 25457c478bd9Sstevel@tonic-gate VN_RELE(ovp); 25467c478bd9Sstevel@tonic-gate VN_RELE(nvp); 25477c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 25487c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 25497c478bd9Sstevel@tonic-gate return (ENOTDIR); 25507c478bd9Sstevel@tonic-gate } 25517c478bd9Sstevel@tonic-gate 25527c478bd9Sstevel@tonic-gate /* 25537c478bd9Sstevel@tonic-gate * The target file exists, is not the same as 25547c478bd9Sstevel@tonic-gate * the source file, and is active. Link it 25557c478bd9Sstevel@tonic-gate * to a temporary filename to avoid having 25567c478bd9Sstevel@tonic-gate * the server removing the file completely. 25577c478bd9Sstevel@tonic-gate */ 25587c478bd9Sstevel@tonic-gate tmpname = newname(); 2559da6c28aaSamw error = nfs_link(ndvp, nvp, tmpname, cr, NULL, 0); 25607c478bd9Sstevel@tonic-gate if (error == EOPNOTSUPP) { 25617c478bd9Sstevel@tonic-gate error = nfs_rename(ndvp, nnm, ndvp, tmpname, 2562da6c28aaSamw cr, NULL, 0); 25637c478bd9Sstevel@tonic-gate } 25647c478bd9Sstevel@tonic-gate if (error) { 25657c478bd9Sstevel@tonic-gate kmem_free(tmpname, MAXNAMELEN); 25667c478bd9Sstevel@tonic-gate VN_RELE(ovp); 25677c478bd9Sstevel@tonic-gate VN_RELE(nvp); 25687c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 25697c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 25707c478bd9Sstevel@tonic-gate return (error); 25717c478bd9Sstevel@tonic-gate } 25727c478bd9Sstevel@tonic-gate rp = VTOR(nvp); 25737c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 25747c478bd9Sstevel@tonic-gate if (rp->r_unldvp == NULL) { 25757c478bd9Sstevel@tonic-gate VN_HOLD(ndvp); 25767c478bd9Sstevel@tonic-gate rp->r_unldvp = ndvp; 25777c478bd9Sstevel@tonic-gate if (rp->r_unlcred != NULL) 25787c478bd9Sstevel@tonic-gate crfree(rp->r_unlcred); 25797c478bd9Sstevel@tonic-gate crhold(cr); 25807c478bd9Sstevel@tonic-gate rp->r_unlcred = cr; 25817c478bd9Sstevel@tonic-gate rp->r_unlname = tmpname; 25827c478bd9Sstevel@tonic-gate } else { 25837c478bd9Sstevel@tonic-gate kmem_free(rp->r_unlname, MAXNAMELEN); 25847c478bd9Sstevel@tonic-gate rp->r_unlname = tmpname; 25857c478bd9Sstevel@tonic-gate } 25867c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 25877c478bd9Sstevel@tonic-gate } 25887c478bd9Sstevel@tonic-gate } 25897c478bd9Sstevel@tonic-gate 25907c478bd9Sstevel@tonic-gate if (ovp == NULL) { 25917c478bd9Sstevel@tonic-gate /* 25927c478bd9Sstevel@tonic-gate * When renaming directories to be a subdirectory of a 25937c478bd9Sstevel@tonic-gate * different parent, the dnlc entry for ".." will no 25947c478bd9Sstevel@tonic-gate * longer be valid, so it must be removed. 
25957c478bd9Sstevel@tonic-gate * 25967c478bd9Sstevel@tonic-gate * We do a lookup here to determine whether we are renaming 25977c478bd9Sstevel@tonic-gate * a directory and we need to check if we are renaming 25987c478bd9Sstevel@tonic-gate * an unlinked file. This might have already been done 25997c478bd9Sstevel@tonic-gate * in previous code, so we check ovp == NULL to avoid 26007c478bd9Sstevel@tonic-gate * doing it twice. 26017c478bd9Sstevel@tonic-gate */ 26027c478bd9Sstevel@tonic-gate 26037c478bd9Sstevel@tonic-gate error = nfslookup(odvp, onm, &ovp, NULL, 0, NULL, cr, 0); 26047c478bd9Sstevel@tonic-gate 26057c478bd9Sstevel@tonic-gate /* 26067c478bd9Sstevel@tonic-gate * The source name *should* already exist. 26077c478bd9Sstevel@tonic-gate */ 26087c478bd9Sstevel@tonic-gate if (error) { 26097c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 26107c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 2611df2381bfSpraks if (nvp) { 2612df2381bfSpraks VN_RELE(nvp); 2613df2381bfSpraks } 26147c478bd9Sstevel@tonic-gate return (error); 26157c478bd9Sstevel@tonic-gate } 26167c478bd9Sstevel@tonic-gate ASSERT(ovp != NULL); 26177c478bd9Sstevel@tonic-gate } 26187c478bd9Sstevel@tonic-gate 26197c478bd9Sstevel@tonic-gate dnlc_remove(odvp, onm); 26207c478bd9Sstevel@tonic-gate dnlc_remove(ndvp, nnm); 26217c478bd9Sstevel@tonic-gate 26227c478bd9Sstevel@tonic-gate setdiropargs(&args.rna_from, onm, odvp); 26237c478bd9Sstevel@tonic-gate setdiropargs(&args.rna_to, nnm, ndvp); 26247c478bd9Sstevel@tonic-gate 26257c478bd9Sstevel@tonic-gate douprintf = 1; 26267c478bd9Sstevel@tonic-gate 26277c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(odvp), RFS_RENAME, 26287c478bd9Sstevel@tonic-gate xdr_rnmargs, (caddr_t)&args, 26297c478bd9Sstevel@tonic-gate xdr_enum, (caddr_t)&status, cr, 26307c478bd9Sstevel@tonic-gate &douprintf, &status, 0, NULL); 26317c478bd9Sstevel@tonic-gate 26327c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(odvp); /* mod time changed */ 26337c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(ndvp); /* mod time changed */ 26347c478bd9Sstevel@tonic-gate 26357c478bd9Sstevel@tonic-gate if (!error) { 26367c478bd9Sstevel@tonic-gate error = geterrno(status); 26377c478bd9Sstevel@tonic-gate if (!error) { 26387c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(odrp)) 26397c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(odvp); 26407c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(ndrp)) 26417c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(ndvp); 26427c478bd9Sstevel@tonic-gate /* 26437c478bd9Sstevel@tonic-gate * when renaming directories to be a subdirectory of a 26447c478bd9Sstevel@tonic-gate * different parent, the dnlc entry for ".." will no 26457c478bd9Sstevel@tonic-gate * longer be valid, so it must be removed 26467c478bd9Sstevel@tonic-gate */ 26477c478bd9Sstevel@tonic-gate rp = VTOR(ovp); 26487c478bd9Sstevel@tonic-gate if (ndvp != odvp) { 26497c478bd9Sstevel@tonic-gate if (ovp->v_type == VDIR) { 26507c478bd9Sstevel@tonic-gate dnlc_remove(ovp, ".."); 26517c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(rp)) 26527c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(ovp); 26537c478bd9Sstevel@tonic-gate } 26547c478bd9Sstevel@tonic-gate } 26557c478bd9Sstevel@tonic-gate 26567c478bd9Sstevel@tonic-gate /* 26577c478bd9Sstevel@tonic-gate * If we are renaming the unlinked file, update the 26587c478bd9Sstevel@tonic-gate * r_unldvp and r_unlname as needed. 
26597c478bd9Sstevel@tonic-gate */ 26607c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 26617c478bd9Sstevel@tonic-gate if (rp->r_unldvp != NULL) { 26627c478bd9Sstevel@tonic-gate if (strcmp(rp->r_unlname, onm) == 0) { 26637c478bd9Sstevel@tonic-gate (void) strncpy(rp->r_unlname, 26647c478bd9Sstevel@tonic-gate nnm, MAXNAMELEN); 26657c478bd9Sstevel@tonic-gate rp->r_unlname[MAXNAMELEN - 1] = '\0'; 26667c478bd9Sstevel@tonic-gate 26677c478bd9Sstevel@tonic-gate if (ndvp != rp->r_unldvp) { 26687c478bd9Sstevel@tonic-gate VN_RELE(rp->r_unldvp); 26697c478bd9Sstevel@tonic-gate rp->r_unldvp = ndvp; 26707c478bd9Sstevel@tonic-gate VN_HOLD(ndvp); 26717c478bd9Sstevel@tonic-gate } 26727c478bd9Sstevel@tonic-gate } 26737c478bd9Sstevel@tonic-gate } 26747c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 26757c478bd9Sstevel@tonic-gate } else { 26767c478bd9Sstevel@tonic-gate /* 26777c478bd9Sstevel@tonic-gate * System V defines rename to return EEXIST, not 26787c478bd9Sstevel@tonic-gate * ENOTEMPTY if the target directory is not empty. 26797c478bd9Sstevel@tonic-gate * Over the wire, the error is NFSERR_ENOTEMPTY 26807c478bd9Sstevel@tonic-gate * which geterrno maps to ENOTEMPTY. 26817c478bd9Sstevel@tonic-gate */ 26827c478bd9Sstevel@tonic-gate if (error == ENOTEMPTY) 26837c478bd9Sstevel@tonic-gate error = EEXIST; 26847c478bd9Sstevel@tonic-gate } 26857c478bd9Sstevel@tonic-gate } 26867c478bd9Sstevel@tonic-gate 2687df2381bfSpraks if (error == 0) { 2688df2381bfSpraks if (nvp) 2689da6c28aaSamw vnevent_rename_dest(nvp, ndvp, nnm, ct); 2690df2381bfSpraks 2691df2381bfSpraks if (odvp != ndvp) 2692da6c28aaSamw vnevent_rename_dest_dir(ndvp, ct); 2693df2381bfSpraks 2694df2381bfSpraks ASSERT(ovp != NULL); 2695da6c28aaSamw vnevent_rename_src(ovp, odvp, onm, ct); 2696df2381bfSpraks } 2697df2381bfSpraks 2698df2381bfSpraks if (nvp) { 2699df2381bfSpraks VN_RELE(nvp); 2700df2381bfSpraks } 27017c478bd9Sstevel@tonic-gate VN_RELE(ovp); 27027c478bd9Sstevel@tonic-gate 27037c478bd9Sstevel@tonic-gate nfs_rw_exit(&odrp->r_rwlock); 27047c478bd9Sstevel@tonic-gate nfs_rw_exit(&ndrp->r_rwlock); 27057c478bd9Sstevel@tonic-gate 27067c478bd9Sstevel@tonic-gate return (error); 27077c478bd9Sstevel@tonic-gate } 27087c478bd9Sstevel@tonic-gate 2709da6c28aaSamw /* ARGSUSED */ 27107c478bd9Sstevel@tonic-gate static int 2711da6c28aaSamw nfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, cred_t *cr, 2712da6c28aaSamw caller_context_t *ct, int flags, vsecattr_t *vsecp) 27137c478bd9Sstevel@tonic-gate { 27147c478bd9Sstevel@tonic-gate int error; 27157c478bd9Sstevel@tonic-gate struct nfscreatargs args; 27167c478bd9Sstevel@tonic-gate struct nfsdiropres dr; 27177c478bd9Sstevel@tonic-gate int douprintf; 27187c478bd9Sstevel@tonic-gate rnode_t *drp; 27197c478bd9Sstevel@tonic-gate hrtime_t t; 27207c478bd9Sstevel@tonic-gate 2721108322fbScarlsonj if (nfs_zone() != VTOMI(dvp)->mi_zone) 27227c478bd9Sstevel@tonic-gate return (EPERM); 27237c478bd9Sstevel@tonic-gate 27247c478bd9Sstevel@tonic-gate setdiropargs(&args.ca_da, nm, dvp); 27257c478bd9Sstevel@tonic-gate 27267c478bd9Sstevel@tonic-gate /* 27277c478bd9Sstevel@tonic-gate * Decide what the group-id and set-gid bit of the created directory 27287c478bd9Sstevel@tonic-gate * should be. May have to do a setattr to get the gid right. 
27297c478bd9Sstevel@tonic-gate */ 27307c478bd9Sstevel@tonic-gate error = setdirgid(dvp, &va->va_gid, cr); 27317c478bd9Sstevel@tonic-gate if (error) 27327c478bd9Sstevel@tonic-gate return (error); 27337c478bd9Sstevel@tonic-gate error = setdirmode(dvp, &va->va_mode, cr); 27347c478bd9Sstevel@tonic-gate if (error) 27357c478bd9Sstevel@tonic-gate return (error); 27367c478bd9Sstevel@tonic-gate va->va_mask |= AT_MODE|AT_GID; 27377c478bd9Sstevel@tonic-gate 27387c478bd9Sstevel@tonic-gate args.ca_sa = &args.ca_sa_buf; 27397c478bd9Sstevel@tonic-gate error = vattr_to_sattr(va, args.ca_sa); 27407c478bd9Sstevel@tonic-gate if (error) { 27417c478bd9Sstevel@tonic-gate /* req time field(s) overflow - return immediately */ 27427c478bd9Sstevel@tonic-gate return (error); 27437c478bd9Sstevel@tonic-gate } 27447c478bd9Sstevel@tonic-gate 27457c478bd9Sstevel@tonic-gate drp = VTOR(dvp); 27467c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp))) 27477c478bd9Sstevel@tonic-gate return (EINTR); 27487c478bd9Sstevel@tonic-gate 27497c478bd9Sstevel@tonic-gate dnlc_remove(dvp, nm); 27507c478bd9Sstevel@tonic-gate 27517c478bd9Sstevel@tonic-gate douprintf = 1; 27527c478bd9Sstevel@tonic-gate 27537c478bd9Sstevel@tonic-gate t = gethrtime(); 27547c478bd9Sstevel@tonic-gate 27557c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(dvp), RFS_MKDIR, 27567c478bd9Sstevel@tonic-gate xdr_creatargs, (caddr_t)&args, 27577c478bd9Sstevel@tonic-gate xdr_diropres, (caddr_t)&dr, cr, 27587c478bd9Sstevel@tonic-gate &douprintf, &dr.dr_status, 0, NULL); 27597c478bd9Sstevel@tonic-gate 27607c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(dvp); /* mod time changed */ 27617c478bd9Sstevel@tonic-gate 27627c478bd9Sstevel@tonic-gate if (!error) { 27637c478bd9Sstevel@tonic-gate error = geterrno(dr.dr_status); 27647c478bd9Sstevel@tonic-gate if (!error) { 27657c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(drp)) 27667c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(dvp); 27677c478bd9Sstevel@tonic-gate /* 27687c478bd9Sstevel@tonic-gate * The attributes returned by RFS_MKDIR can not 27697c478bd9Sstevel@tonic-gate * be depended upon, so mark the attribute cache 27707c478bd9Sstevel@tonic-gate * as purged. A subsequent GETATTR will get the 27717c478bd9Sstevel@tonic-gate * correct attributes from the server. 27727c478bd9Sstevel@tonic-gate */ 27737c478bd9Sstevel@tonic-gate *vpp = makenfsnode(&dr.dr_fhandle, &dr.dr_attr, 27747c478bd9Sstevel@tonic-gate dvp->v_vfsp, t, cr, NULL, NULL); 27757c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(*vpp); 27767c478bd9Sstevel@tonic-gate dnlc_update(dvp, nm, *vpp); 27777c478bd9Sstevel@tonic-gate 27787c478bd9Sstevel@tonic-gate /* 27797c478bd9Sstevel@tonic-gate * Make sure the gid was set correctly. 27807c478bd9Sstevel@tonic-gate * If not, try to set it (but don't lose 27817c478bd9Sstevel@tonic-gate * any sleep over it). 
27827c478bd9Sstevel@tonic-gate 			 */
27837c478bd9Sstevel@tonic-gate 			if (va->va_gid != VTOR(*vpp)->r_attr.va_gid) {
27847c478bd9Sstevel@tonic-gate 				va->va_mask = AT_GID;
27857c478bd9Sstevel@tonic-gate 				(void) nfssetattr(*vpp, va, 0, cr);
27867c478bd9Sstevel@tonic-gate 			}
27877c478bd9Sstevel@tonic-gate 		} else {
27887c478bd9Sstevel@tonic-gate 			PURGE_STALE_FH(error, dvp, cr);
27897c478bd9Sstevel@tonic-gate 		}
27907c478bd9Sstevel@tonic-gate 	}
27917c478bd9Sstevel@tonic-gate 
27927c478bd9Sstevel@tonic-gate 	nfs_rw_exit(&drp->r_rwlock);
27937c478bd9Sstevel@tonic-gate 
27947c478bd9Sstevel@tonic-gate 	return (error);
27957c478bd9Sstevel@tonic-gate }
27967c478bd9Sstevel@tonic-gate 
2797da6c28aaSamw /* ARGSUSED */
27987c478bd9Sstevel@tonic-gate static int
2799da6c28aaSamw nfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
2800da6c28aaSamw 	caller_context_t *ct, int flags)
28017c478bd9Sstevel@tonic-gate {
28027c478bd9Sstevel@tonic-gate 	int error;
28037c478bd9Sstevel@tonic-gate 	enum nfsstat status;
28047c478bd9Sstevel@tonic-gate 	struct nfsdiropargs da;
28057c478bd9Sstevel@tonic-gate 	vnode_t *vp;
28067c478bd9Sstevel@tonic-gate 	int douprintf;
28077c478bd9Sstevel@tonic-gate 	rnode_t *drp;
28087c478bd9Sstevel@tonic-gate 
2809108322fbScarlsonj 	if (nfs_zone() != VTOMI(dvp)->mi_zone)
28107c478bd9Sstevel@tonic-gate 		return (EPERM);
28117c478bd9Sstevel@tonic-gate 	drp = VTOR(dvp);
28127c478bd9Sstevel@tonic-gate 	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp)))
28137c478bd9Sstevel@tonic-gate 		return (EINTR);
28147c478bd9Sstevel@tonic-gate 
28157c478bd9Sstevel@tonic-gate 	/*
28167c478bd9Sstevel@tonic-gate 	 * Attempt to prevent a rmdir(".") from succeeding.
28177c478bd9Sstevel@tonic-gate 	 */
28187c478bd9Sstevel@tonic-gate 	error = nfslookup(dvp, nm, &vp, NULL, 0, NULL, cr, 0);
28197c478bd9Sstevel@tonic-gate 	if (error) {
28207c478bd9Sstevel@tonic-gate 		nfs_rw_exit(&drp->r_rwlock);
28217c478bd9Sstevel@tonic-gate 		return (error);
28227c478bd9Sstevel@tonic-gate 	}
28237c478bd9Sstevel@tonic-gate 
28247c478bd9Sstevel@tonic-gate 	if (vp == cdir) {
28257c478bd9Sstevel@tonic-gate 		VN_RELE(vp);
28267c478bd9Sstevel@tonic-gate 		nfs_rw_exit(&drp->r_rwlock);
28277c478bd9Sstevel@tonic-gate 		return (EINVAL);
28287c478bd9Sstevel@tonic-gate 	}
28297c478bd9Sstevel@tonic-gate 
28307c478bd9Sstevel@tonic-gate 	setdiropargs(&da, nm, dvp);
28317c478bd9Sstevel@tonic-gate 
28327c478bd9Sstevel@tonic-gate 	/*
28337c478bd9Sstevel@tonic-gate 	 * First just remove the entry from the name cache, as it
28347c478bd9Sstevel@tonic-gate 	 * is most likely an entry for this vp.
28357c478bd9Sstevel@tonic-gate 	 */
28367c478bd9Sstevel@tonic-gate 	dnlc_remove(dvp, nm);
28377c478bd9Sstevel@tonic-gate 
28387c478bd9Sstevel@tonic-gate 	/*
28397c478bd9Sstevel@tonic-gate 	 * If the vnode reference count is greater than one, then
28407c478bd9Sstevel@tonic-gate 	 * there may be additional references in the DNLC which will
28417c478bd9Sstevel@tonic-gate 	 * need to be purged.  First, try removing the entry for
28427c478bd9Sstevel@tonic-gate 	 * the parent directory and see if that removes the additional
28437c478bd9Sstevel@tonic-gate 	 * reference(s).  If that doesn't do it, then use dnlc_purge_vp
28447c478bd9Sstevel@tonic-gate 	 * to completely remove any references to the directory which
28457c478bd9Sstevel@tonic-gate 	 * might still exist in the DNLC.
28467c478bd9Sstevel@tonic-gate */ 28477c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 28487c478bd9Sstevel@tonic-gate dnlc_remove(vp, ".."); 28497c478bd9Sstevel@tonic-gate if (vp->v_count > 1) 28507c478bd9Sstevel@tonic-gate dnlc_purge_vp(vp); 28517c478bd9Sstevel@tonic-gate } 28527c478bd9Sstevel@tonic-gate 28537c478bd9Sstevel@tonic-gate douprintf = 1; 28547c478bd9Sstevel@tonic-gate 28557c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(dvp), RFS_RMDIR, 28567c478bd9Sstevel@tonic-gate xdr_diropargs, (caddr_t)&da, 28577c478bd9Sstevel@tonic-gate xdr_enum, (caddr_t)&status, cr, 28587c478bd9Sstevel@tonic-gate &douprintf, &status, 0, NULL); 28597c478bd9Sstevel@tonic-gate 28607c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(dvp); /* mod time changed */ 28617c478bd9Sstevel@tonic-gate 28627c478bd9Sstevel@tonic-gate if (error) { 28637c478bd9Sstevel@tonic-gate VN_RELE(vp); 28647c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 28657c478bd9Sstevel@tonic-gate return (error); 28667c478bd9Sstevel@tonic-gate } 28677c478bd9Sstevel@tonic-gate 28687c478bd9Sstevel@tonic-gate error = geterrno(status); 28697c478bd9Sstevel@tonic-gate if (!error) { 28707c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(drp)) 28717c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(dvp); 28727c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(VTOR(vp))) 28737c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(vp); 28747c478bd9Sstevel@tonic-gate } else { 28757c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, dvp, cr); 28767c478bd9Sstevel@tonic-gate /* 28777c478bd9Sstevel@tonic-gate * System V defines rmdir to return EEXIST, not 28787c478bd9Sstevel@tonic-gate * ENOTEMPTY if the directory is not empty. Over 28797c478bd9Sstevel@tonic-gate * the wire, the error is NFSERR_ENOTEMPTY which 28807c478bd9Sstevel@tonic-gate * geterrno maps to ENOTEMPTY. 
28817c478bd9Sstevel@tonic-gate */ 28827c478bd9Sstevel@tonic-gate if (error == ENOTEMPTY) 28837c478bd9Sstevel@tonic-gate error = EEXIST; 28847c478bd9Sstevel@tonic-gate } 28857c478bd9Sstevel@tonic-gate 2886df2381bfSpraks if (error == 0) { 2887da6c28aaSamw vnevent_rmdir(vp, dvp, nm, ct); 2888df2381bfSpraks } 28897c478bd9Sstevel@tonic-gate VN_RELE(vp); 28907c478bd9Sstevel@tonic-gate 28917c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 28927c478bd9Sstevel@tonic-gate 28937c478bd9Sstevel@tonic-gate return (error); 28947c478bd9Sstevel@tonic-gate } 28957c478bd9Sstevel@tonic-gate 2896da6c28aaSamw /* ARGSUSED */ 28977c478bd9Sstevel@tonic-gate static int 2898da6c28aaSamw nfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr, 2899da6c28aaSamw caller_context_t *ct, int flags) 29007c478bd9Sstevel@tonic-gate { 29017c478bd9Sstevel@tonic-gate int error; 29027c478bd9Sstevel@tonic-gate struct nfsslargs args; 29037c478bd9Sstevel@tonic-gate enum nfsstat status; 29047c478bd9Sstevel@tonic-gate int douprintf; 29057c478bd9Sstevel@tonic-gate rnode_t *drp; 29067c478bd9Sstevel@tonic-gate 2907108322fbScarlsonj if (nfs_zone() != VTOMI(dvp)->mi_zone) 29087c478bd9Sstevel@tonic-gate return (EPERM); 29097c478bd9Sstevel@tonic-gate setdiropargs(&args.sla_from, lnm, dvp); 29107c478bd9Sstevel@tonic-gate args.sla_sa = &args.sla_sa_buf; 29117c478bd9Sstevel@tonic-gate error = vattr_to_sattr(tva, args.sla_sa); 29127c478bd9Sstevel@tonic-gate if (error) { 29137c478bd9Sstevel@tonic-gate /* req time field(s) overflow - return immediately */ 29147c478bd9Sstevel@tonic-gate return (error); 29157c478bd9Sstevel@tonic-gate } 29167c478bd9Sstevel@tonic-gate args.sla_tnm = tnm; 29177c478bd9Sstevel@tonic-gate 29187c478bd9Sstevel@tonic-gate drp = VTOR(dvp); 29197c478bd9Sstevel@tonic-gate if (nfs_rw_enter_sig(&drp->r_rwlock, RW_WRITER, INTR(dvp))) 29207c478bd9Sstevel@tonic-gate return (EINTR); 29217c478bd9Sstevel@tonic-gate 29227c478bd9Sstevel@tonic-gate dnlc_remove(dvp, lnm); 29237c478bd9Sstevel@tonic-gate 29247c478bd9Sstevel@tonic-gate douprintf = 1; 29257c478bd9Sstevel@tonic-gate 29267c478bd9Sstevel@tonic-gate error = rfs2call(VTOMI(dvp), RFS_SYMLINK, 29277c478bd9Sstevel@tonic-gate xdr_slargs, (caddr_t)&args, 29287c478bd9Sstevel@tonic-gate xdr_enum, (caddr_t)&status, cr, 29297c478bd9Sstevel@tonic-gate &douprintf, &status, 0, NULL); 29307c478bd9Sstevel@tonic-gate 29317c478bd9Sstevel@tonic-gate PURGE_ATTRCACHE(dvp); /* mod time changed */ 29327c478bd9Sstevel@tonic-gate 29337c478bd9Sstevel@tonic-gate if (!error) { 29347c478bd9Sstevel@tonic-gate error = geterrno(status); 29357c478bd9Sstevel@tonic-gate if (!error) { 29367c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(drp)) 29377c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(dvp); 29387c478bd9Sstevel@tonic-gate } else { 29397c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, dvp, cr); 29407c478bd9Sstevel@tonic-gate } 29417c478bd9Sstevel@tonic-gate } 29427c478bd9Sstevel@tonic-gate 29437c478bd9Sstevel@tonic-gate nfs_rw_exit(&drp->r_rwlock); 29447c478bd9Sstevel@tonic-gate 29457c478bd9Sstevel@tonic-gate return (error); 29467c478bd9Sstevel@tonic-gate } 29477c478bd9Sstevel@tonic-gate 29487c478bd9Sstevel@tonic-gate #ifdef DEBUG 29497c478bd9Sstevel@tonic-gate static int nfs_readdir_cache_hits = 0; 29507c478bd9Sstevel@tonic-gate static int nfs_readdir_cache_shorts = 0; 29517c478bd9Sstevel@tonic-gate static int nfs_readdir_cache_waits = 0; 29527c478bd9Sstevel@tonic-gate static int nfs_readdir_cache_misses = 0; 29537c478bd9Sstevel@tonic-gate static int nfs_readdir_readahead = 0; 
29547c478bd9Sstevel@tonic-gate #endif 29557c478bd9Sstevel@tonic-gate 29567c478bd9Sstevel@tonic-gate static int nfs_shrinkreaddir = 0; 29577c478bd9Sstevel@tonic-gate 29587c478bd9Sstevel@tonic-gate /* 29597c478bd9Sstevel@tonic-gate * Read directory entries. 29607c478bd9Sstevel@tonic-gate * There are some weird things to look out for here. The uio_offset 29617c478bd9Sstevel@tonic-gate * field is either 0 or it is the offset returned from a previous 29627c478bd9Sstevel@tonic-gate * readdir. It is an opaque value used by the server to find the 29637c478bd9Sstevel@tonic-gate * correct directory block to read. The count field is the number 29647c478bd9Sstevel@tonic-gate * of blocks to read on the server. This is advisory only, the server 29657c478bd9Sstevel@tonic-gate * may return only one block's worth of entries. Entries may be compressed 29667c478bd9Sstevel@tonic-gate * on the server. 29677c478bd9Sstevel@tonic-gate */ 2968da6c28aaSamw /* ARGSUSED */ 29697c478bd9Sstevel@tonic-gate static int 2970da6c28aaSamw nfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp, 2971da6c28aaSamw caller_context_t *ct, int flags) 29727c478bd9Sstevel@tonic-gate { 29737c478bd9Sstevel@tonic-gate int error; 29747c478bd9Sstevel@tonic-gate size_t count; 29757c478bd9Sstevel@tonic-gate rnode_t *rp; 29767c478bd9Sstevel@tonic-gate rddir_cache *rdc; 29777c478bd9Sstevel@tonic-gate rddir_cache *nrdc; 29787c478bd9Sstevel@tonic-gate rddir_cache *rrdc; 29797c478bd9Sstevel@tonic-gate #ifdef DEBUG 29807c478bd9Sstevel@tonic-gate int missed; 29817c478bd9Sstevel@tonic-gate #endif 29827c478bd9Sstevel@tonic-gate rddir_cache srdc; 29837c478bd9Sstevel@tonic-gate avl_index_t where; 29847c478bd9Sstevel@tonic-gate 29857c478bd9Sstevel@tonic-gate rp = VTOR(vp); 29867c478bd9Sstevel@tonic-gate 29877c478bd9Sstevel@tonic-gate ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_READER)); 2988108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) 29897c478bd9Sstevel@tonic-gate return (EIO); 29907c478bd9Sstevel@tonic-gate /* 29917c478bd9Sstevel@tonic-gate * Make sure that the directory cache is valid. 29927c478bd9Sstevel@tonic-gate */ 29937c478bd9Sstevel@tonic-gate if (HAVE_RDDIR_CACHE(rp)) { 29947c478bd9Sstevel@tonic-gate if (nfs_disable_rddir_cache) { 29957c478bd9Sstevel@tonic-gate /* 29967c478bd9Sstevel@tonic-gate * Setting nfs_disable_rddir_cache in /etc/system 29977c478bd9Sstevel@tonic-gate * allows interoperability with servers that do not 29987c478bd9Sstevel@tonic-gate * properly update the attributes of directories. 29997c478bd9Sstevel@tonic-gate * Any cached information gets purged before an 30007c478bd9Sstevel@tonic-gate * access is made to it. 30017c478bd9Sstevel@tonic-gate */ 30027c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(vp); 30037c478bd9Sstevel@tonic-gate } else { 30047c478bd9Sstevel@tonic-gate error = nfs_validate_caches(vp, cr); 30057c478bd9Sstevel@tonic-gate if (error) 30067c478bd9Sstevel@tonic-gate return (error); 30077c478bd9Sstevel@tonic-gate } 30087c478bd9Sstevel@tonic-gate } 30097c478bd9Sstevel@tonic-gate 30107c478bd9Sstevel@tonic-gate /* 30117c478bd9Sstevel@tonic-gate * UGLINESS: SunOS 3.2 servers apparently cannot always handle an 30127c478bd9Sstevel@tonic-gate * RFS_READDIR request with rda_count set to more than 0x400. So 30137c478bd9Sstevel@tonic-gate * we reduce the request size here purely for compatibility. 30147c478bd9Sstevel@tonic-gate * 30157c478bd9Sstevel@tonic-gate * In general, this is no longer required. 
However, if a server 30167c478bd9Sstevel@tonic-gate * is discovered which can not handle requests larger than 1024, 30177c478bd9Sstevel@tonic-gate * nfs_shrinkreaddir can be set to 1 to enable this backwards 30187c478bd9Sstevel@tonic-gate * compatibility. 30197c478bd9Sstevel@tonic-gate * 30207c478bd9Sstevel@tonic-gate * In any case, the request size is limited to NFS_MAXDATA bytes. 30217c478bd9Sstevel@tonic-gate */ 30227c478bd9Sstevel@tonic-gate count = MIN(uiop->uio_iov->iov_len, 30237c478bd9Sstevel@tonic-gate nfs_shrinkreaddir ? 0x400 : NFS_MAXDATA); 30247c478bd9Sstevel@tonic-gate 30257c478bd9Sstevel@tonic-gate nrdc = NULL; 30267c478bd9Sstevel@tonic-gate #ifdef DEBUG 30277c478bd9Sstevel@tonic-gate missed = 0; 30287c478bd9Sstevel@tonic-gate #endif 30297c478bd9Sstevel@tonic-gate top: 30307c478bd9Sstevel@tonic-gate /* 30317c478bd9Sstevel@tonic-gate * Short circuit last readdir which always returns 0 bytes. 30327c478bd9Sstevel@tonic-gate * This can be done after the directory has been read through 30337c478bd9Sstevel@tonic-gate * completely at least once. This will set r_direof which 30347c478bd9Sstevel@tonic-gate * can be used to find the value of the last cookie. 30357c478bd9Sstevel@tonic-gate */ 30367c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 30377c478bd9Sstevel@tonic-gate if (rp->r_direof != NULL && 30387c478bd9Sstevel@tonic-gate uiop->uio_offset == rp->r_direof->nfs_ncookie) { 30397c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 30407c478bd9Sstevel@tonic-gate #ifdef DEBUG 30417c478bd9Sstevel@tonic-gate nfs_readdir_cache_shorts++; 30427c478bd9Sstevel@tonic-gate #endif 30437c478bd9Sstevel@tonic-gate if (eofp) 30447c478bd9Sstevel@tonic-gate *eofp = 1; 30457c478bd9Sstevel@tonic-gate if (nrdc != NULL) 30467c478bd9Sstevel@tonic-gate rddir_cache_rele(nrdc); 30477c478bd9Sstevel@tonic-gate return (0); 30487c478bd9Sstevel@tonic-gate } 30497c478bd9Sstevel@tonic-gate /* 30507c478bd9Sstevel@tonic-gate * Look for a cache entry. Cache entries are identified 30517c478bd9Sstevel@tonic-gate * by the NFS cookie value and the byte count requested. 30527c478bd9Sstevel@tonic-gate */ 30537c478bd9Sstevel@tonic-gate srdc.nfs_cookie = uiop->uio_offset; 30547c478bd9Sstevel@tonic-gate srdc.buflen = count; 30557c478bd9Sstevel@tonic-gate rdc = avl_find(&rp->r_dir, &srdc, &where); 30567c478bd9Sstevel@tonic-gate if (rdc != NULL) { 30577c478bd9Sstevel@tonic-gate rddir_cache_hold(rdc); 30587c478bd9Sstevel@tonic-gate /* 30597c478bd9Sstevel@tonic-gate * If the cache entry is in the process of being 30607c478bd9Sstevel@tonic-gate * filled in, wait until this completes. The 30617c478bd9Sstevel@tonic-gate * RDDIRWAIT bit is set to indicate that someone 30627c478bd9Sstevel@tonic-gate * is waiting and then the thread currently 30637c478bd9Sstevel@tonic-gate * filling the entry is done, it should do a 30647c478bd9Sstevel@tonic-gate * cv_broadcast to wakeup all of the threads 30657c478bd9Sstevel@tonic-gate * waiting for it to finish. 
30667c478bd9Sstevel@tonic-gate */ 30677c478bd9Sstevel@tonic-gate if (rdc->flags & RDDIR) { 30687c478bd9Sstevel@tonic-gate nfs_rw_exit(&rp->r_rwlock); 30697c478bd9Sstevel@tonic-gate rdc->flags |= RDDIRWAIT; 30707c478bd9Sstevel@tonic-gate #ifdef DEBUG 30717c478bd9Sstevel@tonic-gate nfs_readdir_cache_waits++; 30727c478bd9Sstevel@tonic-gate #endif 30737c478bd9Sstevel@tonic-gate if (!cv_wait_sig(&rdc->cv, &rp->r_statelock)) { 30747c478bd9Sstevel@tonic-gate /* 30757c478bd9Sstevel@tonic-gate * We got interrupted, probably 30767c478bd9Sstevel@tonic-gate * the user typed ^C or an alarm 30777c478bd9Sstevel@tonic-gate * fired. We free the new entry 30787c478bd9Sstevel@tonic-gate * if we allocated one. 30797c478bd9Sstevel@tonic-gate */ 30807c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 30817c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&rp->r_rwlock, 30827c478bd9Sstevel@tonic-gate RW_READER, FALSE); 30837c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 30847c478bd9Sstevel@tonic-gate if (nrdc != NULL) 30857c478bd9Sstevel@tonic-gate rddir_cache_rele(nrdc); 30867c478bd9Sstevel@tonic-gate return (EINTR); 30877c478bd9Sstevel@tonic-gate } 30887c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 30897c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&rp->r_rwlock, 30907c478bd9Sstevel@tonic-gate RW_READER, FALSE); 30917c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 30927c478bd9Sstevel@tonic-gate goto top; 30937c478bd9Sstevel@tonic-gate } 30947c478bd9Sstevel@tonic-gate /* 30957c478bd9Sstevel@tonic-gate * Check to see if a readdir is required to 30967c478bd9Sstevel@tonic-gate * fill the entry. If so, mark this entry 30977c478bd9Sstevel@tonic-gate * as being filled, remove our reference, 30987c478bd9Sstevel@tonic-gate * and branch to the code to fill the entry. 30997c478bd9Sstevel@tonic-gate */ 31007c478bd9Sstevel@tonic-gate if (rdc->flags & RDDIRREQ) { 31017c478bd9Sstevel@tonic-gate rdc->flags &= ~RDDIRREQ; 31027c478bd9Sstevel@tonic-gate rdc->flags |= RDDIR; 31037c478bd9Sstevel@tonic-gate if (nrdc != NULL) 31047c478bd9Sstevel@tonic-gate rddir_cache_rele(nrdc); 31057c478bd9Sstevel@tonic-gate nrdc = rdc; 31067c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 31077c478bd9Sstevel@tonic-gate goto bottom; 31087c478bd9Sstevel@tonic-gate } 31097c478bd9Sstevel@tonic-gate #ifdef DEBUG 31107c478bd9Sstevel@tonic-gate if (!missed) 31117c478bd9Sstevel@tonic-gate nfs_readdir_cache_hits++; 31127c478bd9Sstevel@tonic-gate #endif 31137c478bd9Sstevel@tonic-gate /* 31147c478bd9Sstevel@tonic-gate * If an error occurred while attempting 31157c478bd9Sstevel@tonic-gate * to fill the cache entry, just return it. 31167c478bd9Sstevel@tonic-gate */ 31177c478bd9Sstevel@tonic-gate if (rdc->error) { 31187c478bd9Sstevel@tonic-gate error = rdc->error; 31197c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 31207c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 31217c478bd9Sstevel@tonic-gate if (nrdc != NULL) 31227c478bd9Sstevel@tonic-gate rddir_cache_rele(nrdc); 31237c478bd9Sstevel@tonic-gate return (error); 31247c478bd9Sstevel@tonic-gate } 31257c478bd9Sstevel@tonic-gate 31267c478bd9Sstevel@tonic-gate /* 31277c478bd9Sstevel@tonic-gate * The cache entry is complete and good, 31287c478bd9Sstevel@tonic-gate * copyout the dirent structs to the calling 31297c478bd9Sstevel@tonic-gate * thread. 
31307c478bd9Sstevel@tonic-gate */ 31317c478bd9Sstevel@tonic-gate error = uiomove(rdc->entries, rdc->entlen, UIO_READ, uiop); 31327c478bd9Sstevel@tonic-gate 31337c478bd9Sstevel@tonic-gate /* 31347c478bd9Sstevel@tonic-gate * If no error occurred during the copyout, 31357c478bd9Sstevel@tonic-gate * update the offset in the uio struct to 31367c478bd9Sstevel@tonic-gate * contain the value of the next cookie 31377c478bd9Sstevel@tonic-gate * and set the eof value appropriately. 31387c478bd9Sstevel@tonic-gate */ 31397c478bd9Sstevel@tonic-gate if (!error) { 31407c478bd9Sstevel@tonic-gate uiop->uio_offset = rdc->nfs_ncookie; 31417c478bd9Sstevel@tonic-gate if (eofp) 31427c478bd9Sstevel@tonic-gate *eofp = rdc->eof; 31437c478bd9Sstevel@tonic-gate } 31447c478bd9Sstevel@tonic-gate 31457c478bd9Sstevel@tonic-gate /* 31467c478bd9Sstevel@tonic-gate * Decide whether to do readahead. Don't if 31477c478bd9Sstevel@tonic-gate * have already read to the end of directory. 31487c478bd9Sstevel@tonic-gate */ 31497c478bd9Sstevel@tonic-gate if (rdc->eof) { 31507c478bd9Sstevel@tonic-gate rp->r_direof = rdc; 31517c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 31527c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 31537c478bd9Sstevel@tonic-gate if (nrdc != NULL) 31547c478bd9Sstevel@tonic-gate rddir_cache_rele(nrdc); 31557c478bd9Sstevel@tonic-gate return (error); 31567c478bd9Sstevel@tonic-gate } 31577c478bd9Sstevel@tonic-gate 31587c478bd9Sstevel@tonic-gate /* 31597c478bd9Sstevel@tonic-gate * Check to see whether we found an entry 31607c478bd9Sstevel@tonic-gate * for the readahead. If so, we don't need 31617c478bd9Sstevel@tonic-gate * to do anything further, so free the new 31627c478bd9Sstevel@tonic-gate * entry if one was allocated. Otherwise, 31637c478bd9Sstevel@tonic-gate * allocate a new entry, add it to the cache, 31647c478bd9Sstevel@tonic-gate * and then initiate an asynchronous readdir 31657c478bd9Sstevel@tonic-gate * operation to fill it. 
31667c478bd9Sstevel@tonic-gate */ 31677c478bd9Sstevel@tonic-gate srdc.nfs_cookie = rdc->nfs_ncookie; 31687c478bd9Sstevel@tonic-gate srdc.buflen = count; 31697c478bd9Sstevel@tonic-gate rrdc = avl_find(&rp->r_dir, &srdc, &where); 31707c478bd9Sstevel@tonic-gate if (rrdc != NULL) { 31717c478bd9Sstevel@tonic-gate if (nrdc != NULL) 31727c478bd9Sstevel@tonic-gate rddir_cache_rele(nrdc); 31737c478bd9Sstevel@tonic-gate } else { 31747c478bd9Sstevel@tonic-gate if (nrdc != NULL) 31757c478bd9Sstevel@tonic-gate rrdc = nrdc; 31767c478bd9Sstevel@tonic-gate else { 31777c478bd9Sstevel@tonic-gate rrdc = rddir_cache_alloc(KM_NOSLEEP); 31787c478bd9Sstevel@tonic-gate } 31797c478bd9Sstevel@tonic-gate if (rrdc != NULL) { 31807c478bd9Sstevel@tonic-gate rrdc->nfs_cookie = rdc->nfs_ncookie; 31817c478bd9Sstevel@tonic-gate rrdc->buflen = count; 31827c478bd9Sstevel@tonic-gate avl_insert(&rp->r_dir, rrdc, where); 31837c478bd9Sstevel@tonic-gate rddir_cache_hold(rrdc); 31847c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 31857c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 31867c478bd9Sstevel@tonic-gate #ifdef DEBUG 31877c478bd9Sstevel@tonic-gate nfs_readdir_readahead++; 31887c478bd9Sstevel@tonic-gate #endif 31897c478bd9Sstevel@tonic-gate nfs_async_readdir(vp, rrdc, cr, nfsreaddir); 31907c478bd9Sstevel@tonic-gate return (error); 31917c478bd9Sstevel@tonic-gate } 31927c478bd9Sstevel@tonic-gate } 31937c478bd9Sstevel@tonic-gate 31947c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 31957c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 31967c478bd9Sstevel@tonic-gate return (error); 31977c478bd9Sstevel@tonic-gate } 31987c478bd9Sstevel@tonic-gate 31997c478bd9Sstevel@tonic-gate /* 32007c478bd9Sstevel@tonic-gate * Didn't find an entry in the cache. Construct a new empty 32017c478bd9Sstevel@tonic-gate * entry and link it into the cache. Other processes attempting 32027c478bd9Sstevel@tonic-gate * to access this entry will need to wait until it is filled in. 32037c478bd9Sstevel@tonic-gate * 32047c478bd9Sstevel@tonic-gate * Since kmem_alloc may block, another pass through the cache 32057c478bd9Sstevel@tonic-gate * will need to be taken to make sure that another process 32067c478bd9Sstevel@tonic-gate * hasn't already added an entry to the cache for this request. 32077c478bd9Sstevel@tonic-gate */ 32087c478bd9Sstevel@tonic-gate if (nrdc == NULL) { 32097c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 32107c478bd9Sstevel@tonic-gate nrdc = rddir_cache_alloc(KM_SLEEP); 32117c478bd9Sstevel@tonic-gate nrdc->nfs_cookie = uiop->uio_offset; 32127c478bd9Sstevel@tonic-gate nrdc->buflen = count; 32137c478bd9Sstevel@tonic-gate goto top; 32147c478bd9Sstevel@tonic-gate } 32157c478bd9Sstevel@tonic-gate 32167c478bd9Sstevel@tonic-gate /* 32177c478bd9Sstevel@tonic-gate * Add this entry to the cache. 32187c478bd9Sstevel@tonic-gate */ 32197c478bd9Sstevel@tonic-gate avl_insert(&rp->r_dir, nrdc, where); 32207c478bd9Sstevel@tonic-gate rddir_cache_hold(nrdc); 32217c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 32227c478bd9Sstevel@tonic-gate 32237c478bd9Sstevel@tonic-gate bottom: 32247c478bd9Sstevel@tonic-gate #ifdef DEBUG 32257c478bd9Sstevel@tonic-gate missed = 1; 32267c478bd9Sstevel@tonic-gate nfs_readdir_cache_misses++; 32277c478bd9Sstevel@tonic-gate #endif 32287c478bd9Sstevel@tonic-gate /* 32297c478bd9Sstevel@tonic-gate * Do the readdir. 
32307c478bd9Sstevel@tonic-gate */ 32317c478bd9Sstevel@tonic-gate error = nfsreaddir(vp, nrdc, cr); 32327c478bd9Sstevel@tonic-gate 32337c478bd9Sstevel@tonic-gate /* 32347c478bd9Sstevel@tonic-gate * If this operation failed, just return the error which occurred. 32357c478bd9Sstevel@tonic-gate */ 32367c478bd9Sstevel@tonic-gate if (error != 0) 32377c478bd9Sstevel@tonic-gate return (error); 32387c478bd9Sstevel@tonic-gate 32397c478bd9Sstevel@tonic-gate /* 32407c478bd9Sstevel@tonic-gate * Since the RPC operation will have taken sometime and blocked 32417c478bd9Sstevel@tonic-gate * this process, another pass through the cache will need to be 32427c478bd9Sstevel@tonic-gate * taken to find the correct cache entry. It is possible that 32437c478bd9Sstevel@tonic-gate * the correct cache entry will not be there (although one was 32447c478bd9Sstevel@tonic-gate * added) because the directory changed during the RPC operation 32457c478bd9Sstevel@tonic-gate * and the readdir cache was flushed. In this case, just start 32467c478bd9Sstevel@tonic-gate * over. It is hoped that this will not happen too often... :-) 32477c478bd9Sstevel@tonic-gate */ 32487c478bd9Sstevel@tonic-gate nrdc = NULL; 32497c478bd9Sstevel@tonic-gate goto top; 32507c478bd9Sstevel@tonic-gate /* NOTREACHED */ 32517c478bd9Sstevel@tonic-gate } 32527c478bd9Sstevel@tonic-gate 32537c478bd9Sstevel@tonic-gate static int 32547c478bd9Sstevel@tonic-gate nfsreaddir(vnode_t *vp, rddir_cache *rdc, cred_t *cr) 32557c478bd9Sstevel@tonic-gate { 32567c478bd9Sstevel@tonic-gate int error; 32577c478bd9Sstevel@tonic-gate struct nfsrddirargs rda; 32587c478bd9Sstevel@tonic-gate struct nfsrddirres rd; 32597c478bd9Sstevel@tonic-gate rnode_t *rp; 32607c478bd9Sstevel@tonic-gate mntinfo_t *mi; 32617c478bd9Sstevel@tonic-gate uint_t count; 32627c478bd9Sstevel@tonic-gate int douprintf; 32637c478bd9Sstevel@tonic-gate failinfo_t fi, *fip; 32647c478bd9Sstevel@tonic-gate 3265108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(vp)->mi_zone); 32667c478bd9Sstevel@tonic-gate count = rdc->buflen; 32677c478bd9Sstevel@tonic-gate 32687c478bd9Sstevel@tonic-gate rp = VTOR(vp); 32697c478bd9Sstevel@tonic-gate mi = VTOMI(vp); 32707c478bd9Sstevel@tonic-gate 32717c478bd9Sstevel@tonic-gate rda.rda_fh = *VTOFH(vp); 32727c478bd9Sstevel@tonic-gate rda.rda_offset = rdc->nfs_cookie; 32737c478bd9Sstevel@tonic-gate 32747c478bd9Sstevel@tonic-gate /* 32757c478bd9Sstevel@tonic-gate * NFS client failover support 32767c478bd9Sstevel@tonic-gate * suppress failover unless we have a zero cookie 32777c478bd9Sstevel@tonic-gate */ 32787c478bd9Sstevel@tonic-gate if (rdc->nfs_cookie == (off_t)0) { 32797c478bd9Sstevel@tonic-gate fi.vp = vp; 32807c478bd9Sstevel@tonic-gate fi.fhp = (caddr_t)&rda.rda_fh; 32817c478bd9Sstevel@tonic-gate fi.copyproc = nfscopyfh; 32827c478bd9Sstevel@tonic-gate fi.lookupproc = nfslookup; 32837c478bd9Sstevel@tonic-gate fi.xattrdirproc = acl_getxattrdir2; 32847c478bd9Sstevel@tonic-gate fip = &fi; 32857c478bd9Sstevel@tonic-gate } else { 32867c478bd9Sstevel@tonic-gate fip = NULL; 32877c478bd9Sstevel@tonic-gate } 32887c478bd9Sstevel@tonic-gate 32897c478bd9Sstevel@tonic-gate rd.rd_entries = kmem_alloc(rdc->buflen, KM_SLEEP); 32907c478bd9Sstevel@tonic-gate rd.rd_size = count; 32917c478bd9Sstevel@tonic-gate rd.rd_offset = rda.rda_offset; 32927c478bd9Sstevel@tonic-gate 32937c478bd9Sstevel@tonic-gate douprintf = 1; 32947c478bd9Sstevel@tonic-gate 32957c478bd9Sstevel@tonic-gate if (mi->mi_io_kstats) { 32967c478bd9Sstevel@tonic-gate mutex_enter(&mi->mi_lock); 32977c478bd9Sstevel@tonic-gate 
kstat_runq_enter(KSTAT_IO_PTR(mi->mi_io_kstats)); 32987c478bd9Sstevel@tonic-gate mutex_exit(&mi->mi_lock); 32997c478bd9Sstevel@tonic-gate } 33007c478bd9Sstevel@tonic-gate 33017c478bd9Sstevel@tonic-gate do { 33027c478bd9Sstevel@tonic-gate rda.rda_count = MIN(count, mi->mi_curread); 33037c478bd9Sstevel@tonic-gate error = rfs2call(mi, RFS_READDIR, 33047c478bd9Sstevel@tonic-gate xdr_rddirargs, (caddr_t)&rda, 33057c478bd9Sstevel@tonic-gate xdr_getrddirres, (caddr_t)&rd, cr, 33067c478bd9Sstevel@tonic-gate &douprintf, &rd.rd_status, 0, fip); 33077c478bd9Sstevel@tonic-gate } while (error == ENFS_TRYAGAIN); 33087c478bd9Sstevel@tonic-gate 33097c478bd9Sstevel@tonic-gate if (mi->mi_io_kstats) { 33107c478bd9Sstevel@tonic-gate mutex_enter(&mi->mi_lock); 33117c478bd9Sstevel@tonic-gate kstat_runq_exit(KSTAT_IO_PTR(mi->mi_io_kstats)); 33127c478bd9Sstevel@tonic-gate mutex_exit(&mi->mi_lock); 33137c478bd9Sstevel@tonic-gate } 33147c478bd9Sstevel@tonic-gate 33157c478bd9Sstevel@tonic-gate /* 33167c478bd9Sstevel@tonic-gate * Since we are actually doing a READDIR RPC, we must have 33177c478bd9Sstevel@tonic-gate * exclusive access to the cache entry being filled. Thus, 33187c478bd9Sstevel@tonic-gate * it is safe to update all fields except for the flags 33197c478bd9Sstevel@tonic-gate * field. The r_statelock in the rnode must be held to 33207c478bd9Sstevel@tonic-gate * prevent two different threads from simultaneously 33217c478bd9Sstevel@tonic-gate * attempting to update the flags field. This can happen 33227c478bd9Sstevel@tonic-gate * if we are turning off RDDIR and the other thread is 33237c478bd9Sstevel@tonic-gate * trying to set RDDIRWAIT. 33247c478bd9Sstevel@tonic-gate */ 33257c478bd9Sstevel@tonic-gate ASSERT(rdc->flags & RDDIR); 33267c478bd9Sstevel@tonic-gate if (!error) { 33277c478bd9Sstevel@tonic-gate error = geterrno(rd.rd_status); 33287c478bd9Sstevel@tonic-gate if (!error) { 33297c478bd9Sstevel@tonic-gate rdc->nfs_ncookie = rd.rd_offset; 33307c478bd9Sstevel@tonic-gate rdc->eof = rd.rd_eof ? 
1 : 0; 33317c478bd9Sstevel@tonic-gate rdc->entlen = rd.rd_size; 33327c478bd9Sstevel@tonic-gate ASSERT(rdc->entlen <= rdc->buflen); 33337c478bd9Sstevel@tonic-gate #ifdef DEBUG 33347c478bd9Sstevel@tonic-gate rdc->entries = rddir_cache_buf_alloc(rdc->buflen, 33357c478bd9Sstevel@tonic-gate KM_SLEEP); 33367c478bd9Sstevel@tonic-gate #else 33377c478bd9Sstevel@tonic-gate rdc->entries = kmem_alloc(rdc->buflen, KM_SLEEP); 33387c478bd9Sstevel@tonic-gate #endif 33397c478bd9Sstevel@tonic-gate bcopy(rd.rd_entries, rdc->entries, rdc->entlen); 33407c478bd9Sstevel@tonic-gate rdc->error = 0; 33417c478bd9Sstevel@tonic-gate if (mi->mi_io_kstats) { 33427c478bd9Sstevel@tonic-gate mutex_enter(&mi->mi_lock); 33437c478bd9Sstevel@tonic-gate KSTAT_IO_PTR(mi->mi_io_kstats)->reads++; 33447c478bd9Sstevel@tonic-gate KSTAT_IO_PTR(mi->mi_io_kstats)->nread += 33457c478bd9Sstevel@tonic-gate rd.rd_size; 33467c478bd9Sstevel@tonic-gate mutex_exit(&mi->mi_lock); 33477c478bd9Sstevel@tonic-gate } 33487c478bd9Sstevel@tonic-gate } else { 33497c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, vp, cr); 33507c478bd9Sstevel@tonic-gate } 33517c478bd9Sstevel@tonic-gate } 33527c478bd9Sstevel@tonic-gate if (error) { 33537c478bd9Sstevel@tonic-gate rdc->entries = NULL; 33547c478bd9Sstevel@tonic-gate rdc->error = error; 33557c478bd9Sstevel@tonic-gate } 33567c478bd9Sstevel@tonic-gate kmem_free(rd.rd_entries, rdc->buflen); 33577c478bd9Sstevel@tonic-gate 33587c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 33597c478bd9Sstevel@tonic-gate rdc->flags &= ~RDDIR; 33607c478bd9Sstevel@tonic-gate if (rdc->flags & RDDIRWAIT) { 33617c478bd9Sstevel@tonic-gate rdc->flags &= ~RDDIRWAIT; 33627c478bd9Sstevel@tonic-gate cv_broadcast(&rdc->cv); 33637c478bd9Sstevel@tonic-gate } 33647c478bd9Sstevel@tonic-gate if (error) 33657c478bd9Sstevel@tonic-gate rdc->flags |= RDDIRREQ; 33667c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 33677c478bd9Sstevel@tonic-gate 33687c478bd9Sstevel@tonic-gate rddir_cache_rele(rdc); 33697c478bd9Sstevel@tonic-gate 33707c478bd9Sstevel@tonic-gate return (error); 33717c478bd9Sstevel@tonic-gate } 33727c478bd9Sstevel@tonic-gate 33737c478bd9Sstevel@tonic-gate #ifdef DEBUG 33747c478bd9Sstevel@tonic-gate static int nfs_bio_do_stop = 0; 33757c478bd9Sstevel@tonic-gate #endif 33767c478bd9Sstevel@tonic-gate 33777c478bd9Sstevel@tonic-gate static int 33787c478bd9Sstevel@tonic-gate nfs_bio(struct buf *bp, cred_t *cr) 33797c478bd9Sstevel@tonic-gate { 33807c478bd9Sstevel@tonic-gate rnode_t *rp = VTOR(bp->b_vp); 33817c478bd9Sstevel@tonic-gate int count; 33827c478bd9Sstevel@tonic-gate int error; 33837c478bd9Sstevel@tonic-gate cred_t *cred; 33847c478bd9Sstevel@tonic-gate uint_t offset; 33857c478bd9Sstevel@tonic-gate 33867c478bd9Sstevel@tonic-gate DTRACE_IO1(start, struct buf *, bp); 33877c478bd9Sstevel@tonic-gate 3388108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(bp->b_vp)->mi_zone); 33897c478bd9Sstevel@tonic-gate offset = dbtob(bp->b_blkno); 33907c478bd9Sstevel@tonic-gate 33917c478bd9Sstevel@tonic-gate if (bp->b_flags & B_READ) { 33927c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 33937c478bd9Sstevel@tonic-gate if (rp->r_cred != NULL) { 33947c478bd9Sstevel@tonic-gate cred = rp->r_cred; 33957c478bd9Sstevel@tonic-gate crhold(cred); 33967c478bd9Sstevel@tonic-gate } else { 33977c478bd9Sstevel@tonic-gate rp->r_cred = cr; 33987c478bd9Sstevel@tonic-gate crhold(cr); 33997c478bd9Sstevel@tonic-gate cred = cr; 34007c478bd9Sstevel@tonic-gate crhold(cred); 34017c478bd9Sstevel@tonic-gate } 34027c478bd9Sstevel@tonic-gate 
mutex_exit(&rp->r_statelock); 34037c478bd9Sstevel@tonic-gate read_again: 34047c478bd9Sstevel@tonic-gate error = bp->b_error = nfsread(bp->b_vp, bp->b_un.b_addr, 34057c478bd9Sstevel@tonic-gate offset, bp->b_bcount, &bp->b_resid, cred); 34060a701b1eSRobert Gordon 34077c478bd9Sstevel@tonic-gate crfree(cred); 34087c478bd9Sstevel@tonic-gate if (!error) { 34097c478bd9Sstevel@tonic-gate if (bp->b_resid) { 34107c478bd9Sstevel@tonic-gate /* 34117c478bd9Sstevel@tonic-gate * Didn't get it all because we hit EOF, 34127c478bd9Sstevel@tonic-gate * zero all the memory beyond the EOF. 34137c478bd9Sstevel@tonic-gate */ 34147c478bd9Sstevel@tonic-gate /* bzero(rdaddr + */ 34157c478bd9Sstevel@tonic-gate bzero(bp->b_un.b_addr + 34167c478bd9Sstevel@tonic-gate bp->b_bcount - bp->b_resid, bp->b_resid); 34177c478bd9Sstevel@tonic-gate } 34187c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 34197c478bd9Sstevel@tonic-gate if (bp->b_resid == bp->b_bcount && 34207c478bd9Sstevel@tonic-gate offset >= rp->r_size) { 34217c478bd9Sstevel@tonic-gate /* 34227c478bd9Sstevel@tonic-gate * We didn't read anything at all as we are 34237c478bd9Sstevel@tonic-gate * past EOF. Return an error indicator back 34247c478bd9Sstevel@tonic-gate * but don't destroy the pages (yet). 34257c478bd9Sstevel@tonic-gate */ 34267c478bd9Sstevel@tonic-gate error = NFS_EOF; 34277c478bd9Sstevel@tonic-gate } 34287c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34297c478bd9Sstevel@tonic-gate } else if (error == EACCES) { 34307c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 34317c478bd9Sstevel@tonic-gate if (cred != cr) { 34327c478bd9Sstevel@tonic-gate if (rp->r_cred != NULL) 34337c478bd9Sstevel@tonic-gate crfree(rp->r_cred); 34347c478bd9Sstevel@tonic-gate rp->r_cred = cr; 34357c478bd9Sstevel@tonic-gate crhold(cr); 34367c478bd9Sstevel@tonic-gate cred = cr; 34377c478bd9Sstevel@tonic-gate crhold(cred); 34387c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34397c478bd9Sstevel@tonic-gate goto read_again; 34407c478bd9Sstevel@tonic-gate } 34417c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34427c478bd9Sstevel@tonic-gate } 34437c478bd9Sstevel@tonic-gate } else { 34447c478bd9Sstevel@tonic-gate if (!(rp->r_flags & RSTALE)) { 34457c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 34467c478bd9Sstevel@tonic-gate if (rp->r_cred != NULL) { 34477c478bd9Sstevel@tonic-gate cred = rp->r_cred; 34487c478bd9Sstevel@tonic-gate crhold(cred); 34497c478bd9Sstevel@tonic-gate } else { 34507c478bd9Sstevel@tonic-gate rp->r_cred = cr; 34517c478bd9Sstevel@tonic-gate crhold(cr); 34527c478bd9Sstevel@tonic-gate cred = cr; 34537c478bd9Sstevel@tonic-gate crhold(cred); 34547c478bd9Sstevel@tonic-gate } 34557c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34567c478bd9Sstevel@tonic-gate write_again: 34577c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 34587c478bd9Sstevel@tonic-gate count = MIN(bp->b_bcount, rp->r_size - offset); 34597c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34607c478bd9Sstevel@tonic-gate if (count < 0) 34617c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "nfs_bio: write count < 0"); 34627c478bd9Sstevel@tonic-gate #ifdef DEBUG 34637c478bd9Sstevel@tonic-gate if (count == 0) { 34647c478bd9Sstevel@tonic-gate zcmn_err(getzoneid(), CE_WARN, 34657c478bd9Sstevel@tonic-gate "nfs_bio: zero length write at %d", 34667c478bd9Sstevel@tonic-gate offset); 34677c478bd9Sstevel@tonic-gate nfs_printfhandle(&rp->r_fh); 34687c478bd9Sstevel@tonic-gate if (nfs_bio_do_stop) 34697c478bd9Sstevel@tonic-gate debug_enter("nfs_bio"); 
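/* Diagnostic only; control falls through and nfswrite() below is still issued with the zero count. */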
34707c478bd9Sstevel@tonic-gate } 34717c478bd9Sstevel@tonic-gate #endif 34727c478bd9Sstevel@tonic-gate error = nfswrite(bp->b_vp, bp->b_un.b_addr, offset, 34737c478bd9Sstevel@tonic-gate count, cred); 34747c478bd9Sstevel@tonic-gate if (error == EACCES) { 34757c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 34767c478bd9Sstevel@tonic-gate if (cred != cr) { 34777c478bd9Sstevel@tonic-gate if (rp->r_cred != NULL) 34787c478bd9Sstevel@tonic-gate crfree(rp->r_cred); 34797c478bd9Sstevel@tonic-gate rp->r_cred = cr; 34807c478bd9Sstevel@tonic-gate crhold(cr); 34817c478bd9Sstevel@tonic-gate crfree(cred); 34827c478bd9Sstevel@tonic-gate cred = cr; 34837c478bd9Sstevel@tonic-gate crhold(cred); 34847c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34857c478bd9Sstevel@tonic-gate goto write_again; 34867c478bd9Sstevel@tonic-gate } 34877c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 34887c478bd9Sstevel@tonic-gate } 34897c478bd9Sstevel@tonic-gate bp->b_error = error; 34907c478bd9Sstevel@tonic-gate if (error && error != EINTR) { 34917c478bd9Sstevel@tonic-gate /* 34927c478bd9Sstevel@tonic-gate * Don't print EDQUOT errors on the console. 34937c478bd9Sstevel@tonic-gate * Don't print asynchronous EACCES errors. 34947c478bd9Sstevel@tonic-gate * Don't print EFBIG errors. 34957c478bd9Sstevel@tonic-gate * Print all other write errors. 34967c478bd9Sstevel@tonic-gate */ 34977c478bd9Sstevel@tonic-gate if (error != EDQUOT && error != EFBIG && 34987c478bd9Sstevel@tonic-gate (error != EACCES || 34997c478bd9Sstevel@tonic-gate !(bp->b_flags & B_ASYNC))) 35007c478bd9Sstevel@tonic-gate nfs_write_error(bp->b_vp, error, cred); 35017c478bd9Sstevel@tonic-gate /* 35027c478bd9Sstevel@tonic-gate * Update r_error and r_flags as appropriate. 35037c478bd9Sstevel@tonic-gate * If the error was ESTALE, then mark the 35047c478bd9Sstevel@tonic-gate * rnode as not being writeable and save 35057c478bd9Sstevel@tonic-gate * the error status. Otherwise, save any 35067c478bd9Sstevel@tonic-gate * errors which occur from asynchronous 35077c478bd9Sstevel@tonic-gate * page invalidations. Any errors occurring 35087c478bd9Sstevel@tonic-gate * from other operations should be saved 35097c478bd9Sstevel@tonic-gate * by the caller. 
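 * Errors latched in r_error here are typically reported back on a
 * later close() of the file (see the ESTALE note below, where a
 * close may already have cleared r_error).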
35107c478bd9Sstevel@tonic-gate */ 35117c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 35127c478bd9Sstevel@tonic-gate if (error == ESTALE) { 35137c478bd9Sstevel@tonic-gate rp->r_flags |= RSTALE; 35147c478bd9Sstevel@tonic-gate if (!rp->r_error) 35157c478bd9Sstevel@tonic-gate rp->r_error = error; 35167c478bd9Sstevel@tonic-gate } else if (!rp->r_error && 35177c478bd9Sstevel@tonic-gate (bp->b_flags & 35187c478bd9Sstevel@tonic-gate (B_INVAL|B_FORCE|B_ASYNC)) == 35197c478bd9Sstevel@tonic-gate (B_INVAL|B_FORCE|B_ASYNC)) { 35207c478bd9Sstevel@tonic-gate rp->r_error = error; 35217c478bd9Sstevel@tonic-gate } 35227c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 35237c478bd9Sstevel@tonic-gate } 35247c478bd9Sstevel@tonic-gate crfree(cred); 35258afffe5eSbatschul } else { 35267c478bd9Sstevel@tonic-gate error = rp->r_error; 35278afffe5eSbatschul /* 35288afffe5eSbatschul * A close may have cleared r_error, if so, 35298afffe5eSbatschul * propagate ESTALE error return properly 35308afffe5eSbatschul */ 35318afffe5eSbatschul if (error == 0) 35328afffe5eSbatschul error = ESTALE; 35338afffe5eSbatschul } 35347c478bd9Sstevel@tonic-gate } 35357c478bd9Sstevel@tonic-gate 35367c478bd9Sstevel@tonic-gate if (error != 0 && error != NFS_EOF) 35377c478bd9Sstevel@tonic-gate bp->b_flags |= B_ERROR; 35387c478bd9Sstevel@tonic-gate 35397c478bd9Sstevel@tonic-gate DTRACE_IO1(done, struct buf *, bp); 35407c478bd9Sstevel@tonic-gate 35417c478bd9Sstevel@tonic-gate return (error); 35427c478bd9Sstevel@tonic-gate } 35437c478bd9Sstevel@tonic-gate 3544da6c28aaSamw /* ARGSUSED */ 35457c478bd9Sstevel@tonic-gate static int 3546da6c28aaSamw nfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct) 35477c478bd9Sstevel@tonic-gate { 35487c478bd9Sstevel@tonic-gate struct nfs_fid *fp; 35497c478bd9Sstevel@tonic-gate rnode_t *rp; 35507c478bd9Sstevel@tonic-gate 35517c478bd9Sstevel@tonic-gate rp = VTOR(vp); 35527c478bd9Sstevel@tonic-gate 35537c478bd9Sstevel@tonic-gate if (fidp->fid_len < (sizeof (struct nfs_fid) - sizeof (short))) { 35547c478bd9Sstevel@tonic-gate fidp->fid_len = sizeof (struct nfs_fid) - sizeof (short); 35557c478bd9Sstevel@tonic-gate return (ENOSPC); 35567c478bd9Sstevel@tonic-gate } 35577c478bd9Sstevel@tonic-gate fp = (struct nfs_fid *)fidp; 35587c478bd9Sstevel@tonic-gate fp->nf_pad = 0; 35597c478bd9Sstevel@tonic-gate fp->nf_len = sizeof (struct nfs_fid) - sizeof (short); 35607c478bd9Sstevel@tonic-gate bcopy(rp->r_fh.fh_buf, fp->nf_data, NFS_FHSIZE); 35617c478bd9Sstevel@tonic-gate return (0); 35627c478bd9Sstevel@tonic-gate } 35637c478bd9Sstevel@tonic-gate 35647c478bd9Sstevel@tonic-gate /* ARGSUSED2 */ 35657c478bd9Sstevel@tonic-gate static int 35667c478bd9Sstevel@tonic-gate nfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp) 35677c478bd9Sstevel@tonic-gate { 35687c478bd9Sstevel@tonic-gate rnode_t *rp = VTOR(vp); 35697c478bd9Sstevel@tonic-gate 35707c478bd9Sstevel@tonic-gate if (!write_lock) { 35717c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE); 35727c478bd9Sstevel@tonic-gate return (V_WRITELOCK_FALSE); 35737c478bd9Sstevel@tonic-gate } 35747c478bd9Sstevel@tonic-gate 35757c478bd9Sstevel@tonic-gate if ((rp->r_flags & RDIRECTIO) || (VTOMI(vp)->mi_flags & MI_DIRECTIO)) { 35767c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_READER, FALSE); 35777c478bd9Sstevel@tonic-gate if (rp->r_mapcnt == 0 && !vn_has_cached_data(vp)) 35787c478bd9Sstevel@tonic-gate return (V_WRITELOCK_FALSE); 35797c478bd9Sstevel@tonic-gate nfs_rw_exit(&rp->r_rwlock); 
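/* Cached pages or an active mapping exist, so fall through and retake r_rwlock as a writer below. */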
35807c478bd9Sstevel@tonic-gate }
35817c478bd9Sstevel@tonic-gate
35827c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, FALSE);
35837c478bd9Sstevel@tonic-gate return (V_WRITELOCK_TRUE);
35847c478bd9Sstevel@tonic-gate }
35857c478bd9Sstevel@tonic-gate
35867c478bd9Sstevel@tonic-gate /* ARGSUSED */
35877c478bd9Sstevel@tonic-gate static void
35887c478bd9Sstevel@tonic-gate nfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
35897c478bd9Sstevel@tonic-gate {
35907c478bd9Sstevel@tonic-gate rnode_t *rp = VTOR(vp);
35917c478bd9Sstevel@tonic-gate
35927c478bd9Sstevel@tonic-gate nfs_rw_exit(&rp->r_rwlock);
35937c478bd9Sstevel@tonic-gate }
35947c478bd9Sstevel@tonic-gate
35957c478bd9Sstevel@tonic-gate /* ARGSUSED */
35967c478bd9Sstevel@tonic-gate static int
3597da6c28aaSamw nfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
35987c478bd9Sstevel@tonic-gate {
35997c478bd9Sstevel@tonic-gate
36007c478bd9Sstevel@tonic-gate /*
36017c478bd9Sstevel@tonic-gate * Because we stuff the readdir cookie into the offset field
36027c478bd9Sstevel@tonic-gate * someone may attempt to do an lseek with the cookie which
36037c478bd9Sstevel@tonic-gate * we want to succeed.
36047c478bd9Sstevel@tonic-gate */
36057c478bd9Sstevel@tonic-gate if (vp->v_type == VDIR)
36067c478bd9Sstevel@tonic-gate return (0);
36077c478bd9Sstevel@tonic-gate if (*noffp < 0 || *noffp > MAXOFF32_T)
36087c478bd9Sstevel@tonic-gate return (EINVAL);
36097c478bd9Sstevel@tonic-gate return (0);
36107c478bd9Sstevel@tonic-gate }
36117c478bd9Sstevel@tonic-gate
36127c478bd9Sstevel@tonic-gate /*
36137c478bd9Sstevel@tonic-gate * number of NFS_MAXDATA blocks to read ahead
36147c478bd9Sstevel@tonic-gate * optimized for 100 base-T.
36157c478bd9Sstevel@tonic-gate */
36167c478bd9Sstevel@tonic-gate static int nfs_nra = 4;
36177c478bd9Sstevel@tonic-gate
36187c478bd9Sstevel@tonic-gate #ifdef DEBUG
36197c478bd9Sstevel@tonic-gate static int nfs_lostpage = 0; /* number of times we lost original page */
36207c478bd9Sstevel@tonic-gate #endif
36217c478bd9Sstevel@tonic-gate
36227c478bd9Sstevel@tonic-gate /*
36237c478bd9Sstevel@tonic-gate * Return all the pages from [off..off+len) in file
36247c478bd9Sstevel@tonic-gate */
3625da6c28aaSamw /* ARGSUSED */
36267c478bd9Sstevel@tonic-gate static int
36277c478bd9Sstevel@tonic-gate nfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
36287c478bd9Sstevel@tonic-gate page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3629da6c28aaSamw enum seg_rw rw, cred_t *cr, caller_context_t *ct)
36307c478bd9Sstevel@tonic-gate {
36317c478bd9Sstevel@tonic-gate rnode_t *rp;
36327c478bd9Sstevel@tonic-gate int error;
36337c478bd9Sstevel@tonic-gate mntinfo_t *mi;
36347c478bd9Sstevel@tonic-gate
36357c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
36367c478bd9Sstevel@tonic-gate return (ENOSYS);
36377c478bd9Sstevel@tonic-gate
36387c478bd9Sstevel@tonic-gate ASSERT(off <= MAXOFF32_T);
3639108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone)
36407c478bd9Sstevel@tonic-gate return (EIO);
36417c478bd9Sstevel@tonic-gate if (protp != NULL)
36427c478bd9Sstevel@tonic-gate *protp = PROT_ALL;
36437c478bd9Sstevel@tonic-gate
36447c478bd9Sstevel@tonic-gate /*
36457c478bd9Sstevel@tonic-gate * Now validate that the caches are up to date.
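 * (If the attribute cache has timed out, nfs_validate_caches() may go
 * over the wire for fresh attributes and purge stale cached data
 * before any pages are handed out.)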
36467c478bd9Sstevel@tonic-gate */ 36477c478bd9Sstevel@tonic-gate error = nfs_validate_caches(vp, cr); 36487c478bd9Sstevel@tonic-gate if (error) 36497c478bd9Sstevel@tonic-gate return (error); 36507c478bd9Sstevel@tonic-gate 36517c478bd9Sstevel@tonic-gate rp = VTOR(vp); 36527c478bd9Sstevel@tonic-gate mi = VTOMI(vp); 36537c478bd9Sstevel@tonic-gate retry: 36547c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 36557c478bd9Sstevel@tonic-gate 36567c478bd9Sstevel@tonic-gate /* 36577c478bd9Sstevel@tonic-gate * Don't create dirty pages faster than they 36587c478bd9Sstevel@tonic-gate * can be cleaned so that the system doesn't 36597c478bd9Sstevel@tonic-gate * get imbalanced. If the async queue is 36607c478bd9Sstevel@tonic-gate * maxed out, then wait for it to drain before 36617c478bd9Sstevel@tonic-gate * creating more dirty pages. Also, wait for 36627c478bd9Sstevel@tonic-gate * any threads doing pagewalks in the vop_getattr 36637c478bd9Sstevel@tonic-gate * entry points so that they don't block for 36647c478bd9Sstevel@tonic-gate * long periods. 36657c478bd9Sstevel@tonic-gate */ 36667c478bd9Sstevel@tonic-gate if (rw == S_CREATE) { 36677c478bd9Sstevel@tonic-gate while ((mi->mi_max_threads != 0 && 36687c478bd9Sstevel@tonic-gate rp->r_awcount > 2 * mi->mi_max_threads) || 36697c478bd9Sstevel@tonic-gate rp->r_gcount > 0) 36707c478bd9Sstevel@tonic-gate cv_wait(&rp->r_cv, &rp->r_statelock); 36717c478bd9Sstevel@tonic-gate } 36727c478bd9Sstevel@tonic-gate 36737c478bd9Sstevel@tonic-gate /* 36747c478bd9Sstevel@tonic-gate * If we are getting called as a side effect of an nfs_write() 36757c478bd9Sstevel@tonic-gate * operation the local file size might not be extended yet. 36767c478bd9Sstevel@tonic-gate * In this case we want to be able to return pages of zeroes. 36777c478bd9Sstevel@tonic-gate */ 36787c478bd9Sstevel@tonic-gate if (off + len > rp->r_size + PAGEOFFSET && seg != segkmap) { 36797c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 36807c478bd9Sstevel@tonic-gate return (EFAULT); /* beyond EOF */ 36817c478bd9Sstevel@tonic-gate } 36827c478bd9Sstevel@tonic-gate 36837c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 36847c478bd9Sstevel@tonic-gate 3685*06e6833aSJosef 'Jeff' Sipek error = pvn_getpages(nfs_getapage, vp, off, len, protp, pl, plsz, 36867c478bd9Sstevel@tonic-gate seg, addr, rw, cr); 36877c478bd9Sstevel@tonic-gate 36887c478bd9Sstevel@tonic-gate switch (error) { 36897c478bd9Sstevel@tonic-gate case NFS_EOF: 36907c478bd9Sstevel@tonic-gate nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr); 36917c478bd9Sstevel@tonic-gate goto retry; 36927c478bd9Sstevel@tonic-gate case ESTALE: 36937c478bd9Sstevel@tonic-gate PURGE_STALE_FH(error, vp, cr); 36947c478bd9Sstevel@tonic-gate } 36957c478bd9Sstevel@tonic-gate 36967c478bd9Sstevel@tonic-gate return (error); 36977c478bd9Sstevel@tonic-gate } 36987c478bd9Sstevel@tonic-gate 36997c478bd9Sstevel@tonic-gate /* 3700*06e6833aSJosef 'Jeff' Sipek * Called from pvn_getpages to get a particular page. 
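 * A NULL page list (pl == NULL) indicates an asynchronous (readahead)
 * request: the I/O is started but no pages are returned to the caller.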
37017c478bd9Sstevel@tonic-gate */
37027c478bd9Sstevel@tonic-gate /* ARGSUSED */
37037c478bd9Sstevel@tonic-gate static int
37047c478bd9Sstevel@tonic-gate nfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
37057c478bd9Sstevel@tonic-gate page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
37067c478bd9Sstevel@tonic-gate enum seg_rw rw, cred_t *cr)
37077c478bd9Sstevel@tonic-gate {
37087c478bd9Sstevel@tonic-gate rnode_t *rp;
37097c478bd9Sstevel@tonic-gate uint_t bsize;
37107c478bd9Sstevel@tonic-gate struct buf *bp;
37117c478bd9Sstevel@tonic-gate page_t *pp;
37127c478bd9Sstevel@tonic-gate u_offset_t lbn;
37137c478bd9Sstevel@tonic-gate u_offset_t io_off;
37147c478bd9Sstevel@tonic-gate u_offset_t blkoff;
37157c478bd9Sstevel@tonic-gate u_offset_t rablkoff;
37167c478bd9Sstevel@tonic-gate size_t io_len;
37177c478bd9Sstevel@tonic-gate uint_t blksize;
37187c478bd9Sstevel@tonic-gate int error;
37197c478bd9Sstevel@tonic-gate int readahead;
37207c478bd9Sstevel@tonic-gate int readahead_issued = 0;
37217c478bd9Sstevel@tonic-gate int ra_window; /* readahead window */
37227c478bd9Sstevel@tonic-gate page_t *pagefound;
37237c478bd9Sstevel@tonic-gate
3724108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone)
37257c478bd9Sstevel@tonic-gate return (EIO);
37267c478bd9Sstevel@tonic-gate rp = VTOR(vp);
37277c478bd9Sstevel@tonic-gate bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
37287c478bd9Sstevel@tonic-gate
37297c478bd9Sstevel@tonic-gate reread:
37307c478bd9Sstevel@tonic-gate bp = NULL;
37317c478bd9Sstevel@tonic-gate pp = NULL;
37327c478bd9Sstevel@tonic-gate pagefound = NULL;
37337c478bd9Sstevel@tonic-gate
37347c478bd9Sstevel@tonic-gate if (pl != NULL)
37357c478bd9Sstevel@tonic-gate pl[0] = NULL;
37367c478bd9Sstevel@tonic-gate
37377c478bd9Sstevel@tonic-gate error = 0;
37387c478bd9Sstevel@tonic-gate lbn = off / bsize;
37397c478bd9Sstevel@tonic-gate blkoff = lbn * bsize;
37407c478bd9Sstevel@tonic-gate
37417c478bd9Sstevel@tonic-gate /*
37427c478bd9Sstevel@tonic-gate * Queueing up the readahead before doing the synchronous read
37437c478bd9Sstevel@tonic-gate * results in a significant increase in read throughput because
37447c478bd9Sstevel@tonic-gate * of the increased parallelism between the async threads and
37457c478bd9Sstevel@tonic-gate * the process context.
37467c478bd9Sstevel@tonic-gate */
37477c478bd9Sstevel@tonic-gate if ((off & ((vp->v_vfsp->vfs_bsize) - 1)) == 0 &&
37487c478bd9Sstevel@tonic-gate rw != S_CREATE &&
37497c478bd9Sstevel@tonic-gate !(vp->v_flag & VNOCACHE)) {
37507c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock);
37517c478bd9Sstevel@tonic-gate
37527c478bd9Sstevel@tonic-gate /*
37537c478bd9Sstevel@tonic-gate * Calculate the number of readaheads to do.
37547c478bd9Sstevel@tonic-gate * a) No readaheads at offset = 0.
37557c478bd9Sstevel@tonic-gate * b) Do maximum(nfs_nra) readaheads when the readahead
37567c478bd9Sstevel@tonic-gate * window is closed.
37577c478bd9Sstevel@tonic-gate * c) Do readaheads between 1 and (nfs_nra - 1) depending
37587c478bd9Sstevel@tonic-gate * upon how far the readahead window is open or closed.
37597c478bd9Sstevel@tonic-gate * d) No readaheads if rp->r_nextr is not within the scope
37607c478bd9Sstevel@tonic-gate * of the readahead window (random i/o).
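 * For example (illustrative numbers): with nfs_nra == 4 and an 8k
 * block size, a miss at blkoff 8k while r_nextr is 24k gives
 * ra_window = (24k - 8k) / 8k == 2, so nfs_nra - ra_window == 2
 * readaheads are queued (case c above).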
37617c478bd9Sstevel@tonic-gate */ 37627c478bd9Sstevel@tonic-gate 37637c478bd9Sstevel@tonic-gate if (off == 0) 37647c478bd9Sstevel@tonic-gate readahead = 0; 37657c478bd9Sstevel@tonic-gate else if (blkoff == rp->r_nextr) 37667c478bd9Sstevel@tonic-gate readahead = nfs_nra; 37677c478bd9Sstevel@tonic-gate else if (rp->r_nextr > blkoff && 37687c478bd9Sstevel@tonic-gate ((ra_window = (rp->r_nextr - blkoff) / bsize) 37697c478bd9Sstevel@tonic-gate <= (nfs_nra - 1))) 37707c478bd9Sstevel@tonic-gate readahead = nfs_nra - ra_window; 37717c478bd9Sstevel@tonic-gate else 37727c478bd9Sstevel@tonic-gate readahead = 0; 37737c478bd9Sstevel@tonic-gate 37747c478bd9Sstevel@tonic-gate rablkoff = rp->r_nextr; 37757c478bd9Sstevel@tonic-gate while (readahead > 0 && rablkoff + bsize < rp->r_size) { 37767c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 37777c478bd9Sstevel@tonic-gate if (nfs_async_readahead(vp, rablkoff + bsize, 37787c478bd9Sstevel@tonic-gate addr + (rablkoff + bsize - off), seg, cr, 37797c478bd9Sstevel@tonic-gate nfs_readahead) < 0) { 37807c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 37817c478bd9Sstevel@tonic-gate break; 37827c478bd9Sstevel@tonic-gate } 37837c478bd9Sstevel@tonic-gate readahead--; 37847c478bd9Sstevel@tonic-gate rablkoff += bsize; 37857c478bd9Sstevel@tonic-gate /* 37867c478bd9Sstevel@tonic-gate * Indicate that we did a readahead so 37877c478bd9Sstevel@tonic-gate * readahead offset is not updated 37887c478bd9Sstevel@tonic-gate * by the synchronous read below. 37897c478bd9Sstevel@tonic-gate */ 37907c478bd9Sstevel@tonic-gate readahead_issued = 1; 37917c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 37927c478bd9Sstevel@tonic-gate /* 37937c478bd9Sstevel@tonic-gate * set readahead offset to 37947c478bd9Sstevel@tonic-gate * offset of last async readahead 37957c478bd9Sstevel@tonic-gate * request. 37967c478bd9Sstevel@tonic-gate */ 37977c478bd9Sstevel@tonic-gate rp->r_nextr = rablkoff; 37987c478bd9Sstevel@tonic-gate } 37997c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 38007c478bd9Sstevel@tonic-gate } 38017c478bd9Sstevel@tonic-gate 38027c478bd9Sstevel@tonic-gate again: 38037c478bd9Sstevel@tonic-gate if ((pagefound = page_exists(vp, off)) == NULL) { 38047c478bd9Sstevel@tonic-gate if (pl == NULL) { 38057c478bd9Sstevel@tonic-gate (void) nfs_async_readahead(vp, blkoff, addr, seg, cr, 38067c478bd9Sstevel@tonic-gate nfs_readahead); 38077c478bd9Sstevel@tonic-gate } else if (rw == S_CREATE) { 38087c478bd9Sstevel@tonic-gate /* 38097c478bd9Sstevel@tonic-gate * Block for this page is not allocated, or the offset 38107c478bd9Sstevel@tonic-gate * is beyond the current allocation size, or we're 38117c478bd9Sstevel@tonic-gate * allocating a swap slot and the page was not found, 38127c478bd9Sstevel@tonic-gate * so allocate it and return a zero page. 
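 * (S_CREATE means the caller is creating the page contents itself,
 * e.g. a write extending the file, so no over-the-wire read is done.)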
38137c478bd9Sstevel@tonic-gate */ 38147c478bd9Sstevel@tonic-gate if ((pp = page_create_va(vp, off, 38157c478bd9Sstevel@tonic-gate PAGESIZE, PG_WAIT, seg, addr)) == NULL) 38167c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "nfs_getapage: page_create"); 38177c478bd9Sstevel@tonic-gate io_len = PAGESIZE; 38187c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 38197c478bd9Sstevel@tonic-gate rp->r_nextr = off + PAGESIZE; 38207c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 38217c478bd9Sstevel@tonic-gate } else { 38227c478bd9Sstevel@tonic-gate /* 38237c478bd9Sstevel@tonic-gate * Need to go to server to get a BLOCK, exception to 38247c478bd9Sstevel@tonic-gate * that being while reading at offset = 0 or doing 38257c478bd9Sstevel@tonic-gate * random i/o, in that case read only a PAGE. 38267c478bd9Sstevel@tonic-gate */ 38277c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 38287c478bd9Sstevel@tonic-gate if (blkoff < rp->r_size && 38297c478bd9Sstevel@tonic-gate blkoff + bsize >= rp->r_size) { 38307c478bd9Sstevel@tonic-gate /* 38317c478bd9Sstevel@tonic-gate * If only a block or less is left in 38327c478bd9Sstevel@tonic-gate * the file, read all that is remaining. 38337c478bd9Sstevel@tonic-gate */ 38347c478bd9Sstevel@tonic-gate if (rp->r_size <= off) { 38357c478bd9Sstevel@tonic-gate /* 38367c478bd9Sstevel@tonic-gate * Trying to access beyond EOF, 38377c478bd9Sstevel@tonic-gate * set up to get at least one page. 38387c478bd9Sstevel@tonic-gate */ 38397c478bd9Sstevel@tonic-gate blksize = off + PAGESIZE - blkoff; 38407c478bd9Sstevel@tonic-gate } else 38417c478bd9Sstevel@tonic-gate blksize = rp->r_size - blkoff; 38427c478bd9Sstevel@tonic-gate } else if ((off == 0) || 38437c478bd9Sstevel@tonic-gate (off != rp->r_nextr && !readahead_issued)) { 38447c478bd9Sstevel@tonic-gate blksize = PAGESIZE; 38457c478bd9Sstevel@tonic-gate blkoff = off; /* block = page here */ 38467c478bd9Sstevel@tonic-gate } else 38477c478bd9Sstevel@tonic-gate blksize = bsize; 38487c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 38497c478bd9Sstevel@tonic-gate 38507c478bd9Sstevel@tonic-gate pp = pvn_read_kluster(vp, off, seg, addr, &io_off, 38517c478bd9Sstevel@tonic-gate &io_len, blkoff, blksize, 0); 38527c478bd9Sstevel@tonic-gate 38537c478bd9Sstevel@tonic-gate /* 38547c478bd9Sstevel@tonic-gate * Some other thread has entered the page, 38557c478bd9Sstevel@tonic-gate * so just use it. 38567c478bd9Sstevel@tonic-gate */ 38577c478bd9Sstevel@tonic-gate if (pp == NULL) 38587c478bd9Sstevel@tonic-gate goto again; 38597c478bd9Sstevel@tonic-gate 38607c478bd9Sstevel@tonic-gate /* 38617c478bd9Sstevel@tonic-gate * Now round the request size up to page boundaries. 38627c478bd9Sstevel@tonic-gate * This ensures that the entire page will be 38637c478bd9Sstevel@tonic-gate * initialized to zeroes if EOF is encountered. 38647c478bd9Sstevel@tonic-gate */ 38657c478bd9Sstevel@tonic-gate io_len = ptob(btopr(io_len)); 38667c478bd9Sstevel@tonic-gate 38677c478bd9Sstevel@tonic-gate bp = pageio_setup(pp, io_len, vp, B_READ); 38687c478bd9Sstevel@tonic-gate ASSERT(bp != NULL); 38697c478bd9Sstevel@tonic-gate 38707c478bd9Sstevel@tonic-gate /* 38717c478bd9Sstevel@tonic-gate * pageio_setup should have set b_addr to 0. This 38727c478bd9Sstevel@tonic-gate * is correct since we want to do I/O on a page 38737c478bd9Sstevel@tonic-gate * boundary. bp_mapin will use this addr to calculate 38747c478bd9Sstevel@tonic-gate * an offset, and then set b_addr to the kernel virtual 38757c478bd9Sstevel@tonic-gate * address it allocated for us. 
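 * (That is, after bp_mapin() the I/O below runs against a kernel
 * mapping of the pages rather than any user address.)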
38767c478bd9Sstevel@tonic-gate */ 38777c478bd9Sstevel@tonic-gate ASSERT(bp->b_un.b_addr == 0); 38787c478bd9Sstevel@tonic-gate 38797c478bd9Sstevel@tonic-gate bp->b_edev = 0; 38807c478bd9Sstevel@tonic-gate bp->b_dev = 0; 38817c478bd9Sstevel@tonic-gate bp->b_lblkno = lbtodb(io_off); 38827c478bd9Sstevel@tonic-gate bp->b_file = vp; 38837c478bd9Sstevel@tonic-gate bp->b_offset = (offset_t)off; 38847c478bd9Sstevel@tonic-gate bp_mapin(bp); 38857c478bd9Sstevel@tonic-gate 38867c478bd9Sstevel@tonic-gate /* 38877c478bd9Sstevel@tonic-gate * If doing a write beyond what we believe is EOF, 38887c478bd9Sstevel@tonic-gate * don't bother trying to read the pages from the 38897c478bd9Sstevel@tonic-gate * server, we'll just zero the pages here. We 38907c478bd9Sstevel@tonic-gate * don't check that the rw flag is S_WRITE here 38917c478bd9Sstevel@tonic-gate * because some implementations may attempt a 38927c478bd9Sstevel@tonic-gate * read access to the buffer before copying data. 38937c478bd9Sstevel@tonic-gate */ 38947c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 38957c478bd9Sstevel@tonic-gate if (io_off >= rp->r_size && seg == segkmap) { 38967c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 38977c478bd9Sstevel@tonic-gate bzero(bp->b_un.b_addr, io_len); 38987c478bd9Sstevel@tonic-gate } else { 38997c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 39007c478bd9Sstevel@tonic-gate error = nfs_bio(bp, cr); 39017c478bd9Sstevel@tonic-gate } 39027c478bd9Sstevel@tonic-gate 39037c478bd9Sstevel@tonic-gate /* 39047c478bd9Sstevel@tonic-gate * Unmap the buffer before freeing it. 39057c478bd9Sstevel@tonic-gate */ 39067c478bd9Sstevel@tonic-gate bp_mapout(bp); 39077c478bd9Sstevel@tonic-gate pageio_done(bp); 39087c478bd9Sstevel@tonic-gate 39097c478bd9Sstevel@tonic-gate if (error == NFS_EOF) { 39107c478bd9Sstevel@tonic-gate /* 39117c478bd9Sstevel@tonic-gate * If doing a write system call just return 39127c478bd9Sstevel@tonic-gate * zeroed pages, else user tried to get pages 39137c478bd9Sstevel@tonic-gate * beyond EOF, return error. We don't check 39147c478bd9Sstevel@tonic-gate * that the rw flag is S_WRITE here because 39157c478bd9Sstevel@tonic-gate * some implementations may attempt a read 39167c478bd9Sstevel@tonic-gate * access to the buffer before copying data. 39177c478bd9Sstevel@tonic-gate */ 39187c478bd9Sstevel@tonic-gate if (seg == segkmap) 39197c478bd9Sstevel@tonic-gate error = 0; 39207c478bd9Sstevel@tonic-gate else 39217c478bd9Sstevel@tonic-gate error = EFAULT; 39227c478bd9Sstevel@tonic-gate } 39237c478bd9Sstevel@tonic-gate 39247c478bd9Sstevel@tonic-gate if (!readahead_issued && !error) { 39257c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 39267c478bd9Sstevel@tonic-gate rp->r_nextr = io_off + io_len; 39277c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 39287c478bd9Sstevel@tonic-gate } 39297c478bd9Sstevel@tonic-gate } 39307c478bd9Sstevel@tonic-gate } 39317c478bd9Sstevel@tonic-gate 39327c478bd9Sstevel@tonic-gate out: 39337c478bd9Sstevel@tonic-gate if (pl == NULL) 39347c478bd9Sstevel@tonic-gate return (error); 39357c478bd9Sstevel@tonic-gate 39367c478bd9Sstevel@tonic-gate if (error) { 39377c478bd9Sstevel@tonic-gate if (pp != NULL) 39387c478bd9Sstevel@tonic-gate pvn_read_done(pp, B_ERROR); 39397c478bd9Sstevel@tonic-gate return (error); 39407c478bd9Sstevel@tonic-gate } 39417c478bd9Sstevel@tonic-gate 39427c478bd9Sstevel@tonic-gate if (pagefound) { 39437c478bd9Sstevel@tonic-gate se_t se = (rw == S_CREATE ? 
SE_EXCL : SE_SHARED); 39447c478bd9Sstevel@tonic-gate 39457c478bd9Sstevel@tonic-gate /* 39467c478bd9Sstevel@tonic-gate * Page exists in the cache, acquire the appropriate lock. 39477c478bd9Sstevel@tonic-gate * If this fails, start all over again. 39487c478bd9Sstevel@tonic-gate */ 39497c478bd9Sstevel@tonic-gate if ((pp = page_lookup(vp, off, se)) == NULL) { 39507c478bd9Sstevel@tonic-gate #ifdef DEBUG 39517c478bd9Sstevel@tonic-gate nfs_lostpage++; 39527c478bd9Sstevel@tonic-gate #endif 39537c478bd9Sstevel@tonic-gate goto reread; 39547c478bd9Sstevel@tonic-gate } 39557c478bd9Sstevel@tonic-gate pl[0] = pp; 39567c478bd9Sstevel@tonic-gate pl[1] = NULL; 39577c478bd9Sstevel@tonic-gate return (0); 39587c478bd9Sstevel@tonic-gate } 39597c478bd9Sstevel@tonic-gate 39607c478bd9Sstevel@tonic-gate if (pp != NULL) 39617c478bd9Sstevel@tonic-gate pvn_plist_init(pp, pl, plsz, off, io_len, rw); 39627c478bd9Sstevel@tonic-gate 39637c478bd9Sstevel@tonic-gate return (error); 39647c478bd9Sstevel@tonic-gate } 39657c478bd9Sstevel@tonic-gate 39667c478bd9Sstevel@tonic-gate static void 39677c478bd9Sstevel@tonic-gate nfs_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr, struct seg *seg, 39687c478bd9Sstevel@tonic-gate cred_t *cr) 39697c478bd9Sstevel@tonic-gate { 39707c478bd9Sstevel@tonic-gate int error; 39717c478bd9Sstevel@tonic-gate page_t *pp; 39727c478bd9Sstevel@tonic-gate u_offset_t io_off; 39737c478bd9Sstevel@tonic-gate size_t io_len; 39747c478bd9Sstevel@tonic-gate struct buf *bp; 39757c478bd9Sstevel@tonic-gate uint_t bsize, blksize; 39767c478bd9Sstevel@tonic-gate rnode_t *rp = VTOR(vp); 39777c478bd9Sstevel@tonic-gate 3978108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(vp)->mi_zone); 39797c478bd9Sstevel@tonic-gate 39807c478bd9Sstevel@tonic-gate bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE); 39817c478bd9Sstevel@tonic-gate 39827c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 39837c478bd9Sstevel@tonic-gate if (blkoff < rp->r_size && blkoff + bsize > rp->r_size) { 39847c478bd9Sstevel@tonic-gate /* 39857c478bd9Sstevel@tonic-gate * If less than a block left in file read less 39867c478bd9Sstevel@tonic-gate * than a block. 39877c478bd9Sstevel@tonic-gate */ 39887c478bd9Sstevel@tonic-gate blksize = rp->r_size - blkoff; 39897c478bd9Sstevel@tonic-gate } else 39907c478bd9Sstevel@tonic-gate blksize = bsize; 39917c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 39927c478bd9Sstevel@tonic-gate 39937c478bd9Sstevel@tonic-gate pp = pvn_read_kluster(vp, blkoff, segkmap, addr, 39947c478bd9Sstevel@tonic-gate &io_off, &io_len, blkoff, blksize, 1); 39957c478bd9Sstevel@tonic-gate /* 39967c478bd9Sstevel@tonic-gate * The isra flag passed to the kluster function is 1, we may have 39977c478bd9Sstevel@tonic-gate * gotten a return value of NULL for a variety of reasons (# of free 39987c478bd9Sstevel@tonic-gate * pages < minfree, someone entered the page on the vnode etc). In all 39997c478bd9Sstevel@tonic-gate * cases, we want to punt on the readahead. 40007c478bd9Sstevel@tonic-gate */ 40017c478bd9Sstevel@tonic-gate if (pp == NULL) 40027c478bd9Sstevel@tonic-gate return; 40037c478bd9Sstevel@tonic-gate 40047c478bd9Sstevel@tonic-gate /* 40057c478bd9Sstevel@tonic-gate * Now round the request size up to page boundaries. 40067c478bd9Sstevel@tonic-gate * This ensures that the entire page will be 40077c478bd9Sstevel@tonic-gate * initialized to zeroes if EOF is encountered. 
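 * (For instance, on a machine with 4k pages a 5k kluster is rounded
 * up to 8k so that the trailing partial page is zero-filled as well.)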
40087c478bd9Sstevel@tonic-gate */ 40097c478bd9Sstevel@tonic-gate io_len = ptob(btopr(io_len)); 40107c478bd9Sstevel@tonic-gate 40117c478bd9Sstevel@tonic-gate bp = pageio_setup(pp, io_len, vp, B_READ); 40127c478bd9Sstevel@tonic-gate ASSERT(bp != NULL); 40137c478bd9Sstevel@tonic-gate 40147c478bd9Sstevel@tonic-gate /* 40157c478bd9Sstevel@tonic-gate * pageio_setup should have set b_addr to 0. This is correct since 40167c478bd9Sstevel@tonic-gate * we want to do I/O on a page boundary. bp_mapin() will use this addr 40177c478bd9Sstevel@tonic-gate * to calculate an offset, and then set b_addr to the kernel virtual 40187c478bd9Sstevel@tonic-gate * address it allocated for us. 40197c478bd9Sstevel@tonic-gate */ 40207c478bd9Sstevel@tonic-gate ASSERT(bp->b_un.b_addr == 0); 40217c478bd9Sstevel@tonic-gate 40227c478bd9Sstevel@tonic-gate bp->b_edev = 0; 40237c478bd9Sstevel@tonic-gate bp->b_dev = 0; 40247c478bd9Sstevel@tonic-gate bp->b_lblkno = lbtodb(io_off); 40257c478bd9Sstevel@tonic-gate bp->b_file = vp; 40267c478bd9Sstevel@tonic-gate bp->b_offset = (offset_t)blkoff; 40277c478bd9Sstevel@tonic-gate bp_mapin(bp); 40287c478bd9Sstevel@tonic-gate 40297c478bd9Sstevel@tonic-gate /* 40307c478bd9Sstevel@tonic-gate * If doing a write beyond what we believe is EOF, don't bother trying 40317c478bd9Sstevel@tonic-gate * to read the pages from the server, we'll just zero the pages here. 40327c478bd9Sstevel@tonic-gate * We don't check that the rw flag is S_WRITE here because some 40337c478bd9Sstevel@tonic-gate * implementations may attempt a read access to the buffer before 40347c478bd9Sstevel@tonic-gate * copying data. 40357c478bd9Sstevel@tonic-gate */ 40367c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 40377c478bd9Sstevel@tonic-gate if (io_off >= rp->r_size && seg == segkmap) { 40387c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 40397c478bd9Sstevel@tonic-gate bzero(bp->b_un.b_addr, io_len); 40407c478bd9Sstevel@tonic-gate error = 0; 40417c478bd9Sstevel@tonic-gate } else { 40427c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 40437c478bd9Sstevel@tonic-gate error = nfs_bio(bp, cr); 40447c478bd9Sstevel@tonic-gate if (error == NFS_EOF) 40457c478bd9Sstevel@tonic-gate error = 0; 40467c478bd9Sstevel@tonic-gate } 40477c478bd9Sstevel@tonic-gate 40487c478bd9Sstevel@tonic-gate /* 40497c478bd9Sstevel@tonic-gate * Unmap the buffer before freeing it. 40507c478bd9Sstevel@tonic-gate */ 40517c478bd9Sstevel@tonic-gate bp_mapout(bp); 40527c478bd9Sstevel@tonic-gate pageio_done(bp); 40537c478bd9Sstevel@tonic-gate 40547c478bd9Sstevel@tonic-gate pvn_read_done(pp, error ? B_READ | B_ERROR : B_READ); 40557c478bd9Sstevel@tonic-gate 40567c478bd9Sstevel@tonic-gate /* 40577c478bd9Sstevel@tonic-gate * In case of error set readahead offset 40587c478bd9Sstevel@tonic-gate * to the lowest offset. 40597c478bd9Sstevel@tonic-gate * pvn_read_done() calls VN_DISPOSE to destroy the pages 40607c478bd9Sstevel@tonic-gate */ 40617c478bd9Sstevel@tonic-gate if (error && rp->r_nextr > io_off) { 40627c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 40637c478bd9Sstevel@tonic-gate if (rp->r_nextr > io_off) 40647c478bd9Sstevel@tonic-gate rp->r_nextr = io_off; 40657c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 40667c478bd9Sstevel@tonic-gate } 40677c478bd9Sstevel@tonic-gate } 40687c478bd9Sstevel@tonic-gate 40697c478bd9Sstevel@tonic-gate /* 40707c478bd9Sstevel@tonic-gate * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE} 40717c478bd9Sstevel@tonic-gate * If len == 0, do from off to EOF. 
40727c478bd9Sstevel@tonic-gate * 40737c478bd9Sstevel@tonic-gate * The normal cases should be len == 0 && off == 0 (entire vp list), 40747c478bd9Sstevel@tonic-gate * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE 40757c478bd9Sstevel@tonic-gate * (from pageout). 40767c478bd9Sstevel@tonic-gate */ 4077da6c28aaSamw /* ARGSUSED */ 40787c478bd9Sstevel@tonic-gate static int 4079da6c28aaSamw nfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, 4080da6c28aaSamw caller_context_t *ct) 40817c478bd9Sstevel@tonic-gate { 40827c478bd9Sstevel@tonic-gate int error; 40837c478bd9Sstevel@tonic-gate rnode_t *rp; 40847c478bd9Sstevel@tonic-gate 40857c478bd9Sstevel@tonic-gate ASSERT(cr != NULL); 40867c478bd9Sstevel@tonic-gate 40877c478bd9Sstevel@tonic-gate /* 40887c478bd9Sstevel@tonic-gate * XXX - Why should this check be made here? 40897c478bd9Sstevel@tonic-gate */ 40907c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP) 40917c478bd9Sstevel@tonic-gate return (ENOSYS); 40927c478bd9Sstevel@tonic-gate 40937c478bd9Sstevel@tonic-gate if (len == 0 && !(flags & B_INVAL) && vn_is_readonly(vp)) 40947c478bd9Sstevel@tonic-gate return (0); 40957c478bd9Sstevel@tonic-gate 4096108322fbScarlsonj if (!(flags & B_ASYNC) && nfs_zone() != VTOMI(vp)->mi_zone) 40977c478bd9Sstevel@tonic-gate return (EIO); 40987c478bd9Sstevel@tonic-gate ASSERT(off <= MAXOFF32_T); 40997c478bd9Sstevel@tonic-gate 41007c478bd9Sstevel@tonic-gate rp = VTOR(vp); 41017c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 41027c478bd9Sstevel@tonic-gate rp->r_count++; 41037c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 41047c478bd9Sstevel@tonic-gate error = nfs_putpages(vp, off, len, flags, cr); 41057c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 41067c478bd9Sstevel@tonic-gate rp->r_count--; 41077c478bd9Sstevel@tonic-gate cv_broadcast(&rp->r_cv); 41087c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 41097c478bd9Sstevel@tonic-gate 41107c478bd9Sstevel@tonic-gate return (error); 41117c478bd9Sstevel@tonic-gate } 41127c478bd9Sstevel@tonic-gate 41137c478bd9Sstevel@tonic-gate /* 41147c478bd9Sstevel@tonic-gate * Write out a single page, possibly klustering adjacent dirty pages. 
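 * ("Klustering" means pvn_write_kluster() below gathers the other
 * dirty pages that fall within the same block so that they can be
 * pushed to the server in a single write.)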
41157c478bd9Sstevel@tonic-gate */ 41167c478bd9Sstevel@tonic-gate int 41177c478bd9Sstevel@tonic-gate nfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp, 41187c478bd9Sstevel@tonic-gate int flags, cred_t *cr) 41197c478bd9Sstevel@tonic-gate { 41207c478bd9Sstevel@tonic-gate u_offset_t io_off; 41217c478bd9Sstevel@tonic-gate u_offset_t lbn_off; 41227c478bd9Sstevel@tonic-gate u_offset_t lbn; 41237c478bd9Sstevel@tonic-gate size_t io_len; 41247c478bd9Sstevel@tonic-gate uint_t bsize; 41257c478bd9Sstevel@tonic-gate int error; 41267c478bd9Sstevel@tonic-gate rnode_t *rp; 41277c478bd9Sstevel@tonic-gate 41287c478bd9Sstevel@tonic-gate ASSERT(!vn_is_readonly(vp)); 41297c478bd9Sstevel@tonic-gate ASSERT(pp != NULL); 41307c478bd9Sstevel@tonic-gate ASSERT(cr != NULL); 4131108322fbScarlsonj ASSERT((flags & B_ASYNC) || nfs_zone() == VTOMI(vp)->mi_zone); 41327c478bd9Sstevel@tonic-gate 41337c478bd9Sstevel@tonic-gate rp = VTOR(vp); 41347c478bd9Sstevel@tonic-gate ASSERT(rp->r_count > 0); 41357c478bd9Sstevel@tonic-gate 41367c478bd9Sstevel@tonic-gate ASSERT(pp->p_offset <= MAXOFF32_T); 41377c478bd9Sstevel@tonic-gate 41387c478bd9Sstevel@tonic-gate bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE); 41397c478bd9Sstevel@tonic-gate lbn = pp->p_offset / bsize; 41407c478bd9Sstevel@tonic-gate lbn_off = lbn * bsize; 41417c478bd9Sstevel@tonic-gate 41427c478bd9Sstevel@tonic-gate /* 41437c478bd9Sstevel@tonic-gate * Find a kluster that fits in one block, or in 41447c478bd9Sstevel@tonic-gate * one page if pages are bigger than blocks. If 41457c478bd9Sstevel@tonic-gate * there is less file space allocated than a whole 41467c478bd9Sstevel@tonic-gate * page, we'll shorten the i/o request below. 41477c478bd9Sstevel@tonic-gate */ 41487c478bd9Sstevel@tonic-gate pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off, 41497c478bd9Sstevel@tonic-gate roundup(bsize, PAGESIZE), flags); 41507c478bd9Sstevel@tonic-gate 41517c478bd9Sstevel@tonic-gate /* 41527c478bd9Sstevel@tonic-gate * pvn_write_kluster shouldn't have returned a page with offset 41537c478bd9Sstevel@tonic-gate * behind the original page we were given. Verify that. 41547c478bd9Sstevel@tonic-gate */ 41557c478bd9Sstevel@tonic-gate ASSERT((pp->p_offset / bsize) >= lbn); 41567c478bd9Sstevel@tonic-gate 41577c478bd9Sstevel@tonic-gate /* 41587c478bd9Sstevel@tonic-gate * Now pp will have the list of kept dirty pages marked for 41597c478bd9Sstevel@tonic-gate * write back. It will also handle invalidation and freeing 41607c478bd9Sstevel@tonic-gate * of pages that are not dirty. Check for page length rounding 41617c478bd9Sstevel@tonic-gate * problems. 41627c478bd9Sstevel@tonic-gate */ 41637c478bd9Sstevel@tonic-gate if (io_off + io_len > lbn_off + bsize) { 41647c478bd9Sstevel@tonic-gate ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE); 41657c478bd9Sstevel@tonic-gate io_len = lbn_off + bsize - io_off; 41667c478bd9Sstevel@tonic-gate } 41677c478bd9Sstevel@tonic-gate /* 41687c478bd9Sstevel@tonic-gate * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a 41697c478bd9Sstevel@tonic-gate * consistent value of r_size. RMODINPROGRESS is set in writerp(). 41707c478bd9Sstevel@tonic-gate * When RMODINPROGRESS is set it indicates that a uiomove() is in 41717c478bd9Sstevel@tonic-gate * progress and the r_size has not been made consistent with the 41727c478bd9Sstevel@tonic-gate * new size of the file. When the uiomove() completes the r_size is 41737c478bd9Sstevel@tonic-gate * updated and the RMODINPROGRESS flag is cleared. 
41747c478bd9Sstevel@tonic-gate * 41757c478bd9Sstevel@tonic-gate * The RMODINPROGRESS flag makes sure that nfs(3)_bio() sees a 41767c478bd9Sstevel@tonic-gate * consistent value of r_size. Without this handshaking, it is 41777c478bd9Sstevel@tonic-gate * possible that nfs(3)_bio() picks up the old value of r_size 41787c478bd9Sstevel@tonic-gate * before the uiomove() in writerp() completes. This will result 41797c478bd9Sstevel@tonic-gate * in the write through nfs(3)_bio() being dropped. 41807c478bd9Sstevel@tonic-gate * 41817c478bd9Sstevel@tonic-gate * More precisely, there is a window between the time the uiomove() 41827c478bd9Sstevel@tonic-gate * completes and the time the r_size is updated. If a VOP_PUTPAGE() 41837c478bd9Sstevel@tonic-gate * operation intervenes in this window, the page will be picked up, 41847c478bd9Sstevel@tonic-gate * because it is dirty (it will be unlocked, unless it was 41857c478bd9Sstevel@tonic-gate * pagecreate'd). When the page is picked up as dirty, the dirty 41867c478bd9Sstevel@tonic-gate * bit is reset (pvn_getdirty()). In nfs(3)write(), r_size is 41877c478bd9Sstevel@tonic-gate * checked. This will still be the old size. Therefore the page will 41887c478bd9Sstevel@tonic-gate * not be written out. When segmap_release() calls VOP_PUTPAGE(), 41897c478bd9Sstevel@tonic-gate * the page will be found to be clean and the write will be dropped. 41907c478bd9Sstevel@tonic-gate */ 41917c478bd9Sstevel@tonic-gate if (rp->r_flags & RMODINPROGRESS) { 41927c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 41937c478bd9Sstevel@tonic-gate if ((rp->r_flags & RMODINPROGRESS) && 41947c478bd9Sstevel@tonic-gate rp->r_modaddr + MAXBSIZE > io_off && 41957c478bd9Sstevel@tonic-gate rp->r_modaddr < io_off + io_len) { 41967c478bd9Sstevel@tonic-gate page_t *plist; 41977c478bd9Sstevel@tonic-gate /* 41987c478bd9Sstevel@tonic-gate * A write is in progress for this region of the file. 41997c478bd9Sstevel@tonic-gate * If we did not detect RMODINPROGRESS here then this 42007c478bd9Sstevel@tonic-gate * path through nfs_putapage() would eventually go to 42017c478bd9Sstevel@tonic-gate * nfs(3)_bio() and may not write out all of the data 42027c478bd9Sstevel@tonic-gate * in the pages. We end up losing data. So we decide 42037c478bd9Sstevel@tonic-gate * to set the modified bit on each page in the page 42047c478bd9Sstevel@tonic-gate * list and mark the rnode with RDIRTY. This write 42057c478bd9Sstevel@tonic-gate * will be restarted at some later time. 
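 * (Concretely, the loop below re-marks each klustered page as modified
 * with hat_setmod(), unlocks it, sets RDIRTY on the rnode, and returns
 * zero without writing anything.)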
42067c478bd9Sstevel@tonic-gate */ 42077c478bd9Sstevel@tonic-gate plist = pp; 42087c478bd9Sstevel@tonic-gate while (plist != NULL) { 42097c478bd9Sstevel@tonic-gate pp = plist; 42107c478bd9Sstevel@tonic-gate page_sub(&plist, pp); 42117c478bd9Sstevel@tonic-gate hat_setmod(pp); 42127c478bd9Sstevel@tonic-gate page_io_unlock(pp); 42137c478bd9Sstevel@tonic-gate page_unlock(pp); 42147c478bd9Sstevel@tonic-gate } 42157c478bd9Sstevel@tonic-gate rp->r_flags |= RDIRTY; 42167c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 42177c478bd9Sstevel@tonic-gate if (offp) 42187c478bd9Sstevel@tonic-gate *offp = io_off; 42197c478bd9Sstevel@tonic-gate if (lenp) 42207c478bd9Sstevel@tonic-gate *lenp = io_len; 42217c478bd9Sstevel@tonic-gate return (0); 42227c478bd9Sstevel@tonic-gate } 42237c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 42247c478bd9Sstevel@tonic-gate } 42257c478bd9Sstevel@tonic-gate 42267c478bd9Sstevel@tonic-gate if (flags & B_ASYNC) { 42277c478bd9Sstevel@tonic-gate error = nfs_async_putapage(vp, pp, io_off, io_len, flags, cr, 42287c478bd9Sstevel@tonic-gate nfs_sync_putapage); 42297c478bd9Sstevel@tonic-gate } else 42307c478bd9Sstevel@tonic-gate error = nfs_sync_putapage(vp, pp, io_off, io_len, flags, cr); 42317c478bd9Sstevel@tonic-gate 42327c478bd9Sstevel@tonic-gate if (offp) 42337c478bd9Sstevel@tonic-gate *offp = io_off; 42347c478bd9Sstevel@tonic-gate if (lenp) 42357c478bd9Sstevel@tonic-gate *lenp = io_len; 42367c478bd9Sstevel@tonic-gate return (error); 42377c478bd9Sstevel@tonic-gate } 42387c478bd9Sstevel@tonic-gate 42397c478bd9Sstevel@tonic-gate static int 42407c478bd9Sstevel@tonic-gate nfs_sync_putapage(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len, 42417c478bd9Sstevel@tonic-gate int flags, cred_t *cr) 42427c478bd9Sstevel@tonic-gate { 42437c478bd9Sstevel@tonic-gate int error; 42447c478bd9Sstevel@tonic-gate rnode_t *rp; 42457c478bd9Sstevel@tonic-gate 42467c478bd9Sstevel@tonic-gate flags |= B_WRITE; 42477c478bd9Sstevel@tonic-gate 4248108322fbScarlsonj ASSERT(nfs_zone() == VTOMI(vp)->mi_zone); 42497c478bd9Sstevel@tonic-gate error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr); 42507c478bd9Sstevel@tonic-gate 42517c478bd9Sstevel@tonic-gate rp = VTOR(vp); 42527c478bd9Sstevel@tonic-gate 42537c478bd9Sstevel@tonic-gate if ((error == ENOSPC || error == EDQUOT || error == EACCES) && 42547c478bd9Sstevel@tonic-gate (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) { 42557c478bd9Sstevel@tonic-gate if (!(rp->r_flags & ROUTOFSPACE)) { 42567c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 42577c478bd9Sstevel@tonic-gate rp->r_flags |= ROUTOFSPACE; 42587c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 42597c478bd9Sstevel@tonic-gate } 42607c478bd9Sstevel@tonic-gate flags |= B_ERROR; 42617c478bd9Sstevel@tonic-gate pvn_write_done(pp, flags); 42627c478bd9Sstevel@tonic-gate /* 42637c478bd9Sstevel@tonic-gate * If this was not an async thread, then try again to 42647c478bd9Sstevel@tonic-gate * write out the pages, but this time, also destroy 42657c478bd9Sstevel@tonic-gate * them whether or not the write is successful. This 42667c478bd9Sstevel@tonic-gate * will prevent memory from filling up with these 42677c478bd9Sstevel@tonic-gate * pages and destroying them is the only alternative 42687c478bd9Sstevel@tonic-gate * if they can't be written out. 
42697c478bd9Sstevel@tonic-gate * 42707c478bd9Sstevel@tonic-gate * Don't do this if this is an async thread because 42717c478bd9Sstevel@tonic-gate * when the pages are unlocked in pvn_write_done, 42727c478bd9Sstevel@tonic-gate * some other thread could have come along, locked 42737c478bd9Sstevel@tonic-gate * them, and queued for an async thread. It would be 42747c478bd9Sstevel@tonic-gate * possible for all of the async threads to be tied 42757c478bd9Sstevel@tonic-gate * up waiting to lock the pages again and they would 42767c478bd9Sstevel@tonic-gate * all already be locked and waiting for an async 42777c478bd9Sstevel@tonic-gate * thread to handle them. Deadlock. 42787c478bd9Sstevel@tonic-gate */ 42797c478bd9Sstevel@tonic-gate if (!(flags & B_ASYNC)) { 42807c478bd9Sstevel@tonic-gate error = nfs_putpage(vp, io_off, io_len, 4281da6c28aaSamw B_INVAL | B_FORCE, cr, NULL); 42827c478bd9Sstevel@tonic-gate } 42837c478bd9Sstevel@tonic-gate } else { 42847c478bd9Sstevel@tonic-gate if (error) 42857c478bd9Sstevel@tonic-gate flags |= B_ERROR; 42867c478bd9Sstevel@tonic-gate else if (rp->r_flags & ROUTOFSPACE) { 42877c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 42887c478bd9Sstevel@tonic-gate rp->r_flags &= ~ROUTOFSPACE; 42897c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 42907c478bd9Sstevel@tonic-gate } 42917c478bd9Sstevel@tonic-gate pvn_write_done(pp, flags); 42927c478bd9Sstevel@tonic-gate } 42937c478bd9Sstevel@tonic-gate 42947c478bd9Sstevel@tonic-gate return (error); 42957c478bd9Sstevel@tonic-gate } 42967c478bd9Sstevel@tonic-gate 4297da6c28aaSamw /* ARGSUSED */ 42987c478bd9Sstevel@tonic-gate static int 42997c478bd9Sstevel@tonic-gate nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, 4300da6c28aaSamw size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, 4301da6c28aaSamw caller_context_t *ct) 43027c478bd9Sstevel@tonic-gate { 43037c478bd9Sstevel@tonic-gate struct segvn_crargs vn_a; 43047c478bd9Sstevel@tonic-gate int error; 43057c478bd9Sstevel@tonic-gate rnode_t *rp; 43067c478bd9Sstevel@tonic-gate struct vattr va; 43077c478bd9Sstevel@tonic-gate 4308108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) 43097c478bd9Sstevel@tonic-gate return (EIO); 43107c478bd9Sstevel@tonic-gate 43117c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP) 43127c478bd9Sstevel@tonic-gate return (ENOSYS); 43137c478bd9Sstevel@tonic-gate 43147c478bd9Sstevel@tonic-gate if (off > MAXOFF32_T) 43157c478bd9Sstevel@tonic-gate return (EFBIG); 43167c478bd9Sstevel@tonic-gate 43177c478bd9Sstevel@tonic-gate if (off < 0 || off + len < 0) 43187c478bd9Sstevel@tonic-gate return (ENXIO); 43197c478bd9Sstevel@tonic-gate 43207c478bd9Sstevel@tonic-gate if (vp->v_type != VREG) 43217c478bd9Sstevel@tonic-gate return (ENODEV); 43227c478bd9Sstevel@tonic-gate 43237c478bd9Sstevel@tonic-gate /* 43247c478bd9Sstevel@tonic-gate * If there is cached data and if close-to-open consistency 43257c478bd9Sstevel@tonic-gate * checking is not turned off and if the file system is not 43267c478bd9Sstevel@tonic-gate * mounted readonly, then force an over the wire getattr. 43277c478bd9Sstevel@tonic-gate * Otherwise, just invoke nfsgetattr to get a copy of the 43287c478bd9Sstevel@tonic-gate * attributes. The attribute cache will be used unless it 43297c478bd9Sstevel@tonic-gate * is timed out and if it is, then an over the wire getattr 43307c478bd9Sstevel@tonic-gate * will be issued. 
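 * (The fresh attributes let a server-side change be noticed, and the
 * stale cached pages be purged, before any of them are mapped.)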
43317c478bd9Sstevel@tonic-gate */ 43327c478bd9Sstevel@tonic-gate va.va_mask = AT_ALL; 43337c478bd9Sstevel@tonic-gate if (vn_has_cached_data(vp) && 43347c478bd9Sstevel@tonic-gate !(VTOMI(vp)->mi_flags & MI_NOCTO) && !vn_is_readonly(vp)) 43357c478bd9Sstevel@tonic-gate error = nfs_getattr_otw(vp, &va, cr); 43367c478bd9Sstevel@tonic-gate else 43377c478bd9Sstevel@tonic-gate error = nfsgetattr(vp, &va, cr); 43387c478bd9Sstevel@tonic-gate if (error) 43397c478bd9Sstevel@tonic-gate return (error); 43407c478bd9Sstevel@tonic-gate 43417c478bd9Sstevel@tonic-gate /* 43427c478bd9Sstevel@tonic-gate * Check to see if the vnode is currently marked as not cachable. 43437c478bd9Sstevel@tonic-gate * This means portions of the file are locked (through VOP_FRLOCK). 43447c478bd9Sstevel@tonic-gate * In this case the map request must be refused. We use 43457c478bd9Sstevel@tonic-gate * rp->r_lkserlock to avoid a race with concurrent lock requests. 43467c478bd9Sstevel@tonic-gate */ 43477c478bd9Sstevel@tonic-gate rp = VTOR(vp); 43487c478bd9Sstevel@tonic-gate 43491384c586SDeepak Honnalli /* 43501384c586SDeepak Honnalli * Atomically increment r_inmap after acquiring r_rwlock. The 43511384c586SDeepak Honnalli * idea here is to acquire r_rwlock to block read/write and 43521384c586SDeepak Honnalli * not to protect r_inmap. r_inmap will inform nfs_read/write() 43531384c586SDeepak Honnalli * that we are in nfs_map(). Now, r_rwlock is acquired in order 43541384c586SDeepak Honnalli * and we can prevent the deadlock that would have occurred 43551384c586SDeepak Honnalli * when nfs_addmap() would have acquired it out of order. 43561384c586SDeepak Honnalli * 43571384c586SDeepak Honnalli * Since we are not protecting r_inmap by any lock, we do not 43581384c586SDeepak Honnalli * hold any lock when we decrement it. We atomically decrement 43591384c586SDeepak Honnalli * r_inmap after we release r_lkserlock. 43601384c586SDeepak Honnalli */ 43611384c586SDeepak Honnalli 43621384c586SDeepak Honnalli if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp))) 43631384c586SDeepak Honnalli return (EINTR); 43641a5e258fSJosef 'Jeff' Sipek atomic_inc_uint(&rp->r_inmap); 43651384c586SDeepak Honnalli nfs_rw_exit(&rp->r_rwlock); 43661384c586SDeepak Honnalli 43671384c586SDeepak Honnalli if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) { 43681a5e258fSJosef 'Jeff' Sipek atomic_dec_uint(&rp->r_inmap); 43691384c586SDeepak Honnalli return (EINTR); 43701384c586SDeepak Honnalli } 43717c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOCACHE) { 43727c478bd9Sstevel@tonic-gate error = EAGAIN; 43737c478bd9Sstevel@tonic-gate goto done; 43747c478bd9Sstevel@tonic-gate } 43757c478bd9Sstevel@tonic-gate 43767c478bd9Sstevel@tonic-gate /* 43777c478bd9Sstevel@tonic-gate * Don't allow concurrent locks and mapping if mandatory locking is 43787c478bd9Sstevel@tonic-gate * enabled. 
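 * (MANDLOCK() is the usual System V test: a regular file whose
 * set-gid bit is set while its group-execute bit is clear.)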
43797c478bd9Sstevel@tonic-gate */ 43807c478bd9Sstevel@tonic-gate if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) && 43817c478bd9Sstevel@tonic-gate MANDLOCK(vp, va.va_mode)) { 43827c478bd9Sstevel@tonic-gate error = EAGAIN; 43837c478bd9Sstevel@tonic-gate goto done; 43847c478bd9Sstevel@tonic-gate } 43857c478bd9Sstevel@tonic-gate 43867c478bd9Sstevel@tonic-gate as_rangelock(as); 438760946fe0Smec error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags); 438860946fe0Smec if (error != 0) { 43897c478bd9Sstevel@tonic-gate as_rangeunlock(as); 43907c478bd9Sstevel@tonic-gate goto done; 43917c478bd9Sstevel@tonic-gate } 43927c478bd9Sstevel@tonic-gate 43937c478bd9Sstevel@tonic-gate vn_a.vp = vp; 43947c478bd9Sstevel@tonic-gate vn_a.offset = off; 43957c478bd9Sstevel@tonic-gate vn_a.type = (flags & MAP_TYPE); 43967c478bd9Sstevel@tonic-gate vn_a.prot = (uchar_t)prot; 43977c478bd9Sstevel@tonic-gate vn_a.maxprot = (uchar_t)maxprot; 43987c478bd9Sstevel@tonic-gate vn_a.flags = (flags & ~MAP_TYPE); 43997c478bd9Sstevel@tonic-gate vn_a.cred = cr; 44007c478bd9Sstevel@tonic-gate vn_a.amp = NULL; 44017c478bd9Sstevel@tonic-gate vn_a.szc = 0; 44027c478bd9Sstevel@tonic-gate vn_a.lgrp_mem_policy_flags = 0; 44037c478bd9Sstevel@tonic-gate 44047c478bd9Sstevel@tonic-gate error = as_map(as, *addrp, len, segvn_create, &vn_a); 44057c478bd9Sstevel@tonic-gate as_rangeunlock(as); 44067c478bd9Sstevel@tonic-gate 44077c478bd9Sstevel@tonic-gate done: 44087c478bd9Sstevel@tonic-gate nfs_rw_exit(&rp->r_lkserlock); 44091a5e258fSJosef 'Jeff' Sipek atomic_dec_uint(&rp->r_inmap); 44107c478bd9Sstevel@tonic-gate return (error); 44117c478bd9Sstevel@tonic-gate } 44127c478bd9Sstevel@tonic-gate 44137c478bd9Sstevel@tonic-gate /* ARGSUSED */ 44147c478bd9Sstevel@tonic-gate static int 44157c478bd9Sstevel@tonic-gate nfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4416da6c28aaSamw size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, 4417da6c28aaSamw caller_context_t *ct) 44187c478bd9Sstevel@tonic-gate { 44197c478bd9Sstevel@tonic-gate rnode_t *rp; 44207c478bd9Sstevel@tonic-gate 44217c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP) 44227c478bd9Sstevel@tonic-gate return (ENOSYS); 4423108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) 44247c478bd9Sstevel@tonic-gate return (EIO); 44257c478bd9Sstevel@tonic-gate 44267c478bd9Sstevel@tonic-gate rp = VTOR(vp); 44277c478bd9Sstevel@tonic-gate atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len)); 44287c478bd9Sstevel@tonic-gate 44297c478bd9Sstevel@tonic-gate return (0); 44307c478bd9Sstevel@tonic-gate } 44317c478bd9Sstevel@tonic-gate 4432da6c28aaSamw /* ARGSUSED */ 44337c478bd9Sstevel@tonic-gate static int 4434da6c28aaSamw nfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset, 4435da6c28aaSamw struct flk_callback *flk_cbp, cred_t *cr, caller_context_t *ct) 44367c478bd9Sstevel@tonic-gate { 44377c478bd9Sstevel@tonic-gate netobj lm_fh; 44387c478bd9Sstevel@tonic-gate int rc; 44397c478bd9Sstevel@tonic-gate u_offset_t start, end; 44407c478bd9Sstevel@tonic-gate rnode_t *rp; 44417c478bd9Sstevel@tonic-gate int error = 0, intr = INTR(vp); 44427c478bd9Sstevel@tonic-gate 44437c478bd9Sstevel@tonic-gate /* check for valid cmd parameter */ 44447c478bd9Sstevel@tonic-gate if (cmd != F_GETLK && cmd != F_SETLK && cmd != F_SETLKW) 44457c478bd9Sstevel@tonic-gate return (EINVAL); 4446108322fbScarlsonj if (nfs_zone() != VTOMI(vp)->mi_zone) 44477c478bd9Sstevel@tonic-gate return (EIO); 44487c478bd9Sstevel@tonic-gate 44497c478bd9Sstevel@tonic-gate /* Verify l_type. 
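 * F_RDLCK and F_WRLCK (other than for F_GETLK) require the file to be
 * open for reading or writing respectively; F_UNLCK clears intr so an
 * unlock cannot be interrupted part way and leave an orphaned lock on
 * the server.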
	switch (bfp->l_type) {
	case F_RDLCK:
		if (cmd != F_GETLK && !(flag & FREAD))
			return (EBADF);
		break;
	case F_WRLCK:
		if (cmd != F_GETLK && !(flag & FWRITE))
			return (EBADF);
		break;
	case F_UNLCK:
		intr = 0;
		break;

	default:
		return (EINVAL);
	}

	/* check the validity of the lock range */
	if (rc = flk_convert_lock_data(vp, bfp, &start, &end, offset))
		return (rc);
	if (rc = flk_check_lock_data(start, end, MAXOFF32_T))
		return (rc);

	/*
	 * If the filesystem is mounted using local locking, pass the
	 * request off to the local locking code.
	 */
	if (VTOMI(vp)->mi_flags & MI_LLOCK) {
		if (offset > MAXOFF32_T)
			return (EFBIG);
		if (cmd == F_SETLK || cmd == F_SETLKW) {
			/*
			 * For complete safety, we should be holding
			 * r_lkserlock.  However, we can't call
			 * lm_safelock and then fs_frlock while
			 * holding r_lkserlock, so just invoke
			 * lm_safelock and expect that this will
			 * catch enough of the cases.
			 */
			if (!lm_safelock(vp, bfp, cr))
				return (EAGAIN);
		}
		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
	}

	rp = VTOR(vp);

	/*
	 * Check whether the given lock request can proceed, given the
	 * current file mappings.
	 */
	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_WRITER, intr))
		return (EINTR);
	if (cmd == F_SETLK || cmd == F_SETLKW) {
		if (!lm_safelock(vp, bfp, cr)) {
			rc = EAGAIN;
			goto done;
		}
	}

	/*
	 * Flush the cache after waiting for async I/O to finish.  For new
	 * locks, this is so that the process gets the latest bits from the
	 * server.  For unlocks, this is so that other clients see the
	 * latest bits once the file has been unlocked.
	 * If currently dirty pages can't be flushed, then don't allow a
	 * lock to be set.  But allow unlocks to succeed, to avoid having
	 * orphan locks on the server.
	 */
	if (cmd != F_GETLK) {
		mutex_enter(&rp->r_statelock);
		while (rp->r_count > 0) {
			if (intr) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (cv_wait_sig(&rp->r_cv, &rp->r_statelock)
				    == 0) {
					if (lwp != NULL)
						lwp->lwp_nostop--;
					rc = EINTR;
					break;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&rp->r_cv, &rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
		if (rc != 0)
			goto done;
		error = nfs_putpage(vp, (offset_t)0, 0, B_INVAL, cr, ct);
		if (error) {
			if (error == ENOSPC || error == EDQUOT) {
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
			if (bfp->l_type != F_UNLCK) {
				rc = ENOLCK;
				goto done;
			}
		}
	}

	lm_fh.n_len = sizeof (fhandle_t);
	lm_fh.n_bytes = (char *)VTOFH(vp);

	/*
	 * Call the lock manager to do the real work of contacting
	 * the server and obtaining the lock.
	 */
	rc = lm_frlock(vp, cmd, bfp, flag, offset, cr, &lm_fh, flk_cbp);

	if (rc == 0)
		nfs_lockcompletion(vp, cmd);

done:
	nfs_rw_exit(&rp->r_lkserlock);
	return (rc);
}

/*
 * Free storage space associated with the specified vnode.  The portion
 * to be freed is specified by bfp->l_start and bfp->l_len (already
 * normalized to a "whence" of 0).
 *
 * This is an experimental facility whose continued existence is not
 * guaranteed.  Currently, we only support the special case
 * of l_len == 0, meaning free to end of file.
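 *
 * A typical way to reach this code from user level is a truncate, e.g.
 * (illustrative only):
 *
 *	(void) ftruncate(fd, 0);
 *
 * which arrives here as F_FREESP with l_start set to the new size and
 * l_len == 0, and is carried out below as a setattr of the file size.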
 */
/* ARGSUSED */
static int
nfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
	int error;

	ASSERT(vp->v_type == VREG);
	if (cmd != F_FREESP)
		return (EINVAL);

	if (offset > MAXOFF32_T)
		return (EFBIG);

	if ((bfp->l_start > MAXOFF32_T) || (bfp->l_end > MAXOFF32_T) ||
	    (bfp->l_len > MAXOFF32_T))
		return (EFBIG);

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	error = convoff(vp, bfp, 0, offset);
	if (!error) {
		ASSERT(bfp->l_start >= 0);
		if (bfp->l_len == 0) {
			struct vattr va;

			/*
			 * ftruncate should not change the ctime and
			 * mtime if we truncate the file to its
			 * previous size.
			 */
			va.va_mask = AT_SIZE;
			error = nfsgetattr(vp, &va, cr);
			if (error || va.va_size == bfp->l_start)
				return (error);
			va.va_mask = AT_SIZE;
			va.va_size = bfp->l_start;
			error = nfssetattr(vp, &va, 0, cr);

			if (error == 0 && bfp->l_start == 0)
				vnevent_truncate(vp, ct);
		} else
			error = EINVAL;
	}

	return (error);
}

/* ARGSUSED */
static int
nfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
{

	return (EINVAL);
}

/*
 * Set up and add an address space callback to do the work of the delmap call.
 * The callback will (and must be) deleted in the actual callback function.
 *
 * This is done to avoid holding the address space's a_lock for a long
 * period of time (e.g. if the NFS server is down).  Callbacks will be
 * executed in the address space code while the a_lock is not held.
 * Holding the address space's a_lock causes things such as ps and fork
 * to hang because they are trying to acquire this lock as well.
 */
/* ARGSUSED */
static int
nfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	int caller_found;
	int error;
	rnode_t *rp;
	nfs_delmap_args_t *dmapp;
	nfs_delmapcall_t *delmap_call;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);
	/*
	 * A process may not change zones if it has NFS pages mmap'ed
	 * in, so we can't legitimately get here from the wrong zone.
	 */
	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);

	rp = VTOR(vp);

	/*
	 * The way that the address space of this process deletes its mapping
	 * of this file is via the following call chains:
	 * - as_free()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
	 * - as_unmap()->SEGOP_UNMAP()/segvn_unmap()->VOP_DELMAP()/nfs_delmap()
	 *
	 * With the use of address space callbacks we are allowed to drop the
	 * address space lock, a_lock, while executing the NFS operations that
	 * need to go over the wire.  Returning EAGAIN to the caller of this
	 * function is what drives the execution of the callback that we add
	 * below.  The callback will be executed by the address space code
	 * after dropping the a_lock.  When the callback is finished, since
	 * we dropped the a_lock, it must be re-acquired and segvn_unmap()
	 * is called again on the same segment to finish the rest of the work
	 * that needs to happen during unmapping.
	 *
	 * This action of calling back into the segment driver causes
	 * nfs_delmap() to get called again, but since the callback was
	 * already executed at this point, it already did the work and there
	 * is nothing left for us to do.
	 *
	 * To Summarize:
	 * - The first time nfs_delmap is called by the current thread is when
	 *   we add the caller associated with this delmap to the delmap caller
	 *   list, add the callback, and return EAGAIN.
	 * - The second time nfs_delmap is called in this call chain, we
	 *   find this caller in the delmap caller list, realize there is
	 *   no more work to do, remove the caller from the list, and
	 *   return the error that was set during the callback execution.
	 */
	caller_found = nfs_find_and_delete_delmapcall(rp, &error);
	if (caller_found) {
		/*
		 * 'error' is from the actual delmap operations.  To avoid
		 * hangs, we need to handle the return of EAGAIN differently
		 * since this is what drives the callback execution.
		 * In this case, we don't want to return EAGAIN and do the
		 * callback execution because there are none to execute.
		 */
		if (error == EAGAIN)
			return (0);
		else
			return (error);
	}

	/* current caller was not in the list */
	delmap_call = nfs_init_delmapcall();

	mutex_enter(&rp->r_statelock);
	list_insert_tail(&rp->r_indelmap, delmap_call);
	mutex_exit(&rp->r_statelock);

	dmapp = kmem_alloc(sizeof (nfs_delmap_args_t), KM_SLEEP);

	dmapp->vp = vp;
	dmapp->off = off;
	dmapp->addr = addr;
	dmapp->len = len;
	dmapp->prot = prot;
	dmapp->maxprot = maxprot;
	dmapp->flags = flags;
	dmapp->cr = cr;
	dmapp->caller = delmap_call;

	error = as_add_callback(as, nfs_delmap_callback, dmapp,
	    AS_UNMAP_EVENT, addr, len, KM_SLEEP);

	return (error ? error : EAGAIN);
}

/*
 * Remove some pages from an mmap'd vnode.  Just update the
 * count of pages.  If doing close-to-open, then flush all
 * of the pages associated with this file.  Otherwise, start
 * an asynchronous page flush to write out any dirty pages.
 * This will also associate a credential with the rnode which
 * can be used to write the pages.
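 *
 * This callback was registered by nfs_delmap() via as_add_callback() for
 * the AS_UNMAP_EVENT, and it runs after the address space code has
 * dropped a_lock, so it is safe to go over the wire from here.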
 */
/* ARGSUSED */
static void
nfs_delmap_callback(struct as *as, void *arg, uint_t event)
{
	int error;
	rnode_t *rp;
	mntinfo_t *mi;
	nfs_delmap_args_t *dmapp = (nfs_delmap_args_t *)arg;

	rp = VTOR(dmapp->vp);
	mi = VTOMI(dmapp->vp);

	atomic_add_long((ulong_t *)&rp->r_mapcnt, -btopr(dmapp->len));
	ASSERT(rp->r_mapcnt >= 0);

	/*
	 * Initiate a page flush if there are pages, the file system
	 * was not mounted readonly, the segment was mapped shared, and
	 * the pages themselves were writeable.
	 */
	if (vn_has_cached_data(dmapp->vp) && !vn_is_readonly(dmapp->vp) &&
	    dmapp->flags == MAP_SHARED && (dmapp->maxprot & PROT_WRITE)) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= RDIRTY;
		mutex_exit(&rp->r_statelock);
		/*
		 * If this is a cross-zone access a sync putpage won't work, so
		 * the best we can do is try an async putpage.  That seems
		 * better than something more draconian such as discarding the
		 * dirty pages.
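		 * The -nocto case is treated the same way: with no
		 * close-to-open guarantee to preserve, an asynchronous
		 * flush is sufficient.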
		 */
		if ((mi->mi_flags & MI_NOCTO) ||
		    nfs_zone() != mi->mi_zone)
			error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
			    B_ASYNC, dmapp->cr, NULL);
		else
			error = nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
			    0, dmapp->cr, NULL);
		if (!error) {
			mutex_enter(&rp->r_statelock);
			error = rp->r_error;
			rp->r_error = 0;
			mutex_exit(&rp->r_statelock);
		}
	} else
		error = 0;

	if ((rp->r_flags & RDIRECTIO) || (mi->mi_flags & MI_DIRECTIO))
		(void) nfs_putpage(dmapp->vp, dmapp->off, dmapp->len,
		    B_INVAL, dmapp->cr, NULL);

	dmapp->caller->error = error;
	(void) as_delete_callback(as, arg);
	kmem_free(dmapp, sizeof (nfs_delmap_args_t));
}

/* ARGSUSED */
static int
nfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	int error = 0;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	/*
	 * This looks a little weird because it's written in a general
	 * manner but we make little use of cases.  If cntl() ever gets
	 * widely used, the outer switch will make more sense.
	 */

	switch (cmd) {

	/*
	 * Large file spec: the answer to this new query must be based on
	 * a hardcoded constant determined by the protocol.
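	 * This is the NFS Version 2 client, where file offsets on the
	 * wire are 32 bits, hence the value of 32 reported below.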
	 */
	case _PC_FILESIZEBITS:
		*valp = 32;
		return (0);

	case _PC_LINK_MAX:
	case _PC_NAME_MAX:
	case _PC_PATH_MAX:
	case _PC_SYMLINK_MAX:
	case _PC_CHOWN_RESTRICTED:
	case _PC_NO_TRUNC: {
		mntinfo_t *mi;
		struct pathcnf *pc;

		if ((mi = VTOMI(vp)) == NULL || (pc = mi->mi_pathconf) == NULL)
			return (EINVAL);
		error = _PC_ISSET(cmd, pc->pc_mask);	/* error or bool */
		switch (cmd) {
		case _PC_LINK_MAX:
			*valp = pc->pc_link_max;
			break;
		case _PC_NAME_MAX:
			*valp = pc->pc_name_max;
			break;
		case _PC_PATH_MAX:
		case _PC_SYMLINK_MAX:
			*valp = pc->pc_path_max;
			break;
		case _PC_CHOWN_RESTRICTED:
			/*
			 * if we got here, error is really a boolean which
			 * indicates whether cmd is set or not.
			 */
			*valp = error ? 1 : 0;	/* see above */
			error = 0;
			break;
		case _PC_NO_TRUNC:
			/*
			 * if we got here, error is really a boolean which
			 * indicates whether cmd is set or not.
			 */
			*valp = error ? 1 : 0;	/* see above */
			error = 0;
			break;
		}
		return (error ? EINVAL : 0);
	}

	case _PC_XATTR_EXISTS:
		*valp = 0;
		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
			vnode_t *avp;
			rnode_t *rp;
			mntinfo_t *mi = VTOMI(vp);

			if (!(mi->mi_flags & MI_EXTATTR))
				return (0);

			rp = VTOR(vp);
			if (nfs_rw_enter_sig(&rp->r_rwlock, RW_READER,
			    INTR(vp)))
				return (EINTR);

			error = nfslookup_dnlc(vp, XATTR_DIR_NAME, &avp, cr);
			if (error || avp == NULL)
				error = acl_getxattrdir2(vp, &avp, 0, cr, 0);

			nfs_rw_exit(&rp->r_rwlock);

			if (error == 0 && avp != NULL) {
				error = do_xattr_exists_check(avp, valp, cr);
				VN_RELE(avp);
			}
		}
		return (error ? EINVAL : 0);

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACLENT_ENABLED;
		return (0);

	default:
		return (EINVAL);
	}
}

/*
 * Called by an async thread to do synchronous pageio.  Do the I/O, wait
 * for it to complete, and clean up the page list when done.
 */
static int
nfs_sync_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr)
{
	int error;

	ASSERT(nfs_zone() == VTOMI(vp)->mi_zone);
	error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
	if (flags & B_READ)
		pvn_read_done(pp, (error ? B_ERROR : 0) | flags);
	else
		pvn_write_done(pp, (error ? B_ERROR : 0) | flags);
	return (error);
}

/* ARGSUSED */
static int
nfs_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr, caller_context_t *ct)
{
	int error;
	rnode_t *rp;

	if (pp == NULL)
		return (EINVAL);

	if (io_off > MAXOFF32_T)
		return (EFBIG);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);
	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);

	if (flags & B_ASYNC) {
		error = nfs_async_pageio(vp, pp, io_off, io_len, flags, cr,
		    nfs_sync_pageio);
	} else
		error = nfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
	mutex_enter(&rp->r_statelock);
	rp->r_count--;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);
	return (error);
}

/* ARGSUSED */
static int
nfs_setsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	mntinfo_t *mi;

	mi = VTOMI(vp);

	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (mi->mi_flags & MI_ACL) {
		error = acl_setacl2(vp, vsecattr, flag, cr);
		if (mi->mi_flags & MI_ACL)
			return (error);
	}

	return (ENOSYS);
}

/* ARGSUSED */
static int
nfs_getsecattr(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	mntinfo_t *mi;

	mi = VTOMI(vp);

	if (nfs_zone() != mi->mi_zone)
		return (EIO);
	if (mi->mi_flags & MI_ACL) {
		error = acl_getacl2(vp, vsecattr, flag, cr);
		if (mi->mi_flags & MI_ACL)
			return (error);
	}

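	/*
	 * Either the server does not support the NFS_ACL protocol, or its
	 * lack of support was just discovered and MI_ACL was cleared above;
	 * fall back to an ACL fabricated from the regular file attributes.
	 */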
	return (fs_fab_acl(vp, vsecattr, flag, cr, ct));
}

/* ARGSUSED */
static int
nfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	struct shrlock nshr;
	struct nfs_owner nfs_owner;
	netobj lm_fh;

	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	/*
	 * check for valid cmd parameter
	 */
	if (cmd != F_SHARE && cmd != F_UNSHARE && cmd != F_HASREMOTELOCKS)
		return (EINVAL);

	/*
	 * Check access permissions
	 */
	if (cmd == F_SHARE &&
	    (((shr->s_access & F_RDACC) && !(flag & FREAD)) ||
	    ((shr->s_access & F_WRACC) && !(flag & FWRITE))))
		return (EBADF);

	/*
	 * If the filesystem is mounted using local locking, pass the
	 * request off to the local share code.
	 */
	if (VTOMI(vp)->mi_flags & MI_LLOCK)
		return (fs_shrlock(vp, cmd, shr, flag, cr, ct));

	switch (cmd) {
	case F_SHARE:
	case F_UNSHARE:
		lm_fh.n_len = sizeof (fhandle_t);
		lm_fh.n_bytes = (char *)VTOFH(vp);

		/*
		 * If passed an owner that is too large to fit in an
		 * nfs_owner, it is likely a recursive call from the
		 * lock manager client, so pass it straight through.  If
		 * it is not an nfs_owner, simply return an error.
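		 * (An nfs_owner is recognized by the NFS_OWNER_MAGIC
		 * value in its magic field, checked below.)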
		 */
		if (shr->s_own_len > sizeof (nfs_owner.lowner)) {
			if (((struct nfs_owner *)shr->s_owner)->magic !=
			    NFS_OWNER_MAGIC)
				return (EINVAL);

			if (error = lm_shrlock(vp, cmd, shr, flag, &lm_fh)) {
				error = set_errno(error);
			}
			return (error);
		}
		/*
		 * A remote share reservation's owner is a combination of
		 * a magic number, the hostname, and the local owner.
		 */
		bzero(&nfs_owner, sizeof (nfs_owner));
		nfs_owner.magic = NFS_OWNER_MAGIC;
		(void) strncpy(nfs_owner.hname, uts_nodename(),
		    sizeof (nfs_owner.hname));
		bcopy(shr->s_owner, nfs_owner.lowner, shr->s_own_len);
		nshr.s_access = shr->s_access;
		nshr.s_deny = shr->s_deny;
		nshr.s_sysid = 0;
		nshr.s_pid = ttoproc(curthread)->p_pid;
		nshr.s_own_len = sizeof (nfs_owner);
		nshr.s_owner = (caddr_t)&nfs_owner;

		if (error = lm_shrlock(vp, cmd, &nshr, flag, &lm_fh)) {
			error = set_errno(error);
		}

		break;

	case F_HASREMOTELOCKS:
		/*
		 * NFS client can't store remote locks itself
		 */
		shr->s_access = 0;
		error = 0;
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}