/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/atomic.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/frame.h>
#include <sys/stack.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/policy.h>
#include <sys/ontrap.h>
#include <sys/vmsystm.h>
#include <sys/prsystm.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>
#include <vm/seg_spt.h>
#include <vm/seg_kmem.h>

extern struct seg_ops segdev_ops;	/* needs a header file */
extern struct seg_ops segspt_shmops;	/* needs a header file */
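
/*
 * page_valid() reports whether the page containing "addr" is backed by
 * real storage in segment "seg".  It returns 1 if the page may safely
 * be locked and mapped, and 0 for addresses that only appear to be
 * mapped: pages beyond the end of a mapped regular file, beyond the
 * real size of an ISM segment, /dev/null mappings, and MAP_NORESERVE
 * pages that have never materialized.
 */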
static int
page_valid(struct seg *seg, caddr_t addr)
{
        struct segvn_data *svd;
        vnode_t *vp;
        vattr_t vattr;

        /*
         * Fail if the page doesn't map to a page in the underlying
         * mapped file, if an underlying mapped file exists.
         */
        vattr.va_mask = AT_SIZE;
        if (seg->s_ops == &segvn_ops &&
            SEGOP_GETVP(seg, addr, &vp) == 0 &&
            vp != NULL && vp->v_type == VREG &&
            VOP_GETATTR(vp, &vattr, 0, CRED()) == 0) {
                u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
                u_offset_t offset = SEGOP_GETOFFSET(seg, addr);

                if (offset >= size)
                        return (0);
        }

        /*
         * Fail if this is an ISM shared segment and the address is
         * not within the real size of the spt segment that backs it.
         */
        if (seg->s_ops == &segspt_shmops &&
            addr >= seg->s_base + spt_realsize(seg))
                return (0);

        /*
         * Fail if the segment is mapped from /dev/null.
         * The key is that the mapping comes from segdev and the
         * type is neither MAP_SHARED nor MAP_PRIVATE.
         */
        if (seg->s_ops == &segdev_ops &&
            ((SEGOP_GETTYPE(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
                return (0);

        /*
         * Fail if the page is a MAP_NORESERVE page that has
         * not actually materialized.
         * We cheat by knowing that segvn is the only segment
         * driver that supports MAP_NORESERVE.
         */
        if (seg->s_ops == &segvn_ops &&
            (svd = (struct segvn_data *)seg->s_data) != NULL &&
            (svd->vp == NULL || svd->vp->v_type != VREG) &&
            (svd->flags & MAP_NORESERVE)) {
                /*
                 * Guilty knowledge here.  We know that
                 * segvn_incore returns more than just the
                 * low-order bit that indicates the page is
                 * actually in memory.  If any bits are set,
                 * then there is backing store for the page.
                 */
                char incore = 0;
                (void) SEGOP_INCORE(seg, addr, PAGESIZE, &incore);
                if (incore == 0)
                        return (0);
        }
        return (1);
}
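
/*
 * mapin() and mapout() establish and tear down a temporary kernel
 * mapping of a single user page.  Ordinary memory pages (those with a
 * page_t) take the fast ppmapin()/ppmapout() path; device pages are
 * instead loaded into a page allocated from the kernel heap, which
 * ppmapout() also knows how to unload and free.  The caller is
 * expected to have soft-locked the page beforehand, so a valid
 * translation is guaranteed to exist.
 */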

/*
 * Map address "addr" in address space "as" into a kernel virtual address.
 * The memory is guaranteed to be resident and locked down.
 */
static caddr_t
mapin(struct as *as, caddr_t addr, int writing)
{
        page_t *pp;
        caddr_t kaddr;
        pfn_t pfnum;

        /*
         * NB: Because of past mistakes, we have bits being returned
         * by getpfnum that are actually the page type bits of the pte.
         * When the object we are trying to map is a memory page with
         * a page structure everything is ok and we can use the optimal
         * method, ppmapin.  Otherwise, we have to do something special.
         */
        pfnum = hat_getpfnum(as->a_hat, addr);
        if (pf_is_memory(pfnum)) {
                pp = page_numtopp_nolock(pfnum);
                if (pp != NULL) {
                        ASSERT(PAGE_LOCKED(pp));
                        kaddr = ppmapin(pp, writing ?
                            (PROT_READ | PROT_WRITE) : PROT_READ,
                            (caddr_t)-1);
                        return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
                }
        }

        /*
         * Oh well, we didn't have a page struct for the object we were
         * trying to map in; ppmapin doesn't handle devices, but allocating a
         * heap address allows ppmapout to free virtual space when done.
         */
        kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

        hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
            writing ? (PROT_READ | PROT_WRITE) : PROT_READ, HAT_LOAD_LOCK);

        return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}

/*ARGSUSED*/
static void
mapout(struct as *as, caddr_t addr, caddr_t vaddr, int writing)
{
        vaddr = (caddr_t)(uintptr_t)((uintptr_t)vaddr & PAGEMASK);
        ppmapout(vaddr);
}

/*
 * Perform I/O to a given process.  This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
        caddr_t addr = (caddr_t)a;
        caddr_t page;
        caddr_t vaddr;
        struct seg *seg;
        int error = 0;
        int err = 0;
        uint_t prot;
        uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
        int protchanged;
        on_trap_data_t otd;
        int retrycnt;
        struct as *as = p->p_as;
        enum seg_rw rw;

        /*
         * Locate segment containing address of interest.
         */
        page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
        retrycnt = 0;
        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
        if ((seg = as_segat(as, page)) == NULL ||
            !page_valid(seg, page)) {
                AS_LOCK_EXIT(as, &as->a_lock);
                return (ENXIO);
        }
        SEGOP_GETPROT(seg, page, 0, &prot);
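
        /*
         * If the page lacks the permission this copy needs, grant it
         * temporarily; the original protections are restored once the
         * I/O completes.  SEGOP_SETPROT() may answer IE_RETRY, in which
         * case the segment must be looked up again.
         */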
        protchanged = 0;
        if ((prot & prot_rw) == 0) {
                protchanged = 1;
                err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);

                if (err == IE_RETRY) {
                        protchanged = 0;
                        ASSERT(retrycnt == 0);
                        retrycnt++;
                        goto retry;
                }

                if (err != 0) {
                        AS_LOCK_EXIT(as, &as->a_lock);
                        return (ENXIO);
                }
        }

        /*
         * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
         * sharing to avoid a copy on write of a softlocked page by another
         * thread.  But since we locked the address space as a writer no other
         * thread can cause a copy on write.  S_READ_NOCOW is passed as the
         * access type to tell segvn that it's ok not to do a copy-on-write
         * for this SOFTLOCK fault.
         */
        if (writing)
                rw = S_WRITE;
        else if (seg->s_ops == &segvn_ops)
                rw = S_READ_NOCOW;
        else
                rw = S_READ;

        if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
                if (protchanged)
                        (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
                AS_LOCK_EXIT(as, &as->a_lock);
                return (ENXIO);
        }
        CPU_STATS_ADD_K(vm, softlock, 1);

        /*
         * Make sure we're not trying to read or write off the end of the page.
         */
        ASSERT(len <= page + PAGESIZE - addr);

        /*
         * Map in the locked page, copy to our local buffer,
         * then map the page out and unlock it.
         */
        vaddr = mapin(as, addr, writing);

        /*
         * Since we are copying memory on behalf of the user process,
         * protect against memory error correction faults.
         */
        if (!on_trap(&otd, OT_DATA_EC)) {
                if (seg->s_ops == &segdev_ops) {
                        /*
                         * Device memory can behave strangely; invoke
                         * a segdev-specific copy operation instead.
                         */
                        if (writing) {
                                if (segdev_copyto(seg, addr, buf, vaddr, len))
                                        error = ENXIO;
                        } else {
                                if (segdev_copyfrom(seg, addr, vaddr, buf, len))
                                        error = ENXIO;
                        }
                } else {
                        if (writing)
                                bcopy(buf, vaddr, len);
                        else
                                bcopy(vaddr, buf, len);
                }
        } else {
                error = EIO;
        }
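
        /*
         * Disarm the on_trap() protection established above; this must
         * happen on the error path as well as on the normal one.
         */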
        no_trap();

        /*
         * If we're writing to an executable page, we may need to synchronize
         * the I$ with the modifications we made through the D$.
         */
        if (writing && (prot & PROT_EXEC))
                sync_icache(vaddr, (uint_t)len);

        mapout(as, addr, vaddr, writing);

        if (rw == S_READ_NOCOW)
                rw = S_READ;

        (void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

        if (protchanged)
                (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);

        AS_LOCK_EXIT(as, &as->a_lock);

        return (error);
}

int
uread(proc_t *p, void *buf, size_t len, uintptr_t a)
{
        return (urw(p, 0, buf, len, a));
}

int
uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
{
        return (urw(p, 1, buf, len, a));
}
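
/*
 * Usage sketch (illustrative): a consumer such as a kernel tracing or
 * debugging facility might copy an instruction out of a stopped
 * process "p" at address "pc" like this, remembering that a single
 * call must not cross a page boundary (urw() asserts this), so larger
 * or misaligned transfers must be split up by the caller:
 *
 *	uint32_t instr;
 *
 *	if (uread(p, &instr, sizeof (instr), pc) != 0)
 *		... handle ENXIO (no mapping) or EIO (corrupt memory) ...
 */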