/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/var.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <sys/fcntl.h>
#include <sys/lwpchan_impl.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>
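/*
 * Tunables gating automatic large-page selection.  Each flag can be
 * cleared (e.g. via /etc/system) to force the corresponding operation
 * (heap growth, stack growth, anonymous mmap) back to base-size pages.
 */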
int use_brk_lpg = 1;
int use_stk_lpg = 1;
int use_zmap_lpg = 1;

static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);

int
brk(caddr_t nva)
{
	int error;
	proc_t *p = curproc;

	/*
	 * Serialize brk operations on an address space.
	 * This also serves as the lock protecting p_brksize
	 * and p_brkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		error = brk_lpg(nva);
	} else {
		error = brk_internal(nva, p->p_brkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((error != 0 ? set_errno(error) : 0));
}

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call brk_internal().
 * Returns 0 on success.
 */
static int
brk_lpg(caddr_t nva)
{
	struct proc *p = curproc;
	size_t pgsz, len;
	caddr_t addr;
	caddr_t bssbase = p->p_bssbase;
	caddr_t brkbase = p->p_brkbase;
	int oszc, szc;
	int err;
	int remap = 0;

	oszc = p->p_brkpageszc;

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk_internal() will initialize it.
	 */
	if (brkbase == 0) {
		return (brk_internal(nva, oszc));
	}

	len = nva - bssbase;

	pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, &remap);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 */
	if (szc <= oszc) {
		err = brk_internal(nva, oszc);
		/* If that failed, back off to the base page size. */
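		/*
		 * (Illustrative note: a brk_internal() call with a large szc
		 * can fail, e.g. because the pgsz-rounded size trips the
		 * RLIMIT_DATA check or as_map() cannot satisfy the request;
		 * the same try-then-fall-back pattern recurs below.)
		 */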
		if (err != 0 && oszc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	if (remap == 0) {
		/*
		 * Map from the current brk end up to the new page size
		 * alignment using the current page size.
		 */
		addr = brkbase + p->p_brksize;
		addr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
		if (addr < nva) {
			err = brk_internal(addr, oszc);
			/*
			 * On failure, retry with the base page size if oszc
			 * is not already 0, then return err.
			 */
			if (err != 0) {
				if (oszc != 0) {
					err = brk_internal(nva, 0);
				}
				return (err);
			}
		}
	}

	err = brk_internal(nva, szc);
	/* If using szc failed, map with the base page size and return. */
	if (err != 0) {
		if (szc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	if (remap != 0) {
		/*
		 * Round up the brk base to a large page boundary and remap
		 * anything in the segment already faulted in beyond that
		 * point.
		 */
		addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
		len = (brkbase + p->p_brksize) - addr;
		/* advisory, so ignore errors */
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * Returns 0 on success.
 */
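/*
 * Worker for brk() and brk_lpg(): grows or shrinks the heap to nva using
 * page size code brkszc, enforcing the RLIMIT_DATA rctl and mapping (or
 * unmapping) the difference as a zfod segvn segment.
 */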
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int	error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * Extend the heap to brkszc alignment but use the current
	 * p->p_brkpageszc for the newly created segment.  This allows the
	 * new extension segment to be concatenated successfully with the
	 * existing brk segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed, p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for the heap size).  If pgsz is greater than PAGESIZE, calculate
	 * the heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing the heap pagesize (e.g. some old code) and also if
	 * the heap pagesize changes we can update p_brkpageszc but delay
	 * adding the new mapping yet still know from p_brksize where the
	 * heap really ends.  The user-requested heap end is stored in a
	 * libc variable.
	 */
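	/*
	 * Illustrative example (hypothetical numbers, assuming p_brkbase is
	 * pgsz aligned): with a 4M heap page size, a request for
	 * nva = p_brkbase + 5M is rounded up so that size becomes 8M; if
	 * that rounded size would exceed the RLIMIT_DATA rctl, the code
	 * below falls back to PAGESIZE and uses the exact 5M request.
	 */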
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * Use PAGESIZE to round up ova because we want to know the real
	 * value of the current heap end in case p_brkpageszc changed since
	 * the last p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add a new zfod mapping to extend the UNIX data segment.
		 */
		crargs.szc = szc;
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release the mapping to shrink the UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	p->p_brkpageszc = szc;
	return (0);
}

/*
 * Grow the stack to include sp.  Return 1 if successful, 0 otherwise.
 * This routine assumes that the stack grows downward.
 */
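/*
 * Note that grow() reports success as 1 and failure as 0, rather than an
 * errno, since its callers (typically the page-fault path deciding whether
 * a faulting address is a legitimate stack reference) only need a yes/no
 * answer.
 */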
int
grow(caddr_t sp)
{
	struct proc *p = curproc;
	int err;

	/*
	 * Serialize grow operations on an address space.
	 * This also serves as the lock protecting p_stksize
	 * and p_stkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		err = grow_lpg(sp);
	} else {
		err = grow_internal(sp, p->p_stkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((err == 0 ? 1 : 0));
}

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call grow_internal().
 * Returns 0 on success.
 */
static int
grow_lpg(caddr_t sp)
{
	struct proc *p = curproc;
	size_t pgsz;
	size_t len, newsize;
	caddr_t addr, oldsp;
	int oszc, szc;
	int err;
	int remap = 0;

	newsize = p->p_usrstack - sp;

	oszc = p->p_stkpageszc;
	pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, &remap);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 * This shouldn't happen as the stack never shrinks.
	 */
	if (szc <= oszc) {
		err = grow_internal(sp, oszc);
		/* failed, fall back to base page size */
		if (err != 0 && oszc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * We've grown sufficiently to switch to a new page size.
	 * If we're not going to remap the whole segment with the new
	 * page size, split the grow into two operations: map to the new
	 * page size alignment boundary with the existing page size, then
	 * map the rest with the new page size.
	 */
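	/*
	 * Hypothetical example: with a 64K pgsz and an old stack top (oldsp)
	 * that is not 64K aligned, the first grow_internal() call below
	 * extends the stack only down to the 64K boundary at the old page
	 * size; the second call then extends from there down to sp at szc.
	 */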
	err = 0;
	if (remap == 0) {
		oldsp = p->p_usrstack - p->p_stksize;
		addr = (caddr_t)P2ALIGN((uintptr_t)oldsp, pgsz);
		if (addr > sp) {
			err = grow_internal(addr, oszc);
			/*
			 * In this case, grow with oszc failed, so grow all
			 * the way to sp with base page size.
			 */
			if (err != 0) {
				if (oszc != 0) {
					err = grow_internal(sp, 0);
				}
				return (err);
			}
		}
	}

	err = grow_internal(sp, szc);
	/* The grow with szc failed, so fall back to base page size. */
	if (err != 0) {
		if (szc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	if (remap) {
		/*
		 * Round up the stack pointer to a large page boundary and
		 * remap any pgsz pages in the segment already faulted in
		 * beyond that point.
		 */
		addr = p->p_usrstack - p->p_stksize;
		addr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
		len = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz) - addr;
		/* advisory, so ignore errors */
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
	}

	/* Update the page size code for the stack. */
	p->p_stkpageszc = szc;

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
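/*
 * Worker for grow() and grow_lpg(): extends the stack down to sp (rounded
 * to the growszc page size), enforcing the RLIMIT_STACK rctl, then
 * pre-faults the new pages so the process does not immediately fault on
 * them.
 */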
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t newsize = p->p_usrstack - sp;
	size_t oldsize;
	int error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);

	/*
	 * Grow to growszc alignment but use the current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create.  When memcntl is
	 * increasing the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = P2ROUNDUP(newsize, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	newsize = P2ROUNDUP(newsize, pgsz);
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * Extend the stack with p_stkpageszc.  growszc is different from
	 * p_stkpageszc only on a memcntl to increase the stack pagesize.
	 */
	crargs.szc = p->p_stkpageszc;
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, u.u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;

	/*
	 * Set up translations so the process doesn't have to fault in
	 * the stack pages we just gave it.
	 */
	(void) as_fault(as->a_hat, as,
	    p->p_usrstack - newsize, newsize - oldsize, F_INVAL, S_WRITE);

	return (0);
}

/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
	offset_t pos)
{
	struct segvn_crargs a, b;
	struct proc *p = curproc;
	int err;
	size_t pgsz;
	size_t l0, l1, l2, l3, l4;	/* 0th through 4th chunks */
	caddr_t ruaddr, ruaddr0;	/* rounded-up addresses */
	extern size_t auto_lpg_va_default;

	if (((PROT_ALL & uprot) != uprot))
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
		(void) as_unmap(as, *addrp, len);
	} else {
		/*
		 * No need to worry about vac alignment for anonymous
		 * pages since this is a "clone" object that doesn't
		 * yet exist.
		 */
		map_addr(addrp, len, pos, 0, flags);
		if (*addrp == NULL)
			return (ENOMEM);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	a.vp = NULL;
	a.offset = 0;
	a.type = flags & MAP_TYPE;
	a.prot = uprot;
	a.maxprot = PROT_ALL;
	a.flags = flags & ~MAP_TYPE;
	a.cred = CRED();
	a.amp = NULL;
	a.szc = 0;
	a.lgrp_mem_policy_flags = 0;

	/*
	 * Call arch-specific map_pgsz routine to pick the best page size to
	 * map this segment, and break the mapping up into parts if required.
	 *
	 * The parts work like this:
	 *
	 * addr		---------
	 *		|	| l0
	 *		---------
	 *		|	| l1
	 *		---------
	 *		|	| l2
	 *		---------
	 *		|	| l3
	 *		---------
	 *		|	| l4
	 *		---------
	 * addr+len
	 *
	 * Starting from the middle, l2 is the number of bytes mapped by the
	 * selected large page.  l1 and l3 are mapped by auto_lpg_va_default
	 * page size pages, and l0 and l4 are mapped by base page size pages.
	 * If auto_lpg_va_default is the base page size, then l0 == l4 == 0.
	 * If the requested address or length are aligned to the selected
	 * large page size, l1 or l3 may also be 0.
	 */
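	/*
	 * Hypothetical example: with PAGESIZE = 8K, auto_lpg_va_default =
	 * 64K and a selected pgsz of 4M, a request starting 8K above a 64K
	 * boundary yields l0 = 56K of base pages up to that boundary, l1 =
	 * 64K pages up to the next 4M boundary, l2 = all whole 4M pages
	 * that fit, l3 = trailing 64K pages, and l4 = the leftover base
	 * pages.
	 */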
	if (use_zmap_lpg) {

		pgsz = map_pgsz(MAPPGSZ_VA, p, *addrp, len, NULL);
		if (pgsz <= PAGESIZE || len < pgsz) {
			return (as_map(as, *addrp, len, segvn_create, &a));
		}

		ruaddr = (caddr_t)P2ROUNDUP((uintptr_t)*addrp, pgsz);
		if (auto_lpg_va_default != MMU_PAGESIZE) {
			ruaddr0 = (caddr_t)P2ROUNDUP((uintptr_t)*addrp,
			    auto_lpg_va_default);
			l0 = ruaddr0 - *addrp;
		} else {
			l0 = 0;
			ruaddr0 = *addrp;
		}
		l1 = ruaddr - ruaddr0;
		l3 = P2PHASE(len - l0 - l1, pgsz);
		if (auto_lpg_va_default == MMU_PAGESIZE) {
			l4 = 0;
		} else {
			l4 = P2PHASE(l3, auto_lpg_va_default);
			l3 -= l4;
		}
		l2 = len - l0 - l1 - l3 - l4;

		if (l0) {
			b = a;
			err = as_map(as, *addrp, l0, segvn_create, &b);
			if (err) {
				return (err);
			}
		}

		if (l1) {
			b = a;
			b.szc = page_szc(auto_lpg_va_default);
			err = as_map(as, ruaddr0, l1, segvn_create, &b);
			if (err) {
				goto error1;
			}
		}

		if (l2) {
			b = a;
			b.szc = page_szc(pgsz);
			err = as_map(as, ruaddr, l2, segvn_create, &b);
			if (err) {
				goto error2;
			}
		}

		if (l3) {
			b = a;
			b.szc = page_szc(auto_lpg_va_default);
			err = as_map(as, ruaddr + l2, l3, segvn_create, &b);
			if (err) {
				goto error3;
			}
		}
		if (l4) {
			err = as_map(as, ruaddr + l2 + l3, l4, segvn_create,
			    &a);
			if (err) {
error3:
				if (l3) {
					(void) as_unmap(as, ruaddr + l2, l3);
				}
error2:
				if (l2) {
					(void) as_unmap(as, ruaddr, l2);
				}
error1:
				if (l1) {
					(void) as_unmap(as, ruaddr0, l1);
				}
				if (l0) {
					(void) as_unmap(as, *addrp, l0);
				}
				return (err);
			}
		}

		return (0);
	}

	return (as_map(as, *addrp, len, segvn_create, &a));
}

static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

#if defined(__sparc)
	/*
	 * See if this is an "old mmap call".  If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * the specified address in the system call must be used.
	 * _MAP_NEW is set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
#endif
	flags &= ~_MAP_NEW;

	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);

	if (flags & MAP_ALIGN) {

		if (flags & MAP_FIXED)
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);

	maxprot = PROT_ALL;	/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {
		ASSERT(flags & MAP_ANON);
		as_rangelock(as);
		error = zmap(as, addrp, len, uprot, flags, pos);
		as_rangeunlock(as);
		return (error);
	} else if ((flags & MAP_ANON) != 0)
		return (EINVAL);

	vp = fp->f_vnode;

	/* Can't execute code from "noexec" mounted filesystem. */
	if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0)
		maxprot &= ~PROT_EXEC;

	/*
	 * These checks were added as part of large files.
	 *
	 * Return EINVAL if the initial position is negative; return EOVERFLOW
	 * if (offset + len) would overflow the maximum allowed offset for the
	 * type of file descriptor being used.
	 */
	if (vp->v_type == VREG) {
		if (pos < (offset_t)0)
			return (EINVAL);
		if ((offset_t)len > (OFFSET_MAX(fp) - pos))
			return (EOVERFLOW);
	}

	if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) {
		/* no write access allowed */
		maxprot &= ~PROT_WRITE;
	}

	/*
	 * XXX - Do we also adjust maxprot based on protections
	 * of the vnode?  E.g. if no execute permission is given
	 * on the vnode for the current user, maxprot probably
	 * should disallow PROT_EXEC also?  This is different
	 * from the write access as this would be a per vnode
	 * test as opposed to a per fd test for writability.
	 */

	/*
	 * Verify that the specified protections are not greater than
	 * the maximum allowable protections.  Also test to make sure
	 * that the file descriptor allows read access, since
	 * "write only" mappings are hard to do: normally we do
	 * the read from the file before the page can be written.
	 */
	if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0)
		return (EACCES);

	/*
	 * If the user specified an address, do some simple checks here
	 */
	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}

	/*
	 * Ok, now let the vnode map routine do its thing to set things up.
	 */
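	/*
	 * VOP_MAP() dispatches to the filesystem-specific map routine,
	 * which is expected to choose an address (through the as layer)
	 * and create the segment; smmap_common() itself only validates
	 * the arguments.
	 */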
	error = VOP_MAP(vp, pos, as,
	    addrp, len, uprot, maxprot, flags, fp->f_cred);

	if (error == 0) {
		if (vp->v_type == VREG &&
		    (flags & (MAP_TEXT | MAP_INITDATA)) != 0) {
			/*
			 * Mark this as an executable vnode
			 */
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VVMEXEC;
			mutex_exit(&vp->v_lock);
		}
	}

	return (error);
}

#ifdef _LP64
/*
 * LP64 mmap(2) system call: 64-bit offset, 64-bit address.
 *
 * The "large file" mmap routine mmap64(2) is also mapped to this routine
 * by the 64-bit version of libc.
 *
 * Eventually, this should be the only version, and have smmap_common()
 * folded back into it again.  Some day.
 */
caddr_t
smmap64(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos)
{
	struct file *fp;
	int error;

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&addr, len, prot, flags,
		    NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&addr, len, prot, flags,
		    fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : addr);
}
#endif	/* _LP64 */

#if defined(_SYSCALL32_IMPL) || defined(_ILP32)

/*
 * ILP32 mmap(2) system call: 32-bit offset, 32-bit address.
 */
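/*
 * Note that the 32-bit entry points below OR _MAP_LOW32 into the flags,
 * so that any kernel-chosen address stays below USERLIMIT32 where a
 * 32-bit process can actually reach it.
 */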
caddr_t
smmap32(caddr32_t addr, size32_t len, int prot, int flags, int fd, off32_t pos)
{
	struct file *fp;
	int error;
	caddr_t a = (caddr_t)(uintptr_t)addr;

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	ASSERT(error != 0 || (uintptr_t)(a + len) < (uintptr_t)UINT32_MAX);

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : a);
}

/*
 * ILP32 mmap64(2) system call: 64-bit offset, 32-bit address.
 *
 * Now things really get ugly because we can't use the C-style
 * calling convention for more than 6 args, and 64-bit parameter
 * passing on 32-bit systems is less than clean.
 */
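/*
 * The offhi/offlo cells below carry the two 32-bit halves of the file
 * offset in argument order; which half is the more significant one
 * depends on the caller's byte order, hence the _BIG_ENDIAN #ifdef in
 * smmaplf32().
 */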
struct mmaplf32a {
	caddr_t addr;
	size_t len;
#ifdef _LP64
	/*
	 * 32-bit contents, 64-bit cells
	 */
	uint64_t prot;
	uint64_t flags;
	uint64_t fd;
	uint64_t offhi;
	uint64_t offlo;
#else
	/*
	 * 32-bit contents, 32-bit cells
	 */
	uint32_t prot;
	uint32_t flags;
	uint32_t fd;
	uint32_t offhi;
	uint32_t offlo;
#endif
};

int
smmaplf32(struct mmaplf32a *uap, rval_t *rvp)
{
	struct file *fp;
	int error;
	caddr_t a = uap->addr;
	int flags = (int)uap->flags;
	int fd = (int)uap->fd;
#ifdef _BIG_ENDIAN
	offset_t off = ((u_offset_t)uap->offhi << 32) | (u_offset_t)uap->offlo;
#else
	offset_t off = ((u_offset_t)uap->offlo << 32) | (u_offset_t)uap->offhi;
#endif

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, NULL, off);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, fp, off);
		releasef(fd);
	} else
		error = EBADF;

	if (error == 0)
		rvp->r_val1 = (uintptr_t)a;
	return (error);
}

#endif	/* _SYSCALL32_IMPL || _ILP32 */

int
munmap(caddr_t addr, size_t len)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(EINVAL));

	/*
	 * Discard lwpchan mappings.
	 */
	if (p->p_lcp != NULL)
		lwpchan_delete_mapping(p, addr, addr + len);
	if (as_unmap(as, addr, len) != 0)
		return (set_errno(EINVAL));

	return (0);
}

int
mprotect(caddr_t addr, size_t len, int prot)
{
	struct as *as = curproc->p_as;
	uint_t uprot = prot | PROT_USER;
	int error;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	switch (valid_usr_range(addr, len, prot, as, as->a_userlimit)) {
	case RANGE_OKAY:
		break;
	case RANGE_BADPROT:
		return (set_errno(ENOTSUP));
	case RANGE_BADADDR:
	default:
		return (set_errno(ENOMEM));
	}

	error = as_setprot(as, addr, len, uprot);
	if (error)
		return (set_errno(error));
	return (0);
}

#define	MC_CACHE	128			/* internal result buffer */
#define	MC_QUANTUM	(MC_CACHE * PAGESIZE)	/* addresses covered in loop */

int
mincore(caddr_t addr, size_t len, char *vecp)
{
	struct as *as = curproc->p_as;
	caddr_t ea;			/* end address of loop */
	size_t rl;			/* inner result length */
	char vec[MC_CACHE];		/* local vector cache */
	int error;
	model_t model;
	long llen;

	model = get_udatamodel();
	/*
	 * Validate form of address parameters.
	 */
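	/*
	 * For a 32-bit caller, reinterpret len as a signed 32-bit quantity
	 * so that a length with its sign bit set is rejected by the
	 * llen <= 0 check below rather than silently accepted.
	 */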
	if (model == DATAMODEL_NATIVE) {
		llen = (long)len;
	} else {
		llen = (int32_t)(size32_t)len;
	}
	if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(ENOMEM));

	/*
	 * Loop over subranges of interval [addr : addr + len), recovering
	 * results internally and then copying them out to caller.  Subrange
	 * is based on the size of MC_CACHE, defined above.
	 */
	for (ea = addr + len; addr < ea; addr += MC_QUANTUM) {
		error = as_incore(as, addr,
		    (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl);
		if (rl != 0) {
			rl = (rl + PAGESIZE - 1) / PAGESIZE;
			if (copyout(vec, vecp, rl) != 0)
				return (set_errno(EFAULT));
			vecp += rl;
		}
		if (error != 0)
			return (set_errno(ENOMEM));
	}
	return (0);
}