/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate */ 26*7c478bd9Sstevel@tonic-gate 27*7c478bd9Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28*7c478bd9Sstevel@tonic-gate /* All Rights Reserved */ 29*7c478bd9Sstevel@tonic-gate 30*7c478bd9Sstevel@tonic-gate 31*7c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 32*7c478bd9Sstevel@tonic-gate 33*7c478bd9Sstevel@tonic-gate #include <sys/types.h> 34*7c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 35*7c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 36*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 37*7c478bd9Sstevel@tonic-gate #include <sys/param.h> 38*7c478bd9Sstevel@tonic-gate #include <sys/systm.h> 39*7c478bd9Sstevel@tonic-gate #include <sys/user.h> 40*7c478bd9Sstevel@tonic-gate #include <sys/unistd.h> 41*7c478bd9Sstevel@tonic-gate #include <sys/errno.h> 42*7c478bd9Sstevel@tonic-gate #include <sys/proc.h> 43*7c478bd9Sstevel@tonic-gate #include <sys/mman.h> 44*7c478bd9Sstevel@tonic-gate #include <sys/tuneable.h> 45*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 46*7c478bd9Sstevel@tonic-gate #include <sys/cred.h> 47*7c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h> 48*7c478bd9Sstevel@tonic-gate #include <sys/debug.h> 49*7c478bd9Sstevel@tonic-gate #include <sys/policy.h> 50*7c478bd9Sstevel@tonic-gate 51*7c478bd9Sstevel@tonic-gate #include <vm/as.h> 52*7c478bd9Sstevel@tonic-gate #include <vm/seg.h> 53*7c478bd9Sstevel@tonic-gate 54*7c478bd9Sstevel@tonic-gate static uint_t mem_getpgszc(size_t); 55*7c478bd9Sstevel@tonic-gate 56*7c478bd9Sstevel@tonic-gate /* 57*7c478bd9Sstevel@tonic-gate * Memory control operations 58*7c478bd9Sstevel@tonic-gate */ 59*7c478bd9Sstevel@tonic-gate int 60*7c478bd9Sstevel@tonic-gate memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, int attr, int mask) 61*7c478bd9Sstevel@tonic-gate { 62*7c478bd9Sstevel@tonic-gate struct as *as = ttoproc(curthread)->p_as; 63*7c478bd9Sstevel@tonic-gate struct proc *p = ttoproc(curthread); 64*7c478bd9Sstevel@tonic-gate size_t 
pgsz; 65*7c478bd9Sstevel@tonic-gate uint_t szc, oszc, pgcmd; 66*7c478bd9Sstevel@tonic-gate int error = 0; 67*7c478bd9Sstevel@tonic-gate faultcode_t fc; 68*7c478bd9Sstevel@tonic-gate uintptr_t iarg; 69*7c478bd9Sstevel@tonic-gate STRUCT_DECL(memcntl_mha, mha); 70*7c478bd9Sstevel@tonic-gate 71*7c478bd9Sstevel@tonic-gate if (mask) 72*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 73*7c478bd9Sstevel@tonic-gate if ((cmd == MC_LOCKAS) || (cmd == MC_UNLOCKAS)) { 74*7c478bd9Sstevel@tonic-gate if ((addr != 0) || (len != 0)) { 75*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 76*7c478bd9Sstevel@tonic-gate } 77*7c478bd9Sstevel@tonic-gate } else if (cmd != MC_HAT_ADVISE) { 78*7c478bd9Sstevel@tonic-gate if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0) { 79*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 80*7c478bd9Sstevel@tonic-gate } 81*7c478bd9Sstevel@tonic-gate /* 82*7c478bd9Sstevel@tonic-gate * We're only concerned with the address range 83*7c478bd9Sstevel@tonic-gate * here, not the protections. The protections 84*7c478bd9Sstevel@tonic-gate * are only used as a "filter" in this code, 85*7c478bd9Sstevel@tonic-gate * they aren't set or modified here. 
86*7c478bd9Sstevel@tonic-gate */ 87*7c478bd9Sstevel@tonic-gate if (valid_usr_range(addr, len, 0, as, 88*7c478bd9Sstevel@tonic-gate as->a_userlimit) != RANGE_OKAY) { 89*7c478bd9Sstevel@tonic-gate return (set_errno(ENOMEM)); 90*7c478bd9Sstevel@tonic-gate } 91*7c478bd9Sstevel@tonic-gate } 92*7c478bd9Sstevel@tonic-gate 93*7c478bd9Sstevel@tonic-gate if (cmd == MC_HAT_ADVISE) { 94*7c478bd9Sstevel@tonic-gate if (attr != 0 || mask != 0) { 95*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 96*7c478bd9Sstevel@tonic-gate } 97*7c478bd9Sstevel@tonic-gate 98*7c478bd9Sstevel@tonic-gate } else { 99*7c478bd9Sstevel@tonic-gate if ((VALID_ATTR & attr) != attr) { 100*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 101*7c478bd9Sstevel@tonic-gate } 102*7c478bd9Sstevel@tonic-gate if ((attr & SHARED) && (attr & PRIVATE)) { 103*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 104*7c478bd9Sstevel@tonic-gate } 105*7c478bd9Sstevel@tonic-gate if (((cmd == MC_LOCKAS) || (cmd == MC_LOCK) || 106*7c478bd9Sstevel@tonic-gate (cmd == MC_UNLOCKAS) || (cmd == MC_UNLOCK)) && 107*7c478bd9Sstevel@tonic-gate (error = secpolicy_lock_memory(CRED())) != 0) 108*7c478bd9Sstevel@tonic-gate return (set_errno(error)); 109*7c478bd9Sstevel@tonic-gate } 110*7c478bd9Sstevel@tonic-gate if (attr) { 111*7c478bd9Sstevel@tonic-gate attr |= PROT_USER; 112*7c478bd9Sstevel@tonic-gate } 113*7c478bd9Sstevel@tonic-gate 114*7c478bd9Sstevel@tonic-gate switch (cmd) { 115*7c478bd9Sstevel@tonic-gate case MC_SYNC: 116*7c478bd9Sstevel@tonic-gate /* 117*7c478bd9Sstevel@tonic-gate * MS_SYNC used to be defined to be zero but is now non-zero. 118*7c478bd9Sstevel@tonic-gate * For binary compatibility we still accept zero 119*7c478bd9Sstevel@tonic-gate * (the absence of MS_ASYNC) to mean the same thing. 
120*7c478bd9Sstevel@tonic-gate */ 121*7c478bd9Sstevel@tonic-gate iarg = (uintptr_t)arg; 122*7c478bd9Sstevel@tonic-gate if ((iarg & ~MS_INVALIDATE) == 0) 123*7c478bd9Sstevel@tonic-gate iarg |= MS_SYNC; 124*7c478bd9Sstevel@tonic-gate 125*7c478bd9Sstevel@tonic-gate if (((iarg & ~(MS_SYNC|MS_ASYNC|MS_INVALIDATE)) != 0) || 126*7c478bd9Sstevel@tonic-gate ((iarg & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))) { 127*7c478bd9Sstevel@tonic-gate error = set_errno(EINVAL); 128*7c478bd9Sstevel@tonic-gate } else { 129*7c478bd9Sstevel@tonic-gate error = as_ctl(as, addr, len, cmd, attr, iarg, NULL, 0); 130*7c478bd9Sstevel@tonic-gate if (error) { 131*7c478bd9Sstevel@tonic-gate (void) set_errno(error); 132*7c478bd9Sstevel@tonic-gate } 133*7c478bd9Sstevel@tonic-gate } 134*7c478bd9Sstevel@tonic-gate return (error); 135*7c478bd9Sstevel@tonic-gate case MC_LOCKAS: 136*7c478bd9Sstevel@tonic-gate if ((uintptr_t)arg & ~(MCL_FUTURE|MCL_CURRENT) || 137*7c478bd9Sstevel@tonic-gate (uintptr_t)arg == 0) { 138*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 139*7c478bd9Sstevel@tonic-gate } 140*7c478bd9Sstevel@tonic-gate break; 141*7c478bd9Sstevel@tonic-gate case MC_LOCK: 142*7c478bd9Sstevel@tonic-gate case MC_UNLOCKAS: 143*7c478bd9Sstevel@tonic-gate case MC_UNLOCK: 144*7c478bd9Sstevel@tonic-gate break; 145*7c478bd9Sstevel@tonic-gate case MC_HAT_ADVISE: 146*7c478bd9Sstevel@tonic-gate /* 147*7c478bd9Sstevel@tonic-gate * Set prefered page size. 
148*7c478bd9Sstevel@tonic-gate */ 149*7c478bd9Sstevel@tonic-gate STRUCT_INIT(mha, get_udatamodel()); 150*7c478bd9Sstevel@tonic-gate if (copyin(arg, STRUCT_BUF(mha), STRUCT_SIZE(mha))) { 151*7c478bd9Sstevel@tonic-gate return (set_errno(EFAULT)); 152*7c478bd9Sstevel@tonic-gate } 153*7c478bd9Sstevel@tonic-gate 154*7c478bd9Sstevel@tonic-gate pgcmd = STRUCT_FGET(mha, mha_cmd); 155*7c478bd9Sstevel@tonic-gate 156*7c478bd9Sstevel@tonic-gate /* 157*7c478bd9Sstevel@tonic-gate * Currently only MHA_MAPSIZE_VA, MHA_MAPSIZE_STACK 158*7c478bd9Sstevel@tonic-gate * and MHA_MAPSIZE_BSSBRK are supported. Only one 159*7c478bd9Sstevel@tonic-gate * command may be specified at a time. 160*7c478bd9Sstevel@tonic-gate */ 161*7c478bd9Sstevel@tonic-gate if ((~(MHA_MAPSIZE_VA|MHA_MAPSIZE_STACK|MHA_MAPSIZE_BSSBRK) & 162*7c478bd9Sstevel@tonic-gate pgcmd) || pgcmd == 0 || !ISP2(pgcmd) || 163*7c478bd9Sstevel@tonic-gate STRUCT_FGET(mha, mha_flags)) 164*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 165*7c478bd9Sstevel@tonic-gate 166*7c478bd9Sstevel@tonic-gate pgsz = STRUCT_FGET(mha, mha_pagesize); 167*7c478bd9Sstevel@tonic-gate 168*7c478bd9Sstevel@tonic-gate /* 169*7c478bd9Sstevel@tonic-gate * call platform specific map_pgsz() routine to get the 170*7c478bd9Sstevel@tonic-gate * optimal pgsz if pgsz is 0. 171*7c478bd9Sstevel@tonic-gate * 172*7c478bd9Sstevel@tonic-gate * For stack and heap operations addr and len must be zero. 173*7c478bd9Sstevel@tonic-gate */ 174*7c478bd9Sstevel@tonic-gate if ((pgcmd & (MHA_MAPSIZE_BSSBRK|MHA_MAPSIZE_STACK)) != 0) { 175*7c478bd9Sstevel@tonic-gate if (addr != NULL || len != 0) { 176*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 177*7c478bd9Sstevel@tonic-gate } 178*7c478bd9Sstevel@tonic-gate 179*7c478bd9Sstevel@tonic-gate /* 180*7c478bd9Sstevel@tonic-gate * Disable autompss for this process unless pgsz == 0, 181*7c478bd9Sstevel@tonic-gate * which means the system should pick. 
In the 182*7c478bd9Sstevel@tonic-gate * pgsz == 0 case, leave the SAUTOLPG setting alone, as 183*7c478bd9Sstevel@tonic-gate * we don't want to enable it when someone has 184*7c478bd9Sstevel@tonic-gate * disabled automatic large page selection for the 185*7c478bd9Sstevel@tonic-gate * whole system. 186*7c478bd9Sstevel@tonic-gate */ 187*7c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 188*7c478bd9Sstevel@tonic-gate if (pgsz != 0) { 189*7c478bd9Sstevel@tonic-gate p->p_flag &= ~SAUTOLPG; 190*7c478bd9Sstevel@tonic-gate } 191*7c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 192*7c478bd9Sstevel@tonic-gate 193*7c478bd9Sstevel@tonic-gate as_rangelock(as); 194*7c478bd9Sstevel@tonic-gate 195*7c478bd9Sstevel@tonic-gate if (pgsz == 0) { 196*7c478bd9Sstevel@tonic-gate int type; 197*7c478bd9Sstevel@tonic-gate 198*7c478bd9Sstevel@tonic-gate if (pgcmd == MHA_MAPSIZE_BSSBRK) 199*7c478bd9Sstevel@tonic-gate type = MAPPGSZ_HEAP; 200*7c478bd9Sstevel@tonic-gate else 201*7c478bd9Sstevel@tonic-gate type = MAPPGSZ_STK; 202*7c478bd9Sstevel@tonic-gate 203*7c478bd9Sstevel@tonic-gate pgsz = map_pgsz(type, p, 0, 0, NULL); 204*7c478bd9Sstevel@tonic-gate } 205*7c478bd9Sstevel@tonic-gate } else { 206*7c478bd9Sstevel@tonic-gate /* 207*7c478bd9Sstevel@tonic-gate * Note that we don't disable automatic large page 208*7c478bd9Sstevel@tonic-gate * selection for anon segments based on use of 209*7c478bd9Sstevel@tonic-gate * memcntl(). 210*7c478bd9Sstevel@tonic-gate */ 211*7c478bd9Sstevel@tonic-gate if (pgsz == 0) { 212*7c478bd9Sstevel@tonic-gate pgsz = map_pgsz(MAPPGSZ_VA, p, addr, len, 213*7c478bd9Sstevel@tonic-gate NULL); 214*7c478bd9Sstevel@tonic-gate } 215*7c478bd9Sstevel@tonic-gate 216*7c478bd9Sstevel@tonic-gate /* 217*7c478bd9Sstevel@tonic-gate * addr and len must be prefered page size aligned 218*7c478bd9Sstevel@tonic-gate * and valid for range specified. 
219*7c478bd9Sstevel@tonic-gate */ 220*7c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(addr, pgsz) || 221*7c478bd9Sstevel@tonic-gate !IS_P2ALIGNED(len, pgsz)) { 222*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 223*7c478bd9Sstevel@tonic-gate } 224*7c478bd9Sstevel@tonic-gate if (valid_usr_range(addr, len, 0, as, 225*7c478bd9Sstevel@tonic-gate as->a_userlimit) != RANGE_OKAY) { 226*7c478bd9Sstevel@tonic-gate return (set_errno(ENOMEM)); 227*7c478bd9Sstevel@tonic-gate } 228*7c478bd9Sstevel@tonic-gate } 229*7c478bd9Sstevel@tonic-gate 230*7c478bd9Sstevel@tonic-gate szc = mem_getpgszc(pgsz); 231*7c478bd9Sstevel@tonic-gate if (szc == (uint_t)-1) { 232*7c478bd9Sstevel@tonic-gate if ((pgcmd & (MHA_MAPSIZE_BSSBRK|MHA_MAPSIZE_STACK)) 233*7c478bd9Sstevel@tonic-gate != 0) { 234*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 235*7c478bd9Sstevel@tonic-gate } 236*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 237*7c478bd9Sstevel@tonic-gate } 238*7c478bd9Sstevel@tonic-gate 239*7c478bd9Sstevel@tonic-gate /* 240*7c478bd9Sstevel@tonic-gate * For stack and heap operations we first need to pad 241*7c478bd9Sstevel@tonic-gate * out existing range (create new mappings) to the new 242*7c478bd9Sstevel@tonic-gate * prefered page size boundary. Also the start of the 243*7c478bd9Sstevel@tonic-gate * .bss for the heap or user's stack base may not be on 244*7c478bd9Sstevel@tonic-gate * the new prefered page size boundary. For these cases 245*7c478bd9Sstevel@tonic-gate * we align the base of the request on the new prefered 246*7c478bd9Sstevel@tonic-gate * page size. 
247*7c478bd9Sstevel@tonic-gate */ 248*7c478bd9Sstevel@tonic-gate if (pgcmd & MHA_MAPSIZE_BSSBRK) { 249*7c478bd9Sstevel@tonic-gate if (szc == p->p_brkpageszc) { 250*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 251*7c478bd9Sstevel@tonic-gate return (0); 252*7c478bd9Sstevel@tonic-gate } 253*7c478bd9Sstevel@tonic-gate if (szc > p->p_brkpageszc) { 254*7c478bd9Sstevel@tonic-gate error = brk_internal(p->p_brkbase 255*7c478bd9Sstevel@tonic-gate + p->p_brksize, szc); 256*7c478bd9Sstevel@tonic-gate if (error) { 257*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 258*7c478bd9Sstevel@tonic-gate return (set_errno(error)); 259*7c478bd9Sstevel@tonic-gate } 260*7c478bd9Sstevel@tonic-gate } 261*7c478bd9Sstevel@tonic-gate oszc = p->p_brkpageszc; 262*7c478bd9Sstevel@tonic-gate p->p_brkpageszc = szc; 263*7c478bd9Sstevel@tonic-gate 264*7c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(p->p_brkbase + p->p_brksize, pgsz)); 265*7c478bd9Sstevel@tonic-gate addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, 266*7c478bd9Sstevel@tonic-gate pgsz); 267*7c478bd9Sstevel@tonic-gate len = (p->p_brkbase + p->p_brksize) - addr; 268*7c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(len, pgsz)); 269*7c478bd9Sstevel@tonic-gate /* 270*7c478bd9Sstevel@tonic-gate * Perhaps no existing pages to promote. 271*7c478bd9Sstevel@tonic-gate */ 272*7c478bd9Sstevel@tonic-gate if (len == 0) { 273*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 274*7c478bd9Sstevel@tonic-gate return (0); 275*7c478bd9Sstevel@tonic-gate } 276*7c478bd9Sstevel@tonic-gate } 277*7c478bd9Sstevel@tonic-gate /* 278*7c478bd9Sstevel@tonic-gate * The code below, as does grow.c, assumes stacks always grow 279*7c478bd9Sstevel@tonic-gate * downward. 280*7c478bd9Sstevel@tonic-gate */ 281*7c478bd9Sstevel@tonic-gate if (pgcmd & MHA_MAPSIZE_STACK) { 282*7c478bd9Sstevel@tonic-gate /* 283*7c478bd9Sstevel@tonic-gate * Some boxes (x86) have a top of stack that 284*7c478bd9Sstevel@tonic-gate * is not large page aligned. 
Since stacks are 285*7c478bd9Sstevel@tonic-gate * usually small we'll just return and do nothing 286*7c478bd9Sstevel@tonic-gate * for theses cases. Prefeered page size is advisory 287*7c478bd9Sstevel@tonic-gate * so no need to return an error. 288*7c478bd9Sstevel@tonic-gate */ 289*7c478bd9Sstevel@tonic-gate if (szc == p->p_stkpageszc || 290*7c478bd9Sstevel@tonic-gate !IS_P2ALIGNED(p->p_usrstack, pgsz)) { 291*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 292*7c478bd9Sstevel@tonic-gate return (0); 293*7c478bd9Sstevel@tonic-gate } 294*7c478bd9Sstevel@tonic-gate 295*7c478bd9Sstevel@tonic-gate if (szc > p->p_stkpageszc) { 296*7c478bd9Sstevel@tonic-gate error = grow_internal(p->p_usrstack 297*7c478bd9Sstevel@tonic-gate - p->p_stksize, szc); 298*7c478bd9Sstevel@tonic-gate if (error) { 299*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 300*7c478bd9Sstevel@tonic-gate return (set_errno(error)); 301*7c478bd9Sstevel@tonic-gate } 302*7c478bd9Sstevel@tonic-gate } 303*7c478bd9Sstevel@tonic-gate oszc = p->p_stkpageszc; 304*7c478bd9Sstevel@tonic-gate p->p_stkpageszc = szc; 305*7c478bd9Sstevel@tonic-gate 306*7c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(p->p_usrstack, pgsz)); 307*7c478bd9Sstevel@tonic-gate addr = p->p_usrstack - p->p_stksize; 308*7c478bd9Sstevel@tonic-gate len = p->p_stksize; 309*7c478bd9Sstevel@tonic-gate 310*7c478bd9Sstevel@tonic-gate /* 311*7c478bd9Sstevel@tonic-gate * Perhaps nothing to promote, we wrapped around 312*7c478bd9Sstevel@tonic-gate * or grow did not not grow the stack to a large 313*7c478bd9Sstevel@tonic-gate * page boundary. 
314*7c478bd9Sstevel@tonic-gate */ 315*7c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(len, pgsz) || len == 0 || 316*7c478bd9Sstevel@tonic-gate addr >= p->p_usrstack || (addr + len) < addr) { 317*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 318*7c478bd9Sstevel@tonic-gate return (0); 319*7c478bd9Sstevel@tonic-gate } 320*7c478bd9Sstevel@tonic-gate } 321*7c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(addr, pgsz)); 322*7c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(len, pgsz)); 323*7c478bd9Sstevel@tonic-gate error = as_setpagesize(as, addr, len, szc, B_TRUE); 324*7c478bd9Sstevel@tonic-gate 325*7c478bd9Sstevel@tonic-gate /* 326*7c478bd9Sstevel@tonic-gate * On stack or heap failures restore original 327*7c478bd9Sstevel@tonic-gate * pg size code. 328*7c478bd9Sstevel@tonic-gate */ 329*7c478bd9Sstevel@tonic-gate if (error) { 330*7c478bd9Sstevel@tonic-gate if ((pgcmd & MHA_MAPSIZE_BSSBRK) != 0) { 331*7c478bd9Sstevel@tonic-gate p->p_brkpageszc = oszc; 332*7c478bd9Sstevel@tonic-gate } 333*7c478bd9Sstevel@tonic-gate if ((pgcmd & MHA_MAPSIZE_STACK) != 0) { 334*7c478bd9Sstevel@tonic-gate p->p_stkpageszc = oszc; 335*7c478bd9Sstevel@tonic-gate } 336*7c478bd9Sstevel@tonic-gate (void) set_errno(error); 337*7c478bd9Sstevel@tonic-gate } 338*7c478bd9Sstevel@tonic-gate if ((pgcmd & (MHA_MAPSIZE_BSSBRK|MHA_MAPSIZE_STACK)) != 0) { 339*7c478bd9Sstevel@tonic-gate as_rangeunlock(as); 340*7c478bd9Sstevel@tonic-gate } 341*7c478bd9Sstevel@tonic-gate return (error); 342*7c478bd9Sstevel@tonic-gate case MC_ADVISE: 343*7c478bd9Sstevel@tonic-gate switch ((uintptr_t)arg) { 344*7c478bd9Sstevel@tonic-gate case MADV_WILLNEED: 345*7c478bd9Sstevel@tonic-gate fc = as_faulta(as, addr, len); 346*7c478bd9Sstevel@tonic-gate if (fc) { 347*7c478bd9Sstevel@tonic-gate if (FC_CODE(fc) == FC_OBJERR) 348*7c478bd9Sstevel@tonic-gate error = set_errno(FC_ERRNO(fc)); 349*7c478bd9Sstevel@tonic-gate else if (FC_CODE(fc) == FC_NOMAP) 350*7c478bd9Sstevel@tonic-gate error = set_errno(ENOMEM); 351*7c478bd9Sstevel@tonic-gate else 
352*7c478bd9Sstevel@tonic-gate error = set_errno(EINVAL); 353*7c478bd9Sstevel@tonic-gate return (error); 354*7c478bd9Sstevel@tonic-gate } 355*7c478bd9Sstevel@tonic-gate break; 356*7c478bd9Sstevel@tonic-gate 357*7c478bd9Sstevel@tonic-gate case MADV_DONTNEED: 358*7c478bd9Sstevel@tonic-gate /* 359*7c478bd9Sstevel@tonic-gate * For now, don't need is turned into an as_ctl(MC_SYNC) 360*7c478bd9Sstevel@tonic-gate * operation flagged for async invalidate. 361*7c478bd9Sstevel@tonic-gate */ 362*7c478bd9Sstevel@tonic-gate error = as_ctl(as, addr, len, MC_SYNC, attr, 363*7c478bd9Sstevel@tonic-gate MS_ASYNC | MS_INVALIDATE, NULL, 0); 364*7c478bd9Sstevel@tonic-gate if (error) 365*7c478bd9Sstevel@tonic-gate (void) set_errno(error); 366*7c478bd9Sstevel@tonic-gate return (error); 367*7c478bd9Sstevel@tonic-gate 368*7c478bd9Sstevel@tonic-gate default: 369*7c478bd9Sstevel@tonic-gate error = as_ctl(as, addr, len, cmd, attr, 370*7c478bd9Sstevel@tonic-gate (uintptr_t)arg, NULL, 0); 371*7c478bd9Sstevel@tonic-gate if (error) 372*7c478bd9Sstevel@tonic-gate (void) set_errno(error); 373*7c478bd9Sstevel@tonic-gate return (error); 374*7c478bd9Sstevel@tonic-gate } 375*7c478bd9Sstevel@tonic-gate break; 376*7c478bd9Sstevel@tonic-gate default: 377*7c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 378*7c478bd9Sstevel@tonic-gate } 379*7c478bd9Sstevel@tonic-gate 380*7c478bd9Sstevel@tonic-gate error = as_ctl(as, addr, len, cmd, attr, (uintptr_t)arg, NULL, 0); 381*7c478bd9Sstevel@tonic-gate 382*7c478bd9Sstevel@tonic-gate if (error) 383*7c478bd9Sstevel@tonic-gate (void) set_errno(error); 384*7c478bd9Sstevel@tonic-gate return (error); 385*7c478bd9Sstevel@tonic-gate } 386*7c478bd9Sstevel@tonic-gate 387*7c478bd9Sstevel@tonic-gate /* 388*7c478bd9Sstevel@tonic-gate * Return page size code for page size passed in. If 389*7c478bd9Sstevel@tonic-gate * matching page size not found return -1. 
390*7c478bd9Sstevel@tonic-gate */ 391*7c478bd9Sstevel@tonic-gate static uint_t 392*7c478bd9Sstevel@tonic-gate mem_getpgszc(size_t pgsz) { 393*7c478bd9Sstevel@tonic-gate return ((uint_t)page_user_szc(pgsz)); 394*7c478bd9Sstevel@tonic-gate } 395