17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 507b65a64Saguzovsk * Common Development and Distribution License (the "License"). 607b65a64Saguzovsk * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 21783f4f5eSRoger A. Faulkner 22*1b3b16f3STheo Schlossnagle /* Copyright 2013 OmniTI Computer Consulting, Inc. All rights reserved. */ 23*1b3b16f3STheo Schlossnagle 247c478bd9Sstevel@tonic-gate /* 25783f4f5eSRoger A. Faulkner * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 267c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
277c478bd9Sstevel@tonic-gate */ 287c478bd9Sstevel@tonic-gate 297c478bd9Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 307c478bd9Sstevel@tonic-gate /* All Rights Reserved */ 317c478bd9Sstevel@tonic-gate 327c478bd9Sstevel@tonic-gate #include <sys/types.h> 337c478bd9Sstevel@tonic-gate #include <sys/inttypes.h> 347c478bd9Sstevel@tonic-gate #include <sys/param.h> 357c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 367c478bd9Sstevel@tonic-gate #include <sys/systm.h> 377c478bd9Sstevel@tonic-gate #include <sys/signal.h> 387c478bd9Sstevel@tonic-gate #include <sys/user.h> 397c478bd9Sstevel@tonic-gate #include <sys/errno.h> 407c478bd9Sstevel@tonic-gate #include <sys/var.h> 417c478bd9Sstevel@tonic-gate #include <sys/proc.h> 427c478bd9Sstevel@tonic-gate #include <sys/tuneable.h> 437c478bd9Sstevel@tonic-gate #include <sys/debug.h> 447c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 457c478bd9Sstevel@tonic-gate #include <sys/cred.h> 467c478bd9Sstevel@tonic-gate #include <sys/vnode.h> 477c478bd9Sstevel@tonic-gate #include <sys/vfs.h> 487c478bd9Sstevel@tonic-gate #include <sys/vm.h> 497c478bd9Sstevel@tonic-gate #include <sys/file.h> 507c478bd9Sstevel@tonic-gate #include <sys/mman.h> 517c478bd9Sstevel@tonic-gate #include <sys/vmparam.h> 527c478bd9Sstevel@tonic-gate #include <sys/fcntl.h> 537c478bd9Sstevel@tonic-gate #include <sys/lwpchan_impl.h> 54da6c28aaSamw #include <sys/nbmlock.h> 557c478bd9Sstevel@tonic-gate 567c478bd9Sstevel@tonic-gate #include <vm/hat.h> 577c478bd9Sstevel@tonic-gate #include <vm/as.h> 587c478bd9Sstevel@tonic-gate #include <vm/seg.h> 597c478bd9Sstevel@tonic-gate #include <vm/seg_dev.h> 607c478bd9Sstevel@tonic-gate #include <vm/seg_vn.h> 617c478bd9Sstevel@tonic-gate 627c478bd9Sstevel@tonic-gate int use_brk_lpg = 1; 637c478bd9Sstevel@tonic-gate int use_stk_lpg = 1; 647c478bd9Sstevel@tonic-gate 657c478bd9Sstevel@tonic-gate static int brk_lpg(caddr_t nva); 667c478bd9Sstevel@tonic-gate static int grow_lpg(caddr_t sp); 
677c478bd9Sstevel@tonic-gate 687c478bd9Sstevel@tonic-gate int 697c478bd9Sstevel@tonic-gate brk(caddr_t nva) 707c478bd9Sstevel@tonic-gate { 717c478bd9Sstevel@tonic-gate int error; 727c478bd9Sstevel@tonic-gate proc_t *p = curproc; 737c478bd9Sstevel@tonic-gate 747c478bd9Sstevel@tonic-gate /* 757c478bd9Sstevel@tonic-gate * Serialize brk operations on an address space. 767c478bd9Sstevel@tonic-gate * This also serves as the lock protecting p_brksize 777c478bd9Sstevel@tonic-gate * and p_brkpageszc. 787c478bd9Sstevel@tonic-gate */ 797c478bd9Sstevel@tonic-gate as_rangelock(p->p_as); 807c478bd9Sstevel@tonic-gate if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) { 817c478bd9Sstevel@tonic-gate error = brk_lpg(nva); 827c478bd9Sstevel@tonic-gate } else { 837c478bd9Sstevel@tonic-gate error = brk_internal(nva, p->p_brkpageszc); 847c478bd9Sstevel@tonic-gate } 857c478bd9Sstevel@tonic-gate as_rangeunlock(p->p_as); 867c478bd9Sstevel@tonic-gate return ((error != 0 ? set_errno(error) : 0)); 877c478bd9Sstevel@tonic-gate } 887c478bd9Sstevel@tonic-gate 897c478bd9Sstevel@tonic-gate /* 907c478bd9Sstevel@tonic-gate * Algorithm: call arch-specific map_pgsz to get best page size to use, 917c478bd9Sstevel@tonic-gate * then call brk_internal(). 927c478bd9Sstevel@tonic-gate * Returns 0 on success. 
 */
static int
brk_lpg(caddr_t nva)
{
	struct proc *p = curproc;
	size_t pgsz, len;
	caddr_t addr, brkend;
	caddr_t bssbase = p->p_bssbase;
	caddr_t brkbase = p->p_brkbase;
	int oszc, szc;
	int err;

	oszc = p->p_brkpageszc;

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk_internal() will initialize it.
	 */
	if (brkbase == 0) {
		return (brk_internal(nva, oszc));
	}

	/* Prospective heap length, measured from the base of bss. */
	len = nva - bssbase;

	pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 */
	if (szc <= oszc) {
		err = brk_internal(nva, oszc);
		/* If failed, back off to base page size. */
		if (err != 0 && oszc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	err = brk_internal(nva, szc);
	/* If using szc failed, map with base page size and return. */
	if (err != 0) {
		if (szc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	/*
	 * Round up brk base to a large page boundary and remap
	 * anything in the segment already faulted in beyond that
	 * point.
	 */
	addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
	brkend = brkbase + p->p_brksize;
	len = brkend - addr;
	/* Check that len is not negative. Update page size code for heap. */
	if (addr >= p->p_bssbase && brkend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		/*
		 * p_brkpageszc is only advanced once the existing range has
		 * actually been converted to the larger page size above.
		 */
		p->p_brkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * Returns 0 on success.
 * Grows (or shrinks) the heap to end at nva, creating or releasing the
 * zfod mapping as needed.  Caller must hold the as range lock.
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int	error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	/* Snapshot the RLIMIT_DATA resource control under p_lock. */
	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for heap size). If pgsz is greater than PAGESIZE calculate the
	 * heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing heap pagesize (e.g. some old code) and also if
	 * heap pagesize changes we can update p_brkpageszc but delay adding
	 * new mapping yet still know from p_brksize where the heap really
	 * ends. The user requested heap end is stored in libc variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		/*
		 * If rounding up to pgsz would overflow or exceed the limit,
		 * fall back to base pages for this request.
		 * NOTE(review): as_rctl is truncated to size_t here but
		 * compared un-cast further below -- confirm this asymmetry
		 * is intentional on ILP32 kernels.
		 */
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * use PAGESIZE to roundup ova because we want to know the real value
	 * of the current heap end in case p_brkpageszc changes since the last
	 * p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		/* Honor the process's data-segment protection. */
		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	/* Record the (pgsz-rounded) heap size; see comment above. */
	p->p_brksize = size;
	return (0);
}

/*
 * Grow the stack to include sp.  Return 1 if successful, 0 otherwise.
 * This routine assumes that the stack grows downward.
 */
int
grow(caddr_t sp)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t oldsize = p->p_stksize;
	size_t newsize;
	int err;

	/*
	 * Serialize grow operations on an address space.
	 * This also serves as the lock protecting p_stksize
	 * and p_stkpageszc.
	 */
	as_rangelock(as);
	if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		err = grow_lpg(sp);
	} else {
		err = grow_internal(sp, p->p_stkpageszc);
	}
	as_rangeunlock(as);

	if (err == 0 && (newsize = p->p_stksize) > oldsize) {
		ASSERT(IS_P2ALIGNED(oldsize, PAGESIZE));
		ASSERT(IS_P2ALIGNED(newsize, PAGESIZE));
		/*
		 * Set up translations so the process doesn't have to fault in
		 * the stack pages we just gave it.
		 */
		(void) as_fault(as->a_hat, as, p->p_usrstack - newsize,
		    newsize - oldsize, F_INVAL, S_WRITE);
	}
	return ((err == 0 ? 1 : 0));
}

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call grow_internal().
 * Returns 0 on success.
 */
static int
grow_lpg(caddr_t sp)
{
	struct proc *p = curproc;
	size_t pgsz;
	size_t len, newsize;
	caddr_t addr, saddr;
	caddr_t growend;
	int oszc, szc;
	int err;

	/* Prospective stack size: from sp up to the top of the stack. */
	newsize = p->p_usrstack - sp;

	oszc = p->p_stkpageszc;
	pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 * This shouldn't happen as the stack never shrinks.
	 */
	if (szc <= oszc) {
		err = grow_internal(sp, oszc);
		/* failed, fall back to base page size */
		if (err != 0 && oszc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * We've grown sufficiently to switch to a new page size.
	 * So we are going to remap the whole segment with the new page size.
	 */
	err = grow_internal(sp, szc);
	/* The grow with szc failed, so fall back to base page size. */
	if (err != 0) {
		if (szc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * Round up stack pointer to a large page boundary and remap
	 * any pgsz pages in the segment already faulted in beyond that
	 * point.
	 */
	saddr = p->p_usrstack - p->p_stksize;
	addr = (caddr_t)P2ROUNDUP((uintptr_t)saddr, pgsz);
	growend = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz);
	len = growend - addr;
	/* Check that len is not negative. Update page size code for stack. */
	if (addr >= saddr && growend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_stkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	int    error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	/* Work on whole base pages. */
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * grow to growszc alignment but use current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create. For memcntl to
	 * increase the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		/* Large-page rounding blew the limit: retry with base pages. */
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * extend stack with the proposed new growszc, which is different
	 * than p_stkpageszc only on a memcntl to increase the stack pagesize.
	 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies via
	 * map_pgszcvec(). Use AS_MAP_STACK to get intermediate page sizes
	 * if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}

/*
 * Find address for user to map.
 * If MAP_FIXED is not specified, we can pick any address we want, but we will
 * first try the value in *addrp if it is non-NULL. Thus this is implementing
 * a way to try and get a preferred address.
 * Returns 0 on success with the chosen address in *addrp, ENOMEM if no
 * suitable range could be found.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	/* Page-align the caller's hint before probing for a gap. */
	caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */
	if (flags & MAP_FIXED) {
		/* MAP_FIXED: take the address as-is, replacing any overlap. */
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/*
		 * No user supplied address or the address supplied was not
		 * available.
		 */
		map_addr(addrp, len, off, vacalign, flags);
	}
	if (*addrp == NULL)
		return (ENOMEM);
	return (0);
}


/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
    offset_t pos)
{
	struct segvn_crargs vn_a;
	int error;

	/* Reject protection bits outside PROT_ALL. */
	if (((PROT_ALL & uprot) != uprot))
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}
	/*
	 * No need to worry about vac alignment for anonymous
	 * pages since this is a "clone" object that doesn't
	 * yet exist.
	 */
	error = choose_addr(as, addrp, len, pos, ADDR_NOVACALIGN, flags);
	if (error != 0) {
		return (error);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = uprot;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = CRED();
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	return (as_map(as, *addrp, len, segvn_create, &vn_a));
}

static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

#if defined(__sparc)
6167c478bd9Sstevel@tonic-gate /* 6177c478bd9Sstevel@tonic-gate * See if this is an "old mmap call". If so, remember this 6187c478bd9Sstevel@tonic-gate * fact and convert the flags value given to mmap to indicate 6197c478bd9Sstevel@tonic-gate * the specified address in the system call must be used. 6207c478bd9Sstevel@tonic-gate * _MAP_NEW is turned set by all new uses of mmap. 6217c478bd9Sstevel@tonic-gate */ 6227c478bd9Sstevel@tonic-gate if ((flags & _MAP_NEW) == 0) 6237c478bd9Sstevel@tonic-gate flags |= MAP_FIXED; 6247c478bd9Sstevel@tonic-gate #endif 6257c478bd9Sstevel@tonic-gate flags &= ~_MAP_NEW; 6267c478bd9Sstevel@tonic-gate 6277c478bd9Sstevel@tonic-gate type = flags & MAP_TYPE; 6287c478bd9Sstevel@tonic-gate if (type != MAP_PRIVATE && type != MAP_SHARED) 6297c478bd9Sstevel@tonic-gate return (EINVAL); 6307c478bd9Sstevel@tonic-gate 6317c478bd9Sstevel@tonic-gate 6327c478bd9Sstevel@tonic-gate if (flags & MAP_ALIGN) { 6337c478bd9Sstevel@tonic-gate 6347c478bd9Sstevel@tonic-gate if (flags & MAP_FIXED) 6357c478bd9Sstevel@tonic-gate return (EINVAL); 6367c478bd9Sstevel@tonic-gate 6377c478bd9Sstevel@tonic-gate /* alignment needs to be a power of 2 >= page size */ 6387c478bd9Sstevel@tonic-gate if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) || 6397c478bd9Sstevel@tonic-gate !ISP2((uintptr_t)*addrp)) 6407c478bd9Sstevel@tonic-gate return (EINVAL); 6417c478bd9Sstevel@tonic-gate } 6427c478bd9Sstevel@tonic-gate /* 6437c478bd9Sstevel@tonic-gate * Check for bad lengths and file position. 6447c478bd9Sstevel@tonic-gate * We let the VOP_MAP routine check for negative lengths 6457c478bd9Sstevel@tonic-gate * since on some vnode types this might be appropriate. 
6467c478bd9Sstevel@tonic-gate */ 6477c478bd9Sstevel@tonic-gate if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0) 6487c478bd9Sstevel@tonic-gate return (EINVAL); 6497c478bd9Sstevel@tonic-gate 6507c478bd9Sstevel@tonic-gate maxprot = PROT_ALL; /* start out allowing all accesses */ 6517c478bd9Sstevel@tonic-gate uprot = prot | PROT_USER; 6527c478bd9Sstevel@tonic-gate 6537c478bd9Sstevel@tonic-gate if (fp == NULL) { 6547c478bd9Sstevel@tonic-gate ASSERT(flags & MAP_ANON); 655783f4f5eSRoger A. Faulkner /* discard lwpchan mappings, like munmap() */ 656783f4f5eSRoger A. Faulkner if ((flags & MAP_FIXED) && curproc->p_lcp != NULL) 657783f4f5eSRoger A. Faulkner lwpchan_delete_mapping(curproc, *addrp, *addrp + len); 6587c478bd9Sstevel@tonic-gate as_rangelock(as); 6597c478bd9Sstevel@tonic-gate error = zmap(as, addrp, len, uprot, flags, pos); 6607c478bd9Sstevel@tonic-gate as_rangeunlock(as); 6612c5124a1SPrashanth Sreenivasa /* 6622c5124a1SPrashanth Sreenivasa * Tell machine specific code that lwp has mapped shared memory 6632c5124a1SPrashanth Sreenivasa */ 6642c5124a1SPrashanth Sreenivasa if (error == 0 && (flags & MAP_SHARED)) { 6652c5124a1SPrashanth Sreenivasa /* EMPTY */ 6662c5124a1SPrashanth Sreenivasa LWP_MMODEL_SHARED_AS(*addrp, len); 6672c5124a1SPrashanth Sreenivasa } 6687c478bd9Sstevel@tonic-gate return (error); 6697c478bd9Sstevel@tonic-gate } else if ((flags & MAP_ANON) != 0) 6707c478bd9Sstevel@tonic-gate return (EINVAL); 6717c478bd9Sstevel@tonic-gate 6727c478bd9Sstevel@tonic-gate vp = fp->f_vnode; 6737c478bd9Sstevel@tonic-gate 6747c478bd9Sstevel@tonic-gate /* Can't execute code from "noexec" mounted filesystem. */ 6757c478bd9Sstevel@tonic-gate if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) 6767c478bd9Sstevel@tonic-gate maxprot &= ~PROT_EXEC; 6777c478bd9Sstevel@tonic-gate 6787c478bd9Sstevel@tonic-gate /* 6797c478bd9Sstevel@tonic-gate * These checks were added as part of large files. 
6807c478bd9Sstevel@tonic-gate * 68149a63d68Speterte * Return ENXIO if the initial position is negative; return EOVERFLOW 6827c478bd9Sstevel@tonic-gate * if (offset + len) would overflow the maximum allowed offset for the 6837c478bd9Sstevel@tonic-gate * type of file descriptor being used. 6847c478bd9Sstevel@tonic-gate */ 6857c478bd9Sstevel@tonic-gate if (vp->v_type == VREG) { 68649a63d68Speterte if (pos < 0) 68749a63d68Speterte return (ENXIO); 6887c478bd9Sstevel@tonic-gate if ((offset_t)len > (OFFSET_MAX(fp) - pos)) 6897c478bd9Sstevel@tonic-gate return (EOVERFLOW); 6907c478bd9Sstevel@tonic-gate } 6917c478bd9Sstevel@tonic-gate 6927c478bd9Sstevel@tonic-gate if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) { 6937c478bd9Sstevel@tonic-gate /* no write access allowed */ 6947c478bd9Sstevel@tonic-gate maxprot &= ~PROT_WRITE; 6957c478bd9Sstevel@tonic-gate } 6967c478bd9Sstevel@tonic-gate 6977c478bd9Sstevel@tonic-gate /* 6987c478bd9Sstevel@tonic-gate * XXX - Do we also adjust maxprot based on protections 6997c478bd9Sstevel@tonic-gate * of the vnode? E.g. if no execute permission is given 7007c478bd9Sstevel@tonic-gate * on the vnode for the current user, maxprot probably 7017c478bd9Sstevel@tonic-gate * should disallow PROT_EXEC also? This is different 7027c478bd9Sstevel@tonic-gate * from the write access as this would be a per vnode 7037c478bd9Sstevel@tonic-gate * test as opposed to a per fd test for writability. 7047c478bd9Sstevel@tonic-gate */ 7057c478bd9Sstevel@tonic-gate 7067c478bd9Sstevel@tonic-gate /* 7077c478bd9Sstevel@tonic-gate * Verify that the specified protections are not greater than 7087c478bd9Sstevel@tonic-gate * the maximum allowable protections. Also test to make sure 7097c478bd9Sstevel@tonic-gate * that the file descriptor does allows for read access since 7107c478bd9Sstevel@tonic-gate * "write only" mappings are hard to do since normally we do 7117c478bd9Sstevel@tonic-gate * the read from the file before the page can be written. 
7127c478bd9Sstevel@tonic-gate */ 7137c478bd9Sstevel@tonic-gate if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0) 7147c478bd9Sstevel@tonic-gate return (EACCES); 7157c478bd9Sstevel@tonic-gate 7167c478bd9Sstevel@tonic-gate /* 7177c478bd9Sstevel@tonic-gate * If the user specified an address, do some simple checks here 7187c478bd9Sstevel@tonic-gate */ 7197c478bd9Sstevel@tonic-gate if ((flags & MAP_FIXED) != 0) { 7207c478bd9Sstevel@tonic-gate caddr_t userlimit; 7217c478bd9Sstevel@tonic-gate 7227c478bd9Sstevel@tonic-gate /* 7237c478bd9Sstevel@tonic-gate * Use the user address. First verify that 7247c478bd9Sstevel@tonic-gate * the address to be used is page aligned. 7257c478bd9Sstevel@tonic-gate * Then make some simple bounds checks. 7267c478bd9Sstevel@tonic-gate */ 7277c478bd9Sstevel@tonic-gate if (((uintptr_t)*addrp & PAGEOFFSET) != 0) 7287c478bd9Sstevel@tonic-gate return (EINVAL); 7297c478bd9Sstevel@tonic-gate 7307c478bd9Sstevel@tonic-gate userlimit = flags & _MAP_LOW32 ? 7317c478bd9Sstevel@tonic-gate (caddr_t)USERLIMIT32 : as->a_userlimit; 7327c478bd9Sstevel@tonic-gate switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) { 7337c478bd9Sstevel@tonic-gate case RANGE_OKAY: 7347c478bd9Sstevel@tonic-gate break; 7357c478bd9Sstevel@tonic-gate case RANGE_BADPROT: 7367c478bd9Sstevel@tonic-gate return (ENOTSUP); 7377c478bd9Sstevel@tonic-gate case RANGE_BADADDR: 7387c478bd9Sstevel@tonic-gate default: 7397c478bd9Sstevel@tonic-gate return (ENOMEM); 7407c478bd9Sstevel@tonic-gate } 7417c478bd9Sstevel@tonic-gate } 7427c478bd9Sstevel@tonic-gate 743da6c28aaSamw if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) && 744da6c28aaSamw nbl_need_check(vp)) { 745da6c28aaSamw int svmand; 746da6c28aaSamw nbl_op_t nop; 747da6c28aaSamw 748da6c28aaSamw nbl_start_crit(vp, RW_READER); 749da6c28aaSamw in_crit = 1; 750da6c28aaSamw error = nbl_svmand(vp, fp->f_cred, &svmand); 751da6c28aaSamw if (error != 0) 752da6c28aaSamw goto done; 753da6c28aaSamw if ((prot & PROT_WRITE) && (type == 
MAP_SHARED)) { 754da6c28aaSamw if (prot & (PROT_READ | PROT_EXEC)) { 755da6c28aaSamw nop = NBL_READWRITE; 756da6c28aaSamw } else { 757da6c28aaSamw nop = NBL_WRITE; 758da6c28aaSamw } 759da6c28aaSamw } else { 760da6c28aaSamw nop = NBL_READ; 761da6c28aaSamw } 762da6c28aaSamw if (nbl_conflict(vp, nop, 0, LONG_MAX, svmand, NULL)) { 763da6c28aaSamw error = EACCES; 764da6c28aaSamw goto done; 765da6c28aaSamw } 766da6c28aaSamw } 7677c478bd9Sstevel@tonic-gate 768783f4f5eSRoger A. Faulkner /* discard lwpchan mappings, like munmap() */ 769783f4f5eSRoger A. Faulkner if ((flags & MAP_FIXED) && curproc->p_lcp != NULL) 770783f4f5eSRoger A. Faulkner lwpchan_delete_mapping(curproc, *addrp, *addrp + len); 771783f4f5eSRoger A. Faulkner 7727c478bd9Sstevel@tonic-gate /* 7737c478bd9Sstevel@tonic-gate * Ok, now let the vnode map routine do its thing to set things up. 7747c478bd9Sstevel@tonic-gate */ 7757c478bd9Sstevel@tonic-gate error = VOP_MAP(vp, pos, as, 776da6c28aaSamw addrp, len, uprot, maxprot, flags, fp->f_cred, NULL); 7777c478bd9Sstevel@tonic-gate 7787c478bd9Sstevel@tonic-gate if (error == 0) { 7792c5124a1SPrashanth Sreenivasa /* 7802c5124a1SPrashanth Sreenivasa * Tell machine specific code that lwp has mapped shared memory 7812c5124a1SPrashanth Sreenivasa */ 7822c5124a1SPrashanth Sreenivasa if (flags & MAP_SHARED) { 7832c5124a1SPrashanth Sreenivasa /* EMPTY */ 7842c5124a1SPrashanth Sreenivasa LWP_MMODEL_SHARED_AS(*addrp, len); 7852c5124a1SPrashanth Sreenivasa } 7867c478bd9Sstevel@tonic-gate if (vp->v_type == VREG && 7877c478bd9Sstevel@tonic-gate (flags & (MAP_TEXT | MAP_INITDATA)) != 0) { 7887c478bd9Sstevel@tonic-gate /* 7897c478bd9Sstevel@tonic-gate * Mark this as an executable vnode 7907c478bd9Sstevel@tonic-gate */ 7917c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 7927c478bd9Sstevel@tonic-gate vp->v_flag |= VVMEXEC; 7937c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 7947c478bd9Sstevel@tonic-gate } 7957c478bd9Sstevel@tonic-gate } 7967c478bd9Sstevel@tonic-gate 
797da6c28aaSamw done: 798da6c28aaSamw if (in_crit) 799da6c28aaSamw nbl_end_crit(vp); 8007c478bd9Sstevel@tonic-gate return (error); 8017c478bd9Sstevel@tonic-gate } 8027c478bd9Sstevel@tonic-gate 8037c478bd9Sstevel@tonic-gate #ifdef _LP64 8047c478bd9Sstevel@tonic-gate /* 8057c478bd9Sstevel@tonic-gate * LP64 mmap(2) system call: 64-bit offset, 64-bit address. 8067c478bd9Sstevel@tonic-gate * 8077c478bd9Sstevel@tonic-gate * The "large file" mmap routine mmap64(2) is also mapped to this routine 8087c478bd9Sstevel@tonic-gate * by the 64-bit version of libc. 8097c478bd9Sstevel@tonic-gate * 8107c478bd9Sstevel@tonic-gate * Eventually, this should be the only version, and have smmap_common() 8117c478bd9Sstevel@tonic-gate * folded back into it again. Some day. 8127c478bd9Sstevel@tonic-gate */ 8137c478bd9Sstevel@tonic-gate caddr_t 8147c478bd9Sstevel@tonic-gate smmap64(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos) 8157c478bd9Sstevel@tonic-gate { 8167c478bd9Sstevel@tonic-gate struct file *fp; 8177c478bd9Sstevel@tonic-gate int error; 8187c478bd9Sstevel@tonic-gate 819*1b3b16f3STheo Schlossnagle if (fd == -1 && (flags & MAP_ANON) != 0) 8207c478bd9Sstevel@tonic-gate error = smmap_common(&addr, len, prot, flags, 8217c478bd9Sstevel@tonic-gate NULL, (offset_t)pos); 8227c478bd9Sstevel@tonic-gate else if ((fp = getf(fd)) != NULL) { 8237c478bd9Sstevel@tonic-gate error = smmap_common(&addr, len, prot, flags, 8247c478bd9Sstevel@tonic-gate fp, (offset_t)pos); 8257c478bd9Sstevel@tonic-gate releasef(fd); 8267c478bd9Sstevel@tonic-gate } else 8277c478bd9Sstevel@tonic-gate error = EBADF; 8287c478bd9Sstevel@tonic-gate 8297c478bd9Sstevel@tonic-gate return (error ? 
(caddr_t)(uintptr_t)set_errno(error) : addr); 8307c478bd9Sstevel@tonic-gate } 8317c478bd9Sstevel@tonic-gate #endif /* _LP64 */ 8327c478bd9Sstevel@tonic-gate 8337c478bd9Sstevel@tonic-gate #if defined(_SYSCALL32_IMPL) || defined(_ILP32) 8347c478bd9Sstevel@tonic-gate 8357c478bd9Sstevel@tonic-gate /* 8367c478bd9Sstevel@tonic-gate * ILP32 mmap(2) system call: 32-bit offset, 32-bit address. 8377c478bd9Sstevel@tonic-gate */ 8387c478bd9Sstevel@tonic-gate caddr_t 8397c478bd9Sstevel@tonic-gate smmap32(caddr32_t addr, size32_t len, int prot, int flags, int fd, off32_t pos) 8407c478bd9Sstevel@tonic-gate { 8417c478bd9Sstevel@tonic-gate struct file *fp; 8427c478bd9Sstevel@tonic-gate int error; 8437c478bd9Sstevel@tonic-gate caddr_t a = (caddr_t)(uintptr_t)addr; 8447c478bd9Sstevel@tonic-gate 8457c478bd9Sstevel@tonic-gate if (flags & _MAP_LOW32) 8467c478bd9Sstevel@tonic-gate error = EINVAL; 8477c478bd9Sstevel@tonic-gate else if (fd == -1 && (flags & MAP_ANON) != 0) 8487c478bd9Sstevel@tonic-gate error = smmap_common(&a, (size_t)len, prot, 8497c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, NULL, (offset_t)pos); 8507c478bd9Sstevel@tonic-gate else if ((fp = getf(fd)) != NULL) { 8517c478bd9Sstevel@tonic-gate error = smmap_common(&a, (size_t)len, prot, 8527c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, fp, (offset_t)pos); 8537c478bd9Sstevel@tonic-gate releasef(fd); 8547c478bd9Sstevel@tonic-gate } else 8557c478bd9Sstevel@tonic-gate error = EBADF; 8567c478bd9Sstevel@tonic-gate 8577c478bd9Sstevel@tonic-gate ASSERT(error != 0 || (uintptr_t)(a + len) < (uintptr_t)UINT32_MAX); 8587c478bd9Sstevel@tonic-gate 8597c478bd9Sstevel@tonic-gate return (error ? (caddr_t)(uintptr_t)set_errno(error) : a); 8607c478bd9Sstevel@tonic-gate } 8617c478bd9Sstevel@tonic-gate 8627c478bd9Sstevel@tonic-gate /* 8637c478bd9Sstevel@tonic-gate * ILP32 mmap64(2) system call: 64-bit offset, 32-bit address. 
8647c478bd9Sstevel@tonic-gate * 8657c478bd9Sstevel@tonic-gate * Now things really get ugly because we can't use the C-style 8667c478bd9Sstevel@tonic-gate * calling convention for more than 6 args, and 64-bit parameter 8677c478bd9Sstevel@tonic-gate * passing on 32-bit systems is less than clean. 8687c478bd9Sstevel@tonic-gate */ 8697c478bd9Sstevel@tonic-gate 8707c478bd9Sstevel@tonic-gate struct mmaplf32a { 8717c478bd9Sstevel@tonic-gate caddr_t addr; 8727c478bd9Sstevel@tonic-gate size_t len; 8737c478bd9Sstevel@tonic-gate #ifdef _LP64 8747c478bd9Sstevel@tonic-gate /* 8757c478bd9Sstevel@tonic-gate * 32-bit contents, 64-bit cells 8767c478bd9Sstevel@tonic-gate */ 8777c478bd9Sstevel@tonic-gate uint64_t prot; 8787c478bd9Sstevel@tonic-gate uint64_t flags; 8797c478bd9Sstevel@tonic-gate uint64_t fd; 8807c478bd9Sstevel@tonic-gate uint64_t offhi; 8817c478bd9Sstevel@tonic-gate uint64_t offlo; 8827c478bd9Sstevel@tonic-gate #else 8837c478bd9Sstevel@tonic-gate /* 8847c478bd9Sstevel@tonic-gate * 32-bit contents, 32-bit cells 8857c478bd9Sstevel@tonic-gate */ 8867c478bd9Sstevel@tonic-gate uint32_t prot; 8877c478bd9Sstevel@tonic-gate uint32_t flags; 8887c478bd9Sstevel@tonic-gate uint32_t fd; 8897c478bd9Sstevel@tonic-gate uint32_t offhi; 8907c478bd9Sstevel@tonic-gate uint32_t offlo; 8917c478bd9Sstevel@tonic-gate #endif 8927c478bd9Sstevel@tonic-gate }; 8937c478bd9Sstevel@tonic-gate 8947c478bd9Sstevel@tonic-gate int 8957c478bd9Sstevel@tonic-gate smmaplf32(struct mmaplf32a *uap, rval_t *rvp) 8967c478bd9Sstevel@tonic-gate { 8977c478bd9Sstevel@tonic-gate struct file *fp; 8987c478bd9Sstevel@tonic-gate int error; 8997c478bd9Sstevel@tonic-gate caddr_t a = uap->addr; 9007c478bd9Sstevel@tonic-gate int flags = (int)uap->flags; 9017c478bd9Sstevel@tonic-gate int fd = (int)uap->fd; 9027c478bd9Sstevel@tonic-gate #ifdef _BIG_ENDIAN 9037c478bd9Sstevel@tonic-gate offset_t off = ((u_offset_t)uap->offhi << 32) | (u_offset_t)uap->offlo; 9047c478bd9Sstevel@tonic-gate #else 9057c478bd9Sstevel@tonic-gate 
offset_t off = ((u_offset_t)uap->offlo << 32) | (u_offset_t)uap->offhi; 9067c478bd9Sstevel@tonic-gate #endif 9077c478bd9Sstevel@tonic-gate 9087c478bd9Sstevel@tonic-gate if (flags & _MAP_LOW32) 9097c478bd9Sstevel@tonic-gate error = EINVAL; 9107c478bd9Sstevel@tonic-gate else if (fd == -1 && (flags & MAP_ANON) != 0) 9117c478bd9Sstevel@tonic-gate error = smmap_common(&a, uap->len, (int)uap->prot, 9127c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, NULL, off); 9137c478bd9Sstevel@tonic-gate else if ((fp = getf(fd)) != NULL) { 9147c478bd9Sstevel@tonic-gate error = smmap_common(&a, uap->len, (int)uap->prot, 9157c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, fp, off); 9167c478bd9Sstevel@tonic-gate releasef(fd); 9177c478bd9Sstevel@tonic-gate } else 9187c478bd9Sstevel@tonic-gate error = EBADF; 9197c478bd9Sstevel@tonic-gate 9207c478bd9Sstevel@tonic-gate if (error == 0) 9217c478bd9Sstevel@tonic-gate rvp->r_val1 = (uintptr_t)a; 9227c478bd9Sstevel@tonic-gate return (error); 9237c478bd9Sstevel@tonic-gate } 9247c478bd9Sstevel@tonic-gate 9257c478bd9Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL || _ILP32 */ 9267c478bd9Sstevel@tonic-gate 9277c478bd9Sstevel@tonic-gate int 9287c478bd9Sstevel@tonic-gate munmap(caddr_t addr, size_t len) 9297c478bd9Sstevel@tonic-gate { 9307c478bd9Sstevel@tonic-gate struct proc *p = curproc; 9317c478bd9Sstevel@tonic-gate struct as *as = p->p_as; 9327c478bd9Sstevel@tonic-gate 9337c478bd9Sstevel@tonic-gate if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0) 9347c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 9357c478bd9Sstevel@tonic-gate 9367c478bd9Sstevel@tonic-gate if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY) 9377c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 9387c478bd9Sstevel@tonic-gate 9397c478bd9Sstevel@tonic-gate /* 9407c478bd9Sstevel@tonic-gate * Discard lwpchan mappings. 
9417c478bd9Sstevel@tonic-gate */ 9427c478bd9Sstevel@tonic-gate if (p->p_lcp != NULL) 9437c478bd9Sstevel@tonic-gate lwpchan_delete_mapping(p, addr, addr + len); 9447c478bd9Sstevel@tonic-gate if (as_unmap(as, addr, len) != 0) 9457c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 9467c478bd9Sstevel@tonic-gate 9477c478bd9Sstevel@tonic-gate return (0); 9487c478bd9Sstevel@tonic-gate } 9497c478bd9Sstevel@tonic-gate 9507c478bd9Sstevel@tonic-gate int 9517c478bd9Sstevel@tonic-gate mprotect(caddr_t addr, size_t len, int prot) 9527c478bd9Sstevel@tonic-gate { 9537c478bd9Sstevel@tonic-gate struct as *as = curproc->p_as; 9547c478bd9Sstevel@tonic-gate uint_t uprot = prot | PROT_USER; 9557c478bd9Sstevel@tonic-gate int error; 9567c478bd9Sstevel@tonic-gate 9577c478bd9Sstevel@tonic-gate if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0) 9587c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 9597c478bd9Sstevel@tonic-gate 9607c478bd9Sstevel@tonic-gate switch (valid_usr_range(addr, len, prot, as, as->a_userlimit)) { 9617c478bd9Sstevel@tonic-gate case RANGE_OKAY: 9627c478bd9Sstevel@tonic-gate break; 9637c478bd9Sstevel@tonic-gate case RANGE_BADPROT: 9647c478bd9Sstevel@tonic-gate return (set_errno(ENOTSUP)); 9657c478bd9Sstevel@tonic-gate case RANGE_BADADDR: 9667c478bd9Sstevel@tonic-gate default: 9677c478bd9Sstevel@tonic-gate return (set_errno(ENOMEM)); 9687c478bd9Sstevel@tonic-gate } 9697c478bd9Sstevel@tonic-gate 9707c478bd9Sstevel@tonic-gate error = as_setprot(as, addr, len, uprot); 9717c478bd9Sstevel@tonic-gate if (error) 9727c478bd9Sstevel@tonic-gate return (set_errno(error)); 9737c478bd9Sstevel@tonic-gate return (0); 9747c478bd9Sstevel@tonic-gate } 9757c478bd9Sstevel@tonic-gate 9767c478bd9Sstevel@tonic-gate #define MC_CACHE 128 /* internal result buffer */ 9777c478bd9Sstevel@tonic-gate #define MC_QUANTUM (MC_CACHE * PAGESIZE) /* addresses covered in loop */ 9787c478bd9Sstevel@tonic-gate 9797c478bd9Sstevel@tonic-gate int 9807c478bd9Sstevel@tonic-gate mincore(caddr_t addr, size_t 
len, char *vecp) 9817c478bd9Sstevel@tonic-gate { 9827c478bd9Sstevel@tonic-gate struct as *as = curproc->p_as; 9837c478bd9Sstevel@tonic-gate caddr_t ea; /* end address of loop */ 9847c478bd9Sstevel@tonic-gate size_t rl; /* inner result length */ 9857c478bd9Sstevel@tonic-gate char vec[MC_CACHE]; /* local vector cache */ 9867c478bd9Sstevel@tonic-gate int error; 9877c478bd9Sstevel@tonic-gate model_t model; 9887c478bd9Sstevel@tonic-gate long llen; 9897c478bd9Sstevel@tonic-gate 9907c478bd9Sstevel@tonic-gate model = get_udatamodel(); 9917c478bd9Sstevel@tonic-gate /* 9927c478bd9Sstevel@tonic-gate * Validate form of address parameters. 9937c478bd9Sstevel@tonic-gate */ 9947c478bd9Sstevel@tonic-gate if (model == DATAMODEL_NATIVE) { 9957c478bd9Sstevel@tonic-gate llen = (long)len; 9967c478bd9Sstevel@tonic-gate } else { 9977c478bd9Sstevel@tonic-gate llen = (int32_t)(size32_t)len; 9987c478bd9Sstevel@tonic-gate } 9997c478bd9Sstevel@tonic-gate if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0) 10007c478bd9Sstevel@tonic-gate return (set_errno(EINVAL)); 10017c478bd9Sstevel@tonic-gate 10027c478bd9Sstevel@tonic-gate if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY) 10037c478bd9Sstevel@tonic-gate return (set_errno(ENOMEM)); 10047c478bd9Sstevel@tonic-gate 10057c478bd9Sstevel@tonic-gate /* 10067c478bd9Sstevel@tonic-gate * Loop over subranges of interval [addr : addr + len), recovering 10077c478bd9Sstevel@tonic-gate * results internally and then copying them out to caller. Subrange 10087c478bd9Sstevel@tonic-gate * is based on the size of MC_CACHE, defined above. 
10097c478bd9Sstevel@tonic-gate */ 10107c478bd9Sstevel@tonic-gate for (ea = addr + len; addr < ea; addr += MC_QUANTUM) { 10117c478bd9Sstevel@tonic-gate error = as_incore(as, addr, 10127c478bd9Sstevel@tonic-gate (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl); 10137c478bd9Sstevel@tonic-gate if (rl != 0) { 10147c478bd9Sstevel@tonic-gate rl = (rl + PAGESIZE - 1) / PAGESIZE; 10157c478bd9Sstevel@tonic-gate if (copyout(vec, vecp, rl) != 0) 10167c478bd9Sstevel@tonic-gate return (set_errno(EFAULT)); 10177c478bd9Sstevel@tonic-gate vecp += rl; 10187c478bd9Sstevel@tonic-gate } 10197c478bd9Sstevel@tonic-gate if (error != 0) 10207c478bd9Sstevel@tonic-gate return (set_errno(ENOMEM)); 10217c478bd9Sstevel@tonic-gate } 10227c478bd9Sstevel@tonic-gate return (0); 10237c478bd9Sstevel@tonic-gate } 1024