/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or be locked in memory.
 */
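/*
 * Illustrative usage sketch (not part of this file; the caller and the
 * size are hypothetical, but segkp_get()/segkp_release(), the global
 * segkp segment, and the KPD_* flags are the real interfaces defined
 * below and in <vm/seg_kp.h>):
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, 4 * PAGESIZE,
 *	    KPD_ZERO | KPD_HASREDZONE | KPD_NOWAIT);
 *	if (va == NULL)
 *		return (ENOMEM);	(no more VM resources)
 *	... use the four pages at va ...
 *	segkp_release(segkp, va);
 */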

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct	segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
    caddr_t addr);
static int	segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32-bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	SEGKP_BADOP(int),		/* dup */
	SEGKP_BADOP(int),		/* unmap */
	SEGKP_BADOP(void),		/* free */
	segkp_fault,
	SEGKP_BADOP(faultcode_t),	/* faulta */
	SEGKP_BADOP(int),		/* setprot */
	segkp_checkprot,
	segkp_kluster,
	SEGKP_BADOP(size_t),		/* swapout */
	SEGKP_BADOP(int),		/* sync */
	SEGKP_BADOP(size_t),		/* incore */
	SEGKP_BADOP(int),		/* lockop */
	SEGKP_BADOP(int),		/* getprot */
	SEGKP_BADOP(u_offset_t),	/* getoffset */
	SEGKP_BADOP(int),		/* gettype */
	SEGKP_BADOP(int),		/* getvp */
	SEGKP_BADOP(int),		/* advise */
	segkp_dump,			/* dump */
	segkp_pagelock,			/* pagelock */
	SEGKP_BADOP(int),		/* setpgsz */
	segkp_getmemid,			/* getmemid */
	segkp_getpolicy,		/* getpolicy */
	segkp_capable,			/* capable */
	seg_inherit_notsup		/* inherit */
};


static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
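/*
 * A sketch of how this is driven at boot, paraphrased from the startup
 * code (the base address and size names are placeholders): the segment
 * is attached to the kernel address space under the as lock, then handed
 * to segkp_create() above.
 *
 *	rw_enter(&kas.a_lock, RW_WRITER);
 *	if (seg_attach(&kas, kp_base, kp_size, segkp) < 0)
 *		cmn_err(CE_PANIC, "cannot attach segkp");
 *	if (segkp_create(segkp) != 0)
 *		cmn_err(CE_PANIC, "segkp_create failed");
 *	rw_exit(&kas.a_lock);
 */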

/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}

/*
 * There are two entry points into segkp_get_internal.  The first includes
 * a cookie used to access a pool of cached segkp resources.  The second
 * does not use the cache.
 */
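/*
 * The cached path, illustrated (hypothetical pool size and length; cf. the
 * per-thread stack caches set up at thread initialization): a pool is
 * described once with segkp_cache_init(), allocations are satisfied from
 * its freelist when possible, and segkp_release() returns resources to the
 * freelist as long as the pool is below its maximum.
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 128, 4 * PAGESIZE,
 *	    KPD_HASREDZONE | KPD_LOCKED);
 *	if (cookie == (void *)-1)
 *		... no free cache slot, or an invalid flag combination ...
 *
 *	va = segkp_cache_get(cookie);	(freelist hit or fresh allocation)
 *	...
 *	segkp_release(segkp, va);	(back on the freelist if room)
 */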
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
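/*
 * Rough sketch of the anon_map variant (the caller is hypothetical; this
 * mirrors how a kernel/user shared page can be set up, with the kernel
 * mapping coming from segkp and the same anon_map later backing a user
 * mapping):
 *
 *	struct anon_map *amp;
 *	caddr_t va;
 *
 *	amp = anonmap_alloc(PAGESIZE, 0, ANON_SLEEP);
 *	va = segkp_get_withanonmap(segkp, PAGESIZE,
 *	    KPD_LOCKED | KPD_ZERO, amp);
 *
 * Note that with KPD_HASAMP any required swap reservation is the caller's
 * responsibility; see the amp != NULL case in segkp_get_internal() below.
 */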

/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client; len must be page-aligned.  A
 * null value is returned if there are no more vm resources (e.g. pages,
 * swap).  The len and base recorded in the private data structure include
 * the redzone and the redzone length (if applicable).  If the user
 * requests a redzone, either the first or the last page is left unmapped
 * depending on whether stacks grow toward low or high memory.
 *
 * The client may also specify a no-wait flag.  If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
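/*
 * Worked example of the redzone layout on a stack-grows-down platform
 * (the macros are from <vm/seg_kp.h>): a client asking for 8K with
 * KPD_HASREDZONE gets kp_len = 12K, assuming 4K pages.  Page 0 of the
 * resource is the unmapped redzone, so stom() returns kp_base + PAGESIZE
 * and SEGKP_MAPLEN(12K, flags) gives back the 8K mapped length:
 *
 *	kp_base:		[ redzone, never mapped ]
 *	kp_base + PAGESIZE:	[ mapped ]  <- stom(kp_base, flags)
 *	kp_base + 2*PAGESIZE:	[ mapped ]
 *
 * (On upward-growing stacks the redzone is the last page instead.)
 */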
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has fewer than the maximum allowable entries.  If inserted in the cache,
 * segkp_delete ensures the element is taken off the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource.  segkp_unlock gets called with the start of
 * the mapped portion of the resource.  The length is the size of the
 * mapped portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one.  Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp,
				    (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
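/*
 * The caller-side pattern, paraphrased from the trap code (the stack-depth
 * test is elided):
 *
 *	int mapped_red = 0;
 *	...
 *	if (... kernel stack looks dangerously short ...)
 *		mapped_red = segkp_map_red();
 *	...
 *	(handle the fault)
 *	...
 *	if (mapped_red)
 *		segkp_unmap_red();
 */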
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else /* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits.  Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now: if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif /* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up; we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int	err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion.  Note that a reference
		 * to the redzone is handled since vaddr would not equal base.
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd,
		    KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone.  Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
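/*
 * segkp_fault() is reached through as_fault() on the kernel address space.
 * A hypothetical client that needs a segkp resource locked down (say, for
 * I/O) would pair the two soft-fault types like this (error handling
 * elided; addr/len must exactly cover the mapped portion, per the checks
 * above):
 *
 *	(void) as_fault(kas.a_hat, &kas, addr, len, F_SOFTLOCK, S_WRITE);
 *	... do the I/O ...
 *	(void) as_fault(kas.a_hat, &kas, addr, len, F_SOFTUNLOCK, S_WRITE);
 */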

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}

/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}
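
/*
 * Worked example of the containment test above (hypothetical values):
 * with mbase = 0x2000 and mlen = 0x2000, the mapped range is
 * [0x2000, 0x4000).  A request (vaddr = 0x3000, len = 0x2000) has
 * vaddr + len = 0x5000 > mbase + mlen = 0x4000, so it straddles the end
 * of the slot and fails with EACCES; (vaddr = 0x2000, len = 0x2000) is
 * exactly the mapped portion and succeeds.  The standalone len > mlen
 * test rejects oversized requests cheaply before the end-of-range
 * arithmetic runs.
 */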

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t *pl[2];	/* second element NULL terminator */
		struct vnode *vp;
		anoff_t off;
		int err;
		struct anon *ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}
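
/*
 * Index-arithmetic sketch for the loop above (hypothetical values):
 * with kp_base = 0x1000, vaddr = 0x2800 and len = 0x1800, P2ROUNDUP
 * gives len = 0x2000, the page-aligned start is va = 0x2000, and
 * vlim = 0x4000.  The starting anon index is
 *
 *	i = (0x2000 - 0x1000) >> PAGESHIFT = 1	(PAGESIZE = 0x1000)
 *
 * i.e. the second page of the slot's anon array, and the loop covers
 * the two pages [0x2000, 0x4000), a superset of the original request.
 */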

/*
 * At the very least, unload the mmu translations and unlock the range
 * if it is locked.  Can be called with the KPD_WRITEDIRTY flag, which
 * specifies that any dirty pages should be written back to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range.  It is assumed
	 * that segkp_unlock is called with a page-aligned base.
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			/* A locked slot must have a resident page. */
			if ((pp = page_find(vp, off)) == NULL) {
				panic("segkp_unlock: missing page");
				/*NOTREACHED*/
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want the most powerful credentials we can get, so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}
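
/*
 * Accounting sketch (illustrative): segkp_load() and segkp_unlock()
 * must stay symmetric for locked slots.  For a locked request of len
 * bytes:
 *
 *	segkp_load:	page_resv(btop(len))	after P2ROUNDUP(len)
 *	segkp_unlock:	page_unresv(btopr(len))	len page-aligned by caller
 *
 * Both expressions yield the same page count, because len is a
 * multiple of PAGESIZE by the time each conversion runs; the reserve
 * taken at lock time is therefore returned exactly at unlock time
 * (and likewise for the anon_segkp_pages_locked counter).
 */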

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}
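
/*
 * The deletion loop above uses the classic pointer-to-pointer idiom:
 * kpp always points at the link (either the hash bucket head or some
 * element's kp_next field) that refers to *kpp, so unlinking needs no
 * special case for the list head.  A minimal generic sketch of the
 * same pattern (illustrative only):
 *
 *	struct node { struct node *next; };
 *
 *	void
 *	unlink(struct node **headp, struct node *target)
 *	{
 *		struct node **npp;
 *
 *		for (npp = headp; *npp != NULL; npp = &(*npp)->next) {
 *			if (*npp == target) {
 *				*npp = target->next;
 *				return;
 *			}
 *		}
 *	}
 */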

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true, in which case we have to (potentially) scan
 * the whole table looking for it.  This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int i;
	int stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}

/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}

/*
 * Dump out all the active segkp pages.
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}
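
/*
 * Probe-order sketch for segkp_find() (illustrative): starting from
 * the natural bucket, the scan walks backwards with wrap-around until
 * it returns to its starting point.  For SEGKP_HASHSZ == 8
 * (hypothetical) and a start index of 2, buckets are visited in the
 * order
 *
 *	2, 1, 0, 7, 6, 5, 4, 3
 *
 * which is why the common case (the caller passes the address that
 * hashes to the right bucket) terminates on the very first bucket,
 * while the rare mid-slot lookup degrades to a full-table scan.
 */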

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}
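
/*
 * Callback-sequence sketch (illustrative): during a dynamic memory
 * delete, the kphysm framework invokes the vector registered above
 * roughly as follows:
 *
 *	pre_del(arg, delta_pages)	-> segkp bumps segkp_indel and
 *					   flushes its slot cache so no
 *					   cached mappings pin pages
 *	... pages are deleted, or the operation is cancelled ...
 *	post_del(arg, delta_pages, cancelled)
 *					-> segkp_indel is dropped and
 *					   caching resumes
 *
 * A driver that wanted to withdraw from notification could pair the
 * registration with kphysm_setup_func_unregister(); segkp never does,
 * since the segment lives for the life of the system.
 */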