/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/buf.h>
#include <sys/vmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/machparam.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>

#ifdef __sparc
#include <sys/cpu_module.h>
#define	BP_FLUSH(addr, size)	flush_instr_mem((void *)addr, size);
#else
#define	BP_FLUSH(addr, size)
#endif

static vmem_t *bp_map_arena;
static size_t bp_align;
static uint_t bp_devload_flags = PROT_READ | PROT_WRITE | HAT_NOSYNC;
int bp_max_cache = 1 << 17;		/* 128K default; tunable */
int bp_mapin_kpm_enable = 1;		/* enable default; tunable */

static void *
bp_vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, bp_align, 0, 0, NULL, NULL, vmflag));
}

void
bp_init(size_t align, uint_t devload_flags)
{
	bp_align = MAX(align, PAGESIZE);
	bp_devload_flags |= devload_flags;

	if (bp_align <= bp_max_cache)
		bp_map_arena = vmem_create("bp_map", NULL, 0, bp_align,
		    bp_vmem_alloc, vmem_free, heap_arena,
		    MIN(8 * bp_align, bp_max_cache), VM_SLEEP);
}

/*
 * common routine so can be called with/without VM_SLEEP
 */
void *
bp_mapin_common(struct buf *bp, int flag)
{
	struct as *as;
	pfn_t pfnum;
	page_t *pp;
	page_t **pplist;
	caddr_t kaddr;
	caddr_t addr;
	uintptr_t off;
	size_t size;
	pgcnt_t npages;
	int color;

	/* return if already mapped in, no pageio/physio, or physio to kas */
	if ((bp->b_flags & B_REMAPPED) ||
	    !(bp->b_flags & (B_PAGEIO | B_PHYS)) ||
	    (((bp->b_flags & (B_PAGEIO | B_PHYS)) == B_PHYS) &&
	    ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas))))
		return (bp->b_un.b_addr);

	ASSERT((bp->b_flags & (B_PAGEIO | B_PHYS)) != (B_PAGEIO | B_PHYS));

	addr = (caddr_t)bp->b_un.b_addr;
	off = (uintptr_t)addr & PAGEOFFSET;
	size = P2ROUNDUP(bp->b_bcount + off, PAGESIZE);
	npages = btop(size);

	/*
	 * Fastpath single page IO to locked memory by using kpm.
	 */
	if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&
	    kpm_enable && bp_mapin_kpm_enable) {
		if (bp->b_flags & B_SHADOW)
			pp = *bp->b_shadow;
		else
			pp = bp->b_pages;
		kaddr = hat_kpm_mapin(pp, NULL);
		bp->b_un.b_addr = kaddr + off;
		bp->b_flags |= B_REMAPPED;
		return (bp->b_un.b_addr);
	}

	/*
	 * Allocate kernel virtual space for remapping.
	 */
	color = bp_color(bp);
	ASSERT(color < bp_align);

	if (bp_map_arena != NULL) {
		kaddr = (caddr_t)vmem_alloc(bp_map_arena,
		    P2ROUNDUP(color + size, bp_align), flag);
		if (kaddr == NULL)
			return (NULL);
		kaddr += color;
	} else {
		kaddr = vmem_xalloc(heap_arena, size, bp_align, color,
		    0, NULL, NULL, flag);
		if (kaddr == NULL)
			return (NULL);
	}

	ASSERT(P2PHASE((uintptr_t)kaddr, bp_align) == color);

	/*
	 * Map bp into the virtual space we just allocated.
	 */
	if (bp->b_flags & B_PAGEIO) {
		pp = bp->b_pages;
		pplist = NULL;
	} else if (bp->b_flags & B_SHADOW) {
		pp = NULL;
		pplist = bp->b_shadow;
	} else {
		pp = NULL;
		pplist = NULL;
		if (bp->b_proc == NULL || (as = bp->b_proc->p_as) == NULL)
			as = &kas;
	}

	bp->b_flags |= B_REMAPPED;
	bp->b_un.b_addr = kaddr + off;

	while (npages-- != 0) {
		if (pp) {
			pfnum = pp->p_pagenum;
			pp = pp->p_next;
		} else if (pplist == NULL) {
			pfnum = hat_getpfnum(as->a_hat,
			    (caddr_t)((uintptr_t)addr & MMU_PAGEMASK));
			if (pfnum == PFN_INVALID)
				panic("bp_mapin_common: hat_getpfnum for"
				    " addr %p failed\n", (void *)addr);
			addr += PAGESIZE;
		} else {
			pfnum = (*pplist)->p_pagenum;
			pplist++;
		}

		hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
		    bp_devload_flags, HAT_LOAD_LOCK);

		kaddr += PAGESIZE;
	}
	return (bp->b_un.b_addr);
}

/*
 * Convert bp for pageio/physio to a kernel addressable location.
 */
void
bp_mapin(struct buf *bp)
{
	(void) bp_mapin_common(bp, VM_SLEEP);
}

/*
 * Release all the resources associated with a previous bp_mapin() call.
 */
void
bp_mapout(struct buf *bp)
{
	caddr_t addr;
	uintptr_t off;
	uintptr_t base;
	uintptr_t color;
	size_t size;
	pgcnt_t npages;
	page_t *pp;

	if ((bp->b_flags & B_REMAPPED) == 0)
		return;

	addr = bp->b_un.b_addr;
	off = (uintptr_t)addr & PAGEOFFSET;
	size = P2ROUNDUP(bp->b_bcount + off, PAGESIZE);
	npages = btop(size);

	bp->b_un.b_addr = (caddr_t)off;		/* debugging aid */

	if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&
	    kpm_enable && bp_mapin_kpm_enable) {
		if (bp->b_flags & B_SHADOW)
			pp = *bp->b_shadow;
		else
			pp = bp->b_pages;
		hat_kpm_mapout(pp, NULL, addr);
		bp->b_flags &= ~B_REMAPPED;
		return;
	}

	base = (uintptr_t)addr & MMU_PAGEMASK;
	BP_FLUSH(base, size);
	hat_unload(kas.a_hat, (void *)base, size,
	    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
	if (bp_map_arena != NULL) {
		color = P2PHASE(base, bp_align);
		vmem_free(bp_map_arena, (void *)(base - color),
		    P2ROUNDUP(color + size, bp_align));
	} else
		vmem_free(heap_arena, (void *)base, size);
	bp->b_flags &= ~B_REMAPPED;
}
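
/*
 * A minimal usage sketch, assuming a hypothetical driver helper
 * xx_fill_buf(): callers bracket CPU access to a physio/pageio buffer
 * with bp_mapin() and bp_mapout(), touching the data only through
 * b_un.b_addr in between.  The helper name and its src argument are
 * illustrative and not part of this file.
 */
static void
xx_fill_buf(struct buf *bp, const void *src)
{
	/*
	 * Make bp kernel-addressable; for buffers that already are
	 * (e.g. physio against kas), bp_mapin() returns without remapping.
	 */
	bp_mapin(bp);

	/* The buffer contents can now be reached through b_un.b_addr. */
	bcopy(src, bp->b_un.b_addr, bp->b_bcount);

	/* Release the kernel mapping established above. */
	bp_mapout(bp);
}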