/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4v.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <vm/mach_kpm.h>

/*
 * Kernel Physical Mapping (kpm) facility
 */

void
mach_kpm_init()
{
	uintptr_t start, end;
	struct memlist *pmem;

	/*
	 * Map each of the memsegs into the kpm segment, coalescing
	 * adjacent memsegs to allow mapping with the largest
	 * possible pages.
	 */
	pmem = phys_install;
	start = pmem->address;
	end = start + pmem->size;
	for (;;) {
		if (pmem == NULL || pmem->address > end) {
			hat_devload(kas.a_hat, kpm_vbase + start,
			    end - start, mmu_btop(start),
			    PROT_READ | PROT_WRITE,
			    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
			if (pmem == NULL)
				break;
			start = pmem->address;
		}
		end = pmem->address + pmem->size;
		pmem = pmem->next;
	}
}

/* -- hat_kpm interface section -- */

/*
 * Mapin a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}
#endif
}
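/*
 * Example (illustrative sketch only): a caller typically brackets a
 * short copy through the kpm mapping with the mapin/mapout pair,
 * holding the page lock across both calls.  The buffer "buf" and
 * length "len" below are hypothetical.
 *
 *	caddr_t va;
 *
 *	if ((va = hat_kpm_mapin(pp, NULL)) != NULL) {
 *		bcopy(buf, va, MIN(len, PAGESIZE));
 *		hat_kpm_mapout(pp, NULL, va);
 *	}
 */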
/*
 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
 * memory addresses that are not described by a page_t.  It can
 * also be used for normal pages that are not locked, but beware
 * this is dangerous - no locking is performed, so the identity of
 * the page could change.  hat_kpm_mapin_pfn is not supported when
 * vac_colors > 1, because the chosen va depends on the page identity,
 * which could change.
 * The caller must pass only pfns for valid physical addresses;
 * violating this rule will cause a panic.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	uintptr_t paddr, vaddr;

	if (kpm_enable == 0)
		return ((caddr_t)NULL);

	paddr = ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
	/* empty */
}

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	uintptr_t paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * state of the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t paddr;
	pfn_t pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurs.
 * This should never happen on sun4v.
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{}

/*ARGSUSED*/
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	return (0);
}

/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{}

/*ARGSUSED*/
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{}

/*
 * Walk the memsegs chain, applying func to each memseg span.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t pbase, pend;
	void *base;
	size_t size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		base = ptob(pbase) + kpm_vbase;
		size = ptob(pend - pbase);
		func(arg, base, size);
	}
}


/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID.  No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t paddr;
	pfn_t pfn;
	page_t *pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}
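/*
 * Example (illustrative sketch only): a minimal hat_kpm_walk()
 * callback that logs every kpm span.  The name "kpm_span_print" is
 * hypothetical; its signature matches the func argument above.
 *
 *	static void
 *	kpm_span_print(void *arg, void *base, size_t size)
 *	{
 *		cmn_err(CE_CONT, "kpm span: base %p size %lu\n", base, size);
 *	}
 *
 *	hat_kpm_walk(kpm_span_print, NULL);
 */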