/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4v.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <vm/mach_kpm.h>
#include <vm/faultcode.h>

extern pfn_t memseg_get_start(struct memseg *);

/*
 * Kernel Physical Mapping (kpm) facility
 */


void
mach_kpm_init()
{
	uintptr_t start, end;
	struct memlist *pmem;

	/*
	 * Map each of the memsegs into the kpm segment, coalescing
	 * adjacent memsegs to allow mapping with the largest
	 * possible pages.
	 */
	pmem = phys_install;
	start = pmem->ml_address;
	end = start + pmem->ml_size;
	for (;;) {
		if (pmem == NULL || pmem->ml_address > end) {
			hat_devload(kas.a_hat, kpm_vbase + start,
			    end - start, mmu_btop(start),
			    PROT_READ | PROT_WRITE,
			    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
			if (pmem == NULL)
				break;
			start = pmem->ml_address;
		}
		end = pmem->ml_address + pmem->ml_size;
		pmem = pmem->ml_next;
	}
}

/* -- hat_kpm interface section -- */

/*
 * Mapin a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}
#endif
}
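/*
 * Illustrative only: a minimal sketch of the mapin/mapout pattern the
 * two routines above expect from callers.  The page must already be
 * held with at least a shared lock, and kpme may be NULL since this
 * sun4v implementation ignores it.  kpm_zero_page() is a hypothetical
 * helper, not part of the hat_kpm interface.
 */
static void
kpm_zero_page(page_t *pp)
{
	caddr_t vaddr;

	ASSERT(PAGE_LOCKED(pp));

	vaddr = hat_kpm_mapin(pp, NULL);
	if (vaddr != NULL) {
		bzero(vaddr, PAGESIZE);		/* access via the kpm alias */
		hat_kpm_mapout(pp, NULL, vaddr);
	}
}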
/*
 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
 * memory addresses that are not described by a page_t.  It can
 * also be used for normal pages that are not locked, but beware
 * this is dangerous - no locking is performed, so the identity of
 * the page could change.  hat_kpm_mapin_pfn is not supported when
 * vac_colors > 1, because the chosen va depends on the page identity,
 * which could change.
 * The caller must only pass pfns for valid physical addresses; violation
 * of this rule will cause a panic.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	caddr_t paddr, vaddr;

	if (kpm_enable == 0)
		return ((caddr_t)NULL);

	paddr = (caddr_t)ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
	/* empty */
}

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	uintptr_t paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * state of the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t paddr;
	pfn_t pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurs.
 * This should never happen on sun4v.
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
    offset_t kpm_pages_off)
{
	pfn_t base, end;

	/*
	 * kphysm_add_memory_dynamic() does not set nkpmpgs
	 * when page_t memory is externally allocated.  That
	 * code must properly calculate nkpmpgs in all cases
	 * if nkpmpgs needs to be used at some point.
	 */

	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * than the base of the dynamic segment so call memseg_get_start() to
	 * get the actual base of the incoming memory for each case.
	 */

	base = memseg_get_start(msp);
	end = msp->pages_end;

	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
	    mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
}

/*
 * Return end of metadata for an already setup memseg.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	return ((caddr_t)msp->epages);
}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{}
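/*
 * Illustrative only: on sun4v the kpm segment is a flat virtual alias
 * of physical memory, so the translations used throughout this file
 * (hat_kpm_mapin_pfn(), hat_kpm_page2va(), and the inverse performed
 * by SFMMU_KPM_VTOP) amount to adding or subtracting kpm_vbase.  The
 * hypothetical helpers below restate that arithmetic; they are not
 * part of the hat_kpm interface.
 */
static caddr_t
kpm_pfn2va(pfn_t pfn)
{
	/* same computation as hat_kpm_page2va(), without the page_t */
	return ((caddr_t)((uintptr_t)kpm_vbase + ptob(pfn)));
}

static pfn_t
kpm_va2pfn(caddr_t vaddr)
{
	ASSERT(IS_KPM_ADDR(vaddr));

	/* inverse of the above: strip the kpm base, convert to pages */
	return ((pfn_t)btop((uintptr_t)vaddr - (uintptr_t)kpm_vbase));
}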
/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	pfn_t base, end;

	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * than the base of the dynamic segment so call memseg_get_start() to
	 * get the actual base of the incoming memory for each case.
	 */

	base = memseg_get_start(msp);
	end = msp->pages_end;

	hat_unload(kas.a_hat, kpm_vbase + mmu_ptob(base), mmu_ptob(end - base),
	    HAT_UNLOAD | HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP);
}

/*ARGSUSED*/
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
    struct memseg *lo, struct memseg *mid, struct memseg *hi)
{}

/*
 * Walk the memsegs chain, applying func to each memseg span.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t pbase, pend;
	void *base;
	size_t size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		base = ptob(pbase) + kpm_vbase;
		size = ptob(pend - pbase);
		func(arg, base, size);
	}
}


/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID.  No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t paddr;
	pfn_t pfn;
	page_t *pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}
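/*
 * Illustrative only: a callback of the shape hat_kpm_walk() expects,
 * wired up by a hypothetical kpm_mapped_bytes() helper.  Each call
 * receives the kpm base address and byte size of one memseg span.
 */
static void
kpm_count_span(void *arg, void *base, size_t size)
{
	size_t *total = arg;

	*total += size;		/* base unused; only the span size matters */
}

static size_t
kpm_mapped_bytes(void)
{
	size_t total = 0;

	hat_kpm_walk(kpm_count_span, &total);

	return (total);
}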