/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>
#include <machine/trap.h>

#include "mmu_oea64.h"

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so nine 4-bit levels, with the leaf at level 0 and the root
 * at level 8.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * |  8 |  7 |  6 |  5 |  4 |  3 |  2 |  1 |  0 |  level
 */
#define	UAD_ROOT_LEVEL	8
#define	UAD_LEAF_LEVEL	0

/* Extract the 4-bit child index an esid selects at the given tree level. */
static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}
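
/*
 * Worked example (illustrative values only): esid2idx() above and
 * esid2base() below decompose an esid one 4-bit nibble per tree level.
 * For an arbitrary esid of 0x123456789:
 *
 *	esid2idx(0x123456789, 0) == 0x9		(slot within a leaf node)
 *	esid2idx(0x123456789, 1) == 0x8
 *	esid2idx(0x123456789, 8) == 0x1		(slot within the root)
 *
 *	esid2base(0x123456789, 0) == 0x123456780
 *	esid2base(0x123456789, 3) == 0x123450000
 *	esid2base(0x123456789, 8) == 0x000000000
 *
 * A node at level N covers every esid that shares its esid2base(esid, N).
 */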

/*
 * The ua_base field must be a valid base for its level: the low
 * 4 * (ua_level + 1) bits must all be zero, i.e. only the bits decoded
 * by the levels above this node may be set.
 */
#define	uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_lwsync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_lwsync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}
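
/*
 * Compute the SLBV (VSID and flags) for a kernel virtual address.  The
 * kernel VSID is derived deterministically from the ESID; direct-map
 * addresses backed by physical memory are flagged large-page (SLBV_L).
 */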
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/*
	 * Figure out if this is a large-page mapping.
	 */
	if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, assume
		 * all physical memory is mapped with large pages.
		 */

		if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		/*
		 * The following accesses are implicitly ordered under the POWER
		 * ISA by load dependencies (the store ordering is provided by
		 * the powerpc_lwsync() calls elsewhere) and so are run without
		 * barriers.
		 */
		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
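
/*
 * Allocate a fresh VSID for (pm, esid), install it in the pmap's SLB
 * tree, and pre-spill the new entry into the pmap's SLB cache.  The
 * pmap lock must be held.
 */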
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(root != NULL, ("unhandled NULL case"));
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(aim.slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}
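
/*
 * Back-end page allocator for the SLB zones: single wired pages constrained
 * to lie below platform_real_maxaddr(), keeping them addressable with
 * translation off (the SLB spill handlers below run in real mode).
 */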
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc_contig_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
	    1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	if (hw_direct_map)
		va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	else {
		va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
	}

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	return (va);
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}

#if defined(__powerpc64__)
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(aim.slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	if (pm->pm_slb == NULL)
		return (-1);

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif