--- slb.c	(6f3544cd7084abbadd83637993a4f41fd30e6ccd)
+++ slb.c	(54c562081ff82b6429078f76d44491e7d1e95673)
 /*-
  * Copyright (c) 2010 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
--- 186 unchanged lines hidden (view full) ---
 uint64_t
 kernel_va_to_slbv(vm_offset_t va)
 {
 	uint64_t esid, slbv;
 
 	esid = (uintptr_t)va >> ADDR_SR_SHFT;
 
 	/* Set kernel VSID to deterministic value */
-	slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;
+	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;
 
 	/* Figure out if this is a large-page mapping */
 	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
 		/*
 		 * XXX: If we have set up a direct map, assumes
 		 * all physical memory is mapped with large pages.
 		 */
 		if (mem_valid(va, 0) == 0)
--- 204 unchanged lines hidden (view full) ---
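The first hunk removes the pmap dependency from kernel_va_to_slbv(): the old code looked the VSID up through kernel_pmap with va_to_vsid(), while the new code derives it arithmetically from the ESID with KERNEL_VSID(), so a kernel segment entry can be reconstructed on any CPU without consulting shared pmap state, matching the "deterministic value" comment already in the source. The sketch below shows the general shape of such a deterministic ESID-to-VSID mapping; the hash constant, the tag bit, and all names here are illustrative stand-ins, not the actual definitions from the powerpc headers.

#include <stdint.h>

/* Hypothetical tag bit marking a VSID as belonging to the kernel. */
#define	SKETCH_KERNEL_VSID_BIT	0x0000001000000000UL

/*
 * Deterministic ESID -> VSID mapping in the spirit of KERNEL_VSID():
 * scramble the ESID so adjacent segments land on unrelated VSIDs, then
 * force a reserved high bit so the result can never collide with an
 * allocated user VSID.
 */
static inline uint64_t
sketch_kernel_vsid(uint64_t esid)
{
	uint64_t hash;

	/* Illustrative multiplicative hash; not the kernel's constant. */
	hash = ((esid << 8) | (esid >> 28)) * 0x13bbUL;

	return ((hash & (SKETCH_KERNEL_VSID_BIT - 1)) | SKETCH_KERNEL_VSID_BIT);
}

Because the mapping is a pure function of the ESID, an SLB miss on a kernel address can be serviced from the fault path without a pmap lookup.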
 	struct slb *slbcache;
 	int i, j;
 
 	/* We don't want to be preempted while modifying the kernel map */
 	critical_enter();
 
 	slbcache = PCPU_GET(slb);
 
-	/* Check for an unused slot, abusing the USER_SR slot as a full flag */
-	if (slbcache[USER_SR].slbe == 0) {
-		for (i = 0; i < USER_SR; i++) {
+	/* Check for an unused slot, abusing the user slot as a full flag */
+	if (slbcache[USER_SLB_SLOT].slbe == 0) {
+		for (i = 0; i < USER_SLB_SLOT; i++) {
 			if (!(slbcache[i].slbe & SLBE_VALID))
 				goto fillkernslb;
 		}
 
-		if (i == USER_SR)
-			slbcache[USER_SR].slbe = 1;
+		if (i == USER_SLB_SLOT)
+			slbcache[USER_SLB_SLOT].slbe = 1;
 	}
 
 	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
-		if (i == USER_SR)
+		if (i == USER_SLB_SLOT)
 			continue;
 
 		if (SLB_SPILLABLE(slbcache[i].slbe))
 			break;
 	}
 
 	KASSERT(j < 64, ("All kernel SLB slots locked!"));
 
--- 53 unchanged lines hidden ---
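The second hunk is a mechanical rename: the reserved SLB slot for the user segment, formerly USER_SR, becomes USER_SLB_SLOT, and the comment follows suit. The surrounding logic is unchanged: the user slot's slbe field never holds a valid kernel entry, so writing 1 there cheaply records that every kernel slot is occupied, and the eviction loop then picks a pseudo-random victim by starting at mftb() % 64 (the timebase), walking all 64 slots circularly, skipping the user slot, and stopping at the first spillable entry. A self-contained sketch of that victim-selection idiom follows; N_SLB_SLOTS, USER_SLOT, spillable(), and struct slb_entry are stand-ins, not the kernel's identifiers.

#include <stdbool.h>
#include <stdint.h>

#define	N_SLB_SLOTS	64	/* number of SLB entries on this hardware */
#define	USER_SLOT	0	/* hypothetical reserved user slot */

struct slb_entry { uint64_t slbe; };

/* Stand-in for SLB_SPILLABLE(): may this entry be evicted? */
static bool
spillable(uint64_t slbe)
{
	return ((slbe & 0x8UL) == 0);	/* illustrative "locked" bit */
}

/* Return the index of an evictable slot, or -1 if every slot is locked. */
static int
pick_victim(const struct slb_entry *cache, uint64_t timebase)
{
	int i, j;

	/* Start at a pseudo-random slot and scan all 64 circularly. */
	for (i = (int)(timebase % N_SLB_SLOTS), j = 0; j < N_SLB_SLOTS;
	    j++, i = (i + 1) % N_SLB_SLOTS) {
		if (i == USER_SLOT)	/* never evict the user segment */
			continue;
		if (spillable(cache[i].slbe))
			return (i);
	}
	return (-1);
}

Randomizing the starting index spreads evictions across the SLB instead of always victimizing the low-numbered slots, and the separate j counter bounds the walk so the caller can assert, as the KASSERT above does, that at least one slot was evictable.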