--- mmu_oea64.c (1a4fcaebe30b3067a19baf8871a27942f4bb32cf)
+++ mmu_oea64.c (999987e51a2db77e5407c5a2cdb5d759b1317714)
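This revision serializes the tlbie instruction behind a new global spin mutex (tlbie_mutex), disables exceptions before TLBIE() switches to 64-bit mode, rewrites tlbia() to use the CPU-local tlbiel instruction, and removes the recurse argument from moea64_pvo_enter(): rather than asking callers whether the pteg/pvo table lock is already held, the function now drops that lock itself around its UMA allocation.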
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
  *
  * Redistribution and use in source and binary forms, with or without

[... 168 unchanged lines hidden ...]

 #define EIEIO() __asm __volatile("eieio");
 
 /*
  * The tlbie instruction must be executed in 64-bit mode
  * so we have to twiddle MSR[SF] around every invocation.
  * Just to add to the fun, exceptions must be off as well
  * so that we can't trap in 64-bit mode. What a pain.
  */
+struct mtx	tlbie_mutex;
 
 static __inline void
 TLBIE(pmap_t pmap, vm_offset_t va) {
+	uint64_t vpn;
+	register_t vpn_hi, vpn_lo;
 	register_t msr;
 	register_t scratch;
 
-	uint64_t vpn;
-	register_t vpn_hi, vpn_lo;
-
-#if 1
-	/*
-	 * CPU documentation says that tlbie takes the VPN, not the
-	 * VA. I think the code below does this correctly. We will see.
-	 */
-
 	vpn = (uint64_t)(va & ADDR_PIDX);
 	if (pmap != NULL)
 		vpn |= (va_to_vsid(pmap,va) << 28);
-#else
-	vpn = va;
-#endif
 
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
 
+	mtx_lock_spin(&tlbie_mutex);
 	__asm __volatile("\
 	    mfmsr %0; \
 	    clrldi %1,%0,49; \
-	    insrdi %1,1,1,0; \
+	    mtmsr %1; \
+	    insrdi %1,%5,1,0; \
 	    mtmsrd %1; \
 	    ptesync; \
 	    \
 	    sld %1,%2,%4; \
 	    or %1,%1,%3; \
 	    tlbie %1; \
 	    \
 	    mtmsrd %0; \
 	    eieio; \
 	    tlbsync; \
 	    ptesync;"
-	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32));
+	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1));
+	mtx_unlock_spin(&tlbie_mutex);
 }
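The hunk above makes two changes to TLBIE(). First, the whole ptesync/tlbie/eieio/tlbsync sequence now runs under a spin mutex, so only one CPU issues a tlbie at a time. Second, exceptions are now switched off with a 32-bit mtmsr before MSR[SF] is set, instead of in the same mtmsrd that enters 64-bit mode. As a reading aid, here is a C-level sketch of what the asm operands compute. It assumes the FreeBSD powerpc accessors mfmsr()/mtmsr() and the PSL_SF constant from <machine/psl.h>; mtmsrd() is written as though an inline wrapper existed for it, which is an assumption -- in the kernel it appears only as the instruction itself.

	static void
	tlbie_sketch(register_t vpn_hi, register_t vpn_lo)
	{
		register_t msr, scratch;
		uint64_t vpn;

		msr = mfmsr();		/* %0: MSR saved for the restore */
		scratch = msr & 0x7fff;	/* clrldi %1,%0,49: keep the low 15
					 * bits, clearing MSR[EE] and above */
		mtmsr(scratch);		/* exceptions off, still 32-bit mode */
		scratch |= PSL_SF;	/* insrdi %1,%5,1,0 with %5 == 1 */
		mtmsrd(scratch);	/* now executing in 64-bit mode */

		vpn = ((uint64_t)vpn_hi << 32) | vpn_lo; /* sld; or */
		/* ptesync; tlbie vpn; then mtmsrd(msr), eieio, tlbsync,
		 * ptesync to restore the MSR and order the invalidate */
		mtmsrd(msr);
	}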
 
 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
 #define ENABLE_TRANS(msr)	mtmsr(msr); isync()
 
 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
 #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

[... 113 unchanged lines hidden ...]

  * PTE calls.
  */
 static int	moea64_pte_insert(u_int, struct lpte *);
 
 /*
  * PVO calls.
  */
 static int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
-		    vm_offset_t, vm_offset_t, uint64_t, int, int);
+		    vm_offset_t, vm_offset_t, uint64_t, int);
 static void	moea64_pvo_remove(struct pvo_entry *, int);
 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
 static struct	lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
 
 /*
  * Utility routines.
  */
 static void	moea64_bridge_bootstrap(mmu_t mmup,

[... 456 unchanged lines hidden ...]

 	/*
 	 * Initialize the lock that synchronizes access to the pteg and pvo
 	 * tables.
 	 */
 	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
 	    MTX_RECURSE);
 
 	/*
+	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
+	 */
+	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);
+
+	/*
 	 * Initialise the unmanaged pvo pool.
 	 */
 	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
 		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
 	moea64_bpvo_pool_index = 0;
 
 	/*
 	 * Make sure kernel vsid is allocated as well as VSID 0.

[... 413 unchanged lines hidden ...]

 	if (wired)
 		pvo_flags |= PVO_WIRED;
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		pvo_flags |= PVO_FAKE;
 
 	error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
-	    pte_lo, pvo_flags, 0);
+	    pte_lo, pvo_flags);
 
 	if (pmap == kernel_pmap)
 		TLBIE(pmap, va);
 
 	/*
 	 * Flush the page from the instruction cache if this page is
 	 * mapped executable and cacheable.
 	 */

[... 156 unchanged lines hidden ...]

 	}
 
 	va = pvo_allocator_start;
 	pvo_allocator_start += PAGE_SIZE;
 
 	if (pvo_allocator_start >= pvo_allocator_end)
 		panic("Ran out of PVO allocator buffer space!");
 
-	/* Now call pvo_enter in recursive mode */
 	moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
-	    PVO_WIRED | PVO_BOOTSTRAP, 1);
+	    PVO_WIRED | PVO_BOOTSTRAP);
 
 	TLBIE(kernel_pmap, va);
 
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
 
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
 		bzero((void *)va, PAGE_SIZE);
 
 	return (void *)va;
 }
 
 void
 moea64_init(mmu_t mmu)

[... 126 unchanged lines hidden ...]

 		panic("Trying to enter an address in KVA -- %#x!\n",pa);
 	}
 
 	pte_lo = moea64_calc_wimg(pa);
 
 	PMAP_LOCK(kernel_pmap);
 	error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, pa, pte_lo,
-	    PVO_WIRED | VM_PROT_EXECUTE, 0);
+	    PVO_WIRED | VM_PROT_EXECUTE);
 
 	TLBIE(kernel_pmap, va);
 
 	if (error != 0 && error != ENOENT)
 		panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va,
 		    pa, error);
 
 	/*

[... 372 unchanged lines hidden ...]

 	}
 	panic("moea64_bootstrap_alloc: could not allocate memory");
 }
 
 static void
 tlbia(void)
 {
 	vm_offset_t i;
+	register_t msr, scratch;
 
-	for (i = 0; i < 0xFF000; i += 0x00001000)
-		TLBIE(NULL,i);
+	for (i = 0; i < 0xFF000; i += 0x00001000) {
+		__asm __volatile("\
+		    mfmsr %0; \
+		    mr %1, %0; \
+		    insrdi %1,%3,1,0; \
+		    mtmsrd %1; \
+		    ptesync; \
+		    \
+		    tlbiel %2; \
+		    \
+		    mtmsrd %0; \
+		    eieio; \
+		    tlbsync; \
+		    ptesync;"
+		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
+	}
 }
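tlbia() now issues tlbiel, the CPU-local form of the invalidate, directly instead of calling TLBIE() per page. Two details follow from the hunk: tlbiel is not broadcast to other processors, which is why this loop does not need tlbie_mutex, and the MSR handling here copies the whole register (mr %1,%0) and only sets MSR[SF], leaving the exception bit as it was. The operands map as %0 = saved MSR, %1 = scratch, %2 = i, %3 = 1. A sketch of the loop's shape, where tlbiel_page() is a hypothetical wrapper for the asm block and the 0xFF000/0x1000 bounds reflect an assumption about the TLB geometry of the supported CPUs:

	static void
	tlbia_sketch(void)
	{
		vm_offset_t i;

		/* One local invalidate per 4 KB step; the effective-
		 * address bits of i select which TLB congruence class
		 * tlbiel flushes. */
		for (i = 0; i < 0xFF000; i += 0x00001000)
			tlbiel_page(i);	/* hypothetical tlbiel wrapper */
	}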
 
 static int
 moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
-    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags, int recurse)
+    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
 {
 	struct pvo_entry *pvo;
 	uint64_t vsid;
 	int	first;
 	u_int	ptegidx;
 	int	i;
 	int	bootstrap;
 

[... 19 unchanged lines hidden ...]

 	va &= ~ADDR_POFF;
 	vsid = va_to_vsid(pm, va);
 	ptegidx = va_to_pteg(vsid, va);
 
 	/*
 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
 	 * there is a mapping.
 	 */
-	if (!recurse)
-		LOCK_TABLE();
+	LOCK_TABLE();
 
 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
 			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
 			    (pte_lo & LPTE_PP)) {
-				if (!recurse)
-					UNLOCK_TABLE();
+				UNLOCK_TABLE();
 				return (0);
 			}
 			moea64_pvo_remove(pvo, -1);
 			break;
 		}
 	}
 
 	/*

[... 4 unchanged lines hidden ...]

 			panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
 			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
 			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
 		}
 		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
 		moea64_bpvo_pool_index++;
 		bootstrap = 1;
 	} else {
+		/*
+		 * Note: drop the table lock around the UMA allocation in
+		 * case the UMA allocator needs to manipulate the page
+		 * table. The mapping we are working with is already
+		 * protected by the PMAP lock.
+		 */
+		UNLOCK_TABLE();
 		pvo = uma_zalloc(zone, M_NOWAIT);
+		LOCK_TABLE();
 	}
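This hunk is what allows the recurse flag to go away. uma_zalloc() may need a fresh slab page, and for these zones the page allocator re-enters the pmap (the function above containing pvo_allocator_start, whose call into moea64_pvo_enter() used to pass the flag). Previously the inner call skipped taking the table lock it was already running under; now the outer call releases the table mutex across the allocation, so the re-entrant call simply acquires the lock normally. The pattern in generic form -- hypothetical names, a sketch rather than the committed code:

	/*
	 * Drop a lock the allocator may itself need, allocate, then
	 * retake it.  Whatever the lock guards must be revalidated
	 * afterwards unless, as in moea64_pvo_enter(), a second lock
	 * (the PMAP lock) held across the whole sequence keeps the
	 * mapping stable.
	 */
	mtx_unlock(&table_mtx);
	item = uma_zalloc(zone, M_NOWAIT);	/* may recurse into the pmap */
	mtx_lock(&table_mtx);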
 
 	if (pvo == NULL) {
-		if (!recurse)
-			UNLOCK_TABLE();
+		UNLOCK_TABLE();
 		return (ENOMEM);
 	}
 
 	moea64_pvo_entries++;
 	pvo->pvo_vaddr = va;
 	pvo->pvo_pmap = pm;
 	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
 	pvo->pvo_vaddr &= ~ADDR_POFF;

[... 30 unchanged lines hidden ...]

 	i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
 	if (i >= 0) {
 		PVO_PTEGIDX_SET(pvo, i);
 	} else {
 		panic("moea64_pvo_enter: overflow");
 		moea64_pte_overflow++;
 	}
 
-	if (!recurse)
-		UNLOCK_TABLE();
+	UNLOCK_TABLE();
 
 	return (first ? ENOENT : 0);
 }
 
 static void
 moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
 {
 	struct lpte *pt;

[... 369 unchanged lines hidden ...]
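As far as this diff shows, the resulting lock order nests the PMAP lock outermost, the pteg/pvo table mutex inside it, and tlbie_mutex as a leaf spin lock touched only inside TLBIE() itself. Schematically, following the moea64_kenter() hunk above (a sketch of the call shape, not a verbatim excerpt):

	PMAP_LOCK(kernel_pmap);			/* outermost */
	error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, pa, pte_lo,
	    PVO_WIRED | VM_PROT_EXECUTE);	/* takes/drops table mutex */
	TLBIE(kernel_pmap, va);			/* takes/drops tlbie_mutex */
	/* ... */
	PMAP_UNLOCK(kernel_pmap);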