/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
#define	PVO_WIRED		0x010		/* PVO entry is wired */
#define	PVO_MANAGED		0x020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_FAKE		0x100		/* fictitious phys page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	MOEA_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
u_int		phys_avail_count;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

extern struct pmap ofw_pmap;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;

/*
 * PTEG data.
 */
static struct	pteg *moea_pteg_table;
u_int		moea_pteg_count;
u_int		moea_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head moea_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *moea_bpvo_pool;
static int	moea_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

struct	pvo_entry *moea_pvo_zeropage;
struct	mtx	moea_pvo_zeropage_mtx;

vm_offset_t	moea_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		moea_rkva_count = 4;

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static struct		pvo_entry *moea_rkva_alloc(mmu_t);
static void		moea_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		moea_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int, int *);
static void		moea_kremove(mmu_t, vm_offset_t);
static void		tlbia(void);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
boolean_t moea_page_executable(mmu_t, vm_page_t);

static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_remove_write,	moea_remove_write),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
	MMUMETHOD(mmu_page_executable,	moea_page_executable),

	{ 0, 0 }
};

static mmu_def_t oea_mmu = {
	MMU_TYPE_OEA,
	moea_methods,
	0
};
MMU_DEF(oea_mmu);


static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&moea_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}
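/*
 * Added note on the lookup helpers above (a sketch, assuming the usual OEA
 * 4 KB page layout; the exact bit positions come from the ADDR_*, SR_* and
 * PTE_* definitions in the machine headers):
 *
 *	sr  = va_to_sr(pm->pm_sr, va);	- segment register selected by the
 *					  top four bits of the address
 *	idx = va_to_pteg(sr, va);	- primary PTEG index, i.e. the VSID
 *					  XORed with the page index and masked
 *					  with moea_pteg_mask
 *
 * moea_pte_match() then compares a candidate PTE's VSID/API fields against
 * the same values, with "which" carrying the hash-identifier (HID) bit that
 * distinguishes the primary from the secondary hash.
 */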
static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the ref & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
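	/*
	 * Added note: each battable[] entry describes one 256 MB block.
	 * Roughly, and subject to the exact encodings in <machine/bat.h>,
	 * BATU() holds the effective block address, the block length and the
	 * Vs/Vp valid bits, while BATL() holds the physical block address,
	 * the WIMG storage attributes and the PP protection bits.  So the
	 * entry above maps effective 0x00000000-0x0fffffff to the same
	 * physical range, cacheable (BAT_M) and read/write.
	 */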
	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	      :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	      :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	      :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;
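	/*
	 * Added note: marking the kernel VSID and VSID 0 in moea_vsid_bitmap
	 * here means moea_pinit() can never hand either of them out to a
	 * user pmap; its allocator skips hash value 0 explicitly and treats
	 * set bitmap bits as already taken.
	 */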
	/*
	 * Set up the Open Firmware pmap and add its mappings.
	 */
	moea_pinit(mmup, &ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("moea_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("moea_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "moea_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			PMAP_LOCK(&ofw_pmap);
			moea_enter_locked(&ofw_pmap,
				   translations[i].om_va + off, &m,
				   VM_PROT_ALL, 1);
			PMAP_UNLOCK(&ofw_pmap);
			ofw_mappings++;
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		moea_kenter(mmup, va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);
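	/*
	 * Added note: the hardware setup below loads the 16 segment
	 * registers and then points SDR1 at the PTEG table.  Per the OEA
	 * architecture, the value written to SDR1 carries the table's
	 * physical base address (HTABORG) in its upper bits and a mask of
	 * extra hash bits (HTABMASK) in its low bits, which is roughly why
	 * (moea_pteg_mask >> 10) is ORed in, why moea_pteg_count must be a
	 * power of two, and why the table was allocated aligned to its own
	 * size above.
	 */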
	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)moea_pteg_table | (moea_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)moea_kextract(mmu, (vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}
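/*
 * Added note: moea_copy_page() passes raw physical addresses to kcopy().
 * This appears to rely on all of physical memory being reachable through the
 * 1:1 on-demand BAT entries installed in moea_bootstrap(); the zero-page
 * routines below make that assumption only for the first segment and fall
 * back to a temporary mapping otherwise.
 */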
/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (moea_initialized) {
		if (moea_pvo_zeropage == NULL) {
			moea_pvo_zeropage = moea_rkva_alloc(mmu);
			mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
			    NULL, MTX_DEF);
		}
		mtx_lock(&moea_pvo_zeropage_mtx);
		moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
	} else {
		panic("moea_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	if (pa >= SEGMENT_LENGTH) {
		moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
		mtx_unlock(&moea_pvo_zeropage_mtx);
	}
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (moea_initialized) {
		if (moea_pvo_zeropage == NULL) {
			moea_pvo_zeropage = moea_rkva_alloc(mmu);
			mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
			    NULL, MTX_DEF);
		}
		mtx_lock(&moea_pvo_zeropage_mtx);
		moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
	} else {
		panic("moea_zero_page_area: can't zero pa %#x", pa);
	}

	bzero(va + off, size);

	if (pa >= SEGMENT_LENGTH) {
		moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
		mtx_unlock(&moea_pvo_zeropage_mtx);
	}
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS)
		pvo_head = &moea_pvo_kunmanaged;

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
		if (LIST_EMPTY(pvo_head)) {
			moea_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped)
			vm_page_flag_set(m, PG_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			moea_attr_save(pg, PTE_EXEC);
	}

	/* XXX syncicache always until problems are sorted */
	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pm);
}

void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	PMAP_UNLOCK(pm);

}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	return (m);
}

void
moea_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea_init");

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
		return (FALSE);

	return (moea_query_bit(m, PTE_CHG));
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	moea_clear_bit(m, PTE_REF, NULL);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	moea_clear_bit(m, PTE_CHG, NULL);
}
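/*
 * Added summary of how referenced/changed state flows through this pmap:
 * the hardware sets PTE_REF/PTE_CHG in the PTEG entry; moea_pte_synch()
 * copies them into the cached pvo_pte when a PTE is unset or queried;
 * moea_attr_save() folds them into the page's md.mdpg_attrs when a mapping
 * goes away; and routines such as moea_remove_write() below call
 * vm_page_dirty() when a collected PTE_CHG bit shows the page was modified.
 */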
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	lo = moea_attr_fetch(m);
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				moea_pte_synch(pt, &pvo->pvo_pte);
				lo |= pvo->pvo_pte.pte_lo;
				pvo->pvo_pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 *	moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = moea_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		moea_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}
/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!moea_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
		return (count);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	return (count);
}

static u_int	moea_vsidcontext;

void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.
		 * (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("moea_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

	moea_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	CTR4(KTR_PMAP, "moea_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(mmu, pm, sva, eva);
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}
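/*
 * Added usage sketch (illustrative, not from this file): moea_qenter() and
 * moea_qremove() below are expected to be used as a matched pair on the same
 * KVA range, e.g. to window a run of pages for a copy:
 *
 *	moea_qenter(mmu, sva, mlist, npages);
 *	... access the pages through sva ...
 *	moea_qremove(mmu, sva, npages);
 *
 * The mappings are wired, unmanaged kernel mappings, so no reference or
 * modify information is recorded for the underlying pages.
 */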
/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea_release(mmu_t mmu, pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			moea_pvo_remove(pvo, pteidx);
		}
	}
	PMAP_UNLOCK(pm);
	vm_page_unlock_queues();
}

/*
 * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		MOEA_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea_bootstrap_alloc: could not allocate memory");
}
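/*
 * Added note: phys_avail[] is kept as a zero-terminated list of
 * (start, end) pairs.  In the allocator above, carving from the front or
 * back of a pair just shrinks it; an aligned allocation that lands in the
 * middle splits one pair into two, which is why the remaining entries are
 * shifted up by two slots first.  For example, taking one aligned page out
 * of the middle of (0x100000, 0x200000) might leave the pairs
 * (0x100000, 0x180000) and (0x181000, 0x200000).
 */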
/*
 * Return an unmapped pvo for a kernel virtual address.
 * Used by pmap functions that operate on physical pages.
 */
static struct pvo_entry *
moea_rkva_alloc(mmu_t mmu)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	vm_offset_t	kva;
	int	pteidx;

	if (moea_rkva_count == 0)
		panic("moea_rkva_alloc: no more reserved KVAs");

	kva = moea_rkva_start + (PAGE_SIZE * --moea_rkva_count);
	moea_kenter(mmu, kva, 0);

	pvo = moea_pvo_find_va(kernel_pmap, kva, &pteidx);

	if (pvo == NULL)
		panic("moea_rkva_alloc: moea_pvo_find_va failed");

	pt = moea_pvo_to_pte(pvo, pteidx);

	if (pt == NULL)
		panic("moea_rkva_alloc: moea_pvo_to_pte failed");

	moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	mtx_unlock(&moea_table_mutex);
	PVO_PTEGIDX_CLR(pvo);

	moea_pte_overflow++;

	return (pvo);
}

static void
moea_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
    int *depth_p)
{
	struct	pte *pt;

	/*
	 * If this pvo already has a valid pte, we need to save it so it can
	 * be restored later.  We then just reload the new PTE over the old
	 * slot.
	 */
	if (saved_pt != NULL) {
		pt = moea_pvo_to_pte(pvo, -1);

		if (pt != NULL) {
			moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
			PVO_PTEGIDX_CLR(pvo);
			moea_pte_overflow++;
		}

		*saved_pt = pvo->pvo_pte;

		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
	}

	pvo->pvo_pte.pte_lo |= pa;

	if (!moea_pte_spill(pvo->pvo_vaddr))
		panic("moea_pa_map: could not spill pvo %p", pvo);

	if (depth_p != NULL)
		(*depth_p)++;
}

static void
moea_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
{
	struct	pte *pt;

	pt = moea_pvo_to_pte(pvo, -1);

	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
		moea_pte_overflow++;
	}

	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;

	/*
	 * If there is a saved PTE and it's valid, restore it and return.
	 */
	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
		if (depth_p != NULL && --(*depth_p) == 0)
			panic("moea_pa_unmap: restoring but depth == 0");

		pvo->pvo_pte = *saved_pt;

		if (!moea_pte_spill(pvo->pvo_vaddr))
			panic("moea_pa_unmap: could not spill pvo %p", pvo);
	}
}

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static void
tlbia(void)
{
	caddr_t	i;

	SYNC();
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
	}
	TLBSYNC();
	SYNC();
}

static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
1927 */ 1928 mtx_lock(&moea_table_mutex); 1929 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 1930 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1931 if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && 1932 (pvo->pvo_pte.pte_lo & PTE_PP) == 1933 (pte_lo & PTE_PP)) { 1934 mtx_unlock(&moea_table_mutex); 1935 return (0); 1936 } 1937 moea_pvo_remove(pvo, -1); 1938 break; 1939 } 1940 } 1941
1942 /* 1943 * If we aren't overwriting a mapping, try to allocate. 1944 */ 1945 if (moea_initialized) { 1946 pvo = uma_zalloc(zone, M_NOWAIT); 1947 } else { 1948 if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) { 1949 panic("moea_pvo_enter: bpvo pool exhausted, %d, %d, %d", 1950 moea_bpvo_pool_index, BPVO_POOL_SIZE, 1951 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 1952 } 1953 pvo = &moea_bpvo_pool[moea_bpvo_pool_index]; 1954 moea_bpvo_pool_index++; 1955 bootstrap = 1; 1956 } 1957
1958 if (pvo == NULL) { 1959 mtx_unlock(&moea_table_mutex); 1960 return (ENOMEM); 1961 } 1962
1963 moea_pvo_entries++; 1964 pvo->pvo_vaddr = va; 1965 pvo->pvo_pmap = pm; 1966 LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink); 1967 pvo->pvo_vaddr &= ~ADDR_POFF; 1968 if (flags & VM_PROT_EXECUTE) 1969 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1970 if (flags & PVO_WIRED) 1971 pvo->pvo_vaddr |= PVO_WIRED; 1972 if (pvo_head != &moea_pvo_kunmanaged) 1973 pvo->pvo_vaddr |= PVO_MANAGED; 1974 if (bootstrap) 1975 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1976 if (flags & PVO_FAKE) 1977 pvo->pvo_vaddr |= PVO_FAKE; 1978
1979 moea_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 1980
1981 /* 1982 * Remember if the list was empty and therefore will be the first 1983 * item. 1984 */ 1985 if (LIST_FIRST(pvo_head) == NULL) 1986 first = 1; 1987 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1988
1989 if (pvo->pvo_vaddr & PVO_WIRED) 1990 pm->pm_stats.wired_count++; 1991 pm->pm_stats.resident_count++; 1992
1993 /* 1994 * We hope this succeeds but it isn't required. 1995 */ 1996 i = moea_pte_insert(ptegidx, &pvo->pvo_pte); 1997 if (i >= 0) { 1998 PVO_PTEGIDX_SET(pvo, i); 1999 } else { 2000 panic("moea_pvo_enter: overflow"); 2001 moea_pte_overflow++; 2002 } 2003 mtx_unlock(&moea_table_mutex); 2004
2005 return (first ? ENOENT : 0); 2006 } 2007
2008 static void 2009 moea_pvo_remove(struct pvo_entry *pvo, int pteidx) 2010 { 2011 struct pte *pt; 2012
2013 /* 2014 * If there is an active pte entry, we need to deactivate it (and 2015 * save the ref & chg bits). 2016 */ 2017 pt = moea_pvo_to_pte(pvo, pteidx); 2018 if (pt != NULL) { 2019 moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2020 mtx_unlock(&moea_table_mutex); 2021 PVO_PTEGIDX_CLR(pvo); 2022 } else { 2023 moea_pte_overflow--; 2024 } 2025
2026 /* 2027 * Update our statistics. 2028 */ 2029 pvo->pvo_pmap->pm_stats.resident_count--; 2030 if (pvo->pvo_vaddr & PVO_WIRED) 2031 pvo->pvo_pmap->pm_stats.wired_count--; 2032
2033 /* 2034 * Save the REF/CHG bits into their cache if the page is managed. 2035 */ 2036 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2037 struct vm_page *pg; 2038
2039 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 2040 if (pg != NULL) { 2041 moea_attr_save(pg, pvo->pvo_pte.pte_lo & 2042 (PTE_REF | PTE_CHG)); 2043 } 2044 } 2045
2046 /* 2047 * Remove this PVO from the PV list. 2048 */ 2049 LIST_REMOVE(pvo, pvo_vlink); 2050
2051 /* 2052 * Remove this from the overflow list and return it to the pool 2053 * if we aren't going to reuse it. 2054 */ 2055 LIST_REMOVE(pvo, pvo_olink); 2056 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2057 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ?
moea_mpvo_zone : 2058 moea_upvo_zone, pvo); 2059 moea_pvo_entries--; 2060 moea_pvo_remove_calls++; 2061 } 2062 2063 static __inline int 2064 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 2065 { 2066 int pteidx; 2067 2068 /* 2069 * We can find the actual pte entry without searching by grabbing 2070 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 2071 * noticing the HID bit. 2072 */ 2073 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 2074 if (pvo->pvo_pte.pte_hi & PTE_HID) 2075 pteidx ^= moea_pteg_mask * 8; 2076 2077 return (pteidx); 2078 } 2079 2080 static struct pvo_entry * 2081 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 2082 { 2083 struct pvo_entry *pvo; 2084 int ptegidx; 2085 u_int sr; 2086 2087 va &= ~ADDR_POFF; 2088 sr = va_to_sr(pm->pm_sr, va); 2089 ptegidx = va_to_pteg(sr, va); 2090 2091 mtx_lock(&moea_table_mutex); 2092 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2093 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2094 if (pteidx_p) 2095 *pteidx_p = moea_pvo_pte_index(pvo, ptegidx); 2096 break; 2097 } 2098 } 2099 mtx_unlock(&moea_table_mutex); 2100 2101 return (pvo); 2102 } 2103 2104 static struct pte * 2105 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 2106 { 2107 struct pte *pt; 2108 2109 /* 2110 * If we haven't been supplied the ptegidx, calculate it. 2111 */ 2112 if (pteidx == -1) { 2113 int ptegidx; 2114 u_int sr; 2115 2116 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 2117 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2118 pteidx = moea_pvo_pte_index(pvo, ptegidx); 2119 } 2120 2121 pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2122 mtx_lock(&moea_table_mutex); 2123 2124 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2125 panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no " 2126 "valid pte index", pvo); 2127 } 2128 2129 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2130 panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo " 2131 "pvo but no valid pte", pvo); 2132 } 2133 2134 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 2135 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 2136 panic("moea_pvo_to_pte: pvo %p has valid pte in " 2137 "moea_pteg_table %p but invalid in pvo", pvo, pt); 2138 } 2139 2140 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 2141 != 0) { 2142 panic("moea_pvo_to_pte: pvo %p pte does not match " 2143 "pte %p in moea_pteg_table", pvo, pt); 2144 } 2145 2146 mtx_assert(&moea_table_mutex, MA_OWNED); 2147 return (pt); 2148 } 2149 2150 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 2151 panic("moea_pvo_to_pte: pvo %p has invalid pte %p in " 2152 "moea_pteg_table but valid in pvo", pvo, pt); 2153 } 2154 2155 mtx_unlock(&moea_table_mutex); 2156 return (NULL); 2157 } 2158 2159 /* 2160 * XXX: THIS STUFF SHOULD BE IN pte.c? 2161 */ 2162 int 2163 moea_pte_spill(vm_offset_t addr) 2164 { 2165 struct pvo_entry *source_pvo, *victim_pvo; 2166 struct pvo_entry *pvo; 2167 int ptegidx, i, j; 2168 u_int sr; 2169 struct pteg *pteg; 2170 struct pte *pt; 2171 2172 moea_pte_spills++; 2173 2174 sr = mfsrin(addr); 2175 ptegidx = va_to_pteg(sr, addr); 2176 2177 /* 2178 * Have to substitute some entry. Use the primary hash for this. 2179 * Use low bits of timebase as random generator. 
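 * The low three bits of the timebase select one of the eight slots in
 * the primary PTEG as the eviction candidate.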
2180 */ 2181 pteg = &moea_pteg_table[ptegidx]; 2182 mtx_lock(&moea_table_mutex); 2183 __asm __volatile("mftb %0" : "=r"(i)); 2184 i &= 7; 2185 pt = &pteg->pt[i]; 2186 2187 source_pvo = NULL; 2188 victim_pvo = NULL; 2189 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2190 /* 2191 * We need to find a pvo entry for this address. 2192 */ 2193 MOEA_PVO_CHECK(pvo); 2194 if (source_pvo == NULL && 2195 moea_pte_match(&pvo->pvo_pte, sr, addr, 2196 pvo->pvo_pte.pte_hi & PTE_HID)) { 2197 /* 2198 * Now found an entry to be spilled into the pteg. 2199 * The PTE is now valid, so we know it's active. 2200 */ 2201 j = moea_pte_insert(ptegidx, &pvo->pvo_pte); 2202 2203 if (j >= 0) { 2204 PVO_PTEGIDX_SET(pvo, j); 2205 moea_pte_overflow--; 2206 MOEA_PVO_CHECK(pvo); 2207 mtx_unlock(&moea_table_mutex); 2208 return (1); 2209 } 2210 2211 source_pvo = pvo; 2212 2213 if (victim_pvo != NULL) 2214 break; 2215 } 2216 2217 /* 2218 * We also need the pvo entry of the victim we are replacing 2219 * so save the R & C bits of the PTE. 2220 */ 2221 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 2222 moea_pte_compare(pt, &pvo->pvo_pte)) { 2223 victim_pvo = pvo; 2224 if (source_pvo != NULL) 2225 break; 2226 } 2227 } 2228 2229 if (source_pvo == NULL) { 2230 mtx_unlock(&moea_table_mutex); 2231 return (0); 2232 } 2233 2234 if (victim_pvo == NULL) { 2235 if ((pt->pte_hi & PTE_HID) == 0) 2236 panic("moea_pte_spill: victim p-pte (%p) has no pvo" 2237 "entry", pt); 2238 2239 /* 2240 * If this is a secondary PTE, we need to search it's primary 2241 * pvo bucket for the matching PVO. 2242 */ 2243 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask], 2244 pvo_olink) { 2245 MOEA_PVO_CHECK(pvo); 2246 /* 2247 * We also need the pvo entry of the victim we are 2248 * replacing so save the R & C bits of the PTE. 2249 */ 2250 if (moea_pte_compare(pt, &pvo->pvo_pte)) { 2251 victim_pvo = pvo; 2252 break; 2253 } 2254 } 2255 2256 if (victim_pvo == NULL) 2257 panic("moea_pte_spill: victim s-pte (%p) has no pvo" 2258 "entry", pt); 2259 } 2260 2261 /* 2262 * We are invalidating the TLB entry for the EA we are replacing even 2263 * though it's valid. If we don't, we lose any ref/chg bit changes 2264 * contained in the TLB entry. 2265 */ 2266 source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 2267 2268 moea_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 2269 moea_pte_set(pt, &source_pvo->pvo_pte); 2270 2271 PVO_PTEGIDX_CLR(victim_pvo); 2272 PVO_PTEGIDX_SET(source_pvo, i); 2273 moea_pte_replacements++; 2274 2275 MOEA_PVO_CHECK(victim_pvo); 2276 MOEA_PVO_CHECK(source_pvo); 2277 2278 mtx_unlock(&moea_table_mutex); 2279 return (1); 2280 } 2281 2282 static int 2283 moea_pte_insert(u_int ptegidx, struct pte *pvo_pt) 2284 { 2285 struct pte *pt; 2286 int i; 2287 2288 mtx_assert(&moea_table_mutex, MA_OWNED); 2289 2290 /* 2291 * First try primary hash. 2292 */ 2293 for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2294 if ((pt->pte_hi & PTE_VALID) == 0) { 2295 pvo_pt->pte_hi &= ~PTE_HID; 2296 moea_pte_set(pt, pvo_pt); 2297 return (i); 2298 } 2299 } 2300 2301 /* 2302 * Now try secondary hash. 
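 * The secondary PTEG is located by XORing the index with moea_pteg_mask;
 * entries placed there carry PTE_HID so later lookups know which hash
 * function found the slot.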
2303 */ 2304 ptegidx ^= moea_pteg_mask; 2305 2306 for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2307 if ((pt->pte_hi & PTE_VALID) == 0) { 2308 pvo_pt->pte_hi |= PTE_HID; 2309 moea_pte_set(pt, pvo_pt); 2310 return (i); 2311 } 2312 } 2313 2314 panic("moea_pte_insert: overflow"); 2315 return (-1); 2316 } 2317 2318 static boolean_t 2319 moea_query_bit(vm_page_t m, int ptebit) 2320 { 2321 struct pvo_entry *pvo; 2322 struct pte *pt; 2323 2324 #if 0 2325 if (moea_attr_fetch(m) & ptebit) 2326 return (TRUE); 2327 #endif 2328 2329 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2330 MOEA_PVO_CHECK(pvo); /* sanity check */ 2331 2332 /* 2333 * See if we saved the bit off. If so, cache it and return 2334 * success. 2335 */ 2336 if (pvo->pvo_pte.pte_lo & ptebit) { 2337 moea_attr_save(m, ptebit); 2338 MOEA_PVO_CHECK(pvo); /* sanity check */ 2339 return (TRUE); 2340 } 2341 } 2342 2343 /* 2344 * No luck, now go through the hard part of looking at the PTEs 2345 * themselves. Sync so that any pending REF/CHG bits are flushed to 2346 * the PTEs. 2347 */ 2348 SYNC(); 2349 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2350 MOEA_PVO_CHECK(pvo); /* sanity check */ 2351 2352 /* 2353 * See if this pvo has a valid PTE. if so, fetch the 2354 * REF/CHG bits from the valid PTE. If the appropriate 2355 * ptebit is set, cache it and return success. 2356 */ 2357 pt = moea_pvo_to_pte(pvo, -1); 2358 if (pt != NULL) { 2359 moea_pte_synch(pt, &pvo->pvo_pte); 2360 mtx_unlock(&moea_table_mutex); 2361 if (pvo->pvo_pte.pte_lo & ptebit) { 2362 moea_attr_save(m, ptebit); 2363 MOEA_PVO_CHECK(pvo); /* sanity check */ 2364 return (TRUE); 2365 } 2366 } 2367 } 2368 2369 return (FALSE); 2370 } 2371 2372 static u_int 2373 moea_clear_bit(vm_page_t m, int ptebit, int *origbit) 2374 { 2375 u_int count; 2376 struct pvo_entry *pvo; 2377 struct pte *pt; 2378 int rv; 2379 2380 /* 2381 * Clear the cached value. 2382 */ 2383 rv = moea_attr_fetch(m); 2384 moea_attr_clear(m, ptebit); 2385 2386 /* 2387 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2388 * we can reset the right ones). note that since the pvo entries and 2389 * list heads are accessed via BAT0 and are never placed in the page 2390 * table, we don't have to worry about further accesses setting the 2391 * REF/CHG bits. 2392 */ 2393 SYNC(); 2394 2395 /* 2396 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2397 * valid pte clear the ptebit from the valid pte. 
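 * The pre-clear value of the bits is accumulated into rv and handed back
 * through origbit so the caller can tell whether the bit was set in any
 * mapping of the page.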
2398 */ 2399 count = 0; 2400 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2401 MOEA_PVO_CHECK(pvo); /* sanity check */ 2402 pt = moea_pvo_to_pte(pvo, -1); 2403 if (pt != NULL) { 2404 moea_pte_synch(pt, &pvo->pvo_pte); 2405 if (pvo->pvo_pte.pte_lo & ptebit) { 2406 count++; 2407 moea_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2408 } 2409 mtx_unlock(&moea_table_mutex); 2410 } 2411 rv |= pvo->pvo_pte.pte_lo; 2412 pvo->pvo_pte.pte_lo &= ~ptebit; 2413 MOEA_PVO_CHECK(pvo); /* sanity check */ 2414 } 2415
2416 if (origbit != NULL) { 2417 *origbit = rv; 2418 } 2419
2420 return (count); 2421 } 2422
2423 /* 2424 * Return 0 if the physical range is encompassed by the battable[idx] 2425 */ 2426 static int 2427 moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) 2428 { 2429 u_int prot; 2430 u_int32_t start; 2431 u_int32_t end; 2432 u_int32_t bat_ble; 2433
2434 /* 2435 * Return immediately if not a valid mapping 2436 */ 2437 if (!(battable[idx].batu & BAT_Vs)) 2438 return (EINVAL); 2439
2440 /* 2441 * The BAT entry must be cache-inhibited, guarded, and r/w 2442 * so it can function as an i/o page 2443 */ 2444 prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); 2445 if (prot != (BAT_I|BAT_G|BAT_PP_RW)) 2446 return (EPERM); 2447
2448 /* 2449 * The address should be within the BAT range. Assume that the 2450 * start address in the BAT has the correct alignment (thus 2451 * not requiring masking) 2452 */ 2453 start = battable[idx].batl & BAT_PBS; 2454 bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; 2455 end = start | (bat_ble << 15) | 0x7fff; 2456
2457 if ((pa < start) || ((pa + size) > end)) 2458 return (ERANGE); 2459
2460 return (0); 2461 } 2462
2463 boolean_t 2464 moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2465 { 2466 int i; 2467
2468 /* 2469 * This currently does not work for entries that 2470 * overlap 256M BAT segments. 2471 */ 2472
2473 for (i = 0; i < 16; i++) 2474 if (moea_bat_mapped(i, pa, size) == 0) 2475 return (0); 2476
2477 return (EFAULT); 2478 } 2479
2480 boolean_t 2481 moea_page_executable(mmu_t mmu, vm_page_t pg) 2482 { 2483 return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC); 2484 } 2485
2486 /* 2487 * Map a set of physical memory pages into the kernel virtual 2488 * address space. Return a pointer to where it is mapped. This 2489 * routine is intended to be used for mapping device memory, 2490 * NOT real memory. 2491 */ 2492 void * 2493 moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2494 { 2495 vm_offset_t va, tmpva, ppa, offset; 2496 int i; 2497
2498 ppa = trunc_page(pa); 2499 offset = pa & PAGE_MASK; 2500 size = roundup(offset + size, PAGE_SIZE); 2501
2502 GIANT_REQUIRED; 2503
2504 /* 2505 * If the physical address lies within a valid BAT table entry, 2506 * return the 1:1 mapping. This currently doesn't work 2507 * for regions that overlap 256M BAT segments. 2508 */ 2509 for (i = 0; i < 16; i++) { 2510 if (moea_bat_mapped(i, pa, size) == 0) 2511 return ((void *) pa); 2512 } 2513
2514 va = kmem_alloc_nofault(kernel_map, size); 2515 if (!va) 2516 panic("moea_mapdev: Couldn't alloc kernel virtual memory"); 2517
2518 for (tmpva = va; size > 0;) { 2519 moea_kenter(mmu, tmpva, ppa); 2520 TLBIE(tmpva); /* XXX or should it be invalidate-all ?
*/ 2521 size -= PAGE_SIZE; 2522 tmpva += PAGE_SIZE; 2523 ppa += PAGE_SIZE; 2524 } 2525 2526 return ((void *)(va + offset)); 2527 } 2528 2529 void 2530 moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2531 { 2532 vm_offset_t base, offset; 2533 2534 /* 2535 * If this is outside kernel virtual space, then it's a 2536 * battable entry and doesn't require unmapping 2537 */ 2538 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2539 base = trunc_page(va); 2540 offset = va & PAGE_MASK; 2541 size = roundup(offset + size, PAGE_SIZE); 2542 kmem_free(kernel_map, base, size); 2543 } 2544 } 2545
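/*
 * Illustrative sketch only (not part of the original source): one way a
 * driver might reach moea_mapdev()/moea_unmapdev() above, via the
 * machine-independent pmap_mapdev()/pmap_unmapdev() wrappers.  The
 * function name, device address argument and register offset below are
 * made up for the example.
 */
#if 0
static uint32_t
example_read_devreg(vm_offset_t dev_pa)
{
	volatile uint32_t *regs;
	uint32_t val;

	/* Map one page of device registers; a BAT-covered address comes back 1:1. */
	regs = (volatile uint32_t *)pmap_mapdev(dev_pa, PAGE_SIZE);

	/* Read the first 32-bit register in the page. */
	val = regs[0];

	/* Tear the mapping down; BAT-covered addresses are left untouched. */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);

	return (val);
}
#endif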