/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/bat.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/pte.h>

pte_t	*ptable;
int	ptab_cnt;
u_int	ptab_mask;
#define	HTABSIZE	(ptab_cnt * 64)

#define	MINPV	2048

struct pte_ovfl {
	LIST_ENTRY(pte_ovfl) po_list;	/* Linked list of overflow entries */
	struct pte	po_pte;		/* PTE for this mapping */
};

LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */

static struct pmap	kernel_pmap_store;
pmap_t			kernel_pmap;

static int	npgs;
static u_int	nextavail;

#ifndef MSGBUFADDR
extern vm_offset_t	msgbuf_paddr;
#endif

static struct mem_region	*mem, *avail;

vm_offset_t	avail_start;
vm_offset_t	avail_end;
vm_offset_t	virtual_avail;
vm_offset_t	virtual_end;

vm_offset_t	kernel_vm_end;

static int	pmap_pagedaemon_waken = 0;

extern unsigned int	Maxmem;

#define	ATTRSHFT	4

struct pv_entry	*pv_table;

static vm_zone_t	pvzone;
static struct vm_zone	pvzone_store;
static struct vm_object	pvzone_obj;
static int		pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct pv_entry	*pvinit;

#if !defined(PMAP_SHPGPERPROC)
#define	PMAP_SHPGPERPROC	200
#endif

struct pv_page;
struct pv_page_info {
	LIST_ENTRY(pv_page) pgi_list;
	struct pv_entry	*pgi_freelist;
	int		pgi_nfree;
};
#define	NPVPPG	((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
struct pv_page {
	struct pv_page_info	pvp_pgi;
	struct pv_entry		pvp_pv[NPVPPG];
};
LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
int	pv_nfree;
int	pv_pcnt;
static struct pv_entry	*pmap_alloc_pv(void);
static void		pmap_free_pv(struct pv_entry *);

struct po_page;
struct po_page_info {
	LIST_ENTRY(po_page) pgi_list;
	vm_page_t	pgi_page;
	LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
	int		pgi_nfree;
};
#define	NPOPPG	((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
struct po_page {
	struct po_page_info	pop_pgi;
	struct pte_ovfl		pop_po[NPOPPG];
};
LIST_HEAD(po_page_list, po_page) po_page_freelist;
int	po_nfree;
int	po_pcnt;
static struct pte_ovfl	*poalloc(void);
static void		pofree(struct pte_ovfl *, int);

static u_int	usedsr[NPMAPS / sizeof(u_int) / 8];

static int	pmap_initialized;

int	pte_spill(vm_offset_t);

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */
static __inline void
tlbie(vm_offset_t ea)
{

	__asm __volatile ("tlbie %0" :: "r"(ea));
}

static __inline void
tlbsync(void)
{

	__asm __volatile ("sync; tlbsync; sync");
}

static __inline void
tlbia(void)
{
	vm_offset_t	i;

	__asm __volatile ("sync");
	for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
		tlbie(i);
	}
	tlbsync();
}

static __inline int
ptesr(sr_t *sr, vm_offset_t addr)
{

	return sr[(u_int)addr >> ADDR_SR_SHFT];
}

static __inline int
pteidx(sr_t sr, vm_offset_t addr)
{
	int	hash;

	hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & ptab_mask;
}

static __inline int
ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
{

	return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
	    (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline struct pv_entry *
pa_to_pv(vm_offset_t pa)
{
#if 0 /* XXX */
	int	bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
#endif
	return (NULL);
}

static __inline char *
pa_to_attr(vm_offset_t pa)
{
#if 0 /* XXX */
	int	bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
#endif
	return (NULL);
}

/*
 * Try to insert page table entry *pt into the ptable at idx.
 *
 * Note: *pt mustn't have PTE_VALID set.
 * This is done here as required by Book III, 4.12.
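 *
 * Each PTEG holds eight PTEs; an entry may go into its primary PTEG
 * or, with PTE_HID set, into the secondary PTEG at the complemented
 * hash (idx ^ ptab_mask).  PTE_VALID is set only after the rest of
 * the entry is visible, hence the sync before it.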
 */
static int
pte_insert(int idx, pte_t *pt)
{
	pte_t	*ptp;
	int	i;

	/*
	 * First try primary hash.
	 */
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi &= ~PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	/*
	 * Then try secondary hash.
	 */

	idx ^= ptab_mask;

	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi |= PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	return 0;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * Note that this routine runs in real mode on a separate stack,
 * with interrupts disabled.
 */
int
pte_spill(vm_offset_t addr)
{
	int		idx, i;
	sr_t		sr;
	struct pte_ovfl	*po;
	pte_t		ps;
	pte_t		*pt;

	__asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
	idx = pteidx(sr, addr);
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, addr, 0)) {
			/*
			 * Now found an entry to be spilled into the real
			 * ptable.
			 */
			if (pte_insert(idx, &po->po_pte)) {
				LIST_REMOVE(po, po_list);
				pofree(po, 0);
				return 1;
			}
			/*
			 * Have to substitute some entry.  Use the primary
			 * hash for this.
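			 * The entry displaced from the page table is
			 * swapped into this overflow slot below, so the
			 * old mapping is demoted to the overflow list
			 * rather than lost.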
			 *
			 * Use low bits of timebase as random generator
			 */
			__asm ("mftb %0" : "=r"(i));
			pt = ptable + idx * 8 + (i & 7);
			pt->pte_hi &= ~PTE_VALID;
			ps = *pt;
			__asm __volatile ("sync");
			tlbie(addr);
			tlbsync();
			*pt = po->po_pte;
			__asm __volatile ("sync");
			pt->pte_hi |= PTE_VALID;
			po->po_pte = ps;
			if (ps.pte_hi & PTE_HID) {
				/*
				 * We took an entry that was on the alternate
				 * hash chain, so move it to its original
				 * chain.
				 */
				po->po_pte.pte_hi &= ~PTE_HID;
				LIST_REMOVE(po, po_list);
				LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
				    po, po_list);
			}
			return 1;
		}
	}

	return 0;
}

/*
 * This is called during powerpc_init, before the system is really initialized.
 */
void
pmap_setavailmem(u_int kernelstart, u_int kernelend)
{
	struct mem_region	*mp, *mp1;
	int			cnt, i;
	u_int			s, e, sz;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++)
		Maxmem += btoc(mp->size);

	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++) {
		cnt++;
	}

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PAGE_MASK;
	kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s) {
			e = s;
		}
		sz = e - s;
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			bcopy(mp + 1, mp,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}

		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);

		for (mp1 = avail; mp1 < mp; mp1++) {
			if (s < mp1->start) {
				break;
			}
		}

		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

#ifdef HTABENTS
	ptab_cnt = HTABENTS;
#else
	ptab_cnt = (Maxmem + 1) / 2;

	/* The minimum is 1024 PTEGs. */
	if (ptab_cnt < 1024) {
		ptab_cnt = 1024;
	}

	/* Round up to power of 2. */
	__asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
	ptab_cnt = 1 << (32 - i);
#endif

	/*
	 * Find suitably aligned memory for HTAB.
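	 * The hash table must begin on a multiple of its own size,
	 * hence the roundup() of each candidate region to HTABSIZE.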
	 */
	for (mp = avail; mp->size; mp++) {
		s = roundup(mp->start, HTABSIZE) - mp->start;

		if (mp->size < s + HTABSIZE) {
			continue;
		}

		ptable = (pte_t *)(mp->start + s);

		if (mp->size == s + HTABSIZE) {
			if (s)
				mp->size = s;
			else {
				bcopy(mp + 1, mp,
				    (cnt - (mp - avail)) * sizeof *mp);
				mp = avail;
			}
			break;
		}

		if (s != 0) {
			bcopy(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			mp++->size = s;
			cnt++;
		}

		mp->start += s + HTABSIZE;
		mp->size -= s + HTABSIZE;
		break;
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(HTABSIZE);
	bzero((void *)ptable, HTABSIZE);
	ptab_mask = ptab_cnt - 1;

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
	s = sizeof(struct pte_ovtab) * ptab_cnt;
	sz = round_page(s);

	for (mp = avail; mp->size; mp++) {
		if (mp->size >= sz) {
			break;
		}
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	potable = (struct pte_ovtab *)mp->start;
	mp->size -= sz;
	mp->start += sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}

	for (i = 0; i < ptab_cnt; i++) {
		LIST_INIT(potable + i);
	}

#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;

	for (mp1 = avail; mp1->size; mp1++) {
		if (mp1->size >= sz) {
			mp = mp1;
		}
	}

	if (mp == NULL) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}
#endif

	nextavail = avail->start;
	avail_start = avail->start;
	for (mp = avail, i = 0; mp->size; mp++) {
		avail_end = mp->start + mp->size;
		phys_avail[i++] = mp->start;
		phys_avail[i++] = mp->start + mp->size;
	}
}

void
pmap_bootstrap()
{
	int		i;
	u_int32_t	batl, batu;

	/*
	 * Initialize kernel pmap and hardware.
	 */
	kernel_pmap = &kernel_pmap_store;

	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));

#if NPMAPS >= KERNEL_SEGMENT / 16
	usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
	    |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
#endif

#if 0 /* XXX */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
		__asm __volatile ("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
#endif

	for (i = 0; i < 16; i++) {
		int	j;

		__asm __volatile ("mfsrin %0,%1"
		    : "=r" (j)
		    : "r" (i << ADDR_SR_SHFT));

		kernel_pmap->pm_sr[i] = j;
	}

	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));

	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)ptable | (ptab_mask >> 10)));

	tlbia();

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
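 *
 * The PV entry zone is only bootstrapped here from a kmem_alloc'd
 * array, since the zone allocator cannot set up its own backing
 * store this early.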
 */
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
	int	initial_pvs;

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV) {
		initial_pvs = MINPV;
	}
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
	    initial_pvs * sizeof(struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
	    vm_page_array_size);

	pmap_initialized = TRUE;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int	i, j;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
		if (usedsr[i] != 0xffffffff) {
			j = ffs(~usedsr[i]) - 1;
			usedsr[i] |= 1 << j;
			pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
			for (i = 1; i < 16; i++) {
				pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
			}
			return;
		}
	}
	panic("out of segments");
}

void
pmap_pinit2(pmap_t pmap)
{

	/*
	 * Nothing to be done.
	 */
	return;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPGDATA);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
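 *
 * Each pmap owns a block of 16 consecutive VSIDs, recorded as a
 * single bit in usedsr by pmap_pinit; pm_sr[0] / 16 recovers that
 * bit index so it can be cleared here.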
 */
void
pmap_release(struct pmap *pm)
{
	int	i, j;

	if (!pm->pm_sr[0]) {
		panic("pmap_release");
	}
	i = pm->pm_sr[0] / 16;
	j = i % (sizeof usedsr[0] * 8);
	i /= sizeof usedsr[0] * 8;
	usedsr[i] &= ~(1 << j);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	return;
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(void)
{

	return;
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(vm_offset_t pa)
{
#if 0
	bzero((caddr_t)pa, PAGE_SIZE);
#else
	int	i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{

	bzero((caddr_t)pa + off, size);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{

	bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
}

static struct pv_entry *
pmap_alloc_pv()
{
	pv_entry_count++;

	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}

	return zalloc(pvzone);
}

static void
pmap_free_pv(struct pv_entry *pv)
{

	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * We really hope that we don't need overflow entries
 * before the VM system is initialized!
 *
 * XXX: Should really be switched over to the zone allocator.
 */
static struct pte_ovfl *
poalloc()
{
	struct po_page	*pop;
	struct pte_ovfl	*po;
	vm_page_t	mem;
	int		i;

	if (!pmap_initialized) {
		panic("poalloc");
	}

	if (po_nfree == 0) {
		/*
		 * Since we cannot use maps for potable allocation,
		 * we have to steal some memory from the VM system.
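		 * The spill handler walks these entries in real mode,
		 * so the stolen page is used at its physical address
		 * (VM_PAGE_TO_PHYS below) rather than through a
		 * mapping.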
		 * XXX
		 */
		mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
		po_pcnt++;
		pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
		pop->pop_pgi.pgi_page = mem;
		LIST_INIT(&pop->pop_pgi.pgi_freelist);
		for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
			LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
			    po_list);
		}
		po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
		po = pop->pop_po;
	} else {
		po_nfree--;
		pop = po_page_freelist.lh_first;
		if (--pop->pop_pgi.pgi_nfree <= 0) {
			LIST_REMOVE(pop, pop_pgi.pgi_list);
		}
		po = pop->pop_pgi.pgi_freelist.lh_first;
		LIST_REMOVE(po, po_list);
	}

	return po;
}

static void
pofree(struct pte_ovfl *po, int freepage)
{
	struct po_page	*pop;

	pop = (struct po_page *)trunc_page((vm_offset_t)po);
	switch (++pop->pop_pgi.pgi_nfree) {
	case NPOPPG:
		if (!freepage) {
			break;
		}
		po_nfree -= NPOPPG - 1;
		po_pcnt--;
		LIST_REMOVE(pop, pop_pgi.pgi_list);
		vm_page_free(pop->pop_pgi.pgi_page);
		return;
	case 1:
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
		/* FALLTHROUGH */
	default:
		break;
	}
	LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
	po_nfree++;
}

/*
 * This returns whether this is the first mapping of a page.
 */
static int
pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
{
	struct pv_entry	*pv, *npv;
	int		s, first;

	if (!pmap_initialized) {
		return 0;
	}

	s = splimp();

	pv = pa_to_pv(pa);
	first = pv->pv_idx;
	if (pv->pv_idx == -1) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_idx = pteidx;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
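		 * (The first mapping lives in the pv_table header
		 * itself, so only second and later mappings allocate
		 * a separate pv_entry.)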
		 */
		npv = pmap_alloc_pv();
		npv->pv_va = va;
		npv->pv_idx = pteidx;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return first;
}

static void
pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
{
	struct pv_entry	*pv, *npv;
	char		*attr;

	/*
	 * First transfer reference/change bits to cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return;
	}
	*attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pteidx == pv->pv_idx && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			pmap_free_pv(npv);
		} else {
			pv->pv_idx = -1;
		}
	} else {
		for (; (npv = pv->pv_next); pv = npv) {
			if (pteidx == npv->pv_idx && va == npv->pv_va) {
				break;
			}
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			pmap_free_pv(npv);
		}
#ifdef DIAGNOSTIC
		else {
			panic("pmap_remove_pv: not on list\n");
		}
#endif
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
    boolean_t wired)
{
	sr_t			sr;
	int			idx, s;
	pte_t			pte;
	struct pte_ovfl		*po;
	struct mem_region	*mp;
	vm_offset_t		pa;

	pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	/*
	 * Compute the HTAB index.
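	 *
	 * pteidx() XORs the VSID from the segment register with the
	 * page index of va and masks the result with ptab_mask to
	 * select a PTEG.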
	 */
	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	/*
	 * Construct the PTE.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
	    | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
	pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;

	for (mp = mem; mp->size; mp++) {
		if (pa >= mp->start && pa < mp->start + mp->size) {
			pte.pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}
	if (prot & VM_PROT_WRITE) {
		pte.pte_lo |= PTE_RW;
	} else {
		pte.pte_lo |= PTE_RO;
	}

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
		if (pmap_enter_pv(idx, va, pa)) {
			/*
			 * Flush the real memory from the cache.
			 */
			__syncicache((void *)pa, PAGE_SIZE);
		}
	}

	s = splimp();
	pm->pm_stats.resident_count++;
	/*
	 * Try to insert directly into HTAB.
	 */
	if (pte_insert(idx, &pte)) {
		splx(s);
		return;
	}

	/*
	 * Have to allocate overflow entry.
	 *
	 * Note that we must use real addresses for these.
	 */
	po = poalloc();
	po->po_pte = pte;
	LIST_INSERT_HEAD(potable + idx, po, po_list);
	splx(s);
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct vm_page	pg;

	pg.phys_addr = pa;
	pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
}

void
pmap_kremove(vm_offset_t va)
{
	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Remove the given range of mapping entries.
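 *
 * A mapping may live in the primary PTEG, the secondary PTEG or the
 * overflow list for its hash index, so each page in the range is
 * looked for in all three places.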
 */
void
pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
{
	int		idx, i, s;
	sr_t		sr;
	pte_t		*ptp;
	struct pte_ovfl	*po, *npo;

	s = splimp();
	while (va < endva) {
		idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if (ptematch(&po->po_pte, sr, va, 0)) {
				pmap_remove_pv(idx, va, po->po_pte.pte_lo,
				    &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				pm->pm_stats.resident_count--;
			}
		}
		va += PAGE_SIZE;
	}
	splx(s);
}

static pte_t *
pte_find(struct pmap *pm, vm_offset_t va)
{
	int		idx, i;
	sr_t		sr;
	pte_t		*ptp;
	struct pte_ovfl	*po;

	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID)) {
			return ptp;
		}
	}
	for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
			return ptp;
		}
	}
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, va, 0)) {
			return &po->po_pte;
		}
	}
	return 0;
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	pte_t	*ptp;
	int	s;

	s = splimp();

	if (!(ptp = pte_find(pm, va))) {
		splx(s);
		return (0);
	}
	splx(s);
	return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pte_t	*ptp;
	int	valid, s;

	if (prot & VM_PROT_READ) {
		s = splimp();
		while (sva < eva) {
			ptp = pte_find(pm, sva);
			if (ptp) {
				valid = ptp->pte_hi & PTE_VALID;
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(sva);
				tlbsync();
				ptp->pte_lo &= ~PTE_PP;
				ptp->pte_lo |= PTE_RO;
				__asm __volatile ("sync");
				ptp->pte_hi |= valid;
			}
			sva += PAGE_SIZE;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}

boolean_t
ptemodify(vm_page_t pg, u_int mask, u_int val)
{
	vm_offset_t	pa;
	struct pv_entry	*pv;
	pte_t		*ptp;
	struct pte_ovfl	*po;
	int		i, s;
	char		*attr;
	int		rv;

	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First modify bits in cache.
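	 *
	 * Referenced/changed state is kept both in the PTEs and,
	 * shifted down by ATTRSHFT, in the per-page attribute array;
	 * the cached copy is updated before the PTEs are walked.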
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return FALSE;
	}

	*attr &= ~mask >> ATTRSHFT;
	*attr |= val >> ATTRSHFT;

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return FALSE;
	}

	rv = FALSE;
	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				rv |= po->po_pte.pte_lo & mask;
				po->po_pte.pte_lo &= ~mask;
				po->po_pte.pte_lo |= val;
			}
		}
	}
	splx(s);
	return rv != 0;
}

int
ptebits(vm_page_t pg, int bit)
{
	struct pv_entry	*pv;
	pte_t		*ptp;
	struct pte_ovfl	*po;
	int		i, s, bits;
	char		*attr;
	vm_offset_t	pa;

	bits = 0;
	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First try the cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return 0;
	}
	bits |= (*attr << ATTRSHFT) & bit;
	if (bits == bit) {
		return bits;
	}

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return 0;
	}

	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				bits |= po->po_pte.pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
	}
	splx(s);
	return bits;
}

/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.

/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	vm_offset_t pa;
	vm_offset_t va;
	pte_t *ptp;
	struct pte_ovfl *po, *npo;
	int i, s, idx;
	struct pv_entry *pv;

	pa = VM_PAGE_TO_PHYS(m);
	pa &= ~ADDR_POFF;

	if (prot & VM_PROT_READ) {
		ptemodify(m, PTE_PP, PTE_RO);
		return;
	}

	pv = pa_to_pv(pa);
	if (pv == NULL) {
		return;
	}

	s = splimp();
	while (pv->pv_idx >= 0) {
		idx = pv->pv_idx;
		va = pv->pv_va;
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				goto next;
			}
		}
next:
		;
	}
	splx(s);
}
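
/*
 * A sketch of the caller's view of the two cases above:
 *
 *	pmap_page_protect(m, VM_PROT_READ);	write-protect every mapping
 *	pmap_page_protect(m, VM_PROT_NONE);	remove every mapping
 */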

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct thread *td)
{
	struct pcb *pcb;
	pmap_t pmap;
	pmap_t rpm;
	int psl, i, ksr, seg;

	pcb = td->td_pcb;
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		pcb->pcb_pmreal = (pmap_t)pmap_extract(kernel_pmap,
		    (vm_offset_t)pcb->pcb_pm);
	}

	if (td == curthread) {
		/* Disable interrupts while switching. */
		psl = mfmsr();
		mtmsr(psl & ~PSL_EE);

#if 0 /* XXX */
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
#endif

		/* Save kernel SR. */
		__asm __volatile("mfsr %0,14" : "=r"(ksr) :);

		/*
		 * Set new segment registers.  We use the pmap's real
		 * address to avoid accessibility problems.
		 */
		rpm = pcb->pcb_pmreal;
		for (i = 0; i < 16; i++) {
			seg = rpm->pm_sr[i];
			__asm __volatile("mtsrin %0,%1"
			    :: "r"(seg), "r"(i << ADDR_SR_SHFT));
		}

		/* Restore kernel SR. */
		__asm __volatile("mtsr 14,%0" :: "r"(ksr));

		/* Interrupts are OK again. */
		mtmsr(psl);
	}
}

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
	}
}
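
/*
 * Example (sketch): the buffer cache wires a buffer's pages into the
 * kernel map with pmap_qenter() and later tears the mappings down
 * again, roughly:
 *
 *	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
 *	...
 *	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
 *
 * where bp is a struct buf * and the field names are from <sys/buf.h>.
 */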

/*
 * This routine jerks page mappings from the kernel -- it is meant only
 * for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		tlbie(va);
		va += PAGE_SIZE;
	}
}

/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	/* XXX: coming soon... */
	return (0);
}

/*
 * This routine returns true if a physical page resides in the given pmap.
 */
boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
#if 0 /* XXX: This must go! */
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = pv_table; pv; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
#endif
	return (FALSE);
}

/*
 * Used to map a range of physical addresses into kernel virtual
 * address space.
 *
 * For now, VM is already on, we only need to map the specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t sva, va;

	sva = *virt;
	va = sva;

	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}

	*virt = va;
	return (sva);
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	/* XXX: coming soon... */
	return (0);
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_growkernel(vm_offset_t addr)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}
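
/*
 * Example (sketch): the TUNABLE_INT_FETCH above means the pv entry
 * limit can be adjusted from the loader without recompiling, e.g. in
 * /boot/loader.conf:
 *
 *	vm.pmap.shpgperproc="300"
 */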

void
pmap_swapin_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_swapout_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
void
pmap_new_thread(struct thread *td)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Dispose the kernel stack for a thread that has exited.
 * This routine directly impacts the exit performance of a process and
 * of a thread.
 */
void
pmap_dispose_thread(struct thread *td)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Allow the kernel stack for a thread to be prejudicially paged out.
 */
void
pmap_swapout_thread(struct thread *td)
{
	int i;
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;

	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < KSTACK_PAGES; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("pmap_swapout_thread: kstack already missing?");
		/*
		 * Dirty the page so its contents are preserved by the
		 * pageout daemon, drop the wiring, and unmap it from
		 * the kernel address space.
		 */
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove(ks + i * PAGE_SIZE);
	}
}
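
/*
 * pmap_swapin_thread() below is the inverse of pmap_swapout_thread()
 * above: the stack pages are re-wired and re-mapped, and any whose
 * contents were paged out are fetched back from the pager before the
 * thread can run again.
 */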

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
pmap_swapin_thread(struct thread *td)
{
	int i, rv;
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;

	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < KSTACK_PAGES; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_thread: cannot get kstack "
				    "for proc: %d", td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
}

void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    boolean_t pageable)
{

	return;
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pinit0(pmap_t pmap)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_dispose_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa;

	size = round_page(size);

	/*
	 * Find a physical memory bank large enough for the request,
	 * discarding banks that are too small as we go.
	 */
	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;

		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	/* XXX assumes the stolen range is addressable 1:1 at this stage. */
	bzero((caddr_t)pa, size);
	return (pa);
}
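
/*
 * Example (sketch): pmap_steal_memory() serves bootstrap-time
 * allocations made before the VM system is up.  The message buffer,
 * for instance, might be carved out as
 *
 *	msgbuf_paddr = pmap_steal_memory(MSGBUF_SIZE);
 *
 * with MSGBUF_SIZE from <sys/msgbuf.h>.
 */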

/*
 * Create the UAREA_PAGES for a new process.
 * This routine directly affects the fork performance of a process.
 */
void
pmap_new_proc(struct proc *p)
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pte_t pte;
	sr_t sr;
	int idx;
	vm_offset_t va;

	/*
	 * Allocate an object for the upages.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/* Get a kernel virtual address for the UAREA_PAGES for this proc. */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		va = up + i * PAGE_SIZE;
		idx = pteidx(sr = ptesr(kernel_pmap->pm_sr, va), va);

		pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT) |
		    ((va & ADDR_PIDX) >> ADDR_API_SHFT);
		pte.pte_lo = (VM_PAGE_TO_PHYS(m) & PTE_RPGN) | PTE_M | PTE_I |
		    PTE_G | PTE_RW;

		/* Fall back to the overflow list if the PTE won't fit. */
		if (!pte_insert(idx, &pte)) {
			struct pte_ovfl *po;

			po = poalloc();
			po->po_pte = pte;
			LIST_INSERT_HEAD(potable + idx, po, po_list);
		}

		tlbie(va);

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void *
pmap_mapdev(vm_offset_t pa, vm_size_t len)
{
	vm_offset_t faddr;
	vm_offset_t taddr, va;
	int off;

	GIANT_REQUIRED;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);

	va = taddr = kmem_alloc_pageable(kernel_map, len);

	if (va == 0)
		return (NULL);

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter(taddr, faddr);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}

	return ((void *)(va + off));
}
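
/*
 * Example (sketch): a device driver maps a register window with
 * pmap_mapdev() and accesses it through the returned pointer; the
 * physical address below is hypothetical:
 *
 *	volatile u_int32_t *regs;
 *	u_int32_t reg0;
 *
 *	regs = pmap_mapdev(0xf8000000, PAGE_SIZE);
 *	reg0 = regs[0];
 */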