/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler.  This version, by William
 * Jolitz, uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large, sparse address
 * spaces, and to reduce the memory cost of each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/pte.h>

/*
 * Pte related macros
 */
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

#ifndef LOCORE

#include <sys/queue.h>

#define	PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define	PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define	vtophys(va)		pmap_extract(pmap_kernel(), (vm_offset_t)(va))
#define	pmap_kextract(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))

#endif
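
/*
 * Example (illustrative only, never compiled): vtophys() resolves a
 * kernel virtual address to its physical address via the kernel pmap,
 * e.g. when a physical address is needed for a DMA descriptor.  The
 * buffer and function names below are hypothetical.
 */
#if 0
static char dma_buf[1024];		/* hypothetical kernel buffer */

vm_paddr_t
example_buf_pa(void)
{

	/* vtophys() goes through the kernel pmap via pmap_extract(). */
	return (vtophys(dma_buf));
}
#endif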

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code.
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr)	pv_list;
	vm_offset_t		pv_va;
	vm_paddr_t		pv_pa;
};

struct pv_entry;

struct md_page {
	int pvh_attrs;
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	TAILQ_INIT(&(pg)->md.pv_list);					\
	mtx_init(&(pg)->md.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEF);	\
	(pg)->md.pvh_attrs = 0;						\
	(pg)->md.uro_mappings = 0;					\
	(pg)->md.urw_mappings = 0;					\
	(pg)->md.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
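
/*
 * Worked sizing check (illustrative only, never compiled): each L2 table
 * spans one L1 section, i.e. 1MB (assuming L1_S_SHIFT == 20, as on ARM),
 * so one l2_dtable covers 16 x 1MB = 16MB, and L2_SIZE = 1 << (12 - 4)
 * = 256 l2_dtables suffice for the full 4GB address space.
 */
#if 0
CTASSERT(L2_BUCKET_SIZE == 16);
CTASSERT(((uint64_t)L2_BUCKET_SIZE << L1_S_SHIFT) == 16 * 1024 * 1024);
CTASSERT(((uint64_t)L2_SIZE * L2_BUCKET_SIZE << L1_S_SHIFT) == 1ULL << 32);
#endif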

struct pmap {
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	int			pm_count;	/* reference count */
	int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern pmap_t	kernel_pmap;
#define	pmap_kernel()	kernel_pmap

#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int		pv_flags;	/* flags (wired, etc...) */
} *pv_entry_t;

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * Virtual address to page table entry and to physical address.
 * Note: these work recursively; thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

extern vm_offset_t avail_end;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t,
	    vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
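
/*
 * Example (illustrative only, never compiled): the bootstrap mapping
 * helpers above are typically called from early machine-dependent
 * startup code, with the kernel L1 table's virtual address in hand.
 * All names below (l1pt, SDRAM_BASE, kernel_pt) are hypothetical.
 */
#if 0
void
example_bootstrap_maps(vm_offset_t l1pt, struct pv_addr *kernel_pt)
{

	/* Map 1MB of SDRAM 1:1 with a single L1 section, cached. */
	pmap_map_section(l1pt, SDRAM_BASE, SDRAM_BASE,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	/* Hook an L2 page table into the L1 table at a given VA. */
	pmap_link_l2pt(l1pt, 0xd0000000, kernel_pt);

	/* Map an arbitrary chunk; uses sections/large/small pages as fits. */
	pmap_map_chunk(l1pt, 0xd0000000, SDRAM_BASE + 0x100000, 0x200000,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
}
#endif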

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
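
/*
 * Example (illustrative only, never compiled): a small-page PTE is
 * composed from the page's physical address, the L2_S_PROTO type bits,
 * the access permissions from L2_S_PROT(), and a cache mode such as
 * pte_l2_s_cache_mode.  PTE_KERNEL/PTE_USER and PTE_SYNC() are defined
 * further down in this file; ptep/pa/prot are hypothetical.
 */
#if 0
void
example_make_pte(pt_entry_t *ptep, vm_paddr_t pa, vm_prot_t prot)
{

	*ptep = pa | L2_S_PROTO | L2_S_PROT(PTE_USER, prot) |
	    pte_l2_s_cache_mode;
	PTE_SYNC(ptep);		/* write back if the CPU needs it */
}
#endif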

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if /* ARM_MMU_SA1 == */1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#define	PTE_KERNEL	0
#define	PTE_USER	1

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))
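
/*
 * Example (illustrative only, never compiled): a software table walk
 * using the macros above, resolving a kernel VA to a PA much as
 * pmap_extract() does.  Assumes L1_S_FRAME/L1_S_OFFSET and L2_S_OFFSET
 * come from <machine/pte.h>; example_va_to_pa is a hypothetical name,
 * and 0 stands in for "no mapping".
 */
#if 0
vm_paddr_t
example_va_to_pa(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (0);
	if (pmap_pde_section(pdep))	/* 1MB section: no L2 lookup */
		return ((*pdep & L1_S_FRAME) | (va & L1_S_OFFSET));
	if (!pmap_pte_v(ptep))		/* no valid small-page PTE */
		return (0);
	return (pmap_pte_pa(ptep) | (va & L2_S_OFFSET));
}
#endif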

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01	/* page is modified */
#define	PVF_REF		0x02	/* page is referenced */
#define	PVF_WIRED	0x04	/* mapping is wired */
#define	PVF_WRITE	0x08	/* mapping is writable */
#define	PVF_EXEC	0x10	/* mapping is executable */
#define	PVF_UNC		0x20	/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40	/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)

void	vector_page_setprot(int);

void	pmap_update(pmap_t);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

extern char *_tmppt;

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */