/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>
/*
 * Pte related macros
 */
#if ARM_ARCH_6 || ARM_ARCH_7A
#ifdef SMP
#define PTE_NOCACHE	2
#else
#define PTE_NOCACHE	1
#endif
#define PTE_CACHE	6
#define PTE_DEVICE	2
#define PTE_PAGETABLE	4
#else
#define PTE_NOCACHE	1
#define PTE_CACHE	2
#define PTE_PAGETABLE	3
#endif

enum mem_type {
	STRONG_ORD = 0,
	DEVICE_NOSHARE,
	DEVICE_SHARE,
	NRML_NOCACHE,
	NRML_IWT_OWT,
	NRML_IWB_OWB,
	NRML_IWBA_OWBA
};

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_kextract((vm_offset_t)(va))

#endif

#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define	pmap_page_is_mapped(m)		(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct	pv_entry;
struct	pv_chunk;

struct	md_page {
	int pvh_attrs;
	vm_memattr_t	 pv_memattr;
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
	vm_offset_t pv_kva;		/* first kernel VA mapping */
#endif
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct l1_ttable;
struct l2_dtable;


/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
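/*
 * Worked arithmetic (editorial note, not part of the original header;
 * assumes the usual ARM value L1_S_SHIFT == 20, i.e. 1MB sections):
 *
 *	L2_BUCKET_SIZE * 1MB = 16 * 1MB = 16MB of VA per l2_dtable
 *	L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 1 << 8 = 256
 *
 * i.e. at most 256 l2_dtable structures (256 * 16MB = 4GB) are needed to
 * cover everything an L1 translation table can map.
 */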

struct pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
#else
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
#endif
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#define pmap_kernel()	kernel_pmap

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif


/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int		pv_flags;	/* flags (wired, etc...) */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	TAILQ_ENTRY(pv_entry)	pv_plist;
#endif
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	8
#define	_NPCPV	252

struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_dummy[3];	/* aligns pv_chunk to 4KB */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
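/*
 * Illustrative sizing (editorial note, not from the original header;
 * assumes 32-bit pointers and the ARMv6/v7 pv_entry layout above, i.e.
 * 16 bytes per pv_entry):
 *
 *	header:  4 + 8 + 32 + 12 + 8	=   64 bytes
 *	entries: _NPCPV * 16 = 252 * 16	= 4032 bytes
 *	total:				  4096 bytes (one 4KB page)
 *
 * The 8 * 32 = 256 bits in pc_map cover the 252 entries, with 4 bits spare.
 */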

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);
/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
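/*
 * Example usage (editorial illustration, not part of the original
 * interface): translating a kernel virtual address.  The helper name is
 * hypothetical and the snippet is compiled out.
 */
#if 0
static __inline void
example_kva_lookup(vm_offset_t va)
{
	pt_entry_t *ptep;
	vm_paddr_t pa;

	ptep = vtopte(va);	/* PTE mapping 'va', or NULL if unmapped */
	pa = vtophys(va);	/* physical address via pmap_kextract() */
	(void)ptep;
	(void)pa;
}
#endif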

extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temp(vm_paddr_t pa, int i);
void 	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache);
int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
int pmap_dmap_iscurrent(pmap_t pmap);

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define	L2_AP(x)	(L2_AP0(x))
#else
#define	L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#endif

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#elif (ARM_MMU_V6 + ARM_MMU_V7) != 0
/*
 * AP[2:1] access permissions model:
 *
 * AP[2](APX)	- Write Disable
 * AP[1]	- User Enable
 * AP[0]	- Reference Flag
 *
 * AP[2]	AP[1]	Kernel	User
 *  0		0	R/W	N
 *  0		1	R/W	R/W
 *  1		0	R	N
 *  1		1	R	R
 *
 */
#define	L2_S_PROT_R		(0)		/* kernel read */
#define	L2_S_PROT_U		(L2_AP0(2))	/* user read */
#define	L2_S_REF		(L2_AP0(1))	/* reference flag */

#define	L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R|L2_APX)
#define	L2_S_EXECUTABLE(pte)	(!(pte & L2_XN))
#define	L2_S_WRITABLE(pte)	(!(pte & L2_APX))
#define	L2_S_REFERENCED(pte)	(!!(pte & L2_S_REF))
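/*
 * Worked reading of the table above (editorial illustration): a kernel
 * read/write, referenced mapping has APX = 0 and AP[1] = 0, i.e. neither
 * L2_APX nor L2_S_PROT_U is set while L2_S_REF (AP[0]) is set, so
 * L2_S_WRITABLE(pte) and L2_S_REFERENCED(pte) are both true and user
 * access is disabled.
 */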

#ifndef SMP
#define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C)
#define	L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C)
#define	L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C)
#else
#define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
#define	L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
#define	L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
#endif /* SMP */

#define	L1_S_PROTO		(L1_TYPE_S)
#define	L1_C_PROTO		(L1_TYPE_C)
#define	L2_S_PROTO		(L2_TYPE_S)

#ifndef SMP
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1))
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1))
#define ARM_L2L_NRML_IWT_OWT	(L2_C)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1))
#define ARM_L2S_NRML_IWT_OWT	(L2_C)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B)
#else
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1)|L1_SHARED)
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C|L1_SHARED)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B|L1_SHARED)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1)|L2_SHARED)
#define ARM_L2L_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1)|L2_SHARED)
#define ARM_L2S_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
#endif /* SMP */
#endif /* ARM_NMMUS > 1 */
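/*
 * Editorial note (not part of the original header): the ARM_L1S_*,
 * ARM_L2L_* and ARM_L2S_* values above give one TEX/C/B encoding per
 * enum mem_type entry (e.g. ARM_L2S_NRML_IWB_OWB = L2_C|L2_B encodes
 * inner/outer write-back normal memory); the SMP variants simply add
 * the shared bit so the mappings remain coherent across CPUs.
 */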

#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif defined(CPU_XSCALE_81342)
#define PMAP_NEEDS_PTE_SYNC	1
#define PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0

#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define	L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
#else
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_MASK		(L1_S_APX|L1_S_AP(0x3))
#define	L1_S_WRITABLE(pd)	(!((pd) & L1_S_APX))

#define	L1_S_PROT(ku, pr)	(L1_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_APX : 0)))

#define	L2_L_PROT_MASK		(L2_APX|L2_AP0(0x3))
#define	L2_L_PROT(ku, pr)	(L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#define	L2_S_PROT(ku, pr)	(L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#endif

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_l2cache_wb_range((vm_offset_t)(pte),		\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)
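/*
 * Typical use (editorial sketch, not from the original header): after a
 * PTE is stored in memory it must be made visible to table walks on CPUs
 * whose MMU does not snoop the data cache.  The fragment below is a
 * hypothetical excerpt from a mapping routine and is compiled out.
 */
#if 0
	*ptep = L2_S_PROTO | pa |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
	PTE_SYNC(ptep);		/* write back the updated entry if required */
#endif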

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_pte_init_mmu_v6(void);
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || CPU_XSCALE_81342 */

#if /* ARM_MMU_SA1 == */1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */
#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif

#define PTE_KERNEL	0
#define PTE_USER	1
#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_NC		0x20		/* mapping is non-cacheable */
#define	PVF_MWC		0x40		/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80		/* mapping is unmanaged */

void vector_page_setprot(int);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
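/*
 * Illustrative use (editorial sketch, not from the original header): a
 * board's early startup code typically provides a statically initialized,
 * zero-terminated table of device mappings and hands it to
 * pmap_devmap_bootstrap() or pmap_devmap_register().  The addresses below
 * are hypothetical and the snippet is compiled out.
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		0xd0000000,			/* pd_va (hypothetical) */
		0x48000000,			/* pd_pa (hypothetical) */
		0x00100000,			/* pd_size: 1MB */
		VM_PROT_READ | VM_PROT_WRITE,	/* pd_prot */
		PTE_DEVICE,			/* pd_cache (PTE_NOCACHE pre-v6) */
	},
	{ 0, 0, 0, 0, 0 }			/* terminator */
};
#endif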

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

extern char *_tmppt;

void	pmap_postinit(void);

#ifdef ARM_USE_SMALL_ALLOC
void	arm_add_smallalloc_pages(void *, void *, int, int);
vm_offset_t arm_ptovirt(vm_paddr_t);
void arm_init_smallalloc(void);
struct arm_small_page {
	void *addr;
	TAILQ_ENTRY(arm_small_page) pg_list;
};

#endif

#define ARM_NOCACHE_KVA_SIZE 0x1000000
extern vm_offset_t arm_nocache_startaddr;
void *arm_remap_nocache(void *, vm_size_t);
void arm_unmap_nocache(void *, vm_size_t);

extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */