/titanic_41/usr/src/uts/i86pc/vm/

i86_mmu.c
     209  window_size = mmu_btop(map_len) * mmu.pte_size;  in hat_kmap_init()
     218  mmu.kmap_htables =  in hat_kmap_init()
     230  mmu.kmap_htables[i] = ht;  in hat_kmap_init()
     245  mmu.kmap_addr = map_addr;  in hat_kmap_init()
     246  mmu.kmap_eaddr = map_eaddr;  in hat_kmap_init()
     247  mmu.kmap_ptes = (x86pte_t *)ptes;  in hat_kmap_init()
     323  level_t lpagel = mmu.max_page_level;  in hat_kern_alloc()
     373  nwindows = MAX(nwindows, mmu.max_level);  in hat_kern_alloc()
     381  mmu.pwin_base = vmem_xalloc(heap_arena, nwindows * MMU_PAGESIZE,  in hat_kern_alloc()
     383  ASSERT(nwindows <= MMU_PAGESIZE / mmu.pte_size);  in hat_kern_alloc()
     [all …]

hat_pte.h
     103  (pa_to_ma(pfn_to_pa(pfn)) | mmu.ptp_bits[(l) + 1])
     106  ((pfn_to_pa(pfn & ~PFN_IS_FOREIGN_MFN) | mmu.pte_bits[l]) | \
     108  (pa_to_ma(pfn_to_pa(pfn)) | mmu.pte_bits[l]))
     111  (pfn_to_pa(pfn) | mmu.ptp_bits[(l) + 1])
     113  (pfn_to_pa(pfn) | mmu.pte_bits[l])
     142  #define LEVEL_SHIFT(l) (mmu.level_shift[l])
     143  #define LEVEL_SIZE(l) (mmu.level_size[l])
     144  #define LEVEL_OFFSET(l) (mmu.level_offset[l])
     145  #define LEVEL_MASK(l) (mmu.level_mask[l])
     228  #define PWIN_VA(x) (mmu.pwin_base + ((x) << MMU_PAGESHIFT))
     [all …]

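Note: the LEVEL_SHIFT/LEVEL_SIZE/LEVEL_OFFSET/LEVEL_MASK hits above are thin wrappers around per-level geometry arrays in the global mmu struct. The standalone sketch below is not the kernel code; it assumes a conventional 4-level x86-64 layout with 4 KB base pages and 512 entries per table, rather than values populated by hat_i86.c, and only shows how those four quantities relate to each other.

#include <stdio.h>
#include <stdint.h>

#define MMU_PAGESHIFT	12	/* 4 KB base pages (assumption) */
#define NPTESHIFT	9	/* 512 entries per table (assumption) */
#define MAX_LEVEL	3	/* 4-level paging: levels 0..3 */

int
main(void)
{
	for (int l = 0; l <= MAX_LEVEL; l++) {
		uint64_t shift = MMU_PAGESHIFT + l * NPTESHIFT;	/* LEVEL_SHIFT(l) */
		uint64_t size = 1ULL << shift;			/* LEVEL_SIZE(l) */
		uint64_t offset = size - 1;			/* LEVEL_OFFSET(l) */
		uint64_t mask = ~offset;			/* LEVEL_MASK(l) */

		printf("level %d: shift %2d size 0x%llx mask 0x%016llx\n",
		    l, (int)shift, (unsigned long long)size,
		    (unsigned long long)mask);
	}
	return (0);
}
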
hat_i86.c
      91  struct hat_mmu_info mmu;  (variable)
     228  sizeof (pgcnt_t) * (mmu.max_page_level + 1));  in hati_constructor()
     281  use_vlp = mmu.pae_hat;  in hat_alloc()
     293  hat->hat_num_hash = mmu.vlp_hash_cnt;  in hat_alloc()
     296  hat->hat_num_hash = mmu.hash_cnt;  in hat_alloc()
     357  xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);  in hat_alloc()
     359  xen_pin(hat->hat_user_ptable, mmu.max_level);  in hat_alloc()
     511  mmu.max_page_level = lvl;  in set_max_page_level()
     514  mmu.umax_page_level = 1;  in set_max_page_level()
     516  mmu.umax_page_level = lvl;  in set_max_page_level()
     [all …]

htable.c
     582  if (htable_steal_passes > mmu.ptes_per_table)  in htable_steal()
     583  htable_steal_passes = mmu.ptes_per_table;  in htable_steal()
     591  threshold = pass * mmu.ptes_per_table / htable_steal_passes;  in htable_steal()
     657  ht->ht_level == mmu.max_level) {  in htable_steal()
     881  if (level == mmu.max_level) {  in htable_alloc()
     941  x86pte_zero(ht, 0, mmu.ptes_per_table);  in htable_alloc()
     946  if (level == mmu.max_level)  in htable_alloc()
     990  if (ht->ht_level == mmu.max_level && hat != NULL) {  in htable_free()
    1127  ASSERT(new->ht_level != mmu.max_level);  in link_ptp()
    1206  if (level >= mmu.max_page_level &&  in htable_release()
     [all …]

hat_kdi.c
      88  hat_kdi_use_pae = mmu.pae_hat;  in hat_kdi_init()
     184  for (level = mmu.max_level; ; --level) {  in kdi_vtop()
     185  index = (va >> LEVEL_SHIFT(level)) & (mmu.ptes_per_table - 1);  in kdi_vtop()
     186  *pap += index << mmu.pte_size_shift;  in kdi_vtop()
     188  if (kdi_pread((caddr_t)&pte, mmu.pte_size, *pap, &len) != 0)  in kdi_vtop()
     192  if (level > 0 && level <= mmu.max_page_level &&  in kdi_vtop()

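Note: the kdi_vtop() hits show the debugger's software page-table walk: at each level the VA is shifted right by LEVEL_SHIFT(level), masked down to a table index with mmu.ptes_per_table - 1, and scaled by mmu.pte_size_shift to find the PTE's byte offset within its table. A minimal standalone sketch of that index arithmetic follows; it assumes 4-level paging with 512 eight-byte PTEs per table instead of reading the live mmu struct.

#include <stdio.h>
#include <stdint.h>

#define PTES_PER_TABLE	512ULL	/* stand-in for mmu.ptes_per_table */
#define PTE_SIZE_SHIFT	3	/* 8-byte PTEs (assumption) */
#define MAX_LEVEL	3	/* 4-level paging */

static unsigned int
level_shift(int level)		/* stand-in for LEVEL_SHIFT(level) */
{
	return (12 + 9 * level);	/* 12, 21, 30, 39 */
}

int
main(void)
{
	uint64_t va = 0xfffffffffbc00123ULL;	/* example kernel VA */

	for (int level = MAX_LEVEL; level >= 0; level--) {
		uint64_t index = (va >> level_shift(level)) &
		    (PTES_PER_TABLE - 1);
		/* byte offset of the PTE within its page-table page */
		uint64_t pte_off = index << PTE_SIZE_SHIFT;

		printf("level %d: index %3llu, PTE byte offset 0x%llx\n",
		    level, (unsigned long long)index,
		    (unsigned long long)pte_off);
	}
	return (0);
}
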
htable.h
     133  ((ht)->ht_level == mmu.max_level ? ((uintptr_t)0UL - MMU_PAGESIZE) :\
     138  ((va & LEVEL_MASK(l)) + LEVEL_SIZE(l) == mmu.hole_start ? \
     139  mmu.hole_end : (va & LEVEL_MASK(l)) + LEVEL_SIZE(l))
     144  (!mmu.pae_hat ? 1024 : ((ht)->ht_level == 2 ? 4 : 512))

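Note: the htable.h hit at line 138 steps a VA forward by one level-sized chunk and, when that lands exactly on mmu.hole_start, jumps across the non-canonical VA hole to mmu.hole_end. A hedged standalone sketch of the same skip, using the usual x86-64 canonical-address boundaries as stand-ins for the mmu fields:

#include <stdio.h>
#include <stdint.h>

#define HOLE_START	0x0000800000000000ULL	/* first non-canonical VA (assumption) */
#define HOLE_END	0xffff800000000000ULL	/* first canonical VA above the hole (assumption) */

static uint64_t
next_entry_va(uint64_t va, uint64_t level_size)
{
	uint64_t level_mask = ~(level_size - 1);
	uint64_t next = (va & level_mask) + level_size;

	/* skip the hole entirely if the step lands on its start */
	return (next == HOLE_START ? HOLE_END : next);
}

int
main(void)
{
	uint64_t level_size = 1ULL << 39;		/* span of one top-level entry */
	uint64_t va = HOLE_START - level_size;		/* last canonical chunk below the hole */

	printf("next VA after 0x%llx is 0x%llx\n",
	    (unsigned long long)va,
	    (unsigned long long)next_entry_va(va, level_size));
	return (0);
}
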
kboot_mmu.c
     266  probe_va = mmu.hole_end;  in kbm_probe()
     319  if (PTE_GET(pte_val, mmu.pt_nx))  in kbm_probe()

hat_i86.h
      71  #define TOP_LEVEL(h) (((h)->hat_flags & HAT_VLP) ? VLP_LEVEL : mmu.max_level)

vm_machdep.c
     400  for (l = mmu.umax_page_level; l > 0; --l) {  in map_pgsz()
     417  for (l = mmu.umax_page_level; l > 0; --l) {  in map_pgsz()
     474  if (mmu.max_page_level == 0)  in map_pgszcvec()
     741  mmu.umax_page_level;  in map_addr_proc()
    1147  pfnseg = mmu.highest_pfn;  in page_get_contigpage()
    1744  for (i = 0; i <= mmu.max_page_level; i++) {  in page_coloring_init()
    1764  for (i = 0; i <= mmu.max_page_level; i++) {  in page_coloring_init()
    1785  for (i = 0; i <= mmu.max_page_level; i++) {  in page_coloring_init()
    3967  offset += mmu.hole_start;	/* something in VA hole */  in page_get_physical()

/titanic_41/usr/src/cmd/mdb/i86pc/modules/unix/

i86mmu.c
      65  if (mmu.num_level == 0)  in platform_vtop()
     159  struct hat_mmu_info mmu;  (variable)
     179  if (mmu.num_level != 0)  in init_mmu()
     182  if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)  in init_mmu()
     418  if (PTE_GET(pte, mmu.pt_nx))  in do_pte_dcmd()
     430  if (PTE_GET(pte, mmu.pt_global))  in do_pte_dcmd()
     494  if (mmu.num_level == 0)  in pte_dcmd()
     520  if (level < 0 || level > mmu.max_level)  in pte_dcmd()
     532  entry >>= mmu.level_shift[htable->ht_level];  in va2entry()
     545  ptr += va2entry(htable, addr) << mmu.pte_size_shift;  in get_pte()
     [all …]

/titanic_41/usr/src/cmd/fm/eversholt/files/sparc/sun4u/

tomatillo.esc
      48  event error.io.tom.mmu.inval@hostbridge/pcibus;
      49  event error.io.tom.mmu.prot@hostbridge/pcibus;
      50  event error.io.tom.mmu.bva@hostbridge/pcibus;
      51  event error.io.tom.mmu.btt@hostbridge/pcibus;
      59  event ereport.io.tom.mmu.btt@hostbridge/pcibus/pcidev/pcifn{within(5s)};
      60  event ereport.io.tom.mmu.bva@hostbridge/pcibus/pcidev/pcifn{within(5s)};
      61  event ereport.io.tom.mmu.prot@hostbridge/pcibus/pcidev/pcifn{within(5s)};
      62  event ereport.io.tom.mmu.inval@hostbridge/pcibus/pcidev/pcifn{within(5s)};
      93  error.io.tom.mmu.inval@hostbridge/pcibus,
      94  error.io.tom.mmu.prot@hostbridge/pcibus,
     [all …]

/titanic_41/usr/src/psm/stand/bootblks/common/

util.fth
     138  " mmu" chosen-ph get-int-prop constant mmu-ih
     142  : mmu-claim ( [ virt ] size align -- base )
     143  " claim" mmu-ih $call-method
     146  : mmu-map ( phys.lo phys.hi virt size -- )
     147  -1 " map" mmu-ih $call-method
     159  over 0 mmu-claim ( align size virt r: phys.lo phys.hi )
     162  2dup swap mmu-claim ( align size virt r: phys.lo phys.hi )
     164  r> r> 2over swap mmu-map ( align size virt )

/titanic_41/usr/src/uts/i86xpv/os/

xpv_panic.c
     170  if (mmu.pae_hat)  in xpv_panic_map()
     200  for (l = mmu.max_level; l >= 0; l--)  in xpv_va_walk()
     208  for (l = mmu.max_level; l >= 0; l--) {  in xpv_va_walk()
     219  if (l == mmu.max_level && mmu.pae_hat)  in xpv_va_walk()
     228  (idx << mmu.pte_size_shift));  in xpv_va_walk()
     233  scan_va += mmu.level_size[l];  in xpv_va_walk()
     768  for (l = mmu.max_level; l >= 0; l--)  in xpv_do_panic()
     769  xpv_panic_nptes[l] = mmu.ptes_per_table;  in xpv_do_panic()
     771  if (mmu.pae_hat)  in xpv_do_panic()
     772  xpv_panic_nptes[mmu.max_level] = 4;  in xpv_do_panic()
     [all …]

/titanic_41/usr/src/uts/i86pc/os/

startup.c
     380  ((uintptr_t)P2ROUNDUP((uintptr_t)(x), mmu.level_size[1]))
     384  ((uintptr_t)P2ROUNDUP((uintptr_t)(x), mmu.level_size[mmu.max_level]))
     663  valloc_align = mmu.level_size[mmu.max_page_level > 0];  in perform_allocations()
    1045  if (mmu.pae_hat) {  in startup_memlist()
    1069  mmu.pt_nx = 0;  in startup_memlist()
    1071  PRM_DEBUG(mmu.pt_nx);  in startup_memlist()
    1211  valloc_base = P2ALIGN(valloc_base, mmu.level_size[1]);  in startup_memlist()
    1351  kernelbase = eprom_kernelbase & mmu.level_mask[1];  in startup_kmem()
    1358  ASSERT((kernelbase & mmu.level_offset[1]) == 0);  in startup_kmem()
    2061  if (!auto_lpg_disable && mmu.max_page_level > 0) {  in startup_vm()
     [all …]

/titanic_41/usr/src/uts/intel/asm/

Makefile
      39  mmu.h \

/titanic_41/usr/src/cmd/mdb/intel/modules/mdb_kb/

mdb_kb.c
     744  xkb_get_pte(mmu_info_t *mmu, char *ptep)  in xkb_get_pte()  (argument)
     748  if (mmu->mi_ptesize == 8) {  in xkb_get_pte()
     778  mmu_info_t *mmu = &xkb->xkb_mmu;  in xkb_va_to_mfn()  (local)
     782  for (level = mmu->mi_max; ; --level) {  in xkb_va_to_mfn()
     788  entry = (va >> mmu->mi_shift[level]) & (mmu->mi_ptes - 1);  in xkb_va_to_mfn()
     790  pte = xkb_get_pte(mmu, (char *)xkb->xkb_pt_map[level].mm_map +  in xkb_va_to_mfn()
     791  entry * mmu->mi_ptesize);  in xkb_va_to_mfn()
     806  mfn += (va & ((1 << mmu->mi_shift[level]) - 1)) >>  in xkb_va_to_mfn()

/titanic_41/usr/src/uts/sun4v/ml/

wbuf.s
      63  ! g5 = mmu trap type, g6 = tag access reg (g5 != T_ALIGNMENT) or
     199  ! g5 = mmu trap type, g6 = tag access reg (g5 != T_ALIGNMENT) or
     399  ! g5 = mmu trap type, g6 = tag access reg (g5 != T_ALIGNMENT) or

/titanic_41/usr/src/

Makefile.psm
      52  ROOT_PSM_MMU_DIR_32 = $(ROOT_PSM_MOD_DIR)/mmu
      71  ROOT_PSM_MMU_DIR_64 = $(ROOT_PSM_MOD_DIR)/mmu/$(SUBDIR64)

/titanic_41/usr/src/uts/sun4u/ml/

wbuf.s
      62  ! g5 = mmu trap type, g6 = tag access reg (g5 != T_ALIGNMENT) or
     196  ! g5 = mmu trap type, g6 = tag access reg (g5 != T_ALIGNMENT) or
     337  ! g5 = mmu trap type, g6 = tag access reg (g5 != T_ALIGNMENT) or

/titanic_41/usr/src/uts/sun4u/sys/

zuluvm.h
      99  caddr_t mmu, caddr_t imr);

Makefile
      87  mmu.h \

/titanic_41/usr/src/uts/sun4v/sys/

Makefile
      81  mmu.h \

/titanic_41/usr/src/uts/sun4/io/fpc/

fpc.h
      55  mmu,  (enumerator)

fpc-kstats.c
     268  case mmu:  in fpc_dev_kstat()
     321  else if (reg_group == mmu)  in fpc_dev_kstat()

/titanic_41/usr/src/uts/sun4u/io/px/

px_err_impl.h
     207  PX_ERPT_SEND_DEC(mmu);