Lines matching refs:mmu -- every reference to the global `mmu' (struct hat_mmu_info) in the illumos x86 HAT source (hat_i86.c), listed with its source line number and enclosing function.
278 struct hat_mmu_info mmu; variable
412 sizeof (pgcnt_t) * (mmu.max_page_level + 1)); in hati_constructor()
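
The matches below touch a couple dozen fields of the global mmu. As an orientation aid, here is a partial reconstruction of struct hat_mmu_info assembled only from the field references in this listing; the types, array bound, and comments are guesses for illustration, not the authoritative header.

    #include <stdint.h>

    /* Stub types so the sketch stands alone; the real ones live in the HAT headers. */
    typedef uint64_t x86pte_t;
    typedef uint64_t pfn_t;
    typedef unsigned int uint_t;
    struct htable;                      /* opaque here */
    #define MAX_NUM_LEVEL 4             /* assumed bound; 4-level paging below */

    struct hat_mmu_info {               /* partial; fields seen in this listing only */
        x86pte_t  pt_nx;                /* PT_NX if no-execute usable, else 0 */
        x86pte_t  pt_global;            /* PT_GLOBAL if usable (zero under KPTI), else 0 */
        pfn_t     highest_pfn;          /* clamped to PFN_4G - 1 without PAE */
        uint_t    pae_hat;              /* nonzero when 8-byte PTEs are in use */
        uint_t    pte_size;             /* 4 or 8 */
        uint_t    pte_size_shift;       /* 2 or 3 */
        uint_t    num_level;            /* page table levels */
        uint_t    max_level;            /* num_level - 1 */
        uint_t    max_level32;          /* top level used by 32-bit hats */
        uint_t    max_page_level;       /* largest level mappable as a large page */
        uint_t    umax_page_level;      /* largest level exported to userland */
        uint_t    ptes_per_table;
        uint_t    top_level_count;
        uint_t    top_level_uslots;     /* top-level entries covering user VA */
        uint_t    top_level_uslots32;
        uint_t    num_copied_ents;      /* top-level entries copied per cpu */
        uint_t    num_copied_ents32;
        uint_t    hash_cnt;             /* htable hash buckets (kernel hat) */
        uint_t    hat32_hash_cnt;       /* buckets for 32-bit hats */
        uintptr_t hole_start, hole_end; /* non-canonical VA hole */
        uint_t    level_shift[MAX_NUM_LEVEL];
        uintptr_t level_size[MAX_NUM_LEVEL];
        uintptr_t level_offset[MAX_NUM_LEVEL];
        uintptr_t level_mask[MAX_NUM_LEVEL];
        x86pte_t  pte_bits[MAX_NUM_LEVEL];
        x86pte_t  ptp_bits[MAX_NUM_LEVEL];
        uintptr_t kmap_addr, kmap_eaddr; /* kmap fast-path window */
        x86pte_t  *kmap_ptes;
        struct htable **kmap_htables;
    };
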
488 hat->hat_max_level = mmu.max_level; in hat_alloc()
499 hat->hat_max_level = mmu.max_level32; in hat_alloc()
500 hat->hat_num_copied = mmu.num_copied_ents32; in hat_alloc()
506 hat->hat_max_level = mmu.max_level; in hat_alloc()
507 hat->hat_num_copied = mmu.num_copied_ents; in hat_alloc()
513 hat->hat_max_level = mmu.max_level; in hat_alloc()
533 hat->hat_num_hash = mmu.hat32_hash_cnt; in hat_alloc()
536 hat->hat_num_hash = mmu.hash_cnt; in hat_alloc()
587 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level); in hat_alloc()
588 xen_pin(hat->hat_user_ptable, mmu.max_level); in hat_alloc()
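
The hat_alloc() matches above show per-process geometry being chosen from the global mmu. A reduced, compilable model of that branching; the guard conditions (is_32bit, uses_copied) and the sample field values are hypothetical stand-ins for the real flag tests, which are not visible in this listing.

    #include <stdio.h>
    #include <stdbool.h>

    struct hat { unsigned hat_max_level, hat_num_copied, hat_num_hash; };
    struct { unsigned max_level, max_level32, num_copied_ents,
             num_copied_ents32, hash_cnt, hat32_hash_cnt; } mmu =
        { 3, 2, 256, 4, 4096, 512 };        /* assumed values for illustration */

    int main(void) {
        struct hat h;
        bool is_32bit = true;               /* assumed: a 32-bit process */
        bool uses_copied = true;            /* assumed: per-cpu copied top level */
        if (is_32bit) {
            h.hat_max_level  = mmu.max_level32;        /* 32-bit VA needs fewer levels */
            h.hat_num_copied = mmu.num_copied_ents32;  /* 4, per line 807 above */
        } else if (uses_copied) {
            h.hat_max_level  = mmu.max_level;
            h.hat_num_copied = mmu.num_copied_ents;
        } else {
            h.hat_max_level  = mmu.max_level;
            h.hat_num_copied = 0;
        }
        h.hat_num_hash = is_32bit ? mmu.hat32_hash_cnt : mmu.hash_cnt;
        printf("max_level %u copied %u hash %u\n",
            h.hat_max_level, h.hat_num_copied, h.hat_num_hash);
        return 0;
    }
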
614 hat->hat_max_level = mmu.max_level; in hat_cpu_alloc()
618 hat->hat_num_hash = mmu.hash_cnt; in hat_cpu_alloc()
766 mmu.max_page_level = lvl; in set_max_page_level()
769 mmu.umax_page_level = 1; in set_max_page_level()
771 mmu.umax_page_level = lvl; in set_max_page_level()
785 nptes = mmu.top_level_count; in mmu_calc_user_slots()
786 shift = _userlimit >> mmu.level_shift[mmu.max_level]; in mmu_calc_user_slots()
793 mmu.top_level_uslots = ent + 1; in mmu_calc_user_slots()
799 mmu.top_level_uslots32 = 1; in mmu_calc_user_slots()
806 mmu.num_copied_ents = mmu.top_level_uslots; in mmu_calc_user_slots()
807 mmu.num_copied_ents32 = 4; in mmu_calc_user_slots()
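
The mmu_calc_user_slots() matches above compute how many top-level page table entries cover user VA. A minimal userland demo of that arithmetic; the mask against (nptes - 1) is inferred from the nptes load at line 785, and the sample _userlimit is an assumed 48-bit value, not the real constant.

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        uint64_t userlimit = 0x00007fffffe00000ULL; /* assumed top of user VA */
        unsigned nptes = 512;                       /* mmu.top_level_count */
        unsigned shift = 39;                        /* mmu.level_shift[3] */
        unsigned ent = (unsigned)((userlimit >> shift) & (nptes - 1));
        printf("top_level_uslots = %u\n", ent + 1); /* 256: the user half */
        return 0;
    }
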
827 mmu.pt_global = PT_GLOBAL; in mmu_init()
837 mmu.pt_global = 0; in mmu_init()
843 mmu.pae_hat = kbm_pae_support; in mmu_init()
845 mmu.pt_nx = PT_NX; in mmu_init()
847 mmu.pt_nx = 0; in mmu_init()
867 mmu.hole_start = (1ul << (va_bits - 1)); in mmu_init()
868 mmu.hole_end = 0ul - mmu.hole_start - 1; in mmu_init()
870 mmu.hole_end = 0; in mmu_init()
871 mmu.hole_start = mmu.hole_end - 1; in mmu_init()
879 hole_start = mmu.hole_start - hole_start; in mmu_init()
881 hole_start = mmu.hole_start; in mmu_init()
883 hole_end = mmu.hole_end; in mmu_init()
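
The hole computation above encodes the x86-64 canonical-address hole (the else branch at lines 870-871 produces an empty hole whose bounds can never bracket an address). A worked userland example, assuming va_bits == 48:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        unsigned va_bits = 48;                      /* 4-level paging */
        uint64_t hole_start = 1ULL << (va_bits - 1);
        uint64_t hole_end = 0ULL - hole_start - 1;  /* unsigned wrap: ~hole_start */
        /* prints 0x800000000000 .. 0xffff7fffffffffff, the non-canonical range */
        printf("hole %#llx .. %#llx\n", (unsigned long long)hole_start,
            (unsigned long long)hole_end);
        return 0;
    }
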
885 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1); in mmu_init()
886 if (mmu.pae_hat == 0 && pa_bits > 32) in mmu_init()
887 mmu.highest_pfn = PFN_4G - 1; in mmu_init()
889 if (mmu.pae_hat) { in mmu_init()
890 mmu.pte_size = 8; /* 8 byte PTEs */ in mmu_init()
891 mmu.pte_size_shift = 3; in mmu_init()
893 mmu.pte_size = 4; /* 4 byte PTEs */ in mmu_init()
894 mmu.pte_size_shift = 2; in mmu_init()
897 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE)) in mmu_init()
904 mmu.num_level = 4; in mmu_init()
905 mmu.max_level = 3; in mmu_init()
906 mmu.ptes_per_table = 512; in mmu_init()
907 mmu.top_level_count = 512; in mmu_init()
912 mmu.max_level32 = 2; in mmu_init()
914 mmu.level_shift[0] = 12; in mmu_init()
915 mmu.level_shift[1] = 21; in mmu_init()
916 mmu.level_shift[2] = 30; in mmu_init()
917 mmu.level_shift[3] = 39; in mmu_init()
920 for (i = 0; i < mmu.num_level; ++i) { in mmu_init()
921 mmu.level_size[i] = 1UL << mmu.level_shift[i]; in mmu_init()
922 mmu.level_offset[i] = mmu.level_size[i] - 1; in mmu_init()
923 mmu.level_mask[i] = ~mmu.level_offset[i]; in mmu_init()
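
The loop at lines 920-923 derives each level's size, offset, and mask from its shift. A standalone rendering of the same arithmetic with the 4-level shifts set just above (4 KiB, 2 MiB, 1 GiB, and 512 GiB per entry at levels 0 through 3):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        unsigned level_shift[4] = { 12, 21, 30, 39 };
        for (int i = 0; i < 4; i++) {
            uint64_t size = 1ULL << level_shift[i]; /* bytes mapped per entry */
            uint64_t offset = size - 1;             /* offset within that span */
            uint64_t mask = ~offset;                /* aligns a VA to the span */
            printf("level %d: size %#llx offset %#llx mask %#llx\n", i,
                (unsigned long long)size, (unsigned long long)offset,
                (unsigned long long)mask);
        }
        return 0;
    }
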
929 mmu_page_sizes = mmu.max_page_level + 1; in mmu_init()
930 mmu_exported_page_sizes = mmu.umax_page_level + 1; in mmu_init()
937 for (i = 0; i <= mmu.max_page_level; ++i) { in mmu_init()
938 mmu.pte_bits[i] = PT_VALID | pt_kern; in mmu_init()
940 mmu.pte_bits[i] |= PT_PAGESIZE; in mmu_init()
946 for (i = 1; i < mmu.num_level; ++i) in mmu_init()
947 mmu.ptp_bits[i] = PT_PTPBITS; in mmu_init()
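
The pte_bits seeding at lines 937-940 gives every mappable level PT_VALID (plus pt_kern for kernel mappings) and adds PT_PAGESIZE above level 0, since a non-zero-level mapping is a large page. A sketch using the standard x86 bit values; the i > 0 guard is inferred from the loop shape, and pt_kern is assumed zero here.

    #include <stdio.h>
    #include <stdint.h>

    #define PT_VALID    0x001ULL    /* present */
    #define PT_PAGESIZE 0x080ULL    /* PS bit: entry maps a large page */

    int main(void) {
        int max_page_level = 1;     /* assumed: 2 MiB large pages available */
        uint64_t pt_kern = 0;       /* assumed zero (user mapping) */
        uint64_t pte_bits[4] = { 0 };
        for (int i = 0; i <= max_page_level; ++i) {
            pte_bits[i] = PT_VALID | pt_kern;
            if (i > 0)              /* level 0 entries have no PS bit */
                pte_bits[i] |= PT_PAGESIZE;
            printf("pte_bits[%d] = %#llx\n", i, (unsigned long long)pte_bits[i]);
        }
        return 0;
    }
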
955 max_htables = physmax / mmu.ptes_per_table; in mmu_init()
956 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *); in mmu_init()
957 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables) in mmu_init()
958 mmu.hash_cnt >>= 1; in mmu_init()
959 mmu.hat32_hash_cnt = mmu.hash_cnt; in mmu_init()
967 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables) in mmu_init()
968 mmu.hash_cnt <<= 1; in mmu_init()
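
The hash sizing at lines 955-968 starts from one page of bucket pointers, shrinks on small machines, then grows the kernel hat's table until the expected chain length is bounded. A runnable model; physmax and the value of HASH_MAX_LENGTH are assumptions (the constant's value is not visible in this listing).

    #include <stdio.h>
    #include <stdint.h>

    #define MMU_PAGESIZE    4096ULL /* x86 base page */
    #define HASH_MAX_LENGTH 4ULL    /* assumed target mean chain length */

    int main(void) {
        uint64_t physmax = 0x800000;                    /* assumed highest pfn: 32 GiB */
        uint64_t max_htables = physmax / 512;           /* mmu.ptes_per_table */
        uint64_t hash_cnt = MMU_PAGESIZE / sizeof (void *); /* 512 buckets */
        while (hash_cnt > 16 && hash_cnt >= max_htables)    /* shrink on small systems */
            hash_cnt >>= 1;
        uint64_t hat32_hash_cnt = hash_cnt;                 /* per-process table size */
        while (hash_cnt * HASH_MAX_LENGTH < max_htables)    /* grow for the kernel hat */
            hash_cnt <<= 1;
        printf("hat32 buckets %llu, kernel buckets %llu\n",
            (unsigned long long)hat32_hash_cnt, (unsigned long long)hash_cnt);
        return 0;
    }
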
991 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL, in hat_init()
998 if (mmu.hash_cnt == mmu.hat32_hash_cnt) { in hat_init()
1002 mmu.hat32_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, in hat_init()
1022 ASSERT3U(mmu.max_level, >, 0); in hat_init()
1023 kas.a_hat->hat_max_level = mmu.max_level; in hat_init()
1038 kas.a_hat->hat_num_hash = mmu.hash_cnt; in hat_init()
1040 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *)); in hat_init()
1279 if (rp->hkr_level <= mmu.max_page_level && in hat_init_finish()
1301 if (mmu.pae_hat) { in hat_init_finish()
1364 bzero(l3ptes, sizeof (x86pte_t) * mmu.top_level_uslots); in hat_pcp_update()
1366 bzero(l3uptes, sizeof (x86pte_t) * mmu.top_level_uslots); in hat_pcp_update()
1380 sizeof (x86pte_t) * mmu.top_level_uslots); in hat_pcp_update()
1382 sizeof (x86pte_t) * mmu.top_level_uslots); in hat_pcp_update()
1516 EQUIV(kpti_enable, !mmu.pt_global); in hat_switch()
1585 PTE_SET(pte, mmu.pt_nx); in hati_mkpte()
1727 for (l = 0; l <= mmu.max_page_level; l++) in hat_get_mapped_size()
1994 PTE_SET(pte, mmu.pt_global); in hati_load_common()
2022 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr); in hat_kmap_load()
2033 PTE_SET(pte, mmu.pt_global); in hat_kmap_load()
2038 if (mmu.pae_hat) in hat_kmap_load()
2039 pte_ptr = mmu.kmap_ptes + pg_off; in hat_kmap_load()
2041 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off; in hat_kmap_load()
2042 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >> in hat_kmap_load()
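
The hat_kmap_load() fast path above finds a page's PTE by direct arithmetic: the page offset of the VA within the kmap window is also its index into a flat PTE array. A userland model; the base address is an assumed stand-in, not the real kmap_addr.

    #include <stdio.h>
    #include <stdint.h>

    #define MMU_PAGESHIFT 12
    #define mmu_btop(x) ((uint64_t)(x) >> MMU_PAGESHIFT)

    int main(void) {
        uint64_t kmap_addr = 0xffff900000000000ULL; /* assumed window base */
        uint64_t va = kmap_addr + 5 * 4096 + 0x123; /* 5 pages in, mid-page */
        uint64_t pg_off = mmu_btop(va - kmap_addr);
        /* With PAE the array holds 8-byte x86pte_t entries (kmap_ptes + pg_off);
         * without PAE it is a 4-byte x86pte32_t array: same index, half the stride. */
        printf("pte index %llu\n", (unsigned long long)pg_off); /* 5 */
        return 0;
    }
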
2113 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { in hat_memload()
2178 for (level = mmu.max_page_level; ; --level) { in hat_memload_array()
2307 for (level = mmu.max_page_level; ; --level) { in hat_devload()
2784 pg_index = mmu_btop(va - mmu.kmap_addr); in hat_kmap_unload()
2785 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); in hat_kmap_unload()
2791 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) in hat_kmap_unload()
2817 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { in hat_unload()
2933 ASSERT(ht->ht_level <= mmu.max_page_level); in hat_unload_callback()
3087 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); in hat_getattr()
3101 if (!PTE_GET(pte, mmu.pt_nx)) in hat_getattr()
3157 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) in hat_updateattr()
3158 newpte &= ~mmu.pt_nx; in hat_updateattr()
3170 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) in hat_updateattr()
3171 newpte |= mmu.pt_nx; in hat_updateattr()
3182 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) in hat_updateattr()
3183 newpte |= mmu.pt_nx; in hat_updateattr()
3297 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { in hat_getpfnum()
3301 pg_index = mmu_btop(vaddr - mmu.kmap_addr); in hat_getpfnum()
3302 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index)); in hat_getpfnum()
3347 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { in hat_probe()
3348 pg_off = mmu_btop(vaddr - mmu.kmap_addr); in hat_probe()
3349 if (mmu.pae_hat) in hat_probe()
3350 return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); in hat_probe()
3353 ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); in hat_probe()
3460 if (l == mmu.max_level) in hat_share()
3609 l = mmu.max_page_level; in hat_unshare()
3610 if (l == mmu.max_level) in hat_unshare()
4412 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); in hat_mempte_release()
4413 if (mmu.pae_hat) in hat_mempte_release()
4473 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); in hat_mempte_remap()
4474 if (mmu.pae_hat) in hat_mempte_remap()
4555 ASSERT(level <= mmu.max_page_level); in clear_boot_mappings()
4954 *pte_ma = base_ma + (entry << mmu.pte_size_shift); in hat_prepare_mapping()
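
The last matches use pte_size_shift in both directions: hat_mempte_release()/hat_mempte_remap() recover a PTE's index within its page from its physical address, and hat_prepare_mapping() rebuilds an address from an index. A worked round trip, with an assumed PTE physical address:

    #include <stdio.h>
    #include <stdint.h>

    #define MMU_PAGEOFFSET 0xfffULL

    int main(void) {
        unsigned pte_size_shift = 3;                /* PAE: 8-byte PTEs */
        uint64_t pte_pa = 0x1234567218ULL;          /* assumed PTE physical address */
        uint64_t entry = (pte_pa & MMU_PAGEOFFSET) >> pte_size_shift; /* 0x218/8 = 67 */
        uint64_t base = pte_pa & ~MMU_PAGEOFFSET;   /* page holding the table */
        printf("entry %llu, rebuilt %#llx\n", (unsigned long long)entry,
            (unsigned long long)(base + (entry << pte_size_shift)));
        return 0;
    }
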