/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
/* This file is dual-licensed; see usr/src/contrib/bhyve/LICENSE */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/machsystm.h>
#include <sys/mach_mmu.h>
#include <sys/mman.h>
#include <sys/x86_archext.h>
#include <vm/hat_pte.h>

#include <sys/vmm_gpt.h>
#include <sys/vmm_vm.h>

/*
 * Translate a PROT_* bitmask into RVI PTE permission bits.  Read access is
 * implied by PT_VALID (the format has no read-disable bit); write access
 * requires PT_WRITABLE; execute is withheld via PT_NX.
 */
static inline uint64_t
rvi_prot(uint_t prot)
{
	uint64_t bits;

	bits = 0;
	if ((prot & PROT_WRITE) != 0)
		bits |= PT_WRITABLE;
	if ((prot & PROT_EXEC) == 0)
		bits |= PT_NX;

	return (bits);
}

static uint_t
rvi_pte_prot(uint64_t pte)
{
	uint_t prot;

	if ((pte & PT_VALID) == 0)
		return (0);

	prot = PROT_READ;
	if ((pte & PT_NX) == 0)
		prot |= PROT_EXEC;
	if ((pte & PT_WRITABLE) != 0)
		prot |= PROT_WRITE;

	return (prot);
}

/* Make sure that PAT indexes line up as expected */
CTASSERT((PAT_DEFAULT_ATTRIBUTE & 0xf) == MTRR_TYPE_WB);
CTASSERT(((PAT_DEFAULT_ATTRIBUTE >> 24) & 0xf) == MTRR_TYPE_UC);

static inline uint64_t
rvi_attr_to_pat(uint8_t attr)
{
	if (attr == MTRR_TYPE_UC)
		return (PT_NOCACHE | PT_WRITETHRU);
	if (attr == MTRR_TYPE_WB)
		return (0);

	panic("unexpected memattr %x", attr);
}

/* Construct a PTE for an inner (table) page: always writable, WB-cached. */
static uint64_t
rvi_map_table(uint64_t pfn)
{
	const uint64_t paddr = pfn_to_pa(pfn);
	const uint64_t flags = PT_USER | PT_REF | PT_VALID;
	const uint64_t pat = rvi_attr_to_pat(MTRR_TYPE_WB);
	const uint64_t rprot = PT_WRITABLE;

	return (paddr | flags | pat | rprot);
}

static uint64_t
rvi_map_page(uint64_t pfn, uint_t prot, uint8_t attr)
{
	const uint64_t paddr = pfn_to_pa(pfn);
	const uint64_t flags = PT_USER | PT_REF | PT_VALID;
	const uint64_t pat = rvi_attr_to_pat(attr);
	const uint64_t rprot = rvi_prot(prot);

	return (paddr | flags | pat | rprot);
}

static pfn_t
rvi_pte_pfn(uint64_t pte)
{
	return (mmu_btop(pte & PT_PADDR));
}

static bool
rvi_pte_is_present(uint64_t pte)
{
	return ((pte & PT_VALID) == PT_VALID);
}

static uint_t
rvi_reset_bits(volatile uint64_t *entry, uint64_t mask, uint64_t bits)
{
	uint64_t pte, newpte, oldpte = 0;

	/*
	 * We use volatile and atomic ops here because we may be
	 * racing against hardware modifying these bits.
	 */
	VERIFY3P(entry, !=, NULL);
	oldpte = *entry;
	do {
		pte = oldpte;
		newpte = (pte & ~mask) | bits;
		oldpte = atomic_cas_64(entry, pte, newpte);
	} while (oldpte != pte);

	return (oldpte & mask);
}

/*
 * The dirty and accessed updates below are deliberately coupled: setting
 * PT_MOD also sets PT_REF, and any reset of PT_REF clears PT_MOD as well,
 * so a PTE can never claim to be dirty without also being accessed.
 */
static uint_t
rvi_reset_dirty(uint64_t *entry, bool on)
{
	return (rvi_reset_bits(entry, PT_MOD, on ? (PT_MOD | PT_REF) : 0));
}

static uint_t
rvi_reset_accessed(uint64_t *entry, bool on)
{
	return (rvi_reset_bits(entry, (PT_MOD | PT_REF), on ? PT_REF : 0));
}
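
/*
 * The root of an RVI table hierarchy is handed to hardware as a plain
 * host-physical address: on AMD this is the value loaded into the VMCB
 * nCR3 field, with no flag bits mixed in (unlike Intel's EPTP, which
 * encodes memory type and walk configuration alongside the address).
 * Hence vpeo_get_pmtp below reduces to a pfn-to-pa shift.
 */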

static uint64_t
rvi_get_pmtp(pfn_t root_pfn)
{
	return (root_pfn << PAGESHIFT);
}

/*
 * Entry points for RVI (AMD nested paging) PTE manipulation, dispatched
 * through by the vendor-neutral second-level page table code.
 */
vmm_pte_ops_t rvi_pte_ops = {
	.vpeo_map_table = rvi_map_table,
	.vpeo_map_page = rvi_map_page,
	.vpeo_pte_pfn = rvi_pte_pfn,
	.vpeo_pte_is_present = rvi_pte_is_present,
	.vpeo_pte_prot = rvi_pte_prot,
	.vpeo_reset_dirty = rvi_reset_dirty,
	.vpeo_reset_accessed = rvi_reset_accessed,
	.vpeo_get_pmtp = rvi_get_pmtp,
};
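
/*
 * A minimal consumption sketch (hypothetical, for illustration only; the
 * actual call sites live in the vendor-neutral GPT code): a caller can
 * build and interrogate a leaf PTE through the ops vector without knowing
 * any RVI-specific bit layout:
 *
 *	const uint64_t pte = rvi_pte_ops.vpeo_map_page(pfn,
 *	    PROT_READ | PROT_WRITE, MTRR_TYPE_WB);
 *	ASSERT(rvi_pte_ops.vpeo_pte_is_present(pte));
 *	ASSERT3U(rvi_pte_ops.vpeo_pte_pfn(pte), ==, pfn);
 *	ASSERT3U(rvi_pte_ops.vpeo_pte_prot(pte), ==, PROT_READ | PROT_WRITE);
 */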