/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>

static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}

static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}

static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_EL1_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_EL1_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_EL1_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_EL1_ZEN;

	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;

extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);

extern void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu);

struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

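/*
 * Illustrative sketch only, not part of the upstream API: the
 * permission accessors above are typically consulted after
 * kvm_walk_nested_s2() has filled in a struct kvm_s2_trans. The helper
 * below (hypothetical name) folds the two checks into one, keyed on
 * the fault direction.
 */
static inline bool kvm_s2_trans_allows_access(struct kvm_s2_trans *trans,
					      bool write_fault)
{
	/* A write fault needs the writable attribute, a read the readable one */
	return write_fault ? kvm_s2_trans_writable(trans)
			   : kvm_s2_trans_readable(trans);
}
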
static inline bool kvm_has_xnx(struct kvm *kvm)
{
	return cpus_have_final_cap(ARM64_HAS_XNX) &&
	       kvm_has_feat(kvm, ID_AA64MMFR1_EL1, XNX, IMP);
}

static inline bool kvm_s2_trans_exec_el0(struct kvm *kvm, struct kvm_s2_trans *trans)
{
	u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);

	/*
	 * Without FEAT_XNX only XN[1] is defined, so mask the already
	 * extracted field value. Masking with FIELD_PREP() here would
	 * shift the mask back to its in-descriptor bit position and
	 * always clear xn.
	 */
	if (!kvm_has_xnx(kvm))
		xn &= 0b10;

	switch (xn) {
	case 0b00:
	case 0b01:
		return true;
	default:
		return false;
	}
}

static inline bool kvm_s2_trans_exec_el1(struct kvm *kvm, struct kvm_s2_trans *trans)
{
	u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);

	/* As above: only XN[1] is valid without FEAT_XNX */
	if (!kvm_has_xnx(kvm))
		xn &= 0b10;

	switch (xn) {
	case 0b00:
	case 0b11:
		return true;
	default:
		return false;
	}
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);

static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

/* Adjust alignment for the contiguous bit as per StageOA() */
#define contiguous_bit_shift(d, wi, l)				\
	({							\
		u8 shift = 0;					\
								\
		if ((d) & PTE_CONT) {				\
			switch (BIT((wi)->pgshift)) {		\
			case SZ_4K:				\
				shift = 4;			\
				break;				\
			case SZ_16K:				\
				shift = (l) == 2 ? 5 : 7;	\
				break;				\
			case SZ_64K:				\
				shift = 5;			\
				break;				\
			}					\
		}						\
								\
		shift;						\
	})

static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
{
	u64 base, tg, num, scale;
	int shift;

	tg = FIELD_GET(GENMASK(47, 46), val);

	switch (tg) {
	case 1:
		shift = 12;
		break;
	case 2:
		shift = 14;
		break;
	case 3:
	default:		/* IMPDEF: handle tg==0 as 64k */
		shift = 16;
		break;
	}

	base = (val & GENMASK(36, 0)) << shift;

	if (asid)
		*asid = FIELD_GET(TLBIR_ASID_MASK, val);

	scale = FIELD_GET(GENMASK(45, 44), val);
	num = FIELD_GET(GENMASK(43, 39), val);
	*range = __TLBI_RANGE_PAGES(num, scale) << shift;

	return base;
}

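/*
 * Worked example (hypothetical operand values, for illustration only):
 * with TG=1 (4KB granule, so shift=12), SCALE=1 and NUM=3, the range
 * computed above is __TLBI_RANGE_PAGES(3, 1) = (3 + 1) << (5 * 1 + 1) =
 * 256 pages, so *range = 256 << 12 = 1MB, starting at BaseADDR << 12.
 */
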
static inline unsigned int ps_to_output_size(unsigned int ps, bool pa52bit)
{
	switch (ps) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5: return 48;
	case 6: if (pa52bit)
			return 52;
		fallthrough;
	default:
		return 48;
	}
}

enum trans_regime {
	TR_EL10,
	TR_EL20,
	TR_EL2,
};

struct s1_walk_info;

struct s1_walk_context {
	struct s1_walk_info *wi;
	u64 table_ipa;
	int level;
};

struct s1_walk_filter {
	int (*fn)(struct s1_walk_context *, void *);
	void *priv;
};

struct s1_walk_info {
	struct s1_walk_filter *filter;
	u64 baddr;
	enum trans_regime regime;
	unsigned int max_oa_bits;
	unsigned int pgshift;
	unsigned int txsz;
	int sl;
	u8 sh;
	bool as_el0;
	bool hpd;
	bool e0poe;
	bool poe;
	bool pan;
	bool be;
	bool s2;
	bool pa52bit;
	bool ha;
};

struct s1_walk_result {
	union {
		struct {
			u64 desc;
			u64 pa;
			s8 level;
			u8 APTable;
			bool nG;
			u16 asid;
			bool UXNTable;
			bool PXNTable;
			bool uwxn;
			bool uov;
			bool ur;
			bool uw;
			bool ux;
			bool pwxn;
			bool pov;
			bool pr;
			bool pw;
			bool px;
		};
		struct {
			u8 fst;
			bool ptw;
			bool s2;
		};
	};
	bool failed;
};

int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
		       struct s1_walk_result *wr, u64 va);
int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa,
			     int *level);

/* VNCR management */
int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);

#define vncr_fixmap(c)					\
	({						\
		u32 __c = (c);				\
		BUG_ON(__c >= NR_CPUS);			\
		(FIX_VNCR - __c);			\
	})

int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new);

#endif /* __ARM64_KVM_NESTED_H */