/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

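/*
 * A vCPU runs with nested virt when KVM itself supports it (we are not
 * in the nVHE hypervisor object and the CPUs have the nested virt
 * capability) and userspace requested the KVM_ARM_VCPU_HAS_EL2 feature.
 */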
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
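/* TCR_EL2.PS and TCR_EL1.IPS encode the same PA size at different offsets */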
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}

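/*
 * Build the TCR_EL1 view of a non-VHE TCR_EL2: the TTBR0-related fields
 * line up, PS moves to IPS, TBI becomes TBI0, and TTBR1 walks are
 * disabled (EPD1) since EL2 only has a single VA range.
 */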
static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}

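/*
 * CPTR_EL2 uses "set means trap" bits while CPACR_EL1 uses "non-zero
 * means enabled" fields, hence the inversion for FP and SVE. For
 * example, a CPTR_EL2 with only TFP set yields a CPACR_EL1 with ZEN
 * set and FPEN clear, so FP/SIMD accesses keep trapping.
 */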
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = 0;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_ELx_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_ELx_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_ELx_ZEN;

	return cpacr_el1;
}

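/*
 * SCTLR_EL2 shares its layout with SCTLR_EL1 for the bits we care
 * about, so translating is a matter of masking and setting the EL1
 * RES1 bits.
 */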
static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

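/*
 * Without VHE, TTBR0_EL2 has no ASID field; clear bits [63:48] so the
 * resulting TTBR0_EL1 uses ASID 0.
 */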
static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

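/*
 * Result of walking the L1 guest's stage-2 page tables for a nested
 * access: the output address, size and permissions of the mapping,
 * plus the ESR to inject if the walk faulted.
 */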
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 upper_attr;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

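/* Bit 54 of the upper attributes is the stage-2 execute-never (XN) bit */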
static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
	return !(trans->upper_attr & BIT(54));
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm);
extern void kvm_nested_s2_flush(struct kvm *kvm);

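/*
 * A sketch of how the walk helpers above fit together in a fault
 * handler (illustrative only, assuming a hypothetical faulting gipa;
 * not the actual handler code):
 *
 *	struct kvm_s2_trans trans;
 *
 *	if (kvm_walk_nested_s2(vcpu, gipa, &trans))
 *		return kvm_inject_s2_fault(vcpu, kvm_s2_trans_esr(&trans));
 *
 * On success, kvm_s2_trans_output(&trans) provides the address to map
 * into the shadow stage-2.
 */

/*
 * Check a trapped EL1 TLBI against what the guest can architecturally
 * see: reject the nXS, outer-shareable and range forms when the
 * corresponding features (XS, TLBIOS, TLBIRANGE) are not exposed by
 * the ID registers.
 */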
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

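/*
 * Same feature checks for EL2 TLBIs, with the stage-2 IPA invalidation
 * forms additionally rejected.
 */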
static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm *kvm);

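/*
 * Authenticate the target of a trapped ERETAA/ERETAB. Without pointer
 * auth support this should never be reached, hence the WARN in the
 * stub below.
 */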
#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

#endif /* __ARM64_KVM_NESTED_H */