// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

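/*
 * Per-CPU state saved across a TLB maintenance operation: the stage-2 MMU
 * to restore when leaving the VMID context (NULL if no switch took place),
 * plus the EL1 TCR/SCTLR values stashed for the SPECULATIVE_AT workaround.
 */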
struct tlb_inv_context {
        struct kvm_s2_mmu       *mmu;
        u64                     tcr;
        u64                     sctlr;
};

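/*
 * Switch this CPU onto the stage-2 context (VMID/VTTBR) described by @mmu,
 * saving enough state in @cxt for exit_vmid_context() to undo it. @nsh
 * selects a Non-Shareable barrier when the caller only needs the page
 * table updates to be observed locally.
 */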
static void enter_vmid_context(struct kvm_s2_mmu *mmu,
                               struct tlb_inv_context *cxt,
                               bool nsh)
{
        struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
        struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;

        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;
        cxt->mmu = NULL;

        /*
         * We have two requirements:
         *
         * - ensure that the page table updates are visible to all
         *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
         *   being either ish or nsh, depending on the invalidation
         *   type.
         *
         * - complete any speculative page table walk started before
         *   we trapped to EL2 so that we can mess with the MM
         *   registers out of context, for which dsb(nsh) is enough
         *
         * The composition of these two barriers is a dsb(DOMAIN), and
         * the 'nsh' parameter tracks the distinction between
         * Inner-Shareable and Non-Shareable, as specified by the
         * callers.
         */
        if (nsh)
                dsb(nsh);
        else
                dsb(ish);

        /*
         * If we're already in the desired context, then there's nothing to do.
         */
        if (vcpu) {
                /*
                 * We're in guest context. However, for this to work, this needs
                 * to be called from within __kvm_vcpu_run(), which ensures that
                 * __hyp_running_vcpu is set to the current guest vcpu.
                 */
                if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
                        return;

                cxt->mmu = vcpu->arch.hw_mmu;
        } else {
                /* We're in host context. */
                if (mmu == host_s2_mmu)
                        return;

                cxt->mmu = host_s2_mmu;
        }

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;

                /*
                 * For CPUs that are affected by ARM 1319367, we need to
                 * avoid a Stage-1 walk with the old VMID while we have
                 * the new VMID set in the VTTBR in order to invalidate TLBs.
                 * We're guaranteed that the host S1 MMU is enabled, so
                 * we can simply set the EPD bits to avoid any further
                 * TLB fill. For guests, we ensure that the S1 MMU is
                 * temporarily enabled in the next context.
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                isb();

                if (vcpu) {
                        val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
                        if (!(val & SCTLR_ELx_M)) {
                                val |= SCTLR_ELx_M;
                                write_sysreg_el1(val, SYS_SCTLR);
                                isb();
                        }
                } else {
                        /* The host S1 MMU is always enabled. */
                        cxt->sctlr = SCTLR_ELx_M;
                }
        }

        /*
         * __load_stage2() includes an ISB only when the AT
         * workaround is applied. Take care of the opposite condition,
         * ensuring that we always have an ISB, but not two ISBs back
         * to back.
         */
        if (vcpu)
                __load_host_stage2();
        else
                __load_stage2(mmu, kern_hyp_va(mmu->arch));

        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

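/*
 * Undo enter_vmid_context(): reload the stage-2 context we came from and,
 * if the SPECULATIVE_AT workaround is in effect, restore the saved EL1
 * SCTLR and TCR values.
 */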
static void exit_vmid_context(struct tlb_inv_context *cxt)
{
        struct kvm_s2_mmu *mmu = cxt->mmu;
        struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;

        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;

        if (!mmu)
                return;

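        /*
         * Return to the context we entered from: the guest's stage-2 when
         * running a vCPU, the host's stage-2 otherwise.
         */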
        if (vcpu)
                __load_stage2(mmu, kern_hyp_va(mmu->arch));
        else
                __load_host_stage2();

        /* Ensure write of the old VMID */
        isb();

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                if (!(cxt->sctlr & SCTLR_ELx_M)) {
                        write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
                        isb();
                }

                write_sysreg_el1(cxt->tcr, SYS_TCR);
        }
}

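/*
 * Invalidate the Stage-2 TLB entries covering @ipa at @level for the VMID
 * of @mmu, broadcast to the Inner Shareable domain.
 */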
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
                              phys_addr_t ipa, int level)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        enter_vmid_context(mmu, &cxt, false);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
        ipa >>= 12;
        __tlbi_level(ipas2e1is, ipa, level);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        exit_vmid_context(&cxt);
}

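/*
 * Same as __kvm_tlb_flush_vmid_ipa(), but using the non-shareable TLBI
 * variants and barriers: only the local CPU's TLB is guaranteed to be
 * invalidated.
 */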
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
                                  phys_addr_t ipa, int level)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        enter_vmid_context(mmu, &cxt, true);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
        ipa >>= 12;
        __tlbi_level(ipas2e1, ipa, level);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(nsh);
        __tlbi(vmalle1);
        dsb(nsh);
        isb();

        exit_vmid_context(&cxt);
}

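/*
 * Invalidate the Stage-2 TLB entries for @pages pages starting at @start
 * for the VMID of @mmu, using the range-based TLBI instructions where the
 * CPU supports them.
 */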
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                phys_addr_t start, unsigned long pages)
{
        struct tlb_inv_context cxt;
        unsigned long stride;

        /*
         * Since the range of addresses may not be mapped at
         * the same level, assume the worst case as PAGE_SIZE
         */
        stride = PAGE_SIZE;
        start = round_down(start, stride);

        /* Switch to requested VMID */
        enter_vmid_context(mmu, &cxt, false);

        __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
                                TLBI_TTL_UNKNOWN);

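        /*
         * As in __kvm_tlb_flush_vmid_ipa(), complete the Stage-2
         * invalidation before nuking Stage-1.
         */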
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        exit_vmid_context(&cxt);
}

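/*
 * Invalidate all Stage-1 and Stage-2 TLB entries for the VMID of @mmu,
 * broadcast to the Inner Shareable domain.
 */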
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        enter_vmid_context(mmu, &cxt, false);

        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        exit_vmid_context(&cxt);
}

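/*
 * Local (non-broadcast) invalidation of the Stage-1 TLB entries for the
 * VMID of @mmu, together with the whole instruction cache of this CPU.
 */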
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        enter_vmid_context(mmu, &cxt, false);

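        /* Local TLB invalidation for this VMID, plus the whole I-cache. */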
        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();

        exit_vmid_context(&cxt);
}

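/*
 * Invalidate all Stage-1 and Stage-2 TLB entries for all VMIDs, broadcast
 * to the Inner Shareable domain.
 */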
void __kvm_flush_vm_context(void)
{
        /* Same remark as in enter_vmid_context() */
        dsb(ish);
        __tlbi(alle1is);
        dsb(ish);
}