// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

struct tlb_inv_context {
	u64		tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs affected by ARM erratum 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the host S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

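/*
 * A minimal sketch of the calling pattern shared by every flush below:
 * all maintenance runs with the guest's VMID live, bracketed by a
 * switch_to_guest/switch_to_host pair.
 *
 *	struct tlb_inv_context cxt;
 *
 *	__tlb_switch_to_guest(mmu, &cxt);
 *	...TLB/I-cache maintenance...
 *	__tlb_switch_to_host(&cxt);
 */
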
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
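	/*
	 * The TLBI argument register takes the IPA in units of 4kB
	 * pages (hence the shift); __tlbi_level() also encodes the
	 * level as a TTL hint on CPUs that support FEAT_TTL.
	 */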
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

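	/* Invalidate all S1+S2 entries for this VMID, Inner Shareable */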
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

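	/*
	 * Local (non-shareable) operations: only this CPU's TLB and
	 * I-cache are invalidated, which is why dsb(nsh) is enough.
	 */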
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
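	/* Invalidate all S1+S2 entries for all VMIDs, Inner Shareable */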
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}
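
/*
 * A minimal sketch of how these entry points are typically reached
 * (the actual call sites live on the host side of KVM, not in this
 * file, so treat this as illustrative):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
 */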