xref: /linux/arch/x86/hyperv/mmu.c (revision 856e7c4b619af622d56b3b454f7bec32a170ac99)
#define pr_fmt(fmt)  "Hyper-V: " fmt

#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/fpu/api.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/hyperv.h>

/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
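/*
 * With 4 KiB x86 pages this is 16 MiB of virtual address range per
 * gva_list entry: the upper bits of an entry hold the page-aligned
 * start address and the lower 12 bits hold the number of additional
 * pages to flush after that first page.
 */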


/*
 * Fills in gva_list starting from offset. Returns the number of items added.
 */
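/*
 * A non-zero 'offset' is used by hyperv_flush_tlb_others_ex() below: in
 * struct hv_tlb_flush_ex a variable number of 64-bit VP-set banks sits in
 * front of the GVA entries, so the list is filled in starting after those
 * nr_bank banks.
 */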
static inline int fill_gva_list(u64 gva_list[], int offset,
				unsigned long start, unsigned long end)
{
	int gva_n = offset;
	unsigned long cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur & PAGE_MASK;
		/*
		 * Lower 12 bits encode the number of additional
		 * pages to flush (in addition to the 'cur' page).
		 * Advance 'cur' inside the branches so it cannot wrap
		 * around when 'end' is close to the top of the address
		 * space.
		 */
		if (diff >= HV_TLB_FLUSH_UNIT) {
			gva_list[gva_n] |= ~PAGE_MASK;
			cur += HV_TLB_FLUSH_UNIT;
		} else if (diff) {
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
			cur = end;
		}

		gva_n++;

	} while (cur < end);

	return gva_n - offset;
}

static void hyperv_flush_tlb_others(const struct cpumask *cpus,
				    const struct flush_tlb_info *info)
{
	int cpu, vcpu, gva_n, max_gvas;
	struct hv_tlb_flush **flush_pcpu;
	struct hv_tlb_flush *flush;
	u64 status = U64_MAX;
	unsigned long flags;

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
		return;

	local_irq_save(flags);
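	/*
	 * Interrupts stay off from here until after the hypercall is
	 * issued: the per-CPU hypercall input page (hyperv_pcpu_input_arg)
	 * must not be reused by anything else running on this CPU in the
	 * meantime.
	 */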

	flush_pcpu = (struct hv_tlb_flush **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}
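	/*
	 * virt_to_phys(pgd) is the page-aligned physical address that CR3
	 * points at for this mm; masking with CR3_ADDR_MASK drops the low
	 * 12 bits, where a hardware CR3 value would carry the PCID (e.g. a
	 * CR3 of 0x123456005, PCID 5, corresponds to an address_space of
	 * 0x123456000).
	 */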

	flush->processor_mask = 0;
	if (cpumask_equal(cpus, cpu_present_mask)) {
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	} else {
		for_each_cpu(cpu, cpus) {
			vcpu = hv_cpu_number_to_vp_number(cpu);
			if (vcpu >= 64) {
				/* Re-enable interrupts before the fallback. */
				local_irq_restore(flags);
				goto do_native;
			}

			__set_bit(vcpu, (unsigned long *)
				  &flush->processor_mask);
		}
	}
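	/*
	 * processor_mask is a single 64-bit field, so only VP numbers 0-63
	 * can be represented here; anything larger falls back to the native
	 * IPI-based flush.  The _ex variant below handles large VP numbers
	 * with a sparse VP set instead.
	 */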

	/*
	 * We can flush at most max_gvas GVAs with one hypercall. Flush the
	 * whole address space if we were asked to do more.
	 */
	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
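	/*
	 * With 4 KiB pages and the three u64 header fields of struct
	 * hv_tlb_flush this works out to roughly (4096 - 24) / 8 = 509
	 * GVA entries per hypercall.
	 */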

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
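		/*
		 * The remote flushes issued through this path are for user
		 * address spaces, so only non-global mappings need to be
		 * invalidated even when the whole space is flushed.
		 */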
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, 0,
				      info->start, info->end);
		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
					     gva_n, 0, flush, NULL);
	}

	local_irq_restore(flags);

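	/*
	 * The low 16 bits of the hypercall return value carry the status
	 * code (0 == HV_STATUS_SUCCESS); on any failure, fall through to
	 * the native IPI-based flush.
	 */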
	if (!(status & HV_HYPERCALL_RESULT_MASK))
		return;
do_native:
	native_flush_tlb_others(cpus, info);
}

static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				       const struct flush_tlb_info *info)
{
	int nr_bank = 0, max_gvas, gva_n;
	struct hv_tlb_flush_ex **flush_pcpu;
	struct hv_tlb_flush_ex *flush;
	u64 status = U64_MAX;
	unsigned long flags;

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
		return;

	local_irq_save(flags);

	flush_pcpu = (struct hv_tlb_flush_ex **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->hv_vp_set.valid_bank_mask = 0;

	if (!cpumask_equal(cpus, cpu_present_mask)) {
		flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
	}

	if (!nr_bank) {
		flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	}
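	/*
	 * cpumask_to_vpset() packs the CPUs into up to 64 banks of 64 VPs
	 * each, with valid_bank_mask marking the banks that are present,
	 * so VP numbers above 63 are representable here.  When the request
	 * covers every present CPU, nr_bank stays 0 and the cheaper
	 * "all processors" form is used instead.
	 */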

	/*
	 * We can flush at most max_gvas GVAs with one hypercall. Flush the
	 * whole address space if we were asked to do more.
	 */
	max_gvas =
		(PAGE_SIZE - sizeof(*flush) - nr_bank *
		 sizeof(flush->hv_vp_set.bank_contents[0])) /
		sizeof(flush->gva_list[0]);
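	/*
	 * The variable-size bank array (nr_bank u64 entries) occupies the
	 * input page ahead of the GVA list, so every extra bank costs one
	 * GVA entry of space.
	 */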

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
			0, nr_bank, flush, NULL);
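		/*
		 * The rep count is 0 for the _EX space flush; nr_bank is
		 * passed as the variable header size so the hypervisor
		 * knows how many VP-set banks precede the (empty) GVA list.
		 */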
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
			0, nr_bank, flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, nr_bank,
				      info->start, info->end);
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
			gva_n, nr_bank, flush, NULL);
	}

	local_irq_restore(flags);

	if (!(status & HV_HYPERCALL_RESULT_MASK))
		return;
do_native:
	native_flush_tlb_others(cpus, info);
}

void hyperv_setup_mmu_ops(void)
{
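	/*
	 * ms_hyperv.hints is populated from the Hyper-V enlightenment CPUID
	 * leaf: the hypervisor both advertises the remote-TLB-flush
	 * hypercalls and recommends whether the extended (sparse VP set)
	 * variants should be used.
	 */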
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
		pr_info("Using hypercall for remote TLB flush\n");
		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
	} else {
		pr_info("Using ext hypercall for remote TLB flush\n");
		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
	}
}
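
For reference, the GVA list encoding used by fill_gva_list() can be exercised
outside the kernel. The following is a minimal, illustrative sketch only:
PAGE_SHIFT, PAGE_SIZE, PAGE_MASK, HV_TLB_FLUSH_UNIT and the local copy of
fill_gva_list() are user-space stand-ins for the kernel definitions, and the
sample addresses are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)

/* Local copy of the encoding loop, using uint64_t instead of kernel types. */
static int fill_gva_list(uint64_t gva_list[], int offset,
			 uint64_t start, uint64_t end)
{
	int gva_n = offset;
	uint64_t cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur & PAGE_MASK;
		if (diff >= HV_TLB_FLUSH_UNIT) {
			gva_list[gva_n] |= ~PAGE_MASK;
			cur += HV_TLB_FLUSH_UNIT;
		} else if (diff) {
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
			cur = end;
		}

		gva_n++;
	} while (cur < end);

	return gva_n - offset;
}

int main(void)
{
	uint64_t gva_list[4];
	/* Encode a flush of 3 pages starting at 0x7f0000001000. */
	int i, n = fill_gva_list(gva_list, 0, 0x7f0000001000ULL,
				 0x7f0000004000ULL);

	/* Prints one entry, 0x7f0000001002: base page plus 2 extra pages. */
	for (i = 0; i < n; i++)
		printf("gva_list[%d] = 0x%llx\n", i,
		       (unsigned long long)gva_list[i]);
	return 0;
}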