/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifndef __ASSEMBLER__
#include <asm/paravirt-base.h>
#endif
#include <asm/paravirt_types.h>

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

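/*
 * Each wrapper below compiles to an indirect call through the
 * corresponding pv_ops slot.  Roughly (an illustrative sketch, not the
 * exact PVOP_* macro expansion):
 *
 *	static inline void __flush_tlb_local(void)
 *	{
 *		pv_ops.mmu.flush_tlb_user();
 *	}
 *
 * The real macros emit annotated inline asm so that objtool accepts the
 * indirect call and so alternatives patching can later turn it into a
 * direct call (or native instructions) once the hypervisor, or bare
 * metal, is known.
 */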
static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(pv_ops, mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_ops, mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(pv_ops, mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(pv_ops, mmu.flush_tlb_multi, cpumask, info);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_ops, mmu.exit_mmap, mm);
}

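/*
 * Tell the hypervisor that the encryption status of @npages pages
 * starting at @pfn has changed, e.g. when an SEV guest converts pages
 * between private (encrypted) and shared.
 */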
static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(pv_ops, mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

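/*
 * arch_safe_halt() enables interrupts and halts as one atomic operation
 * (natively "sti; hlt": the one-instruction STI shadow guarantees no
 * interrupt can be lost before the HLT), whereas halt() just halts.
 */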
static __always_inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_ops, irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_ops, irq.halt);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(pv_ops, cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_ops, cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
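/*
 * Note the argument order: set_debugreg() keeps the native (val, reg)
 * signature while the pv op takes (reg, val).  Typical use
 * (illustrative):
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0, 6);
 */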
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_ops, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_ops, cpu.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_ops, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_ops, cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, pv_ops, mmu.read_cr2,
				"mov %%cr2, %%rax", ALT_NOT_XEN);
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_ops, mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, pv_ops, mmu.read_cr3,
			      "mov %%cr3, %%rax", ALT_NOT_XEN);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(pv_ops, mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_ops, cpu.write_cr4, x);
}

static inline u64 paravirt_read_msr(u32 msr)
{
	return PVOP_CALL1(u64, pv_ops, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(u32 msr, u64 val)
{
	PVOP_VCALL2(pv_ops, cpu.write_msr, msr, val);
}

static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
{
	return PVOP_CALL2(int, pv_ops, cpu.read_msr_safe, msr, val);
}

static inline int paravirt_write_msr_safe(u32 msr, u64 val)
{
	return PVOP_CALL2(int, pv_ops, cpu.write_msr_safe, msr, val);
}

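/*
 * The legacy rdmsr()/wrmsr() interfaces split the 64-bit MSR value into
 * two 32-bit halves (edx:eax in the native instructions); the *q
 * variants use a single u64, and the *_safe() variants return an error
 * instead of faulting when the MSR does not exist.  Illustrative use
 * (MSR_EFER is just an example):
 *
 *	u64 val;
 *
 *	rdmsrq(MSR_EFER, val);
 *	if (rdmsrq_safe(MSR_EFER, &val))
 *		pr_warn("EFER not readable\n");
 */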
#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

static __always_inline void wrmsr(u32 msr, u32 low, u32 high)
{
	paravirt_write_msr(msr, (u64)high << 32 | low);
}

#define rdmsrq(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrq(u32 msr, u64 val)
{
	paravirt_write_msr(msr, val);
}

static inline int wrmsrq_safe(u32 msr, u64 val)
{
	return paravirt_write_msr_safe(msr, val);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	u64 _l;						\
	int _err = paravirt_read_msr_safe((msr), &_l);	\
	(*a) = (u32)_l;					\
	(*b) = (u32)(_l >> 32);				\
	_err;						\
})

static __always_inline int rdmsrq_safe(u32 msr, u64 *p)
{
	return paravirt_read_msr_safe(msr, p);
}

static __always_inline u64 rdpmc(int counter)
{
	return PVOP_CALL1(u64, pv_ops, cpu.read_pmc, counter);
}

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_ops, cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_ops, cpu.free_ldt, ldt, entries);
}

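/*
 * Descriptor table operations.  A Xen PV guest must not execute the
 * privileged lgdt/lidt/ltr/lldt instructions directly, so these hooks
 * allow descriptor updates to be routed through hypercalls; on bare
 * metal the pv_ops slots simply point at the native implementations.
 */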
static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_ops, cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_ops, cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_ops, cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_ops, cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_ops, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_ops, cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_ops, cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_ops, cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_ops, cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_ops, cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(pv_ops, cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(pv_ops, cpu.update_io_bitmap);
}
#endif

static inline void paravirt_enter_mmap(struct mm_struct *next)
{
	PVOP_VCALL1(pv_ops, mmu.enter_mmap, next);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_ops, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_ops, mmu.pgd_free, mm, pgd);
}

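/*
 * Page table page lifecycle hooks: the alloc_*() and release_*()
 * callbacks tell the hypervisor when the page at @pfn starts or stops
 * being used as a page table of the given level.  Xen, for example,
 * uses them to pin such pages read-only so guest page tables can be
 * validated.
 */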
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_p4d, pfn);
}

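/*
 * Conversions between the typed page table entries (pte_t, pmd_t, ...)
 * and their raw values.  Under Xen PV these translate between
 * pseudo-physical and machine frame numbers; everywhere else the
 * alternative "mov %rdi, %rax" is patched in, reducing the call to an
 * identity copy so that e.g. pte_val(__pte(x)) == x at essentially no
 * cost.
 */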
static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, pv_ops, mmu.make_pte, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, pv_ops, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, pv_ops, mmu.make_pgd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, pv_ops, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_ops, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(pv_ops, mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(pv_ops, mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(pv_ops, mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, pv_ops, mmu.make_pmd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, pv_ops, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(pv_ops, mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, pv_ops, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT_XEN);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, pv_ops, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(pv_ops, mmu.set_p4d, p4dp, val);
}

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, pv_ops, mmu.make_p4d, val,
					"mov %%rdi, %%rax", ALT_NOT_XEN);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, pv_ops, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(pv_ops, mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

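/*
 * With 4-level paging the pgd level is folded: the top hardware level
 * is the p4d, so set_pgd() redirects to set_p4d() and pgd_clear()
 * becomes a no-op unless 5-level paging is enabled.
 */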
#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_ops, cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_ops, cpu.end_context_switch, next);
}

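/*
 * Lazy MMU mode lets a hypervisor batch the page table updates issued
 * between enter and leave into a single hypercall.  Typical pattern
 * (illustrative):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (...)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * arch_flush_lazy_mmu_mode() forces any pending updates out without
 * leaving the mode.
 */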
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_ops, mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_ops, mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_ops, mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}

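/*
 * Interrupt flag primitives.  These are __always_inline and get patched
 * down to the native pushf/cli/sti sequences when not running as a Xen
 * PV guest.  arch_local_irq_save() composes the two basic ops: snapshot
 * the flags, then disable interrupts; the caller is expected to restore
 * the snapshot later via arch_local_irq_restore().
 */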
static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax",
				ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(pv_ops, irq.irq_disable, "cli", ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(pv_ops, irq.irq_enable, "sti", ALT_NOT_XEN);
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif /* CONFIG_PARAVIRT_XXL */

#else  /* __ASSEMBLER__ */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_DEBUG_ENTRY

#define PARA_INDIRECT(addr)	*addr(%rip)

.macro PARA_IRQ_save_fl
	ANNOTATE_RETPOLINE_SAFE;
	call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
.endm

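/*
 * SAVE_FLAGS is a three-way alternative: by default the retpoline-safe
 * indirect call above, patched to a direct call to the pv op once its
 * target is known (ALT_CALL_ALWAYS), or to a native "pushf; pop %rax"
 * when not running on Xen.
 */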
#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;",			\
				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
				 "pushf; pop %rax;", ALT_NOT_XEN
#endif /* CONFIG_DEBUG_ENTRY */
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#endif /* __ASSEMBLER__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLER__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_enter_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PARAVIRT_H */