/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
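
/*
 * Illustrative sketch (not code in this header): each hook below compiles
 * to a patchable call through the pv_ops structure, which a hypervisor
 * backend may override at boot, e.g.:
 *
 *	pv_ops.mmu.flush_tlb_user = my_pv_flush_tlb;	(hypothetical backend)
 *
 * On bare metal, alternatives patching typically replaces the call with
 * the native instruction sequence.
 */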

#ifndef __ASSEMBLER__
#include <asm/paravirt-base.h>
#endif
#include <asm/paravirt_types.h>

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	PVOP_VCALL0(pv_ops, cpu.io_delay);
#ifdef REALLY_SLOW_IO
	PVOP_VCALL0(pv_ops, cpu.io_delay);
	PVOP_VCALL0(pv_ops, cpu.io_delay);
	PVOP_VCALL0(pv_ops, cpu.io_delay);
#endif
}
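
/*
 * Illustrative note: the native cpu.io_delay hook is typically a dummy
 * write to port 0x80; with REALLY_SLOW_IO defined, the delay is issued
 * four times to mimic the historical "really slow" I/O pacing.
 */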

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(pv_ops, mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_ops, mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(pv_ops, mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(pv_ops, mmu.flush_tlb_multi, cpumask, info);
}
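
/*
 * Example (illustrative): the core TLB flush code funnels into these
 * wrappers, which lets a PV backend batch or replace the flushes with
 * hypercalls:
 *
 *	__flush_tlb_one_user(addr);	(invalidate a single user mapping)
 */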

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_ops, mmu.exit_mmap, mm);
}

static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(pv_ops, mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

static __always_inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_ops, irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_ops, irq.halt);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(pv_ops, cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_ops, cpu.cpuid, eax, ebx, ecx, edx);
}
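
/*
 * Example (illustrative): all four registers are in/out parameters, so
 * initialize the leaf (and subleaf) before the call; a hypervisor may
 * filter the result:
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *	__cpuid(&eax, &ebx, &ecx, &edx);	(leaf 0: max leaf + vendor)
 */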

/*
 * These special macros can be used to get or set a debugging register
 */
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_ops, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_ops, cpu.set_debugreg, reg, val);
}
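
/*
 * Example (illustrative): note the argument order of set_debugreg()
 * (value first, register number second), mirroring the native helper:
 *
 *	unsigned long dr7;
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */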

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_ops, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_ops, cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, pv_ops, mmu.read_cr2,
				"mov %%cr2, %%rax", ALT_NOT_XEN);
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_ops, mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, pv_ops, mmu.read_cr3,
			      "mov %%cr3, %%rax", ALT_NOT_XEN);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(pv_ops, mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_ops, cpu.write_cr4, x);
}

static inline u64 paravirt_read_msr(u32 msr)
{
	return PVOP_CALL1(u64, pv_ops, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(u32 msr, u64 val)
{
	PVOP_VCALL2(pv_ops, cpu.write_msr, msr, val);
}

static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
{
	return PVOP_CALL2(int, pv_ops, cpu.read_msr_safe, msr, val);
}

static inline int paravirt_write_msr_safe(u32 msr, u64 val)
{
	return PVOP_CALL2(int, pv_ops, cpu.write_msr_safe, msr, val);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

static __always_inline void wrmsr(u32 msr, u32 low, u32 high)
{
	paravirt_write_msr(msr, (u64)high << 32 | low);
}

#define rdmsrq(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrq(u32 msr, u64 val)
{
	paravirt_write_msr(msr, val);
}

static inline int wrmsrq_safe(u32 msr, u64 val)
{
	return paravirt_write_msr_safe(msr, val);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	u64 _l;						\
	int _err = paravirt_read_msr_safe((msr), &_l);	\
	(*a) = (u32)_l;					\
	(*b) = (u32)(_l >> 32);				\
	_err;						\
})

static __always_inline int rdmsrq_safe(u32 msr, u64 *p)
{
	return paravirt_read_msr_safe(msr, p);
}
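
/*
 * Example (illustrative): the _safe variants return a nonzero error code
 * instead of faulting when the MSR does not exist:
 *
 *	u64 val;
 *	if (!rdmsrq_safe(MSR_IA32_TSC, &val))
 *		wrmsrq(MSR_IA32_TSC, val);
 */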

static __always_inline u64 rdpmc(int counter)
{
	return PVOP_CALL1(u64, pv_ops, cpu.read_pmc, counter);
}

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_ops, cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_ops, cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_ops, cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_ops, cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_ops, cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_ops, cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_ops, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_ops, cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_ops, cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_ops, cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_ops, cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_ops, cpu.write_idt_entry, dt, entry, g);
}
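
/*
 * Illustrative note: descriptor-table updates go through hooks because a
 * PV hypervisor must be able to validate them; a native-looking write
 * such as
 *
 *	write_idt_entry(idt_table, vector, &gate);	(hypothetical args)
 *
 * may be turned into a hypercall by a Xen PV backend.
 */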

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(pv_ops, cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(pv_ops, cpu.update_io_bitmap);
}
#endif

static inline void paravirt_enter_mmap(struct mm_struct *next)
{
	PVOP_VCALL1(pv_ops, mmu.enter_mmap, next);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_ops, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_ops, mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_ops, mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(pv_ops, mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, pv_ops, mmu.make_pte, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, pv_ops, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, pv_ops, mmu.make_pgd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, pv_ops, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}
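
/*
 * Example (illustrative): the make/val pairs convert between the typed
 * page-table entries and their raw bits; under ALT_NOT_XEN the call is
 * patched down to a plain register move on bare metal:
 *
 *	pte_t pte = __pte(pte_val(*ptep) | _PAGE_DIRTY);
 */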

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_ops, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(pv_ops, mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(pv_ops, mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(pv_ops, mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, pv_ops, mmu.make_pmd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, pv_ops, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(pv_ops, mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, pv_ops, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT_XEN);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, pv_ops, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(pv_ops, mmu.set_p4d, p4dp, val);
}

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, pv_ops, mmu.make_p4d, val,
					"mov %%rdi, %%rax", ALT_NOT_XEN);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, pv_ops, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(pv_ops, mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)
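
/*
 * Illustrative note: with 4-level paging the p4d level is folded into the
 * pgd, so set_pgd() above degrades to set_p4d() on the same slot and
 * pgd_clear() is a no-op at this level; only with 5-level paging does the
 * pgd exist in its own right.
 */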

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_ops, cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_ops, cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_ops, mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_ops, mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_ops, mmu.lazy_mode.flush);
}
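
/*
 * Example (illustrative): batching page-table updates so a PV backend can
 * coalesce them into fewer hypercalls:
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pte(ptep, pte);		(repeated updates may be queued)
 *	arch_leave_lazy_mmu_mode();	(queued updates are flushed here)
 */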

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}

static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax",
				ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(pv_ops, irq.irq_disable, "cli", ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(pv_ops, irq.irq_enable, "sti", ALT_NOT_XEN);
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
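
/*
 * Example (illustrative): the classic save/disable/enable pattern built
 * on the hooks above:
 *
 *	unsigned long flags = arch_local_irq_save();	(save flags, then cli)
 *	...						(critical section)
 *	arch_local_irq_enable();			(sti or PV equivalent)
 */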
#endif

#else  /* __ASSEMBLER__ */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_DEBUG_ENTRY

#define PARA_INDIRECT(addr)	*addr(%rip)

.macro PARA_IRQ_save_fl
	ANNOTATE_RETPOLINE_SAFE;
	call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
.endm

#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;",			\
				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
				 "pushf; pop %rax", ALT_NOT_XEN
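
/*
 * Illustrative note: ALT_CALL_INSTR is itself a macro that expands to a
 * quoted instruction string, so it must not be quoted above. The default
 * is the indirect pv_ops call emitted by PARA_IRQ_save_fl; ALT_CALL_ALWAYS
 * patches it to a direct call to the registered target, and ALT_NOT_XEN
 * replaces it with the native "pushf; pop %rax" when not running as a
 * Xen PV guest.
 */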
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#endif /* __ASSEMBLER__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLER__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_enter_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PARAVIRT_H */