xref: /linux/arch/x86/include/asm/paravirt.h (revision 6a143a7cf94730f57544ea14a987dc025364dbb8)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}
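
/*
 * Note: slow_down_io() backs the "pausing" *_p port I/O helpers; the
 * native io_delay op is a dummy write to port 0x80, and a hypervisor
 * may patch in a cheaper (or no-op) delay.  Illustrative use:
 *
 *	outb(val, port);
 *	slow_down_io();
 */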

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_others(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
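
/*
 * Example (illustrative): callers use these exactly like the native
 * macros, e.g. to read and write back %dr7:
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);	// dr7 = paravirt_get_debugreg(7)
 *	set_debugreg(dr7, 7);	// PVOP_VCALL2(cpu.set_debugreg, 7, dr7)
 */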

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val >> 32));
}
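
/*
 * Example (illustrative): rdmsr/wrmsr keep the native low/high split,
 * while rdmsrl/wrmsrl operate on the full 64-bit value:
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsrl(MSR_EFER, val);
 *	rdmsr(MSR_EFER, lo, hi);	// val == ((u64)hi << 32) | lo
 *	wrmsrl(MSR_EFER, val | EFER_NX);
 */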

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
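
/*
 * Example (illustrative): the _safe variants catch the #GP a missing
 * MSR would raise and report an error instead of faulting:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_AMD64_TSC_RATIO, &val))
 *		pr_warn("TSC ratio MSR not readable\n");
 */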

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}

static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}

static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}

static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}

static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())

static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}

static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}

static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

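/*
 * With 4-level paging the hardware PGD is folded into the p4d level,
 * so set_pgd()/pgd_clear() below only touch the real top level when
 * 5-level paging is enabled at runtime; otherwise they fall through
 * to the p4d helpers (or do nothing, for pgd_clear()).
 */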
#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
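
/*
 * Usage sketch (illustrative): a pv backend wraps its C helper in a
 * thunk, then installs the thunk as a callee-save pv op:
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 *	pv_ops.lock.queued_spin_unlock =
 *			PV_CALLEE_SAVE(__native_queued_spin_unlock);
 *
 * A function hand-coded to the callee-save convention can be installed
 * directly with __PV_IS_CALLEE_SAVE() instead.
 */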

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif /* CONFIG_PARAVIRT_XXL */

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

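/*
 * _PVSITE emits "ops" and records the site in the .parainstructions
 * table: the site address (771b), the pv op type byte, and the length
 * of the patchable region (772b-771b), for the boot-time patcher.
 */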
#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)						\
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),				\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif /* CONFIG_DEBUG_ENTRY */
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */