xref: /linux/arch/x86/include/asm/paravirt.h (revision 8838a1a2d219a86ab05e679c73f68dd75a25aca5)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
struct mm_struct;
#endif

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/static_call_types.h>
#include <asm/frame.h>

u64 dummy_steal_clock(int cpu);
u64 dummy_sched_clock(void);

DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void));

static __always_inline u64 paravirt_sched_clock(void)
{
	return static_call(pv_sched_clock)();
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return static_call(pv_steal_clock)(cpu);
}
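
/*
 * Registration sketch (illustration only, not part of this header): a
 * guest platform typically wires up its clock callbacks early in boot.
 * The hv_*() names below are placeholders standing in for a real
 * hypervisor backend's own readers:
 *
 *	paravirt_set_sched_clock(hv_sched_clock);
 *	static_call_update(pv_steal_clock, hv_steal_clock);
 *	static_key_slow_inc(&paravirt_steal_enabled);
 */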

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
#endif

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	PVOP_VCALL0(cpu.io_delay);
#ifdef REALLY_SLOW_IO
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
#endif
}

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}
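
/*
 * Backend sketch: with no hypervisor these ops fall back to the
 * native_flush_tlb_*() functions declared above; a PV guest overrides
 * them when it installs its pv_ops, e.g. (hv_* names are placeholders):
 *
 *	pv_ops.mmu.flush_tlb_user  = hv_flush_tlb_local;
 *	pv_ops.mmu.flush_tlb_multi = hv_flush_tlb_multi;
 */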

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
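
/*
 * Usage sketch (illustration only): debug registers are always read
 * and written through these wrappers so a hypervisor can intercept
 * the accesses, e.g.:
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0, 6);
 */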

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
				"mov %%cr2, %%rax;", ALT_NOT_XEN);
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
			      "mov %%cr3, %%rax;", ALT_NOT_XEN);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static __always_inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
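
/*
 * Usage sketch for the _safe variants (MSR chosen only for
 * illustration): a #GP on a missing MSR is reported through the
 * return value instead of faulting.
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_IA32_PLATFORM_ID, &val))
 *		pr_info("MSR not readable here\n");
 */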

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_enter_mmap(struct mm_struct *next)
{
	PVOP_VCALL1(mmu.enter_mmap, next);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}
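
/*
 * Note (sketch): on bare metal the ALT_NOT_XEN alternatives above turn
 * __pte()/pte_val() (and the pmd/pud/p4d/pgd variants nearby) into a
 * plain register move, i.e. an identity transform. A Xen PV guest
 * instead supplies callee-save hooks that translate between
 * pseudo-physical and machine frame numbers, roughly:
 *
 *	.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte),
 *	.mmu.pte_val  = PV_CALLEE_SAVE(xen_pte_val),
 */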

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{

	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT_XEN);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
					"mov %%rdi, %%rax", ALT_NOT_XEN);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)
#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}
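
/*
 * Typical batching pattern (sketch): callers bracket a run of page
 * table updates so a backend can queue them and flush once instead of
 * trapping on every update:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */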

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */
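
/*
 * Wiring sketch (illustration; hv_* names are placeholders): a guest
 * that supports PV spinlocks installs the generic slowpath plus its
 * own wait/kick primitives, roughly:
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *	pv_ops.lock.wait = hv_wait;
 *	pv_ops.lock.kick = hv_kick_cpu;
 */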

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    ASM_FUNC_ALIGN						\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
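
/*
 * Usage sketch: a C function meant to be called through a
 * paravirt_callee_save slot first gets a register-preserving thunk
 * and is then referenced via PV_CALLEE_SAVE(), e.g.:
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 *	.lock.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 */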

#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT_XEN);
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
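
/*
 * These arch_local_*() helpers sit underneath the generic
 * local_irq_save()/local_irq_restore() machinery; normal kernel code
 * uses those wrappers rather than calling these directly (sketch):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section ...
 *	local_irq_restore(flags);
 */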
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);
void native_pv_lock_init(void) __init;

#else  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_DEBUG_ENTRY

#define PARA_INDIRECT(addr)	*addr(%rip)

.macro PARA_IRQ_save_fl
	ANNOTATE_RETPOLINE_SAFE;
	call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
.endm

#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;",			\
				 "ALT_CALL_INSTR;", ALT_CALL_ALWAYS,	\
				 "pushf; pop %rax;", ALT_NOT_XEN
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop

#ifndef __ASSEMBLY__
static inline void native_pv_lock_init(void)
{
}
#endif
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_enter_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void paravirt_set_cap(void)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */