xref: /linux/arch/x86/include/asm/paravirt_types.h (revision baea32b242be8ff857cc27b910c6c325c24a7247)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLER__
#include <linux/types.h>

#include <asm/paravirt-base.h>
#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct thread_struct;
struct mm_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct vm_area_struct;

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif
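
/*
 * Illustrative sketch (simplified; these helpers are not defined in this
 * header): the lazy mode hooks are normally reached through the generic
 * lazy MMU API, roughly:
 *
 *	arch_enter_lazy_mmu_mode();	// pv_ops.mmu.lazy_mode.enter()
 *	... batched page table updates ...
 *	arch_leave_lazy_mmu_mode();	// pv_ops.mmu.lazy_mode.leave()
 */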

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(u32 msr);
	void (*write_msr)(u32 msr, u64 val);

	/*
	 * Safe MSR operations.
	 * Returns 0 or -EIO.
	 */
	int (*read_msr_safe)(u32 msr, u64 *val);
	int (*write_msr_safe)(u32 msr, u64 val);
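
	/*
	 * Illustrative sketch (hypothetical caller, not part of this file):
	 * the safe variants report failure instead of warning or panicking:
	 *
	 *	u64 val;
	 *
	 *	if (pv_ops.cpu.read_msr_safe(MSR_EFER, &val))
	 *		pr_warn("RDMSR failed\n");
	 */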

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention; see
	 * PV_CALLEE_SAVE_REGS_THUNK() below.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;
#endif
	void (*safe_halt)(void);
	void (*halt)(void);
} __no_randomize_layout;

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hook for intercepting the creation/use of an mm_struct. */
	void (*enter_mmap)(struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

/*
 * This contains all the paravirt structures: we get a convenient number
 * for each function from its offset, which we use to indicate what to
 * patch.
 */
struct paravirt_patch_template {
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
} __no_randomize_layout;

extern struct paravirt_patch_template pv_ops;

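/*
 * Illustrative sketch (hypothetical guest code, not part of this file):
 * a hypervisor guest overrides individual entries at boot, e.g.
 *
 *	pv_ops.mmu.flush_tlb_user = my_flush_tlb_user;
 *	pv_ops.cpu.io_delay = my_io_delay;
 *
 * where my_*() are placeholder functions with the matching prototypes.
 * Call sites below are identified by the member's offset within pv_ops.
 */
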
#define paravirt_ptr(array, op)	[paravirt_opptr] "m" (array.op)

/*
 * This generates an indirect call based on the operation type number.
 *
 * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
 * capabilities and the former requiring all capabilities being finalized --
 * these indirect calls are subject to IBT and the paravirt stubs should have
 * ENDBR on.
 *
 * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
 * don't need to bother with CFI prefixes.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE "\n\t"			\
	"call *%[paravirt_opptr]"
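
/*
 * For illustration (simplified, x86_64 case): with the "m" operand supplied
 * by paravirt_ptr(pv_ops, cpu.io_delay), the compiler emits roughly
 *
 *	call *pv_ops+<offsetof(cpu.io_delay)>(%rip)
 *
 * which alternatives patching may later rewrite into a direct call, as
 * described further down.
 */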

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which,
 * unfortunately, are quite a few (r8 - r11).
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * The scheme could be extended to more arguments, but there would be
 * little to be gained from that.  For each number of arguments, there
 * are two variants, CALL and VCALL, for non-void and void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
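
/*
 * Illustrative sketch of such an inline wrapper (hypothetical name; the
 * real wrappers live in <asm/paravirt.h>):
 *
 *	static __always_inline void my_write_cr3(unsigned long val)
 *	{
 *		PVOP_VCALL1(pv_ops, mmu.write_cr3, val);
 *	}
 */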
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers. Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), PVOP_VCALL_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#endif
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})

/*
 * Use alternative patching for paravirt calls:
 * - For replacing an indirect call with a direct one, use the "normal"
 *   ALTERNATIVE() macro with the indirect call as the initial code sequence,
 *   which will be replaced with the related direct call by using the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 * - In case the replacement is either a direct call or a short code sequence
 *   depending on a feature bit, the ALTERNATIVE_2() macro is being used.
 *   The indirect call is the initial code sequence again, while the special
 *   code sequence is selected with the specified feature bit. In case the
 *   feature is not active, the direct call is used as above via the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 */
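
/*
 * Illustrative sketch (hypothetical wrapper name; the pattern follows
 * <asm/paravirt.h>): a feature-dependent native instruction sequence can be
 * supplied through the ALT variants, e.g.
 *
 *	static __always_inline unsigned long my_read_cr2(void)
 *	{
 *		return PVOP_ALT_CALLEE0(unsigned long, pv_ops, mmu.read_cr2,
 *					"mov %%cr2, %%rax;", ALT_NOT_XEN);
 *	}
 */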
#define ____PVOP_CALL(ret, array, op, call_clbr, extra_clbr, ...)	\
	({								\
		PVOP_CALL_ARGS;						\
		asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR,	\
				ALT_CALL_ALWAYS)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(array, op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, array, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		asm volatile(ALTERNATIVE_2(PARAVIRT_CALL,		\
				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
				 alt, cond)				\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(array, op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, array, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), array, op,			\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, array, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), array, op, alt, cond,	\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, array, op, ...)			\
	____PVOP_CALL(PVOP_RETVAL(rettype), array, op.func,		\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, array, op, alt, cond, ...)	\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), array, op.func, alt, cond, \
			  PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(array, op, ...)					\
	(void)____PVOP_CALL(, array, op, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(array, op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, array, op, alt, cond,			\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(array, op, ...)				\
	(void)____PVOP_CALL(, array, op.func,				\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(array, op, alt, cond, ...)		\
	(void)____PVOP_ALT_CALL(, array, op.func, alt, cond,		\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, array, op)					\
	__PVOP_CALL(rettype, array, op)
#define PVOP_VCALL0(array, op)						\
	__PVOP_VCALL(array, op)
#define PVOP_ALT_CALL0(rettype, array, op, alt, cond)			\
	__PVOP_ALT_CALL(rettype, array, op, alt, cond)
#define PVOP_ALT_VCALL0(array, op, alt, cond)				\
	__PVOP_ALT_VCALL(array, op, alt, cond)

#define PVOP_CALLEE0(rettype, array, op)				\
	__PVOP_CALLEESAVE(rettype, array, op)
#define PVOP_VCALLEE0(array, op)					\
	__PVOP_VCALLEESAVE(array, op)
#define PVOP_ALT_CALLEE0(rettype, array, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, array, op, alt, cond)
#define PVOP_ALT_VCALLEE0(array, op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(array, op, alt, cond)


#define PVOP_CALL1(rettype, array, op, arg1)				\
	__PVOP_CALL(rettype, array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(array, op, arg1)					\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(array, op, arg1, alt, cond)			\
	__PVOP_ALT_VCALL(array, op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, array, op, arg1)				\
	__PVOP_CALLEESAVE(rettype, array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(array, op, arg1)					\
	__PVOP_VCALLEESAVE(array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, array, op, arg1, alt, cond)		\
	__PVOP_ALT_CALLEESAVE(rettype, array, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(array, op, arg1, alt, cond)			\
	__PVOP_ALT_VCALLEESAVE(array, op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, array, op, arg1, arg2)			\
	__PVOP_CALL(rettype, array, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(array, op, arg1, arg2)				\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, array, op, arg1, arg2, arg3)		\
	__PVOP_CALL(rettype, array, op, PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(array, op, arg1, arg2, arg3)			\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, array, op, arg1, arg2, arg3, arg4)		\
	__PVOP_CALL(rettype, array, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(array, op, arg1, arg2, arg3, arg4)			\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))

#endif	/* __ASSEMBLER__ */

#define ALT_NOT_XEN	ALT_NOT(X86_FEATURE_XENPV)

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    ASM_FUNC_ALIGN						\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
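
/*
 * Illustrative use (hypothetical function name, not part of this file):
 *
 *	static unsigned long my_save_fl(void)
 *	{
 *		return native_save_fl();	// placeholder body
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * The thunk preserves all caller-save registers around the C call, so the
 * call site only needs the reduced PVOP_CALLEE_CLOBBERS list above.
 */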

#endif  /* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */