/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__
#include <linux/types.h>

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in <asm/paravirt.h>.
 */
struct paravirt_callee_save {
	void *func;
};
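
/*
 * Usage sketch (illustrative only, assuming the PV_CALLEE_SAVE() and
 * PV_CALLEE_SAVE_REGS_THUNK() helpers from <asm/paravirt.h>; the
 * function name is hypothetical): a plain C function is wrapped in a
 * register-preserving thunk before being stored in one of these slots:
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */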

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif
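
/*
 * Lazy-mode usage sketch (illustrative only; the real entry points are
 * the arch_{enter,leave}_lazy_mmu_mode() wrappers in <asm/paravirt.h>):
 *
 *	pv_ops.mmu.lazy_mode.enter();
 *	... batch of set_pte()/set_pmd() updates the hypervisor may queue ...
 *	pv_ops.mmu.lazy_mode.leave();	// flush() forces pending updates
 */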

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;
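
/*
 * Callers reach these hooks through the inline wrappers in
 * <asm/paravirt.h>; a representative sketch using the PVOP_VCALL4()
 * macro defined later in this header:
 *
 *	static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
 *				   unsigned int *ecx, unsigned int *edx)
 *	{
 *		PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
 *	}
 */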

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;
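
/*
 * Sketch of a callee-save slot being invoked (modelled on the wrappers
 * in <asm/paravirt.h>; ALT_NOT_XEN is defined at the end of this file):
 *
 *	static __always_inline void arch_local_irq_disable(void)
 *	{
 *		PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
 *	}
 */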

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hook for intercepting the creation/use of an mm_struct. */
	void (*enter_mmap)(struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;
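
/*
 * As with the other op structs, these are called via inline wrappers;
 * a minimal sketch in the style of <asm/paravirt.h>:
 *
 *	static inline void __flush_tlb_local(void)
 *	{
 *		PVOP_VCALL0(mmu.flush_tlb_user);
 *	}
 */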

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
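
/*
 * Guests install their lock ops at boot; a hedged sketch modelled on
 * the KVM guest code (exact call sites and names vary by hypervisor):
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.wait = kvm_wait;
 *	pv_ops.lock.kick = kvm_kick_cpu;
 */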

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function using its offset within the struct,
 * which we use to indicate what to patch.
 */
struct paravirt_patch_template {
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define paravirt_ptr(op)	[paravirt_opptr] "m" (pv_ops.op)

int paravirt_disable_iospace(void);

/* This generates an indirect call based on the operation type number. */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily determine the destination address
 * ahead of time.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (%r8 - %r11).
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * They could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
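
/*
 * Worked sketch of the 64-bit marshalling described above, for a
 * hypothetical one-argument op returning int:
 *
 *	PVOP_CALL1(int, cpu.some_op, arg)
 *
 * loads arg through the "D" (%rdi) constraint, emits the patchable
 * indirect call, clobbers %rsi, %rdx, %rcx and %r8-%r11 plus
 * memory/cc, and finally masks %rax via PVOP_RETVAL(int).
 */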
#ifdef CONFIG_X86_32
/* Self-initialization quiets "may be used uninitialized" warnings. */
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers. Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), PVOP_VCALL_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#endif
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})
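
/*
 * Example: for a u8 return type, sizeof(rettype) == 1 selects the 0xff
 * mask, so the expression evaluates to (__eax & 0xffUL) and the caller
 * sees only the low byte that the callee actually defined.
 */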

/*
 * Use alternative patching for paravirt calls:
 * - For replacing an indirect call with a direct one, use the "normal"
 *   ALTERNATIVE() macro with the indirect call as the initial code sequence,
 *   which will be replaced with the related direct call by using the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 * - In case the replacement is either a direct call or a short code sequence
 *   depending on a feature bit, the ALTERNATIVE_2() macro is used.
 *   The indirect call is the initial code sequence again, while the special
 *   code sequence is selected with the specified feature bit. In case the
 *   feature is not active, the direct call is used as above via the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 */
#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...)	\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR,	\
				ALT_CALL_ALWAYS)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE_2(PARAVIRT_CALL,		\
				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
				 alt, cond)				\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, op, ...)					\
	____PVOP_CALL(PVOP_RETVAL(rettype), op,				\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)			\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond,		\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), op.func,			\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,	\
			  PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(op, ...)						\
	(void)____PVOP_CALL(, op, PVOP_VCALL_CLOBBERS,			\
		       VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)				\
	(void)____PVOP_ALT_CALL(, op, alt, cond,			\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, ...)					\
	(void)____PVOP_CALL(, op.func,					\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, op.func, alt, cond,			\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)				\
	__PVOP_ALT_CALL(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)					\
	__PVOP_ALT_VCALL(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond)


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
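
/*
 * These are consumed by the inline wrappers in <asm/paravirt.h>, e.g.
 * (a representative example):
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(mmu.write_cr3, x);
 *	}
 */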

unsigned long paravirt_ret0(void);
#ifdef CONFIG_PARAVIRT_XXL
u64 _paravirt_ident_64(u64);
unsigned long pv_native_save_fl(void);
void pv_native_irq_disable(void);
void pv_native_irq_enable(void);
unsigned long pv_native_read_cr2(void);
#endif

#define paravirt_nop	((void *)nop_func)

#endif	/* __ASSEMBLY__ */

#define ALT_NOT_XEN	ALT_NOT(X86_FEATURE_XENPV)

#endif  /* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */