/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}
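/*
 * The PVOP_CALL*() / PVOP_VCALL*() wrappers used throughout this file are
 * defined in <asm/paravirt_types.h>.  Each one expands to an annotated
 * indirect call through the corresponding pv_ops slot, recorded so the
 * call site can later be patched; once patched (e.g. to the native code)
 * these inlines cost little more than a direct call.
 */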

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}
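/*
 * Note: cpu.io_delay defaults to native_io_delay(), which typically does a
 * dummy write to port 0x80; hypervisors can replace it with a cheaper (or
 * no-op) delay.  REALLY_SLOW_IO keeps the traditional fourfold delay.
 */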

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_others(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
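/* The cpu.set_debugreg hook takes (regno, value), hence the swapped arguments above. */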

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
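/*
 * Illustrative use of the exception-safe variants (a sketch, not code from
 * this file): probe an MSR that may not exist without taking an unhandled #GP:
 *
 *	u32 lo, hi;
 *
 *	if (!rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
 *		pr_info("platform id: %#x%08x\n", hi, lo);
 */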

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

	return (pte_t) { .pte = ret };
}
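/*
 * The sizeof(pteval_t) > sizeof(long) tests in __pte() above and in the
 * helpers that follow are compile-time constant: they are only true on
 * 32-bit PAE, where a page table entry is 64 bits wide and has to be split
 * into two 32-bit argument words for the pvop call.  The unused branch is
 * eliminated by the compiler.
 */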

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
				    pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret =  PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{

	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
			    vma, addr, ptep, pte.pte);
}
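/*
 * The open-coded pv_ops call above (and in set_pte_at() below) is needed
 * because the PVOP_*CALL macros only marshal up to four argument words;
 * on 32-bit PAE a pte_t alone occupies two words, so those calls would
 * need five.
 */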

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
				    pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret =  PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)
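/*
 * With 4-level paging the pgd level is folded into the p4d level at run
 * time: set_pgd() above only uses the pgd hook when pgtable_l5_enabled()
 * reports 5-level paging and otherwise falls through to set_p4d(), and
 * pgd_clear() below is a no-op unless 5-level paging is active.
 */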

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, __pgd(0));				\
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

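/*
 * Paravirtualized qspinlock hooks.  These back the queued spinlock
 * slowpath when running under a hypervisor (see
 * kernel/locking/qspinlock_paravirt.h): pv_wait() lets a vCPU block
 * until pv_kick() wakes it, rather than spinning on a lock held by a
 * preempted vCPU.
 */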
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save only some registers; saving all of them would be too much.
 * Instead we clobber all caller-saved registers except the argument
 * register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")
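/*
 * Illustrative use (a sketch; the real wiring lives in the paravirt
 * spinlock code):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 * emits __raw_callee_save___native_queued_spin_unlock (declared above),
 * which can then be installed into pv_ops via
 * PV_CALLEE_SAVE(__native_queued_spin_unlock).
 */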

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
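/*
 * The irq-flag helpers above use the PVOP_*CALLEE* variants: the backing
 * implementations are registered as callee-save thunks, so the compiler
 * only has to assume the return register is clobbered, which keeps these
 * very hot call sites cheap.
 */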
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection
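/*
 * _PVSITE emits the instruction sequence "ops" and records its address,
 * patch type and length in the .parainstructions section; apply_paravirt()
 * later walks that section and may rewrite each site, for instance
 * replacing the indirect call with the native instruction.
 */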


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);		\
		 )

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),			\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                        \
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),			    \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
		  ANNOTATE_RETPOLINE_SAFE;			    \
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);	    \
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX							\
	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);		\
		 )

#endif /* CONFIG_PARAVIRT_XXL */


#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */