#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
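
/*
 * Illustrative use only (not part of this header): callers pass the
 * CPUID leaf in via *eax and read the results back out, e.g.
 *
 *	unsigned int eax = 1, ebx, ecx, edx;
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 */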

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
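
/*
 * Example use (illustrative, mirroring register-dump code elsewhere in
 * the tree; not part of this header):
 *
 *	unsigned long d0;
 *	get_debugreg(d0, 0);
 *	set_debugreg(d0, 0);
 */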

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})
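
/*
 * Illustrative use of the _safe variant, which returns nonzero instead
 * of faulting on an unknown MSR (the MSR constant below is just an
 * example):
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_TSC, &lo, &hi))
 *		pr_warn("rdmsr faulted\n");
 */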

#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = paravirt_rdmsr_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return paravirt_wrmsr_regs(gprs);
}
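
/*
 * In the two *_amd_safe() helpers above, gprs[] carries the register
 * image used by the rdmsr/wrmsr register ops: index 0 and 2 are
 * eax/edx (the MSR value), index 1 is ecx (the MSR number), and
 * index 7 is edi, which AMD CPUs expect to hold the 0x9c5a203a
 * "password" that unlocks certain model-specific registers.
 */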

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}
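
/*
 * A note on the sizeof(pteval_t) > sizeof(long) pattern used by __pte()
 * and the other page-table accessors below: on a 32-bit PAE kernel a
 * pte is 64 bits wide while a native word is only 32, so the value is
 * split into two 32-bit argument words for the pvop; on 64-bit (and
 * non-PAE 32-bit) kernels the pte fits in one word and the
 * single-argument form is used.  The compiler resolves the branch at
 * build time.
 */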

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
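
/*
 * The "5 arg words" cases here and below: with a 64-bit pte on a
 * 32-bit kernel, passing mm/addr/ptep plus both halves of the pte
 * would take five argument words, which is more than the PVOP_*CALL4
 * wrappers can marshal in registers, so the op is invoked through the
 * ops table as an ordinary C call instead of a patchable pvop site.
 */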

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/*
 * Special-case pte-setting operations for PAE, which can't update a
 * 64-bit pte atomically.
 */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
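
/*
 * Between enter and leave, a hypervisor back end may queue page-table
 * updates and apply them in one batch instead of trapping on every pte
 * write; arch_flush_lazy_mmu_mode() forces any queued updates out
 * early.  Typical shape of a caller (illustrative only):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (...)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */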

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
						  unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
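
/*
 * Why only %ecx here: 32-bit kernels are built with -mregparm=3, so the
 * caller-save set is %eax, %edx and %ecx; %eax (and %edx for 64-bit
 * results) carries the return value, leaving %ecx as the only register
 * the thunk needs to preserve.
 */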

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/*
 * Saving all registers would be too much; we clobber all caller-saved
 * registers except the argument register (%rdi).
 */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
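
/*
 * Typical use (illustrative, based on how back ends register their irq
 * ops elsewhere in the tree):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 *
 * A function that already obeys the lighter convention (e.g. a tiny
 * asm stub) can be registered directly with __PV_IS_CALLEE_SAVE().
 */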

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
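
/*
 * Each _PVSITE use emits the native "ops" sequence plus a record in
 * .parainstructions describing it: the site address, the op type, the
 * site length in bytes and the clobber mask.  This matches the C-side
 * struct paravirt_patch_site, which apply_paravirt() walks at boot to
 * patch in hypervisor-specific code.
 */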

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
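
/*
 * These are the hooks the assembly entry code uses in place of raw
 * cli/sti, e.g. (illustrative) ENABLE_INTERRUPTS(CLBR_NONE) in
 * entry_*.S.  Note that COND_PUSH/COND_POP only save the registers the
 * call site has *not* already declared clobbered.
 */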

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something equally
 * minimal.  Either way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */