#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
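
/*
 * Illustrative use (a sketch, mirroring how the generic cpuid()
 * wrappers call this): each pointer is both input (the register value
 * loaded before CPUID) and output (the register value afterwards), so
 * callers must initialize all four.
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *
 * and eax then holds the family/model/stepping word for leaf 1.
 */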

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

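/*
 * clts clears CR0.TS.  With TS clear, FPU/SIMD instructions stop
 * raising #NM; this is how lazy FPU switching re-enables the FPU once
 * a task first touches it.
 */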
static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long __read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})
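
/*
 * Illustrative use of the MSR accessors above (a sketch; MSR_EFER is
 * just an example index):
 *
 *	u32 lo, hi;
 *	u64 efer;
 *	int err;
 *
 *	rdmsr(MSR_EFER, lo, hi);		low/high 32-bit halves
 *	rdmsrl(MSR_EFER, efer);			full 64-bit value
 *	err = rdmsr_safe(MSR_EFER, &lo, &hi);	nonzero if the read faults
 *	if (!err)
 *		wrmsr(MSR_EFER, lo, hi);
 */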

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

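/*
 * flush_tlb_others() flushes TLB entries for mm over [start, end) on
 * the CPUs in cpumask (the caller typically handles its own flush
 * separately).  A paravirt backend can batch or skip the cross-CPU
 * IPIs, e.g. for vCPUs that are not currently running.
 */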
static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

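/*
 * A note on the conversions below: when the page-table entry type is
 * wider than a native word (32-bit PAE, where pteval_t is u64 but long
 * is 32 bits), the value is passed to the pvop as two 32-bit register
 * arguments, low word first.  Otherwise it fits in one register.  The
 * sizeof() comparison is a compile-time constant, so only one branch
 * is ever emitted.
 */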
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud, (u64)pud.pud >> 32);
	else
		ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

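/*
 * pv_wait() blocks the calling vCPU until *ptr is observed to differ
 * from val (or until it is kicked; spurious wakeups are allowed), and
 * pv_kick() wakes the vCPU given by cpu.  Together they let a queued
 * spinlock waiter halt instead of spinning while the lock holder's
 * vCPU is preempted.
 */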
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl __raw_callee_save_" #func " ; "			\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
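
/*
 * Illustrative use (a sketch; my_save_fl is a hypothetical backend):
 *
 *	static unsigned long my_save_fl(void) { ... }
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * PV_CALLEE_SAVE() points the op at the generated
 * __raw_callee_save_my_save_fl wrapper, so PVOP_CALLEE* sites may
 * assume every scratch register survives the call.  Hand-written asm
 * that already preserves them can use __PV_IS_CALLEE_SAVE() instead.
 */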

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
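
/*
 * Typical pairing (a sketch of how the generic irqflags macros use
 * these):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();		disable, remember state
 *	...critical section...
 *	arch_local_irq_restore(flags);		re-enable only if they
 *						were enabled before
 */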


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

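/*
 * _PVSITE emits the original instruction sequence in place and records
 * a patch-site entry in .parainstructions: the site address (word
 * 771b), the pv-op type (ptype), the site length in bytes (772b-771b)
 * and the clobber mask.  apply_paravirt() walks that section at boot
 * and patches each site for the hypervisor actually running.
 */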
#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */