// SPDX-License-Identifier: GPL-2.0
/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
#include <linux/reboot.h>
#include <linux/virtio_anchor.h>
#include <linux/stackprotector.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_IOPL_IOPERM
#include <asm/io_bitmap.h>
#endif

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/proc_cap_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"

#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */

void *xen_initial_gdt;

static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);

struct tls_descs {
	struct desc_struct desc[3];
};

DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);

enum xen_lazy_mode xen_get_lazy_mode(void)
{
	if (in_interrupt())
		return XEN_LAZY_NONE;

	return this_cpu_read(xen_lazy_mode);
}

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the one
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);

static int __init parse_xen_msr_safe(char *str)
{
	if (str)
		return kstrtobool(str, &xen_msr_safe);
	return -EINVAL;
}
early_param("xen_msr_safe", parse_xen_msr_safe);
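/*
 * Example: booting with "xen_msr_safe=1" on the kernel command line
 * overrides the CONFIG_XEN_PV_MSR_SAFE default at runtime; kstrtobool()
 * also accepts the usual boolean spellings (0/1, y/n, on/off).
 */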

/* Get MTRR settings from Xen and put them into mtrr_state. */
static void __init xen_set_mtrr_data(void)
{
#ifdef CONFIG_MTRR
	struct xen_platform_op op = {
		.cmd = XENPF_read_memtype,
		.interface_version = XENPF_INTERFACE_VERSION,
	};
	unsigned int reg;
	unsigned long mask;
	uint32_t eax, width;
	static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata;

	/* Get physical address width (only 64-bit cpus supported). */
	width = 36;
	eax = cpuid_eax(0x80000000);
	if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
		eax = cpuid_eax(0x80000008);
		width = eax & 0xff;
	}

	for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) {
		op.u.read_memtype.reg = reg;
		if (HYPERVISOR_platform_op(&op))
			break;

		/*
		 * Only called in dom0, which has all RAM PFNs mapped at
		 * RAM MFNs, and all PCI space etc. is identity mapped.
		 * This means we can treat MFN == PFN regarding MTRR settings.
		 */
		var[reg].base_lo = op.u.read_memtype.type;
		var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT;
		var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT);
		mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1);
		mask &= (1UL << width) - 1;
		if (mask)
			mask |= MTRR_PHYSMASK_V;
		var[reg].mask_lo = mask;
		var[reg].mask_hi = mask >> 32;
	}
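	/*
	 * Illustrative example of the mask computation above: a 4 MiB
	 * range (nr_mfns == 0x400) with width == 36 yields
	 * mask = ~(0x400000 - 1) & ((1UL << 36) - 1) = 0xfffc00000,
	 * i.e. mask_lo == 0xffc00800 once MTRR_PHYSMASK_V (bit 11) is
	 * set, and mask_hi == 0xf.
	 */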

	/* Only overwrite MTRR state if any MTRR could be obtained from Xen. */
	if (reg)
		mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
#endif
}

static void __init xen_pv_init_platform(void)
{
	/* PV guests can't operate virtio devices without grants. */
	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);

	populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));

	set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

	/* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
	xen_vcpu_info_reset(0);

	/* pvclock is in shared info area */
	xen_init_time_ops();

	if (xen_initial_domain())
		xen_set_mtrr_data();
	else
		mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);

	/* Adjust nr_cpu_ids before "enumeration" happens */
	xen_smp_count_cpus();
}

static void __init xen_pv_guest_late_init(void)
{
#ifndef CONFIG_SMP
	/* Setup shared vcpu info for non-smp configurations */
	xen_setup_vcpu_info_placement();
#endif
}

static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned int maskebx = ~0;
	unsigned int or_ebx = 0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 0x1:
		/* Replace initial APIC ID in bits 24-31 of EBX. */
		/* See xen_pv_smp_config() for related topology preparations. */
		maskebx = 0x00ffffff;
		or_ebx = smp_processor_id() << 24;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values.. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

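	/*
	 * XEN_EMULATE_PREFIX is the Xen forced-emulation marker (a ud2a
	 * followed by the ASCII bytes "xen"): the hypervisor recognizes
	 * it and emulates the CPUID that follows, so the values read
	 * below are already hypervisor-filtered; the EBX fixups computed
	 * above are then applied on top of that result.
	 */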
	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*bx |= or_ebx;
}

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/* We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out directly
	 * from the hardware and hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running on a hypervisor earlier than Xen 4.2, do not expose
	 * MWAIT, to avoid the risk of loading the native ACPI PAD driver.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/* We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */

	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/* Ask the Hypervisor whether to clear ACPI_PROC_CAP_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PROC_CAP_C_CAPABILITY_SMP | ACPI_PROC_CAP_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_platform_op(&op) == 0) &&
	    (buf[2] & (ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}

static bool __init xen_check_xsave(void)
{
	unsigned int cx, xsave_mask;

	cx = cpuid_ecx(1);

	xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) |
		     (1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	return (cx & xsave_mask) == xsave_mask;
}

static void __init xen_init_capabilities(void)
{
	setup_force_cpu_cap(X86_FEATURE_XENPV);
	setup_clear_cpu_cap(X86_FEATURE_DCA);
	setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
	setup_clear_cpu_cap(X86_FEATURE_MTRR);
	setup_clear_cpu_cap(X86_FEATURE_ACC);
	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
	setup_clear_cpu_cap(X86_FEATURE_SME);
	setup_clear_cpu_cap(X86_FEATURE_LKGS);

	/*
	 * Xen PV would need some work to support PCID: CR3 handling as well
	 * as xen_flush_tlb_others() would need updating.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);

	if (!xen_initial_domain())
		setup_clear_cpu_cap(X86_FEATURE_ACPI);

	if (xen_check_mwait())
		setup_force_cpu_cap(X86_FEATURE_MWAIT);
	else
		setup_clear_cpu_cap(X86_FEATURE_MWAIT);

	if (!xen_check_xsave()) {
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_OSXSAVE);
	}
}

static noinstr void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static noinstr unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(XEN_LAZY_CPU);
}

static void xen_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	xen_mc_flush();
	leave_lazy(XEN_LAZY_CPU);
	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	unsigned char dummy;
	void *va;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables.  We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space.  In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall.  We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses
	 * copy_from_kernel_nofault() to fault in the target address
	 * just in case there's some obscure case in which the target
	 * address isn't readable.
	 */

	preempt_disable();

	copy_from_kernel_nofault(&dummy, v, 1);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	va = __va(PFN_PHYS(pfn));

	if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
		BUG();

	preempt_enable();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	/*
	 * We need to mark all aliases of the LDT pages RO.  We
	 * don't need to call vm_flush_aliases(), though, since that's
	 * only responsible for flushing aliases out the TLBs, not the
	 * page tables, and Xen will flush the TLB for us if needed.
	 *
	 * To avoid confusing future readers: none of this is necessary
	 * to load the LDT.  The hypervisor only checks this when the
	 * LDT is faulted in due to subsequent descriptor access.
	 */

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

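/*
 * Note on the xen_mc_entry()/xen_mc_issue() pattern used below (and by
 * several other ops in this file): the hypercall is queued in the per-CPU
 * multicall buffer, and xen_mc_issue() flushes it immediately unless the
 * CPU is in the matching lazy mode (XEN_LAZY_CPU here), in which case the
 * whole batch is issued later, e.g. from xen_end_context_switch().
 */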
static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned long pfn, mfn;
	int level;
	pte_t *ptep;
	void *virt;

	/* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
	BUG_ON(size > PAGE_SIZE);
	BUG_ON(va & ~PAGE_MASK);

	/*
	 * The GDT is per-cpu and is in the percpu data area.
	 * That can be virtually mapped, so we need to do a
	 * page-walk to get the underlying MFN for the
	 * hypercall.  The page can also be in the kernel's
	 * linear range, so we need to RO that mapping too.
	 */
	ptep = lookup_address(va, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	mfn = pfn_to_mfn(pfn);
	virt = __va(PFN_PHYS(pfn));

	make_lowmem_page_readonly((void *)va);
	make_lowmem_page_readonly(virt);

	if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned long pfn, mfn;
	pte_t pte;

	/* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
	BUG_ON(size > PAGE_SIZE);
	BUG_ON(va & ~PAGE_MASK);

	pfn = virt_to_pfn((void *)va);
	mfn = pfn_to_mfn(pfn);

	pte = pfn_pte(pfn, PAGE_KERNEL_RO);

	if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
		BUG();

	if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
		BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return !memcmp(d1, d2, sizeof(*d1));
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_rw(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * In lazy mode we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (xen_get_lazy_mode() == XEN_LAZY_CPU)
		loadsegment(fs, 0);

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(XEN_LAZY_CPU);
}

static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

void noist_exc_debug(struct pt_regs *regs);

DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
{
	/* On Xen PV, NMI doesn't use IST.  The C part is the same as native. */
	exc_nmi(regs);
}

DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault)
{
	/* On Xen PV, DF doesn't use IST.  The C part is the same as native. */
	exc_double_fault(regs, error_code);
}

DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
{
	/*
	 * There's no IST on Xen PV, but we still need to dispatch
	 * to the correct handler.
	 */
	if (user_mode(regs))
		noist_exc_debug(regs);
	else
		exc_debug(regs);
}

DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
	/* This should never happen and there is no way to handle it. */
	instrumentation_begin();
	pr_err("Unknown trap in Xen PV mode.\n");
	BUG();
	instrumentation_end();
}

#ifdef CONFIG_X86_MCE
DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
{
	/*
	 * There's no IST on Xen PV, but we still need to dispatch
	 * to the correct handler.
	 */
	if (user_mode(regs))
		noist_exc_machine_check(regs);
	else
		exc_machine_check(regs);
}
#endif

struct trap_array_entry {
	void (*orig)(void);
	void (*xen)(void);
	bool ist_okay;
};

#define TRAP_ENTRY(func, ist_ok) {			\
	.orig		= asm_##func,			\
	.xen		= xen_asm_##func,		\
	.ist_okay	= ist_ok }

#define TRAP_ENTRY_REDIR(func, ist_ok) {		\
	.orig		= asm_##func,			\
	.xen		= xen_asm_xenpv_##func,		\
	.ist_okay	= ist_ok }

static struct trap_array_entry trap_array[] = {
	TRAP_ENTRY_REDIR(exc_debug,			true  ),
	TRAP_ENTRY_REDIR(exc_double_fault,		true  ),
#ifdef CONFIG_X86_MCE
	TRAP_ENTRY_REDIR(exc_machine_check,		true  ),
#endif
	TRAP_ENTRY_REDIR(exc_nmi,			true  ),
	TRAP_ENTRY(exc_int3,				false ),
	TRAP_ENTRY(exc_overflow,			false ),
#ifdef CONFIG_IA32_EMULATION
	TRAP_ENTRY(int80_emulation,			false ),
#endif
	TRAP_ENTRY(exc_page_fault,			false ),
	TRAP_ENTRY(exc_divide_error,			false ),
	TRAP_ENTRY(exc_bounds,				false ),
	TRAP_ENTRY(exc_invalid_op,			false ),
	TRAP_ENTRY(exc_device_not_available,		false ),
	TRAP_ENTRY(exc_coproc_segment_overrun,		false ),
	TRAP_ENTRY(exc_invalid_tss,			false ),
	TRAP_ENTRY(exc_segment_not_present,		false ),
	TRAP_ENTRY(exc_stack_segment,			false ),
	TRAP_ENTRY(exc_general_protection,		false ),
	TRAP_ENTRY(exc_spurious_interrupt_bug,		false ),
	TRAP_ENTRY(exc_coprocessor_error,		false ),
	TRAP_ENTRY(exc_alignment_check,			false ),
	TRAP_ENTRY(exc_simd_coprocessor_error,		false ),
#ifdef CONFIG_X86_CET
	TRAP_ENTRY(exc_control_protection,		false ),
#endif
};

static bool __ref get_trap_addr(void **addr, unsigned int ist)
{
	unsigned int nr;
	bool ist_okay = false;
	bool found = false;

	/*
	 * Replace trap handler addresses by Xen specific ones.
	 * Check for known traps using IST and whitelist them.
	 * The debugger ones are the only ones we care about.
	 * Xen will handle faults like double_fault, so we should never see
	 * them.  Warn if there's an unexpected IST-using fault handler.
	 */
	for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
		struct trap_array_entry *entry = trap_array + nr;

		if (*addr == entry->orig) {
			*addr = entry->xen;
			ist_okay = entry->ist_okay;
			found = true;
			break;
		}
	}

	if (nr == ARRAY_SIZE(trap_array) &&
	    *addr >= (void *)early_idt_handler_array[0] &&
	    *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
		nr = (*addr - (void *)early_idt_handler_array[0]) /
		     EARLY_IDT_HANDLER_SIZE;
		*addr = (void *)xen_early_idt_handler_array[nr];
		found = true;
	}

	if (!found)
		*addr = (void *)xen_asm_exc_xen_unknown_trap;

	if (WARN_ON(found && ist != 0 && !ist_okay))
		return false;

	return true;
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->bits.type != GATE_TRAP && val->bits.type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(val);
	if (!get_trap_addr((void **)&addr, val->bits.ist))
		return 0;
	info->address = addr;

	info->cs = gate_segment(val);
	info->flags = val->bits.dpl;
	/* interrupt gates clear IF */
	if (val->bits.type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
				      struct trap_info *traps, bool full)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
			out++;
	}

	return out;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps, true);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];
	static const struct trap_info zero = { };
	unsigned out;

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	out = xen_convert_trap_info(desc, traps, false);
	traps[out] = zero;

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}

static void xen_load_sp0(unsigned long sp0)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
	xen_mc_issue(XEN_LAZY_CPU);
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static void xen_invalidate_io_bitmap(void)
{
	struct physdev_set_iobitmap iobitmap = {
		.bitmap = NULL,
		.nr_ports = 0,
	};

	native_tss_invalidate_io_bitmap();
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}

static void xen_update_io_bitmap(void)
{
	struct physdev_set_iobitmap iobitmap;
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);

	native_tss_update_io_bitmap();

	iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
			  tss->x86_tss.io_bitmap_base;
	if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
		iobitmap.nr_ports = 0;
	else
		iobitmap.nr_ports = IO_BITMAP_BITS;

	HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}
#endif

static void xen_io_delay(void)
{
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(XEN_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
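	/*
	 * A PV guest does not own the real %cr4: paging-related bits
	 * (PGE/PSE) are managed by the hypervisor, and PCE is likewise
	 * not guest-controllable, so mask them out before the write.
	 */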
	cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);

	native_write_cr4(cr4);
}

static u64 xen_do_read_msr(unsigned int msr, int *err)
{
	u64 val = 0;	/* Avoid uninitialized value for safe variant. */

	if (pmu_msr_read(msr, &val, err))
		return val;

	if (err)
		val = native_read_msr_safe(msr, err);
	else
		val = native_read_msr(msr);

	switch (msr) {
	case MSR_IA32_APICBASE:
		val &= ~X2APIC_ENABLE;
		if (smp_processor_id() == 0)
			val |= MSR_IA32_APICBASE_BSP;
		else
			val &= ~MSR_IA32_APICBASE_BSP;
		break;
	}
	return val;
}

static void set_seg(unsigned int which, unsigned int low, unsigned int high,
		    int *err)
{
	u64 base = ((u64)high << 32) | low;

	if (HYPERVISOR_set_segment_base(which, base) == 0)
		return;

	if (err)
		*err = -EIO;
	else
		WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base);
}

/*
 * Support write_msr_safe() and write_msr() semantics.
 * With err == NULL write_msr() semantics are selected.
 * Supplying an err pointer requires err to be pre-initialized with 0.
 */
static void xen_do_write_msr(unsigned int msr, unsigned int low,
			     unsigned int high, int *err)
{
	switch (msr) {
	case MSR_FS_BASE:
		set_seg(SEGBASE_FS, low, high, err);
		break;

	case MSR_KERNEL_GS_BASE:
		set_seg(SEGBASE_GS_USER, low, high, err);
		break;

	case MSR_GS_BASE:
		set_seg(SEGBASE_GS_KERNEL, low, high, err);
		break;

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	default:
		if (!pmu_msr_write(msr, low, high, err)) {
			if (err)
				*err = native_write_msr_safe(msr, low, high);
			else
				native_write_msr(msr, low, high);
		}
	}
}

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	return xen_do_read_msr(msr, err);
}

static int xen_write_msr_safe(unsigned int msr, unsigned int low,
			      unsigned int high)
{
	int err = 0;

	xen_do_write_msr(msr, low, high, &err);

	return err;
}

static u64 xen_read_msr(unsigned int msr)
{
	int err;

	return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL);
}

static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{
	int err;

	xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL);
}

/* This is called once we have the cpu_possible_mask */
void __init xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Set up direct vCPU id mapping for PV guests. */
		per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
	}

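	/*
	 * The *_direct variants below touch vcpu_info directly from
	 * assembly stubs, which is only safe now that the per-cpu
	 * vcpu_info placement has been done for every possible CPU.
	 */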
	pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
	pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
	pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
	pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
}

static const struct pv_info xen_info __initconst = {
	.extra_user_64bit_cs = FLAT_USER_CS64,
	.name = "Xen",
};

static const typeof(pv_ops) xen_cpu_ops __initconst = {
	.cpu = {
		.cpuid = xen_cpuid,

		.set_debugreg = xen_set_debugreg,
		.get_debugreg = xen_get_debugreg,

		.read_cr0 = xen_read_cr0,
		.write_cr0 = xen_write_cr0,

		.write_cr4 = xen_write_cr4,

		.wbinvd = pv_native_wbinvd,

		.read_msr = xen_read_msr,
		.write_msr = xen_write_msr,

		.read_msr_safe = xen_read_msr_safe,
		.write_msr_safe = xen_write_msr_safe,

		.read_pmc = xen_read_pmc,

		.load_tr_desc = paravirt_nop,
		.set_ldt = xen_set_ldt,
		.load_gdt = xen_load_gdt,
		.load_idt = xen_load_idt,
		.load_tls = xen_load_tls,
		.load_gs_index = xen_load_gs_index,

		.alloc_ldt = xen_alloc_ldt,
		.free_ldt = xen_free_ldt,

		.store_tr = xen_store_tr,

		.write_ldt_entry = xen_write_ldt_entry,
		.write_gdt_entry = xen_write_gdt_entry,
		.write_idt_entry = xen_write_idt_entry,
		.load_sp0 = xen_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
		.invalidate_io_bitmap = xen_invalidate_io_bitmap,
		.update_io_bitmap = xen_update_io_bitmap,
#endif
		.io_delay = xen_io_delay,

		.start_context_switch = xen_start_context_switch,
		.end_context_switch = xen_end_context_switch,
	},
};

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	do_kernel_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61. */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}

static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_gdt(int cpu)
{
	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_ops.cpu.load_gdt = xen_load_gdt_boot;

	switch_gdt_and_percpu_base(cpu);

	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
	pv_ops.cpu.load_gdt = xen_load_gdt;
}

static void __init xen_dom0_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 1;
}

static void __init xen_domu_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 0;
}

extern void early_xen_iret_patch(void);

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!si)
		return;

	clear_bss();

	xen_start_info = si;

	__text_gen_insn(&early_xen_iret_patch,
			JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret,
			JMP32_INSN_SIZE);

	xen_domain_type = XEN_PV_DOMAIN;
	xen_start_flags = xen_start_info->flags;

	xen_setup_features();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_ops.cpu = xen_cpu_ops.cpu;
	xen_init_irq_ops();

	/*
	 * Setup xen_vcpu early because it is needed for
	 * local_irq_disable(), irqs_disabled(), e.g. in printk().
	 *
	 * Don't do the full vcpu_info placement stuff until we have
	 * the cpu_possible_mask and a non-dummy shared_info.
	 */
	xen_vcpu_info_reset(0);

	x86_platform.get_nmi_reason = xen_get_nmi_reason;
	x86_platform.realmode_reserve = x86_init_noop;
	x86_platform.realmode_init = x86_init_noop;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.irqs.intr_mode_select	= x86_init_noop;
	x86_init.irqs.intr_mode_init	= x86_64_probe_apic;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;
	x86_init.hyper.init_platform = xen_pv_init_platform;
	x86_init.hyper.guest_late_init = xen_pv_guest_late_init;

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_setup_machphys_mapping();
	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/* Work out if we support NX */
	get_cpu_cap(&boot_cpu_data);
	x86_configure_nx();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	/* Determine virtual and physical address sizes */
	get_cpu_address_sizes(&boot_cpu_data);

	/* Let's presume PV guests always boot on vCPU with id 0. */
	per_cpu(xen_vcpu_id, 0) = 0;

	idt_setup_early_handler();

	xen_init_capabilities();

	/*
	 * set up the basic apic ops.
	 */
	xen_init_apic();

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	disable_srat();
#endif
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
				   xen_start_info->nr_pages);
	xen_reserve_special_pages();

	/*
	 * We used to do this in xen_arch_setup, but that is too late
	 * on AMD, where early_cpu_init (run before ->arch_setup()) calls
	 * early_amd_init, which pokes the 0xcf8 port.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

	if (xen_start_info->mod_start) {
		if (xen_start_info->flags & SIF_MOD_START_PFN)
			initrd_start = PFN_PHYS(xen_start_info->mod_start);
		else
			initrd_start = __pa(xen_start_info->mod_start);
	}

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = initrd_start;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
	boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;

	if (!xen_initial_domain()) {
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
		x86_platform.set_legacy_features =
				xen_domu_set_legacy_features;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		x86_platform.set_legacy_features =
				xen_dom0_set_legacy_features;
		xen_init_vga(info, xen_start_info->console.dom0.info_size,
			     &boot_params.screen_info);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_platform_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		xen_boot_params_init_edd();

#ifdef CONFIG_ACPI
		/*
		 * Disable selecting "Firmware First mode" for correctable
		 * memory errors, as this is the duty of the hypervisor to
		 * decide.
		 */
		acpi_disable_cmcff = 1;
#endif
	}

	xen_add_preferred_consoles();

#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	/* We need this for printk timestamps */
	xen_setup_runstate_info(0);

	xen_efi_init(&boot_params);

	/* Start the world */
	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
}

static int xen_cpu_up_prepare_pv(unsigned int cpu)
{
	int rc;

	if (per_cpu(xen_vcpu, cpu) == NULL)
		return -ENODEV;

	xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}

	rc = xen_smp_intr_init_pv(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}

	return 0;
}

static int xen_cpu_dead_pv(unsigned int cpu)
{
	xen_smp_intr_free(cpu);
	xen_smp_intr_free_pv(cpu);

	xen_teardown_timer(cpu);

	return 0;
}

static uint32_t __init xen_platform_pv(void)
{
	if (xen_pv_domain())
		return xen_cpuid_base();

	return 0;
}

const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
	.name                   = "Xen PV",
	.detect                 = xen_platform_pv,
	.type			= X86_HYPER_XEN_PV,
	.runtime.pin_vcpu       = xen_pin_vcpu,
	.ignore_nopv		= true,
};
1577