/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"
87 
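/*
 * hypercall_page itself is defined in xen-head.S; it is exported here
 * so that modules can issue hypercalls.
 */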
EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to
 * shared_info->vcpu_info, but if the hypervisor supports
 * VCPUOP_register_vcpu_info then it can point to xen_vcpu_info. The
 * pointer is used in __xen_evtchn_do_upcall to acknowledge pending
 * events. More subtly, it is also used by the patched versions of the
 * irq enable/disable code, e.g. xen_irq_enable_direct and xen_iret in
 * PV mode.
 *
 * The desire to be able to do those mask/unmask operations as a single
 * instruction by using the per-cpu offset held in %gs is the real reason
 * vcpu info is in a per-cpu pointer and the original reason for this
 * hypercall.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
 * Per-CPU pages used if the hypervisor supports the
 * VCPUOP_register_vcpu_info hypercall. This can be used both in PV and
 * PVHVM mode. The structure overrides the default per_cpu(xen_vcpu, cpu)
 * value.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long  machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (i.e. a buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
	struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
167 
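/*
 * Without vcpu_info placement, each CPU's vcpu_info must live in the
 * shared_info page, which only has room for MAX_VIRT_CPUS entries, so
 * cap the number of CPUs we try to bring up.
 */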
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * This path is called twice on PVHVM - first during bootup via
	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
	 * As we can only do the VCPUOP_register_vcpu_info once, let's
	 * not overwrite its result.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and bootup
	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
	 * use this function.
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu variable.
	 * N.B. This hypercall can _only_ be called once per CPU.
	 * Subsequent calls will error out with -EINVAL. This is because
	 * the hypervisor has no unregister variant, and this hypercall
	 * does not allow overwriting info.mfn and info.offset.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	pr_info("Booting paravirtualized kernel %son %s\n",
		xen_feature(XENFEAT_auto_translated_physmap) ?
			"with PVH extensions " : "", pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
		((version >> 16) > major))
		return true;
	return false;
}

#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;
	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERF/MPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}
336 
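	/*
	 * XEN_EMULATE_PREFIX is the forced-emulation marker (an ud2a
	 * instruction followed by the string "xen"): the hypervisor
	 * recognises it and emulates the cpuid that follows, since a
	 * plain CPUID executed by a PV guest would not trap.
	 */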
	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/*
	 * We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out
	 * directly from the hardware and from the hypervisor via a hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running on a hypervisor earlier than Xen 4.2, do not expose
	 * MWAIT, to avoid the risk of loading the native ACPI PAD driver.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/*
	 * We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */

	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/*
	 * Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
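	/*
	 * _PDC buffer layout: revision, number of capability DWORDs,
	 * then the capability bits themselves.
	 */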
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_dom0_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}

static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	cpuid(1, &ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
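/*
 * Xen refuses to use a page as a descriptor table while any writable
 * mapping of that page exists, so the vmalloc alias and the kernel's
 * linear-map alias must be switched together.
 */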
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we need to RO that mapping too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
626 
static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault, so we
	 * should never see them.  Warn if there's an unexpected
	 * IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * When the Xen hypervisor injects a vMCE into the guest,
		 * use the native MCE handler to handle it.
		 */
		;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry.  If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps);
}

/*
 * Load a new IDT into Xen.  In principle this can be per-CPU, so we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation, and saves stack space).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/*
 * Write a GDT descriptor entry.  Ignore LDT descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot time, when we need
 * to update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}
906 
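/*
 * A PV guest does not control the hardware TSS; the stack_switch
 * hypercall tells Xen which kernel stack to load on kernel entry. We
 * still mirror sp0 into the struct tss so that native code reading it
 * stays coherent.
 */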
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
	tss->x86_tss.sp0 = thread->sp0;
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
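	/*
	 * 'mask' is an EFLAGS image: the IOPL field lives in bits 12-13,
	 * hence the shift. (An IOPL of 0 is mapped to 1, presumably
	 * because the PV kernel itself does not run in ring 0.)
	 */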
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
943 
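/*
 * Reading CR0 from a PV guest traps to the hypervisor, which is
 * expensive, so cache the value most recently written per-cpu and
 * serve reads from the cache (falling back to a real read the first
 * time through).
 */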
static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
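	/*
	 * A PV guest does not control global pages or superpages; Xen
	 * manages the real CR4, so mask those bits out before writing.
	 */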
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	u64 val;

	val = native_read_msr_safe(msr, err);
	switch (msr) {
	case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
			val &= ~X2APIC_ENABLE;
		break;
	}
	return val;
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
		unsigned which;
		u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}
1044 
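/*
 * For PV guests the shared_info page lives at a machine address, which
 * must be mapped through a fixmap slot; auto-translated (PVH) guests
 * see it at a guest pseudo-physical address, so plain __va() is enough.
 */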
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/*
	 * xen_vcpu_setup managed to place the vcpu_info within the
	 * percpu area for all cpus, so make use of it. Note that for
	 * PVH we want to use the native IRQ mechanism.
	 */
	if (have_vcpu_info_placement && !xen_pvh_domain()) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}

static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;
1092 
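	/*
	 * When the vcpu_info is placed in per-cpu data, the irq
	 * pseudo-ops have hand-written "direct" variants that can be
	 * patched inline at each call site; anything else falls
	 * through to paravirt_patch_default().
	 */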
#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/*
		 * Note: because reloc is assigned from something that
		 * appears to be an array, gcc assumes it's non-null,
		 * but doesn't know its relationship with start and
		 * end.
		 */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initconst = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = xen_read_msr_safe,
	.write_msr = xen_write_msr_safe,

	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.read_tscp = native_read_tscp,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initconst = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};

static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
	.priority = INT_MIN
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61. */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}

static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}
1351 
/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 *
 * Note that it is __ref because the only caller of this after init
 * is PVH, which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
static void __ref xen_setup_gdt(int cpu)
{
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_X86_64
		unsigned long dummy;

		load_percpu_segment(cpu); /* We need to access per-cpu area */
		switch_to_new_gdt(cpu); /* GDT and GS set */

		/*
		 * We are switching from the Xen-provided GDT to our HVM mode
		 * GDT. The new GDT has __KERNEL_CS with CS.L = 1
		 * and we are jumping to reload it.
		 */
		asm volatile ("pushq %0\n"
			      "leaq 1f(%%rip),%0\n"
			      "pushq %0\n"
			      "lretq\n"
			      "1:\n"
			      : "=&r" (dummy) : "0" (__KERNEL_CS));

		/*
		 * While not needed, we also set the %es, %ds, and %fs
		 * to zero. We don't care about %ss as it is NULL.
		 * Strictly speaking this is not needed as Xen zeros those
		 * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE);
		 * Linux zeros them in cpu_init() and in secondary_startup_64
		 * (for the BSP).
		 */
		loadsegment(es, 0);
		loadsegment(ds, 0);
		loadsegment(fs, 0);
#else
		/* PVH: TODO Implement. */
		BUG();
#endif
		return; /* PVH does not need any PV GDT ops. */
	}
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}
1408 
#ifdef CONFIG_XEN_PVH
/*
 * A PV guest starts with default flags that are not set for PVH, so set
 * them here as soon as possible.
 */
static void xen_pvh_set_cr_flags(int cpu)
{
	/*
	 * Some of these are set up in 'secondary_startup_64'. The others:
	 * X86_CR0_TS, X86_CR0_PE and X86_CR0_ET are set by Xen for HVM guests
	 * (with which PVH shares codepaths), while X86_CR0_PG is for PVH.
	 */
	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);

	if (!cpu)
		return;
	/*
	 * For the BSP, PSE and PGE are set in probe_page_size_mask(); for
	 * APs we set them here. For all CPUs, OSFXSR and OSXMMEXCPT are set
	 * in fpu_init.
	 */
	if (cpu_has_pse)
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	if (cpu_has_pge)
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
}

/*
 * Note that it is __ref because the only caller of this after init
 * is PVH, which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
void __ref xen_pvh_secondary_vcpu_init(int cpu)
{
	xen_setup_gdt(cpu);
	xen_pvh_set_cr_flags(cpu);
}

static void __init xen_pvh_early_guest_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (!xen_feature(XENFEAT_hvm_callback_vector))
		return;

	xen_have_vector_callback = 1;

	xen_pvh_early_cpu_init(0, false);
	xen_pvh_set_cr_flags(0);

#ifdef CONFIG_X86_32
	BUG(); /* PVH: Implement proper support. */
#endif
}
#endif    /* CONFIG_XEN_PVH */
1464 
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_features();
#ifdef CONFIG_XEN_PVH
	xen_pvh_early_guest_init();
#endif
	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_apic_ops = xen_apic_ops;
	if (!xen_pvh_domain()) {
		pv_cpu_ops = xen_cpu_ops;

		x86_platform.get_nmi_reason = xen_get_nmi_reason;
	}

	if (xen_feature(XENFEAT_auto_translated_physmap))
		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
	else
		x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Set up the basic apic ops.
	 */
	xen_init_apic();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif
	/*
	 * Don't do the full vcpu_info placement stuff until we have a
	 * possible map and a non-dummy shared_info.
	 */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);

	/*
	 * Modify the cache mode translation tables to match Xen's PAT
	 * configuration.
	 */
	pat_init_cache_modes();

	/* Keep using the Xen gdt for now; no urgent need to change it. */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* Set the limit of our address space. */
	xen_reserve_top();

	/* PVH: runs at the default kernel iopl of 0 */
	if (!xen_pvh_domain()) {
		/*
		 * We used to do this in xen_arch_setup, but that is too late
		 * on AMD, where early_cpu_init (run before ->arch_setup())
		 * calls early_amd_init, which pokes the 0xcf8 port.
		 */
		set_iopl.iopl = 1;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
		if (rc != 0)
			xen_raw_printk("physdev_op failed %d\n", rc);
	}

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	if (xen_start_info->mod_start) {
		if (xen_start_info->flags & SIF_MOD_START_PFN)
			initrd_start = PFN_PHYS(xen_start_info->mod_start);
		else
			initrd_start = __pa(xen_start_info->mod_start);
	}

	/* Poke various useful things into boot_params */
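	/*
	 * Boot loader ID 9 in the high nibble of type_of_loader
	 * identifies Xen (see Documentation/x86/boot.txt); the low
	 * nibble is the loader version.
	 */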
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = initrd_start;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		xen_init_vga(info, xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_dom0_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		/* Avoid searching for BIOS MP tables */
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.mpparse.get_smp_config = x86_init_uint_noop;

		xen_boot_params_init_edd();
	}
#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	xen_setup_runstate_info(0);

	xen_efi_init();

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	cr4_init_shadow(); /* 32-bit kernels do this in i386_start_kernel() */
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

void __ref xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
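
	/*
	 * Ask the hypervisor to place the shared_info frame at the
	 * pseudo-physical address of the page we just allocated from
	 * the brk region.
	 */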
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
	 * online, but it is run at resume time too, and in that case
	 * multiple vcpus might be online.
	 */
	for_each_online_cpu(cpu) {
		/* Leave it to be NULL. */
		if (cpu >= MAX_VIRT_CPUS)
			continue;
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
	}
}

#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
1727 	cpuid(base + 2, &pages, &msr, &ecx, &edx);
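	/*
	 * CPUID leaf base+2 reports the number of hypercall pages in
	 * EAX and, in EBX, the MSR that must be programmed with the
	 * guest-physical address at which to map them.
	 */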
	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	pv_info.name = "Xen HVM";

	xen_domain_type = XEN_HVM_DOMAIN;
}

static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		xen_vcpu_setup(cpu);
		if (xen_have_vector_callback) {
			if (xen_feature(XENFEAT_hvm_safe_pvclock))
				xen_setup_timer(cpu);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier = {
	.notifier_call	= xen_hvm_cpu_notify,
};

static void __init xen_hvm_guest_init(void)
{
	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	xen_hvm_smp_init();
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
}

static bool xen_nopv = false;
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);

static uint32_t __init xen_hvm_platform(void)
{
	if (xen_nopv)
		return 0;

	if (xen_pv_domain())
		return 0;

	return xen_cpuid_base();
}
1797 
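/*
 * An HVM guest still needs the emulated local APIC unless it can take
 * event channel upcalls through the vector callback and route
 * passed-through device interrupts over event channels (hvm_pirqs).
 */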
bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
	.name			= "Xen HVM",
	.detect			= xen_hvm_platform,
	.init_platform		= xen_hvm_guest_init,
	.x2apic_available	= xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif