/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>

#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/msr-index.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

/*
 * Point at some empty memory to start with.  We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (i.e. the buffer can't cross a page
 * boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement)
		return;		/* already tested, not available */

	vcpup = &per_cpu(xen_vcpu_info, cpu);

	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
	       cpu, vcpup, info.mfn, info.offset);

	/* Check to see if the hypervisor will put the vcpu_info
	   structure where we want it, which allows direct access via
	   a percpu variable. */
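	/*
	 * Added illustration (commentary, not in the original): with
	 * placement in effect, a flag read such as
	 *
	 *	struct vcpu_info *vi = per_cpu(xen_vcpu, cpu);
	 *
	 * resolves to the copy embedded in the percpu area rather than
	 * bouncing through HYPERVISOR_shared_info->vcpu_info[cpu].
	 * Note that the registration is one-way: once it succeeds for a
	 * VCPU it cannot be undone, which is why xen_vcpu_restore()
	 * below must BUG() if re-registration fails after resume.
	 */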
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;

		printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
		       cpu, vcpup);
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	if (have_vcpu_info_placement) {
		int cpu;

		for_each_online_cpu(cpu) {
			bool other_cpu = (cpu != smp_processor_id());

			if (other_cpu &&
			    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
				BUG();

			xen_vcpu_setup(cpu);

			if (other_cpu &&
			    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
				BUG();
		}

		BUG_ON(!have_vcpu_info_placement);
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try to disable as many
	 * unsupported kernel subsystems as possible.
	 */
	if (*ax == 1)
		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
			    (1 << X86_FEATURE_MCE)  |  /* disable MCE */
			    (1 << X86_FEATURE_MCA)  |  /* disable MCA */
			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));
	*dx &= maskedx;
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

void xen_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	xen_mc_flush();
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
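/*
 * Background note (added commentary, not in the original): Xen will not
 * accept pages as descriptor tables while any writable mapping of them
 * exists anywhere, so changing the protection of just the vmalloc alias
 * is not enough; the linear-mapping alias of the same pfn has to agree.
 * That is why the helper below updates both mappings (or flushes stale
 * kmaps for highmem pages, where no permanent linear alias exists).
 */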
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long *frames;
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int f;
	struct multicall_space mcs;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	mcs = xen_mc_entry(sizeof(*frames) * pages);
	frames = mcs.args;

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		frames[f] = arbitrary_virt_to_mfn((void *)va);

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(mfn_to_virt(frames[f]));
	}

	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
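/*
 * Added illustration (commentary, not in the original): the three TLS
 * descriptor updates in xen_load_tls() below follow the multicall
 * batching pattern used throughout this file:
 *
 *	xen_mc_batch();			 // open a batch
 *	... queue MULTI_* operations ...  // e.g. MULTI_update_descriptor()
 *	xen_mc_issue(PARAVIRT_LAZY_CPU);  // flush now, or keep batching
 *					  // if we're in lazy CPU mode
 *
 * so the common context-switch case issues one hypercall for all three
 * GDT entries instead of trapping into the hypervisor three times.
 */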
336 * 337 * On x86_64, this hack is not used for %gs, because gs points 338 * to KERNEL_GS_BASE (and uses it for PDA references), so we 339 * must not zero %gs on x86_64 340 * 341 * For x86_64, we need to zero %fs, otherwise we may get an 342 * exception between the new %fs descriptor being loaded and 343 * %fs being effectively cleared at __switch_to(). 344 */ 345 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { 346 #ifdef CONFIG_X86_32 347 lazy_load_gs(0); 348 #else 349 loadsegment(fs, 0); 350 #endif 351 } 352 353 xen_mc_batch(); 354 355 load_TLS_descriptor(t, cpu, 0); 356 load_TLS_descriptor(t, cpu, 1); 357 load_TLS_descriptor(t, cpu, 2); 358 359 xen_mc_issue(PARAVIRT_LAZY_CPU); 360 } 361 362 #ifdef CONFIG_X86_64 363 static void xen_load_gs_index(unsigned int idx) 364 { 365 if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx)) 366 BUG(); 367 } 368 #endif 369 370 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, 371 const void *ptr) 372 { 373 xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]); 374 u64 entry = *(u64 *)ptr; 375 376 preempt_disable(); 377 378 xen_mc_flush(); 379 if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry)) 380 BUG(); 381 382 preempt_enable(); 383 } 384 385 static int cvt_gate_to_trap(int vector, const gate_desc *val, 386 struct trap_info *info) 387 { 388 if (val->type != 0xf && val->type != 0xe) 389 return 0; 390 391 info->vector = vector; 392 info->address = gate_offset(*val); 393 info->cs = gate_segment(*val); 394 info->flags = val->dpl; 395 /* interrupt gates clear IF */ 396 if (val->type == 0xe) 397 info->flags |= 4; 398 399 return 1; 400 } 401 402 /* Locations of each CPU's IDT */ 403 static DEFINE_PER_CPU(struct desc_ptr, idt_desc); 404 405 /* Set an IDT entry. If the entry is part of the current IDT, then 406 also update Xen. */ 407 static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) 408 { 409 unsigned long p = (unsigned long)&dt[entrynum]; 410 unsigned long start, end; 411 412 preempt_disable(); 413 414 start = __get_cpu_var(idt_desc).address; 415 end = start + __get_cpu_var(idt_desc).size + 1; 416 417 xen_mc_flush(); 418 419 native_write_idt_entry(dt, entrynum, g); 420 421 if (p >= start && (p + 8) <= end) { 422 struct trap_info info[2]; 423 424 info[1].address = 0; 425 426 if (cvt_gate_to_trap(entrynum, g, &info[0])) 427 if (HYPERVISOR_set_trap_table(info)) 428 BUG(); 429 } 430 431 preempt_enable(); 432 } 433 434 static void xen_convert_trap_info(const struct desc_ptr *desc, 435 struct trap_info *traps) 436 { 437 unsigned in, out, count; 438 439 count = (desc->size+1) / sizeof(gate_desc); 440 BUG_ON(count > 256); 441 442 for (in = out = 0; in < count; in++) { 443 gate_desc *entry = (gate_desc*)(desc->address) + in; 444 445 if (cvt_gate_to_trap(in, entry, &traps[out])) 446 out++; 447 } 448 traps[out].address = 0; 449 } 450 451 void xen_copy_trap_info(struct trap_info *traps) 452 { 453 const struct desc_ptr *desc = &__get_cpu_var(idt_desc); 454 455 xen_convert_trap_info(desc, traps); 456 } 457 458 /* Load a new IDT into Xen. In principle this can be per-CPU, so we 459 hold a spinlock to protect the static traps[] array (static because 460 it avoids allocation, and saves stack space). 
/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static u32 xen_apic_read(u32 reg)
{
	return 0;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there are any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there are any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
	return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
}
#endif


static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
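/*
 * Added note (commentary, not in the original): xen_clts() above and
 * xen_write_cr0() below both reduce to the same fpu_taskswitch
 * hypercall, since CR0.TS is the only CR0 bit this code lets the guest
 * change.  Issuing it with PARAVIRT_LAZY_CPU lets a context switch fold
 * the TS update into the same multicall as the stack and TLS updates.
 */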
static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
		unsigned which;
		u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EFAULT;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}

void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_map */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		printk(KERN_INFO "Xen: using vcpu_info placement\n");

		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}
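/*
 * Added note (commentary, not in the original): xen_patch() below lets
 * the paravirt patching machinery inline the bodies of the
 * xen_*_direct stubs at each pv_irq_ops call site when vcpu info
 * placement is in use, instead of leaving an indirect call.  If the
 * direct version doesn't fit in the room available at the call site
 * (start == NULL, or end - start > len), it falls back to
 * paravirt_patch_default().
 */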
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
	.patch = xen_patch,

	.banner = xen_banner,
	.memory_setup = xen_memory_setup,
	.arch_setup = xen_arch_setup,
	.post_allocator_init = xen_post_allocator_init,
};

static const struct pv_time_ops xen_time_ops __initdata = {
	.time_init = xen_time_init,

	.set_wallclock = xen_set_wallclock,
	.get_wallclock = xen_get_wallclock,
	.get_tsc_khz = xen_tsc_khz,
	.sched_clock = xen_sched_clock,
};

static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = native_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = xen_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_cpu,
		.leave = xen_leave_lazy,
	},
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
	.setup_boot_clock = paravirt_nop,
	.setup_secondary_clock = paravirt_nop,
	.startup_ipi_hook = paravirt_nop,
#endif
};

static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}
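/*
 * Added note (commentary, not in the original): a PV guest has no real
 * hardware to halt, so every shutdown-ish operation ends up as a
 * SCHEDOP_shutdown hypercall with a reason code; halt, power_off and
 * shutdown all map to xen_machine_halt() (SHUTDOWN_poweroff), and the
 * hypervisor tears the domain down on our behalf.
 */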
static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};


/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);

	xen_setup_features();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;
	pv_mmu_ops = xen_mmu_ops;

	xen_init_irq_ops();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

#ifdef CONFIG_X86_64
	/*
	 * Set up percpu state.  We only need to do this for 64-bit
	 * because 32-bit already has %fs set properly.
	 */
	load_percpu_segment(0);
#endif
	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	pgd = (pgd_t *)xen_start_info->pt_base;

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_off();

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

	init_mm.pgd = pgd;

	/* keep using Xen gdt for now; no urgent need to change it */

	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
	}

	xen_raw_console_write("about to get started...\n");

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}