smp_pv.c: diff from 97d052ea3fa853b9aabcc4baca1a605cb1188611 (old) to a13f2ef168cb2a033a284eb841bcc481ffbc90cf (new)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Xen SMP support
  *
  * This file implements the Xen versions of smp_ops.  SMP under Xen is
  * very straightforward.  Bringing a CPU up is simply a matter of
  * loading its initial context and setting it running.
  *
--- 15 unchanged lines hidden ---
 #include <linux/cpuhotplug.h>
 #include <linux/stackprotector.h>
 #include <linux/pgtable.h>

 #include <asm/paravirt.h>
 #include <asm/idtentry.h>
 #include <asm/desc.h>
 #include <asm/cpu.h>
-#include <asm/io_apic.h>

 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/xenpmu.h>

 #include <asm/spec-ctrl.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
--- 47 unchanged lines hidden ---

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
 }

 asmlinkage __visible void cpu_bringup_and_idle(void)
 {
        cpu_bringup();
+       boot_init_stack_canary();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+       prevent_tail_call_optimization();
 }

 void xen_smp_intr_free_pv(unsigned int cpu)
 {
        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
--- 101 unchanged lines hidden ---
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (!xen_feature(XENFEAT_writable_page_tables))
                /* We've switched to the "real" per-cpu gdt, so make
                 * sure the old memory can be recycled. */
                make_lowmem_page_readwrite(xen_initial_gdt);

-#ifdef CONFIG_X86_32
-       /*
-        * Xen starts us with XEN_FLAT_RING1_DS, but linux code
-        * expects __USER_DS
-        */
-       loadsegment(ds, __USER_DS);
-       loadsegment(es, __USER_DS);
-#endif
-
        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();

        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the smp bootup code is activated. Hence we need to set this up
         * before the core kernel is patched. Otherwise we will have only
         * modules patched but not core code.
--- 64 unchanged lines hidden ---
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_rw(cpu);

-#ifdef CONFIG_X86_32
-       ctxt->user_regs.fs = __KERNEL_PERCPU;
-       ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        /*
         * Bring up the CPU in cpu_bringup_and_idle() with the stack
         * pointing just below where pt_regs would be if it were a normal
         * kernel entry.
         */
        ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
--- 21 unchanged lines hidden ---
        /*
         * Set SS:SP that Xen will use when entering guest kernel mode
         * from guest user mode. Subsequent calls to load_sp0() can
         * change this value.
         */
        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = task_top_of_stack(idle);

-#ifdef CONFIG_X86_32
-       ctxt->event_callback_cs = __KERNEL_CS;
-       ctxt->failsafe_callback_cs = __KERNEL_CS;
-#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
        ctxt->event_callback_eip =
                (unsigned long)xen_asm_exc_xen_hypervisor_callback;
        ctxt->failsafe_callback_eip =
                (unsigned long)xen_failsafe_callback;
        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
--- 156 unchanged lines hidden ---
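
For orientation, here is what the bring-up above amounts to. The context lines in cpu_initialize_context() fill a struct vcpu_guest_context (entry point, kernel stack, event callbacks, CR3) and hand it to the hypervisor via VCPUOP_initialise; within the hidden lines, xen_pv_cpu_up() then sets the vCPU running with VCPUOP_up. That is the "loading its initial context and setting it running" promised in the file header: no INIT/SIPI sequence as on bare metal, just two hypercalls. A minimal sketch, assuming the hypothetical helper name bring_up_pv_vcpu_sketch() and simplified error returns (the real code BUG()s on failure):

/*
 * Minimal sketch of PV vCPU bring-up; not the kernel's exact code.
 * VCPUOP_initialise loads the initial register state for the vCPU;
 * VCPUOP_up marks it runnable, so it starts executing at
 * ctxt->user_regs.eip, i.e. asm_cpu_bringup_and_idle.
 */
static int bring_up_pv_vcpu_sketch(unsigned int cpu,
                                   struct vcpu_guest_context *ctxt)
{
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
                return -EINVAL;         /* sketch only: real code BUG()s */

        if (HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
                return -EIO;            /* sketch only: real code BUG_ON()s */

        return 0;
}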
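
The two lines added to cpu_bringup_and_idle() in the newer revision work as a pair. boot_init_stack_canary() gives the freshly started CPU a stack-protector canary of its own, which by design changes the per-CPU canary value mid-function. prevent_tail_call_optimization() then keeps the compiler (reportedly first seen with gcc 10) from turning the final cpu_startup_entry() call into a tail jump: with a tail call, the function epilogue, including the canary check, runs before the jump and trips over the canary that was just rewritten. Mainline defines the macro as mb(); conceptually, any statement the compiler must keep after the call is enough. A conceptual sketch, with stand-in names (noreturn_entry(), secondary_start_sketch()) that are not the kernel's:

/*
 * Conceptual sketch of prevent_tail_call_optimization(); mainline uses
 * mb(). Code placed after the final call forces a real CALL instead of
 * a tail JMP, so the caller's canary-checking epilogue is never
 * executed (the callee does not return).
 */
#define prevent_tail_call_optimization()        asm volatile("" ::: "memory")

static void noreturn_entry(void)        /* stand-in for cpu_startup_entry() */
{
        for (;;)
                ;
}

void secondary_start_sketch(void)
{
        /* The prologue stored the old canary in this frame; a call like
         * boot_init_stack_canary() would change the live canary here. */
        noreturn_entry();
        prevent_tail_call_optimization();
}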