xref: /linux/arch/x86/xen/smp.c (revision 4e73e0eb633f8a1b5cbf20e7f42c6dbfec1d1ca7)
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

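/*
 * Per-cpu IRQ numbers for the event-channel bindings made in
 * xen_smp_intr_init().  All start at -1 so the error path there can
 * tell which bindings actually succeeded (0 is a valid IRQ number).
 */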
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, callfunc_irq) = -1;
static DEFINE_PER_CPU(int, callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.  Nothing to do; all the work is done
 * automatically when we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);

	return IRQ_HANDLED;
}

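/*
 * Early per-cpu setup for a freshly started secondary VCPU.  Runs on
 * the new CPU itself, with interrupts still disabled, and finishes by
 * marking the CPU online and enabling interrupts.
 */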
static __cpuinit void cpu_bringup(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	cpu_set(cpu, cpu_online_map);
	percpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

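/* Entry point for secondary VCPUs; see cpu_initialize_context() below. */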
static __cpuinit void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}

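/*
 * Bind this CPU's IPIs (and the debug VIRQ) to event channels and
 * install their handlers.  The kasprintf()ed names are not freed:
 * the irq layer keeps the pointer for as long as the binding exists,
 * so that the names show up in /proc/interrupts.
 */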
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	/* Unwind whatever was bound, and reset to -1 so a retry works. */
	if (per_cpu(resched_irq, cpu) >= 0) {
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
		per_cpu(resched_irq, cpu) = -1;
	}
	if (per_cpu(callfunc_irq, cpu) >= 0) {
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
		per_cpu(callfunc_irq, cpu) = -1;
	}
	if (per_cpu(debug_irq, cpu) >= 0) {
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
		per_cpu(debug_irq, cpu) = -1;
	}
	if (per_cpu(callfuncsingle_irq, cpu) >= 0) {
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
		per_cpu(callfuncsingle_irq, cpu) = -1;
	}

	return rc;
}

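/*
 * Ask the hypervisor which VCPUs exist.  VCPUOP_is_up returns >= 0
 * for any vcpu the domain has been given, whether or not it is
 * currently running, so every such vcpu is marked possible.
 */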
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * We've switched to the "real" per-cpu gdt, so make sure the
	 * old memory can be recycled.
	 */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_setup_vcpu_info_placement();
}

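/*
 * Set up the boot CPU's SMP state, clamp the possible map to
 * max_cpus, and fork an idle thread for every other possible CPU.
 */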
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}

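/*
 * Build the initial register state for a new VCPU and hand it to the
 * hypervisor.  The context points the VCPU at cpu_bringup_and_idle(),
 * and gives it the per-cpu GDT (whose frames must be read-only before
 * Xen will accept them), the idle task's kernel stack, the event and
 * failsafe callbacks, and the swapper page tables in cr3.
 */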
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

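/*
 * smp_ops.cpu_up: prepare per-cpu state on the boot CPU, initialise
 * the target VCPU's context, then kick it with VCPUOP_up and spin
 * (yielding to the hypervisor) until it marks itself CPU_ONLINE.
 */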
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

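/*
 * Runs on a surviving CPU: wait for the VCPU to actually go down
 * before releasing its event-channel bindings, lock state and timer.
 */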
static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL))
		schedule_timeout_uninterruptible(HZ/10);

	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif

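/*
 * Runs on each CPU in response to xen_smp_send_stop(): detach from
 * any page tables the VCPU might be pinning, then take it offline.
 */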
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

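/*
 * "IPIs" under Xen are just notifications on per-cpu event channels,
 * sent one destination at a time; offline CPUs are silently skipped.
 */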
static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

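/*
 * Install the Xen smp_ops; called early from the Xen boot path,
 * presumably before any secondary CPU is brought up.  The struct is
 * copied by assignment, which is why it can live in __initdata.
 */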
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}