/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. Only the sigp
 * functions use the physical cpu address (obtained via STAP); everything
 * else uses the identity mapping, i.e. cpu_number_map[i] == i for every
 * cpu. cpu_number_map is used e.g. to find the idle task belonging to a
 * logical cpu. Every array in the kernel is indexed by the logical cpu
 * number, not the physical one, which avoids the confusion around
 * __cpu_logical_map and cpu_number_map seen on other architectures.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"

/* logical cpu to cpu address */
int __cpu_logical_map[NR_CPUS];

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);

static int cpu_stopped(int cpu)
{
	__u32 status;

	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_status_stored:
		/* 0x40 == stopped, 0x10 == check stop */
		if (status & 0x50)
			return 1;
		break;
	default:
		break;
	}
	return 0;
}

void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
	trace_hardirqs_off();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!cpu_stopped(cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
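	/* xchg atomically fetches and clears all pending signal bits */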
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

/*
 * Send an emergency-signal sigp to another cpu and return without
 * waiting for its completion. The receiver picks the request up in
 * do_ext_call_interrupt via the bit set in its lowcore.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

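/*
 * Each cpu thus computes, for every control register i:
 *	new_cr[i] = (old_cr[i] & andvals[i]) | orvals[i]
 * With andvals all ones, orvals[cr] = 1UL << bit sets a single bit;
 * with orvals all zeroes, andvals[cr] = ~(1UL << bit) clears one.
 */
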
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

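/*
 * A minimal usage sketch (example only, compiled out; the register and
 * bit numbers are illustrative, not tied to a real facility):
 */
#if 0
static void example_toggle_ctl_bit(void)
{
	smp_ctl_set_bit(0, 4);		/* set CR0 bit 4 on all cpus */
	/* ... use the facility guarded by that bit ... */
	smp_ctl_clear_bit(0, 4);	/* clear it again on all cpus */
}
#endif
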
/*
 * In the early ipl state a temporary logical cpu number is needed, so
 * that the sigp functions can be used to sense other cpus. Since NR_CPUS
 * is >= 2 with CONFIG_SMP and the ipl cpu is logical cpu 0, logical cpu
 * number 1 is always free for this purpose.
 */
#define CPU_INIT_NO	1

#ifdef CONFIG_ZFCPDUMP

/*
 * zfcpdump_prefix_array holds prefix registers for the scenario where a
 * 64 bit zfcpdump kernel dumps a 31 bit kernel. The prefix registers of
 * the dumped kernel have to be saved here, since they are lost when
 * switching from 31 bit to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */

static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}

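/*
 * Fallback detection: probe every possible cpu address with sigp sense
 * and add each stopped cpu that is not yet known.
 */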
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
	return 0;
}

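/*
 * Preferred detection: ask the SCLP for the list of installed cpus,
 * including whether each one is configured or standby.
 */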
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
out:
	kfree(info);
	return rc;
}

static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}

/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * We don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

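	/*
	 * The prefix register takes a 32 bit address (note the (u32) cast
	 * in __cpu_up), so the lowcore is allocated from GFP_DMA memory,
	 * which on s390 lies below 2 GB.
	 */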
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#else
	if (vdso_alloc_per_cpu(cpu, lowcore))
		goto out;
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	return -ENOMEM;
}

static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;

	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#else
	vdso_free_per_cpu(cpu, lowcore);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	lowcore_ptr[cpu] = NULL;
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	u32 lowcore;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;
	do {
		ccode = signal_processor(cpu, sigp_initial_cpu_reset);
		if (ccode == sigp_busy)
			udelay(10);
		if (ccode == sigp_not_operational)
			goto err_out;
	} while (ccode == sigp_busy);

	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
	while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);

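	/* Fill in the new cpu's lowcore: stacks, current task, control regs */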
	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
	eieio();

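	/*
	 * The eieio above makes sure all lowcore stores are visible
	 * before the restart order kicks the new cpu into execution.
	 */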
	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;

err_out:
	smp_free_lowcore(cpu);
	return -EIO;
}

static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

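/* e.g. booting with "possible_cpus=4" limits the possible map to 4 cpus */
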
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 <<  6 | 1 <<  4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu))
		cpu_relax();
	while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);
	smp_free_lowcore(cpu);
	pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
		cpu_relax();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info();

	/* Reallocate current lowcore, but keep its contents. */
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
#ifdef CONFIG_64BIT
	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
		BUG();
#endif
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	__cpu_logical_map[0] = stap();
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
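/*
 * Example (cpu number illustrative): an offline cpu can be deconfigured
 * with
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 * and a standby cpu configured again with
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 * Writes to an online cpu fail with -EBUSY.
 */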
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
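	/*
	 * Lockless read of the per-cpu idle data: the updater makes
	 * ->sequence odd before an update and even afterwards, so retry
	 * while it is odd or has changed underneath us.
	 */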
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_count = idle->idle_count;
	if (idle->idle_enter)
		idle_count++;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	now = get_clock();
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
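/*
 * Any write triggers a rescan for new cpus, e.g.
 *	echo 1 > /sys/devices/system/cpu/rescan
 */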
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);

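/*
 * /sys/devices/system/cpu/dispatching selects horizontal (0) or
 * vertical (1) cpu polarization, e.g.
 *	echo 1 > /sys/devices/system/cpu/dispatching
 */
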
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);