xref: /linux/arch/s390/include/asm/smp.h (revision 367b8112fe2ea5c39a7bb4d263dcdd9b612fae18)
1 /*
2  *  include/asm-s390/smp.h
3  *
4  *  S390 version
5  *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  *               Heiko Carstens (heiko.carstens@de.ibm.com)
9  */
10 #ifndef __ASM_SMP_H
11 #define __ASM_SMP_H
12 
13 #include <linux/threads.h>
14 #include <linux/cpumask.h>
15 #include <linux/bitops.h>
16 
17 #if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
18 
19 #include <asm/lowcore.h>
20 #include <asm/sigp.h>
21 #include <asm/ptrace.h>
22 #include <asm/system.h>
23 
/*
 * s390-specific SMP declarations (implemented in arch/s390/kernel/smp.c).
 */
/*
 * Result of a SIGP (signal processor) order sent to one CPU.
 * Filled in by the s390 SMP code; the layout and field names are part
 * of the existing interface and must not change.
 */
typedef struct
{
	int        intresting;	/* non-zero if the result is worth acting on
				 * (sic: historical misspelling of
				 * "interesting", kept for compatibility) */
	sigp_ccode ccode;	/* condition code returned by the SIGP order */
	__u32      status;	/* status word stored by the addressed CPU --
				 * presumably only valid for ccode values that
				 * store status; verify against callers */
	__u16      cpu;		/* address of the CPU the order was sent to */
} sigp_info;
34 
35 extern void machine_restart_smp(char *);
36 extern void machine_halt_smp(void);
37 extern void machine_power_off_smp(void);
38 
39 #define NO_PROC_ID		0xFF		/* No processor magic marker */
40 
/*
 * This magic constant controls the scheduler's willingness to migrate
 * a process across CPUs: a migration costs L1 (and possibly L2) cache
 * misses on the new CPU.  The value was inherited from x86, where it
 * was tuned by gut feeling for P5/P6-class boards; it has never been
 * tuned specifically for s390 cache topology.
 */
50 
51 #define PROC_CHANGE_PENALTY	20		/* Schedule penalty */
52 
53 #define raw_smp_processor_id()	(S390_lowcore.cpu_data.cpu_nr)
54 
55 static inline __u16 hard_smp_processor_id(void)
56 {
57 	return stap();
58 }
59 
60 /*
61  * returns 1 if cpu is in stopped/check stopped state or not operational
62  * returns 0 otherwise
63  */
64 static inline int
65 smp_cpu_not_running(int cpu)
66 {
67 	__u32 status;
68 
69 	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
70 	case sigp_order_code_accepted:
71 	case sigp_status_stored:
72 		/* Check for stopped and check stop state */
73 		if (status & 0x50)
74 			return 1;
75 		break;
76 	case sigp_not_operational:
77 		return 1;
78 	default:
79 		break;
80 	}
81 	return 0;
82 }
83 
84 #define cpu_logical_map(cpu) (cpu)
85 
86 extern int __cpu_disable (void);
87 extern void __cpu_die (unsigned int cpu);
88 extern void cpu_die (void) __attribute__ ((noreturn));
89 extern int __cpu_up (unsigned int cpu);
90 
91 extern struct mutex smp_cpu_state_mutex;
92 extern int smp_cpu_polarization[];
93 
94 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
95 	void *info, int wait);
96 #endif
97 
98 #ifndef CONFIG_SMP
static inline void smp_send_stop(void)
{
	/*
	 * Uniprocessor fallback: there are no other CPUs to signal, so
	 * just disable all interrupts and machine checks on this CPU by
	 * loading a PSW mask with the machine-check bit cleared.
	 */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
}
104 
105 #define hard_smp_processor_id()		0
106 #define smp_cpu_not_running(cpu)	1
107 #endif
108 
109 #ifdef CONFIG_HOTPLUG_CPU
110 extern int smp_rescan_cpus(void);
111 #else
112 static inline int smp_rescan_cpus(void) { return 0; }
113 #endif
114 
115 extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
116 #endif
117