xref: /linux/arch/sh/include/asm/smp.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 #ifndef __ASM_SH_SMP_H
2 #define __ASM_SH_SMP_H
3 
4 #include <linux/bitops.h>
5 #include <linux/cpumask.h>
6 #include <asm/smp-ops.h>
7 
8 #ifdef CONFIG_SMP
9 
10 #include <linux/spinlock.h>
11 #include <linux/atomic.h>
12 #include <asm/current.h>
13 #include <asm/percpu.h>
14 
15 #define raw_smp_processor_id()	(current_thread_info()->cpu)
16 
17 /* Map from cpu id to sequential logical cpu number. */
18 extern int __cpu_number_map[NR_CPUS];
19 #define cpu_number_map(cpu)  __cpu_number_map[cpu]
20 
21 /* The reverse map from sequential logical cpu number to cpu id.  */
22 extern int __cpu_logical_map[NR_CPUS];
23 #define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
24 
/*
 * Inter-processor message types, dispatched by smp_message_recv()
 * (declared below).  SMP_MSG_NR doubles as the number of message
 * types, so new entries must be added before it.
 */
25 enum {
26 	SMP_MSG_FUNCTION,		/* cross-CPU function call */
27 	SMP_MSG_RESCHEDULE,		/* reschedule request */
28 	SMP_MSG_FUNCTION_SINGLE,	/* single-target function call */
29 	SMP_MSG_TIMER,			/* timer tick, see smp_timer_broadcast() */
30 
31 	SMP_MSG_NR,	/* must be last */
32 };
33 
34 DECLARE_PER_CPU(int, cpu_state);
35 
36 void smp_message_recv(unsigned int msg);
37 void smp_timer_broadcast(const struct cpumask *mask);
38 
39 void local_timer_interrupt(void);
40 void local_timer_setup(unsigned int cpu);
41 void local_timer_stop(unsigned int cpu);
42 
43 void arch_send_call_function_single_ipi(int cpu);
44 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
45 
46 void native_play_dead(void);
47 void native_cpu_die(unsigned int cpu);
48 int native_cpu_disable(unsigned int cpu);
49 
50 #ifdef CONFIG_HOTPLUG_CPU
51 void play_dead_common(void);
52 extern int __cpu_disable(void);
53 
/*
 * __cpu_die - hotplug-core hook run after @cpu has been taken offline;
 * delegates the final teardown to the registered platform SMP ops.
 *
 * NOTE(review): mp_ops is dereferenced without a NULL check here
 * (unlike hard_smp_processor_id() below) -- presumably CPU hotplug can
 * only occur after the platform ops are registered; confirm at callers.
 */
54 static inline void __cpu_die(unsigned int cpu)
55 {
56 	extern struct plat_smp_ops *mp_ops;     /* private */
57 
58 	mp_ops->cpu_die(cpu);
59 }
60 #endif
61 
/*
 * hard_smp_processor_id - physical id of the executing CPU, as reported
 * by the platform SMP ops.  Until the ops are registered only the boot
 * CPU can be running, so id 0 is returned in that case.
 */
62 static inline int hard_smp_processor_id(void)
63 {
64 	extern struct plat_smp_ops *mp_ops;	/* private */
65 
66 	return mp_ops ? mp_ops->smp_processor_id() : 0;
67 }
71 
72 #else
73 
74 #define hard_smp_processor_id()	(0)
75 
76 #endif /* CONFIG_SMP */
77 
78 #endif /* __ASM_SH_SMP_H */
79