#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/percpu.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
#include <asm/pda.h>
#include <asm/thread_info.h>

/*
 * CPU bringup bookkeeping masks (callin/callout/initialized/sibling-setup).
 *
 * On 64-bit these are dynamically allocated cpumask_var_t objects; on
 * 32-bit they are plain cpumask_t bitmaps, with *_mask macros provided so
 * common code can use the struct cpumask * based interface on both.
 */
#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)

#endif /* CONFIG_X86_32 */

extern void (*mtrr_hook)(void);
extern void zap_low_mappings(void);

extern int __cpuinit get_local_pda(int cpu);

extern int smp_num_siblings;
extern unsigned int num_processors;

/* Per-CPU topology maps plus the last-level-cache id for each CPU. */
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(int, cpu_number);
#endif

/* Return a pointer to @cpu's sibling map (usable as struct cpumask *). */
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return &per_cpu(cpu_sibling_map, cpu);
}

/* Return a pointer to @cpu's core map (usable as struct cpumask *). */
static inline struct cpumask *cpu_core_mask(int cpu)
{
	return &per_cpu(cpu_core_map, cpu);
}

/* Early (pre-percpu-setup) CPU-number <-> APIC-id translation tables. */
DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);

/* Static state in head.S used to set up a CPU */
extern struct {
	void *sp;
	unsigned short ss;
} stack_start;

/*
 * Table of SMP primitives. Kept as indirect calls so that alternative
 * (e.g. paravirtualized) implementations can replace the native_* ones.
 */
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*smp_send_stop)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
#ifndef CONFIG_PARAVIRT
/* Without paravirt there is nothing to hook at secondary-CPU startup. */
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
extern struct smp_ops smp_ops;

/*
 * Thin wrappers: the generic kernel entry points simply dispatch through
 * whatever implementation is currently installed in smp_ops.
 */
static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

/* NOTE(review): mask is passed by value here; the op takes a pointer. */
static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(&mask);
}

/* Native (bare-metal) implementations backing the default smp_ops. */
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);

void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);

extern void prefill_possible_map(void);

void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)

/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
{
	return cpumask_weight(cpu_callout_mask);
}
#else
/* UP build: nothing to prefill. */
static inline void prefill_possible_map(void)
{
}
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus __cpuinitdata;

#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id()	read_pda(cpunumber)

/*
 * Derive the CPU number from the stack pointer: mask %rsp with
 * CURRENT_MASK to locate the current thread_info, then read its ->cpu.
 */
#define stack_smp_processor_id()					\
({									\
	struct thread_info *ti;						\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;							\
})
#define safe_smp_processor_id()	smp_processor_id()

#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
/* UP build: there is only the boot CPU. */
#define cpu_physical_id(cpu)	boot_cpu_physical_apicid
#define safe_smp_processor_id()	0
#define stack_smp_processor_id() 0
#endif

#ifdef CONFIG_X86_LOCAL_APIC

#ifndef CONFIG_X86_64
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}

#include <mach_apicdef.h>
/* Read this CPU's APIC id from the memory-mapped APIC_ID register. */
static inline unsigned int read_apic_id(void)
{
	unsigned int reg;

	reg = *(u32 *)(APIC_BASE + APIC_ID);

	return GET_APIC_ID(reg);
}
#endif


# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
# else
#include <mach_apicdef.h>
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return read_apic_id();
}
# endif /* APIC_DEFINITION */

#else /* CONFIG_X86_LOCAL_APIC */

# ifndef CONFIG_SMP
# define hard_smp_processor_id()	0
# endif

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
extern unsigned char boot_cpu_id;
#else
#define boot_cpu_id	0
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */