/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * places the current byte at the effective address into dest_reg and
 * stores 0xff there afterwards.  A pretty lame locking primitive
 * compared to the Alpha and Intel ones, no?  Most Sparcs have the
 * 'swap' instruction, which is much better...
 */
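/*
 * For illustration only: a minimal test-and-set spinlock built on
 * 'ldstub', roughly how the real locks in asm/spinlock_32.h use the
 * primitive.  This is a sketch assuming GCC inline asm and a byte-sized
 * lock word; the names are hypothetical and nothing in this file uses
 * them.
 */
#if 0	/* example only, not built */
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
	unsigned char val;

	do {
		/* Atomically fetch the lock byte and store 0xff in its place. */
		__asm__ __volatile__("ldstub [%1], %0"
				     : "=r" (val)
				     : "r" (lock)
				     : "memory");
		/* 0x00 means we took the lock; 0xff means another CPU holds it. */
	} while (val);
}

static inline void example_ldstub_unlock(volatile unsigned char *lock)
{
	/* A plain byte store of zero releases the lock. */
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
#endif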
void smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       num, bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU-model-dependent way of generating an IPI aimed at a single
	 * CPU.  The trap handler needs only to do trap entry/return for
	 * schedule() to be called on the interrupt-return path.
	 */
	sparc32_ipi_ops->resched(cpu);
}

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU in the mask */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}
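/*
 * For reference, a hedged sketch of what a platform hooks into
 * sparc32_ipi_ops.  The real instances live in the per-model files
 * (sun4m_smp.c, sun4d_smp.c, leon_smp.c); the names below are
 * hypothetical stand-ins, and only the members used in this file are
 * shown.
 */
#if 0	/* example only, not built */
static void example_ipi_resched(int cpu)  { /* raise the resched IPI on cpu */ }
static void example_ipi_single(int cpu)   { /* raise the single-call IPI on cpu */ }
static void example_ipi_mask_one(int cpu) { /* raise the mask-call IPI on cpu */ }

static const struct sparc32_ipi_ops example_ipi_ops = {
	.resched  = example_ipi_resched,
	.single   = example_ipi_single,
	.mask_one = example_ipi_mask_one,
};

/* A platform's boot code would then set: sparc32_ipi_ops = &example_ipi_ops; */
#endif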
void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	extern int smp4m_boot_one_cpu(int, struct task_struct *);
	extern int smp4d_boot_one_cpu(int, struct task_struct *);
	int ret = 0;

	switch(sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

void arch_cpu_pre_starting(void *arg)
{
	local_ops->cache_all();
	local_ops->tlb_all();

	switch(sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_starting(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_starting(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_starting(arg);
		break;
	default:
		BUG();
	}
}

void arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	switch(sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		BUG();
	}
}

void sparc_start_secondary(void *arg)
{
	unsigned int cpu;

	/*
	 * SMP booting is extremely fragile on some architectures, so run
	 * the cpu initialization code before anything else.
	 */
	arch_cpu_pre_starting(arg);

	preempt_disable();
	cpu = smp_processor_id();

	/* Invoke the CPU_STARTING notifier callbacks */
	notify_cpu_starting(cpu);

	arch_cpu_pre_online(arg);

	/* Set the CPU in the cpu_online_mask */
	set_cpu_online(cpu, true);

	/* Enable local interrupts now */
	local_irq_enable();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);

	/* We should never reach here! */
	BUG();
}

void smp_callin(void)
{
	sparc_start_secondary(NULL);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}
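/*
 * A note on the BogoMIPS arithmetic used in smp_cpus_done() and
 * smp_bogo() above: udelay_val is loops_per_jiffy, and
 * BogoMIPS = loops_per_jiffy * HZ / 500000.  The two integer divisions
 * print that value to two decimal places without floating point.
 * For example (illustrative values), with HZ == 100 and
 * udelay_val == 1248000:
 *
 *   1248000 / (500000/100)        = 249  (integer part)
 *   (1248000 / (5000/100)) % 100  = 60   (two fractional digits)
 *
 * which is reported as "249.60".
 */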