1 /* 2 * This program is free software; you can redistribute it and/or 3 * modify it under the terms of the GNU General Public License 4 * as published by the Free Software Foundation; either version 2 5 * of the License, or (at your option) any later version. 6 * 7 * This program is distributed in the hope that it will be useful, 8 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * GNU General Public License for more details. 11 * 12 * You should have received a copy of the GNU General Public License 13 * along with this program; if not, write to the Free Software 14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 15 * 16 * Copyright (C) 2000, 2001 Kanoj Sarcar 17 * Copyright (C) 2000, 2001 Ralf Baechle 18 * Copyright (C) 2000, 2001 Silicon Graphics, Inc. 19 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation 20 */ 21 #include <linux/cache.h> 22 #include <linux/delay.h> 23 #include <linux/init.h> 24 #include <linux/interrupt.h> 25 #include <linux/spinlock.h> 26 #include <linux/threads.h> 27 #include <linux/module.h> 28 #include <linux/time.h> 29 #include <linux/timex.h> 30 #include <linux/sched.h> 31 #include <linux/cpumask.h> 32 #include <linux/cpu.h> 33 #include <linux/err.h> 34 35 #include <asm/atomic.h> 36 #include <asm/cpu.h> 37 #include <asm/processor.h> 38 #include <asm/system.h> 39 #include <asm/mmu_context.h> 40 #include <asm/smp.h> 41 42 #ifdef CONFIG_MIPS_MT_SMTC 43 #include <asm/mipsmtregs.h> 44 #endif /* CONFIG_MIPS_MT_SMTC */ 45 46 cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ 47 volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 48 cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ 49 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 50 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 51 52 EXPORT_SYMBOL(phys_cpu_present_map); 53 EXPORT_SYMBOL(cpu_online_map); 54 55 extern void __init calibrate_delay(void); 56 extern void cpu_idle(void); 57 58 /* 59 * First C code run on the secondary CPUs after being started up by 60 * the master. 61 */ 62 asmlinkage __cpuinit void start_secondary(void) 63 { 64 unsigned int cpu; 65 66 #ifdef CONFIG_MIPS_MT_SMTC 67 /* Only do cpu_probe for first TC of CPU */ 68 if ((read_c0_tcbind() & TCBIND_CURTC) == 0) 69 #endif /* CONFIG_MIPS_MT_SMTC */ 70 cpu_probe(); 71 cpu_report(); 72 per_cpu_trap_init(); 73 prom_init_secondary(); 74 75 /* 76 * XXX parity protection should be folded in here when it's converted 77 * to an option instead of something based on .cputype 78 */ 79 80 calibrate_delay(); 81 preempt_disable(); 82 cpu = smp_processor_id(); 83 cpu_data[cpu].udelay_val = loops_per_jiffy; 84 85 prom_smp_finish(); 86 87 cpu_set(cpu, cpu_callin_map); 88 89 cpu_idle(); 90 } 91 92 DEFINE_SPINLOCK(smp_call_lock); 93 94 struct call_data_struct *call_data; 95 96 /* 97 * Run a function on all other CPUs. 98 * <func> The function to run. This must be fast and non-blocking. 99 * <info> An arbitrary pointer to pass to the function. 100 * <retry> If true, keep retrying until ready. 101 * <wait> If true, wait until function has completed on other CPUs. 102 * [RETURNS] 0 on success, else a negative status code. 103 * 104 * Does not return until remote CPUs are nearly ready to execute <func> 105 * or are or have executed. 106 * 107 * You must not call this function with disabled interrupts or from a 108 * hardware interrupt handler or from a bottom half handler: 109 * 110 * CPU A CPU B 111 * Disable interrupts 112 * smp_call_function() 113 * Take call_lock 114 * Send IPIs 115 * Wait for all cpus to acknowledge IPI 116 * CPU A has not responded, spin waiting 117 * for cpu A to respond, holding call_lock 118 * smp_call_function() 119 * Spin waiting for call_lock 120 * Deadlock Deadlock 121 */ 122 int smp_call_function (void (*func) (void *info), void *info, int retry, 123 int wait) 124 { 125 struct call_data_struct data; 126 int i, cpus = num_online_cpus() - 1; 127 int cpu = smp_processor_id(); 128 129 /* 130 * Can die spectacularly if this CPU isn't yet marked online 131 */ 132 BUG_ON(!cpu_online(cpu)); 133 134 if (!cpus) 135 return 0; 136 137 /* Can deadlock when called with interrupts disabled */ 138 WARN_ON(irqs_disabled()); 139 140 data.func = func; 141 data.info = info; 142 atomic_set(&data.started, 0); 143 data.wait = wait; 144 if (wait) 145 atomic_set(&data.finished, 0); 146 147 spin_lock(&smp_call_lock); 148 call_data = &data; 149 smp_mb(); 150 151 /* Send a message to all other CPUs and wait for them to respond */ 152 for_each_online_cpu(i) 153 if (i != cpu) 154 core_send_ipi(i, SMP_CALL_FUNCTION); 155 156 /* Wait for response */ 157 /* FIXME: lock-up detection, backtrace on lock-up */ 158 while (atomic_read(&data.started) != cpus) 159 barrier(); 160 161 if (wait) 162 while (atomic_read(&data.finished) != cpus) 163 barrier(); 164 call_data = NULL; 165 spin_unlock(&smp_call_lock); 166 167 return 0; 168 } 169 170 171 void smp_call_function_interrupt(void) 172 { 173 void (*func) (void *info) = call_data->func; 174 void *info = call_data->info; 175 int wait = call_data->wait; 176 177 /* 178 * Notify initiating CPU that I've grabbed the data and am 179 * about to execute the function. 180 */ 181 smp_mb(); 182 atomic_inc(&call_data->started); 183 184 /* 185 * At this point the info structure may be out of scope unless wait==1. 186 */ 187 irq_enter(); 188 (*func)(info); 189 irq_exit(); 190 191 if (wait) { 192 smp_mb(); 193 atomic_inc(&call_data->finished); 194 } 195 } 196 197 int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 198 int retry, int wait) 199 { 200 struct call_data_struct data; 201 int me; 202 203 /* 204 * Can die spectacularly if this CPU isn't yet marked online 205 */ 206 if (!cpu_online(cpu)) 207 return 0; 208 209 me = get_cpu(); 210 BUG_ON(!cpu_online(me)); 211 212 if (cpu == me) { 213 local_irq_disable(); 214 func(info); 215 local_irq_enable(); 216 put_cpu(); 217 return 0; 218 } 219 220 /* Can deadlock when called with interrupts disabled */ 221 WARN_ON(irqs_disabled()); 222 223 data.func = func; 224 data.info = info; 225 atomic_set(&data.started, 0); 226 data.wait = wait; 227 if (wait) 228 atomic_set(&data.finished, 0); 229 230 spin_lock(&smp_call_lock); 231 call_data = &data; 232 smp_mb(); 233 234 /* Send a message to the other CPU */ 235 core_send_ipi(cpu, SMP_CALL_FUNCTION); 236 237 /* Wait for response */ 238 /* FIXME: lock-up detection, backtrace on lock-up */ 239 while (atomic_read(&data.started) != 1) 240 barrier(); 241 242 if (wait) 243 while (atomic_read(&data.finished) != 1) 244 barrier(); 245 call_data = NULL; 246 spin_unlock(&smp_call_lock); 247 248 put_cpu(); 249 return 0; 250 } 251 252 static void stop_this_cpu(void *dummy) 253 { 254 /* 255 * Remove this CPU: 256 */ 257 cpu_clear(smp_processor_id(), cpu_online_map); 258 local_irq_enable(); /* May need to service _machine_restart IPI */ 259 for (;;); /* Wait if available. */ 260 } 261 262 void smp_send_stop(void) 263 { 264 smp_call_function(stop_this_cpu, NULL, 1, 0); 265 } 266 267 void __init smp_cpus_done(unsigned int max_cpus) 268 { 269 prom_cpus_done(); 270 } 271 272 /* called from main before smp_init() */ 273 void __init smp_prepare_cpus(unsigned int max_cpus) 274 { 275 init_new_context(current, &init_mm); 276 current_thread_info()->cpu = 0; 277 plat_prepare_cpus(max_cpus); 278 #ifndef CONFIG_HOTPLUG_CPU 279 cpu_present_map = cpu_possible_map; 280 #endif 281 } 282 283 /* preload SMP state for boot cpu */ 284 void __devinit smp_prepare_boot_cpu(void) 285 { 286 /* 287 * This assumes that bootup is always handled by the processor 288 * with the logic and physical number 0. 289 */ 290 __cpu_number_map[0] = 0; 291 __cpu_logical_map[0] = 0; 292 cpu_set(0, phys_cpu_present_map); 293 cpu_set(0, cpu_online_map); 294 cpu_set(0, cpu_callin_map); 295 } 296 297 /* 298 * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu 299 * and keep control until "cpu_online(cpu)" is set. Note: cpu is 300 * physical, not logical. 301 */ 302 int __cpuinit __cpu_up(unsigned int cpu) 303 { 304 struct task_struct *idle; 305 306 /* 307 * Processor goes to start_secondary(), sets online flag 308 * The following code is purely to make sure 309 * Linux can schedule processes on this slave. 310 */ 311 idle = fork_idle(cpu); 312 if (IS_ERR(idle)) 313 panic(KERN_ERR "Fork failed for CPU %d", cpu); 314 315 prom_boot_secondary(cpu, idle); 316 317 /* 318 * Trust is futile. We should really have timeouts ... 319 */ 320 while (!cpu_isset(cpu, cpu_callin_map)) 321 udelay(100); 322 323 cpu_set(cpu, cpu_online_map); 324 325 return 0; 326 } 327 328 /* Not really SMP stuff ... */ 329 int setup_profiling_timer(unsigned int multiplier) 330 { 331 return 0; 332 } 333 334 static void flush_tlb_all_ipi(void *info) 335 { 336 local_flush_tlb_all(); 337 } 338 339 void flush_tlb_all(void) 340 { 341 on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1); 342 } 343 344 static void flush_tlb_mm_ipi(void *mm) 345 { 346 local_flush_tlb_mm((struct mm_struct *)mm); 347 } 348 349 /* 350 * Special Variant of smp_call_function for use by TLB functions: 351 * 352 * o No return value 353 * o collapses to normal function call on UP kernels 354 * o collapses to normal function call on systems with a single shared 355 * primary cache. 356 * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. 357 */ 358 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) 359 { 360 #ifndef CONFIG_MIPS_MT_SMTC 361 smp_call_function(func, info, 1, 1); 362 #endif 363 } 364 365 static inline void smp_on_each_tlb(void (*func) (void *info), void *info) 366 { 367 preempt_disable(); 368 369 smp_on_other_tlbs(func, info); 370 func(info); 371 372 preempt_enable(); 373 } 374 375 /* 376 * The following tlb flush calls are invoked when old translations are 377 * being torn down, or pte attributes are changing. For single threaded 378 * address spaces, a new context is obtained on the current cpu, and tlb 379 * context on other cpus are invalidated to force a new context allocation 380 * at switch_mm time, should the mm ever be used on other cpus. For 381 * multithreaded address spaces, intercpu interrupts have to be sent. 382 * Another case where intercpu interrupts are required is when the target 383 * mm might be active on another cpu (eg debuggers doing the flushes on 384 * behalf of debugees, kswapd stealing pages from another process etc). 385 * Kanoj 07/00. 386 */ 387 388 void flush_tlb_mm(struct mm_struct *mm) 389 { 390 preempt_disable(); 391 392 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 393 smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); 394 } else { 395 int i; 396 for (i = 0; i < num_online_cpus(); i++) 397 if (smp_processor_id() != i) 398 cpu_context(i, mm) = 0; 399 } 400 local_flush_tlb_mm(mm); 401 402 preempt_enable(); 403 } 404 405 struct flush_tlb_data { 406 struct vm_area_struct *vma; 407 unsigned long addr1; 408 unsigned long addr2; 409 }; 410 411 static void flush_tlb_range_ipi(void *info) 412 { 413 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 414 415 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); 416 } 417 418 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) 419 { 420 struct mm_struct *mm = vma->vm_mm; 421 422 preempt_disable(); 423 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 424 struct flush_tlb_data fd; 425 426 fd.vma = vma; 427 fd.addr1 = start; 428 fd.addr2 = end; 429 smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd); 430 } else { 431 int i; 432 for (i = 0; i < num_online_cpus(); i++) 433 if (smp_processor_id() != i) 434 cpu_context(i, mm) = 0; 435 } 436 local_flush_tlb_range(vma, start, end); 437 preempt_enable(); 438 } 439 440 static void flush_tlb_kernel_range_ipi(void *info) 441 { 442 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 443 444 local_flush_tlb_kernel_range(fd->addr1, fd->addr2); 445 } 446 447 void flush_tlb_kernel_range(unsigned long start, unsigned long end) 448 { 449 struct flush_tlb_data fd; 450 451 fd.addr1 = start; 452 fd.addr2 = end; 453 on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1); 454 } 455 456 static void flush_tlb_page_ipi(void *info) 457 { 458 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 459 460 local_flush_tlb_page(fd->vma, fd->addr1); 461 } 462 463 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 464 { 465 preempt_disable(); 466 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { 467 struct flush_tlb_data fd; 468 469 fd.vma = vma; 470 fd.addr1 = page; 471 smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd); 472 } else { 473 int i; 474 for (i = 0; i < num_online_cpus(); i++) 475 if (smp_processor_id() != i) 476 cpu_context(i, vma->vm_mm) = 0; 477 } 478 local_flush_tlb_page(vma, page); 479 preempt_enable(); 480 } 481 482 static void flush_tlb_one_ipi(void *info) 483 { 484 unsigned long vaddr = (unsigned long) info; 485 486 local_flush_tlb_one(vaddr); 487 } 488 489 void flush_tlb_one(unsigned long vaddr) 490 { 491 smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); 492 } 493 494 EXPORT_SYMBOL(flush_tlb_page); 495 EXPORT_SYMBOL(flush_tlb_one); 496