/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
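
/*
 * Platforms register their SMP ops once during early setup, before
 * smp_prepare_cpus() runs.  A rough sketch (the ops structure and the
 * function names are illustrative only, not a real board):
 *
 *	static struct plat_smp_ops myboard_smp_ops = {
 *		.send_ipi_mask		= myboard_send_ipi_mask,
 *		.init_secondary		= myboard_init_secondary,
 *		.smp_finish		= myboard_smp_finish,
 *		.cpus_done		= myboard_cpus_done,
 *		.boot_secondary		= myboard_boot_secondary,
 *		.prepare_cpus		= myboard_prepare_cpus,
 *	};
 *
 *	register_smp_ops(&myboard_smp_ops);
 *
 * Only the hooks actually used by this file are shown above.
 */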

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

/* smp_call_lock serializes cross-CPU function calls and protects call_data */
DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *
 *  <mask>	cpumask_t of all processors to run the function on.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	void *info, int retry, int wait)
{
	struct call_data_struct data;
	int cpu = smp_processor_id();
	int cpus;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	cpu_clear(cpu, mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}

int smp_call_function(void (*func) (void *info), void *info, int retry,
	int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
}
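
/*
 * Usage sketch (hypothetical caller, for illustration only): run
 * drain_local_foo() on every online CPU and wait for completion.
 * smp_call_function() only reaches the *other* CPUs, so the caller must
 * also invoke the function locally, or simply use on_each_cpu(), which
 * does both:
 *
 *	static void drain_local_foo(void *unused)
 *	{
 *		... fast, non-blocking work; runs from IPI context ...
 *	}
 *
 *	preempt_disable();
 *	smp_call_function(drain_local_foo, NULL, 1, 1);
 *	drain_local_foo(NULL);
 *	preempt_enable();
 */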

void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int retry, int wait)
{
	int ret, me;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	if (!cpu_online(cpu))
		return 0;

	me = get_cpu();
	BUG_ON(!cpu_online(me));

	if (cpu == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
				     wait);

	put_cpu();
	return ret;
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
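
/*
 * Rough summary of the boot handshake implemented above (illustration
 * only; how the secondary actually begins executing is platform
 * specific and hidden behind mp_ops):
 *
 *	boot CPU				secondary CPU
 *	__cpu_up()
 *	  mp_ops->boot_secondary()
 *						start_secondary()
 *						  ...
 *						  cpu_set(cpu, cpu_callin_map)
 *	  sees cpu in cpu_callin_map
 *	  cpu_set(cpu, cpu_online_map)
 *						  cpu_idle()
 */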

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
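
/*
 * Concrete example of the single-threaded fast path above (a summary,
 * not new behaviour): when the current task is the only user of its mm,
 * only the local TLB is flushed; the context (ASID) that mm may still
 * own on other cpus is simply zeroed, so switch_mm() will allocate a
 * fresh one should the mm ever be scheduled there.  No IPIs are needed
 * in that case.
 */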

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);