// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Author: Andy Fleming <afleming@freescale.com>
 *         Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/hotplug.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

static u64 timebase;
static int tb_req;
static int tb_valid;

static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	while (!tb_req)
		barrier();
	tb_req = 0;

	qoriq_pm_ops->freeze_time_base(true);
#ifdef CONFIG_PPC64
	/*
	 * e5500/e6500 have a workaround for erratum A-006958 in place
	 * that will reread the timebase until TBL is non-zero.
	 * That would be a bad thing when the timebase is frozen.
	 *
	 * Thus, we read it manually, and instead of checking that
	 * TBL is non-zero, we ensure that TB does not change. We don't
	 * do that for the main mftb implementation, because it requires
	 * a scratch register.
	 */
	{
		u64 prev;

		asm volatile("mfspr %0, %1" : "=r" (timebase) :
			     "i" (SPRN_TBRL));

		do {
			prev = timebase;
			asm volatile("mfspr %0, %1" : "=r" (timebase) :
				     "i" (SPRN_TBRL));
		} while (prev != timebase);
	}
#else
	timebase = get_tb();
#endif
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	qoriq_pm_ops->freeze_time_base(false);

	local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_85xx_cpu_offline_self(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	hard_irq_disable();
	/* mask all irqs to prevent cpu wakeup */
	qoriq_pm_ops->irq_mask(cpu);

	idle_task_exit();

	mtspr(SPRN_TCR, 0);
	mtspr(SPRN_TSR, mfspr(SPRN_TSR));

	generic_set_cpu_dead(cpu);

	cur_cpu_spec->cpu_down_flush();

	qoriq_pm_ops->cpu_die(cpu);

	while (1)
		;
}

static void qoriq_cpu_kill(unsigned int cpu)
{
	int i;

	for (i = 0; i < 500; i++) {
		if (is_cpu_dead(cpu)) {
#ifdef CONFIG_PPC64
			paca_ptrs[cpu]->cpu_start = 0;
#endif
			return;
		}
		msleep(20);
	}
	pr_err("CPU%d didn't die...\n", cpu);
}
#endif
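/*
 * Summary of the hotplug flow above: the dying CPU runs
 * smp_85xx_cpu_offline_self() (masks its interrupts, stops its
 * decrementer, marks itself dead, and enters the platform low-power
 * state via qoriq_pm_ops->cpu_die()), while a surviving CPU polls for
 * the dead state in qoriq_cpu_kill() for up to 500 * 20ms = 10 seconds
 * before giving up. The timebase give/take pair earlier uses the same
 * idea: tb_req/tb_valid form a two-variable handshake so the boot CPU
 * samples the frozen timebase only while the new CPU is waiting for it.
 */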
/*
 * To keep it compatible with old boot programs which use a
 * cache-inhibited spin table, we need to flush the cache before
 * accessing the spin table to invalidate any stale data. We also
 * need to flush the cache after writing to the spin table to push
 * data out.
 */
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

#ifdef CONFIG_PPC64
static void wake_hw_thread(void *info)
{
	void fsl_secondary_thread_init(void);
	unsigned long inia;
	int cpu = *(const int *)info;

	inia = ppc_function_entry(fsl_secondary_thread_init);
	book3e_start_thread(cpu_thread_in_core(cpu), inia);
}
#endif

static int smp_85xx_start_cpu(int cpu)
{
	int ret = 0;
	struct device_node *np;
	const u64 *cpu_rel_addr;
	unsigned long flags;
	int ioremappable;
	int hw_cpu = get_hard_smp_processor_id(cpu);
	struct epapr_spin_table __iomem *spin_table;

	np = of_get_cpu_node(cpu, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
	if (!cpu_rel_addr) {
		pr_err("No cpu-release-addr for cpu %d\n", cpu);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_coherent(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
	hard_irq_disable();

	if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
		qoriq_pm_ops->cpu_up_prepare(cpu);

	/* if cpu is not spinning, reset it */
	if (read_spin_table_addr_l(spin_table) != 1) {
		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(cpu);

		/*
		 * wait until core is ready...
		 * We need to invalidate the stale data, in case the boot
		 * loader uses a cache-inhibited spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("timeout waiting for cpu %d to reset\n",
				hw_cpu);
			ret = -EAGAIN;
			goto err;
		}
	}

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
#ifdef CONFIG_PPC64
	out_be64((u64 *)(&spin_table->addr_h),
		__pa(ppc_function_entry(generic_secondary_smp_init)));
#else
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/*
	 * We also need to write addr_h to the spin table for systems
	 * in which the physical memory start address was configured
	 * above 4G, otherwise the secondary core cannot get the
	 * correct entry point to start from.
	 */
	out_be32(&spin_table->addr_h, __pa(__early_start) >> 32);
#endif
	out_be32(&spin_table->addr_l, __pa(__early_start));
#endif
	flush_spin_table(spin_table);
err:
	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
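/*
 * The spin table writes above follow the ePAPR release protocol: a
 * waiting secondary spins until addr_l changes from the hold value
 * of 1, so pir (and addr_h where needed) is written first and the
 * entry point lands in addr_l last (on ppc64 a single 64-bit store
 * covers addr_h and addr_l), with flushes around each access in case
 * the boot program left the table mapped cache-inhibited.
 */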
static int smp_85xx_kick_cpu(int nr)
{
	int ret = 0;
#ifdef CONFIG_PPC64
	int primary = nr;
#endif

	WARN_ON(nr < 0 || nr >= num_possible_cpus());

	pr_debug("kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
	if (threads_per_core == 2) {
		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
			return -ENOENT;

		booting_thread_hwid = cpu_thread_in_core(nr);
		primary = cpu_first_thread_sibling(nr);

		if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
			qoriq_pm_ops->cpu_up_prepare(nr);

		/*
		 * If either thread in the core is online, use it to start
		 * the other.
		 */
		if (cpu_online(primary)) {
			smp_call_function_single(primary,
					wake_hw_thread, &nr, 1);
			goto done;
		} else if (cpu_online(primary + 1)) {
			smp_call_function_single(primary + 1,
					wake_hw_thread, &nr, 1);
			goto done;
		}

		/*
		 * If we get here, both threads in the core are offline.
		 * Start the primary thread, which will then start the
		 * thread specified in booting_thread_hwid, the one
		 * corresponding to nr.
		 */

	} else if (threads_per_core == 1) {
		/*
		 * If each core has only one thread, set booting_thread_hwid
		 * to an invalid value.
		 */
		booting_thread_hwid = INVALID_THREAD_HWID;

	} else if (threads_per_core > 2) {
		pr_err("More than 2 threads per core is not supported\n");
		return -EINVAL;
	}

	ret = smp_85xx_start_cpu(primary);
	if (ret)
		return ret;

done:
	paca_ptrs[nr]->cpu_start = 1;
	generic_set_cpu_up(nr);

	return ret;
#else
	ret = smp_85xx_start_cpu(nr);
	if (ret)
		return ret;

	generic_set_cpu_up(nr);

	return ret;
#endif
}

struct smp_ops_t smp_85xx_ops = {
	.cause_nmi_ipi = NULL,
	.kick_cpu = smp_85xx_kick_cpu,
	.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= generic_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1)
			;
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}
#else
static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	int cpu = smp_processor_id();
	int sibling = cpu_last_thread_sibling(cpu);
	bool notified = false;
	int disable_cpu;
	int disable_threadbit = 0;
	long start = mftb();
	long now;

	local_irq_disable();
	hard_irq_disable();
	mpic_teardown_this_cpu(secondary);

#ifdef CONFIG_CRASH_DUMP
	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
		/*
		 * We enter the crash kernel on whatever cpu crashed,
		 * even if it's a secondary thread. If that's the case,
		 * disable the corresponding primary thread.
		 */
		disable_threadbit = 1;
		disable_cpu = cpu_first_thread_sibling(cpu);
	} else if (sibling == crashing_cpu) {
		return;
	}
#endif
	if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
		disable_threadbit = 2;
		disable_cpu = sibling;
	}

	if (disable_threadbit) {
		while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			now = mftb();
			if (!notified && now - start > 1000000) {
				pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
					__func__, smp_processor_id(),
					disable_cpu,
					paca_ptrs[disable_cpu]->kexec_state);
				notified = true;
			}
		}

		if (notified) {
			pr_info("%s: cpu %d done waiting\n",
				__func__, disable_cpu);
		}

		mtspr(SPRN_TENC, disable_threadbit);
		while (mfspr(SPRN_TENSR) & disable_threadbit)
			cpu_relax();
	}
}
#endif
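/*
 * Kexec shutdown sequencing: on 32-bit, mpc85xx_smp_machine_kexec()
 * below asks every other CPU to run mpc85xx_smp_kexec_cpu_down()
 * (flush caches, bump kexec_down_cpus, then spin), and resets those
 * cores through the MPIC before jumping to the new kernel. On 64-bit,
 * mpc85xx_smp_kexec_cpu_down() instead parks the other hardware
 * thread of the core via the TENC/TENSR registers once it has reached
 * KEXEC_STATE_REAL_MODE.
 */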
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
#ifdef CONFIG_PPC32
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
	       (timeout > 0))
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}
#endif

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC_CORE */

static void smp_85xx_setup_cpu(int cpu_nr)
{
	mpic_setup_this_cpu();
}

void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	} else {
		smp_85xx_ops.setup_cpu = NULL;
	}

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_global_ipi;
		smp_85xx_ops.probe = NULL;
	}

#ifdef CONFIG_FSL_CORENET_RCPM
	/* Assign a value to qoriq_pm_ops on PPC_E500MC */
	fsl_rcpm_init();
#else
	/* Assign a value to qoriq_pm_ops on !PPC_E500MC */
	mpc85xx_setup_pmc();
#endif
	if (qoriq_pm_ops) {
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
		smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self;
		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
#endif
	}
	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC_CORE
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}
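/*
 * Note on usage: mpc85xx_smp_init() is meant to be called from 85xx
 * board setup code during early boot, before secondary CPUs are
 * brought up, so that smp_ops and the kexec hooks above are registered
 * in time for smp_prepare_cpus().
 */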