// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
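	/* The idle thread never has a user mm of its own; mmgrab() pins
	 * init_mm so it can safely be used as this cpu's active mm.
	 */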
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

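			/* Until we converge, nudge %tick toward the master
			 * by the negated measured offset plus a damped
			 * (1/4) share of the accumulated adjustment latency.
			 */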
			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

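/* Boot one cpu: start it via the LDOM hypervisor or OBP as appropriate,
 * then busy-wait for it to announce itself by setting callin_flag in
 * smp_callin().
 */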
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

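/* Three xcall delivery backends are provided: Spitfire (above), Cheetah,
 * and the sun4v hypervisor.  smp_setup_processor_id() points
 * xcall_deliver_impl at the right one for the running chip.
 */
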
/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

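			/* In the dispatch status word each target owns a
			 * pair of bits: bit 2n is BUSY and bit 2n+1 is NACK,
			 * where n is the ITID on JBUS parts and the dispatch
			 * slot (nack_busy_id) otherwise.
			 */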
			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define	MONDO_USEC_WAIT_MIN		2
#define	MONDO_USEC_WAIT_MAX		100
#define	MONDO_RETRY_LIMIT		500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until every cpu has received it, or until cpus are truly stuck
 * unable to receive the mondo and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the hypervisor
 * to perform guest services such as PCIe error handling.  Considering
 * that service time, an overall wait of 1 second is reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined: 2 usec for a
 * single cpu quick turn around, and up to 100 usec for a large cpu count.
 * Delivering mondos to a large number of cpus can take longer, so we
 * keep adjusting the retry count as long as the target cpus are making
 * forward progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If not these non-fatal errors, panic */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			     (status != HV_ECPUERROR) &&
			     (status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus that remain to be retried
		 * at the front - this simplifies tracking the truly stalled
		 * cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retrying usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in the error
		 * state; it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus does not belong to
		 * the domain, perhaps because it was offlined, which is
		 * unexpected but not fatal; it's okay to skip the offlined
		 * cpu.
		 */
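		/* Offending cpus are remembered as id + 1 so that zero can
		 * mean "none seen"; the id is recovered when reporting
		 * below.
		 */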
699 */ 700 rem = 0; 701 n_sent = 0; 702 for (i = 0; i < cnt; i++) { 703 cpu = cpu_list[i]; 704 if (likely(cpu == 0xffff)) { 705 n_sent++; 706 } else if ((status == HV_ECPUERROR) && 707 (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { 708 ecpuerror_id = cpu + 1; 709 } else if (status == HV_ENOCPU && !cpu_online(cpu)) { 710 enocpu_id = cpu + 1; 711 } else { 712 cpu_list[rem++] = cpu; 713 } 714 } 715 716 /* No cpu remained, we're done. */ 717 if (rem == 0) 718 break; 719 720 /* Otherwise, update the cpu count for retry. */ 721 cnt = rem; 722 723 /* Record the overall number of mondos received by the 724 * first of the remaining cpus. 725 */ 726 if (first_cpu != cpu_list[0]) { 727 first_cpu = cpu_list[0]; 728 xc_rcvd = CPU_MONDO_COUNTER(first_cpu); 729 } 730 731 /* Was any mondo delivered successfully? */ 732 mondo_delivered = (n_sent > prev_sent); 733 prev_sent = n_sent; 734 735 /* or, was any target cpu busy processing other mondos? */ 736 target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu)); 737 xc_rcvd = CPU_MONDO_COUNTER(first_cpu); 738 739 /* Retry count is for no progress. If we're making progress, 740 * reset the retry count. 741 */ 742 if (likely(mondo_delivered || target_cpu_busy)) { 743 tot_retries += retries; 744 retries = 0; 745 } else if (unlikely(retries > MONDO_RETRY_LIMIT)) { 746 goto fatal_mondo_timeout; 747 } 748 749 /* Delay a little bit to let other cpus catch up on 750 * their cpu mondo queue work. 751 */ 752 if (!mondo_delivered) 753 udelay(usec_wait); 754 755 retries++; 756 } while (1); 757 758 xcall_done: 759 if (unlikely(ecpuerror_id > 0)) { 760 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", 761 this_cpu, ecpuerror_id - 1); 762 } else if (unlikely(enocpu_id > 0)) { 763 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", 764 this_cpu, enocpu_id - 1); 765 } 766 return; 767 768 fatal_errors: 769 /* fatal errors include bad alignment, etc */ 770 pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", 771 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); 772 panic("Unexpected SUN4V mondo error %lu\n", status); 773 774 fatal_mondo_timeout: 775 /* some cpus being non-responsive to the cpu mondo */ 776 pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n", 777 this_cpu, first_cpu, (tot_retries + retries), tot_cpus); 778 panic("SUN4V mondo timeout panic\n"); 779 } 780 781 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); 782 783 static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask) 784 { 785 struct trap_per_cpu *tb; 786 int this_cpu, i, cnt; 787 unsigned long flags; 788 u16 *cpu_list; 789 u64 *mondo; 790 791 /* We have to do this whole thing with interrupts fully disabled. 792 * Otherwise if we send an xcall from interrupt context it will 793 * corrupt both our mondo block and cpu list state. 794 * 795 * One consequence of this is that we cannot use timeout mechanisms 796 * that depend upon interrupts being delivered locally. So, for 797 * example, we cannot sample jiffies and expect it to advance. 798 * 799 * Fortunately, udelay() uses %stick/%tick so we can use that. 
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in 'mask'
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

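/* Make every cpu that currently has this mm loaded reload its TSB
 * state, typically after the mm's TSB has been resized or replaced.
 */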
894 */ 895 if (tp->pgd_paddr == __pa(mm->pgd)) 896 tsb_context_switch(mm); 897 } 898 899 void smp_tsb_sync(struct mm_struct *mm) 900 { 901 smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); 902 } 903 904 extern unsigned long xcall_flush_tlb_mm; 905 extern unsigned long xcall_flush_tlb_page; 906 extern unsigned long xcall_flush_tlb_kernel_range; 907 extern unsigned long xcall_fetch_glob_regs; 908 extern unsigned long xcall_fetch_glob_pmu; 909 extern unsigned long xcall_fetch_glob_pmu_n4; 910 extern unsigned long xcall_receive_signal; 911 extern unsigned long xcall_new_mmu_context_version; 912 #ifdef CONFIG_KGDB 913 extern unsigned long xcall_kgdb_capture; 914 #endif 915 916 #ifdef DCACHE_ALIASING_POSSIBLE 917 extern unsigned long xcall_flush_dcache_page_cheetah; 918 #endif 919 extern unsigned long xcall_flush_dcache_page_spitfire; 920 921 static inline void __local_flush_dcache_folio(struct folio *folio) 922 { 923 unsigned int i, nr = folio_nr_pages(folio); 924 925 #ifdef DCACHE_ALIASING_POSSIBLE 926 for (i = 0; i < nr; i++) 927 __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE, 928 ((tlb_type == spitfire) && 929 folio_flush_mapping(folio) != NULL)); 930 #else 931 if (folio_flush_mapping(folio) != NULL && 932 tlb_type == spitfire) { 933 unsigned long pfn = folio_pfn(folio) 934 for (i = 0; i < nr; i++) 935 __flush_icache_page((pfn + i) * PAGE_SIZE); 936 } 937 #endif 938 } 939 940 void smp_flush_dcache_folio_impl(struct folio *folio, int cpu) 941 { 942 int this_cpu; 943 944 if (tlb_type == hypervisor) 945 return; 946 947 #ifdef CONFIG_DEBUG_DCFLUSH 948 atomic_inc(&dcpage_flushes); 949 #endif 950 951 this_cpu = get_cpu(); 952 953 if (cpu == this_cpu) { 954 __local_flush_dcache_folio(folio); 955 } else if (cpu_online(cpu)) { 956 void *pg_addr = folio_address(folio); 957 u64 data0 = 0; 958 959 if (tlb_type == spitfire) { 960 data0 = ((u64)&xcall_flush_dcache_page_spitfire); 961 if (folio_flush_mapping(folio) != NULL) 962 data0 |= ((u64)1 << 32); 963 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 964 #ifdef DCACHE_ALIASING_POSSIBLE 965 data0 = ((u64)&xcall_flush_dcache_page_cheetah); 966 #endif 967 } 968 if (data0) { 969 unsigned int i, nr = folio_nr_pages(folio); 970 971 for (i = 0; i < nr; i++) { 972 xcall_deliver(data0, __pa(pg_addr), 973 (u64) pg_addr, cpumask_of(cpu)); 974 #ifdef CONFIG_DEBUG_DCFLUSH 975 atomic_inc(&dcpage_flushes_xcall); 976 #endif 977 pg_addr += PAGE_SIZE; 978 } 979 } 980 } 981 982 put_cpu(); 983 } 984 985 void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio) 986 { 987 void *pg_addr; 988 u64 data0; 989 990 if (tlb_type == hypervisor) 991 return; 992 993 preempt_disable(); 994 995 #ifdef CONFIG_DEBUG_DCFLUSH 996 atomic_inc(&dcpage_flushes); 997 #endif 998 data0 = 0; 999 pg_addr = folio_address(folio); 1000 if (tlb_type == spitfire) { 1001 data0 = ((u64)&xcall_flush_dcache_page_spitfire); 1002 if (folio_flush_mapping(folio) != NULL) 1003 data0 |= ((u64)1 << 32); 1004 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 1005 #ifdef DCACHE_ALIASING_POSSIBLE 1006 data0 = ((u64)&xcall_flush_dcache_page_cheetah); 1007 #endif 1008 } 1009 if (data0) { 1010 unsigned int i, nr = folio_nr_pages(folio); 1011 1012 for (i = 0; i < nr; i++) { 1013 xcall_deliver(data0, __pa(pg_addr), 1014 (u64) pg_addr, cpu_online_mask); 1015 #ifdef CONFIG_DEBUG_DCFLUSH 1016 atomic_inc(&dcpage_flushes_xcall); 1017 #endif 1018 pg_addr += PAGE_SIZE; 1019 } 1020 } 1021 __local_flush_dcache_folio(folio); 1022 1023 preempt_enable(); 1024 } 1025 1026 #ifdef 
void kgdb_roundup_cpus(void)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * mm->cpu_vm_mask is a bit mask of which cpus an address
 * space has (potentially) executed on; this is the heuristic
 * we use to limit cross calls.
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;

	get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
			       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_page,
			      context, vaddr, 0,
			      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
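/* smp_capture() herds every other online cpu into the "jailcell" spin
 * loop below and smp_release() lets them out again; this is used to
 * quiesce the machine, typically around PROM calls.
 */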
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
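		/* The wait above only terminates once smp_callin() has
		 * marked the cpu online, so this re-check is purely
		 * defensive.
		 */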
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void send_cpu_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal,
		      0, 0, cpumask_of(cpu));
}

void scheduler_poke(void)
{
	if (!cpu_poke)
		return;

	if (!__this_cpu_read(poke))
		return;

	__this_cpu_write(poke, false);
	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
}

static unsigned long send_cpu_poke(int cpu)
{
	unsigned long hv_err;

	per_cpu(poke, cpu) = true;
	hv_err = sun4v_cpu_poke(cpu);
	if (hv_err != HV_EOK) {
		per_cpu(poke, cpu) = false;
		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
				   __func__, hv_err);
	}

	return hv_err;
}

void arch_smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
		return;
	}

	/* Use cpu poke to resume idle cpu if supported. */
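	/* A poke merely wakes the target out of the hypervisor idle state;
	 * scheduler_poke() on that cpu then sees the per-cpu flag and
	 * raises the reschedule softint locally, avoiding a full mondo IPI.
	 */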
	if (cpu_poke && idle_cpu(cpu)) {
		unsigned long ret;

		ret = send_cpu_poke(cpu);
		if (ret == HV_EOK)
			return;
	}

	/* Use IPI in following cases:
	 * - cpu poke not supported
	 * - cpu not idle
	 * - send_cpu_poke() returns with error
	 */
	send_cpu_ipi(cpu);
}

void smp_init_cpu_poke(void)
{
	unsigned long major;
	unsigned long minor;
	int ret;

	if (tlb_type != hypervisor)
		return;

	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
	if (ret) {
		pr_debug("HV_GRP_CORE is not registered\n");
		return;
	}

	if (major == 1 && minor >= 6) {
		/* CPU POKE is registered. */
		cpu_poke = true;
		return;
	}

	pr_debug("CPU_POKE not supported\n");
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;

			set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc)
			pr_warn("PERCPU: %s allocator failed (%d), "
				"falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}