1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * SMP support for ppc.
4 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6 * deal of code from the sparc and intel versions.
7 *
8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9 *
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12 */
13
14 #undef DEBUG
15
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 #include <linux/pgtable.h>
37 #include <linux/clockchips.h>
38 #include <linux/kexec.h>
39
40 #include <asm/ptrace.h>
41 #include <linux/atomic.h>
42 #include <asm/irq.h>
43 #include <asm/hw_irq.h>
44 #include <asm/kvm_ppc.h>
45 #include <asm/dbell.h>
46 #include <asm/page.h>
47 #include <asm/smp.h>
48 #include <asm/time.h>
49 #include <asm/machdep.h>
50 #include <asm/mmu_context.h>
51 #include <asm/cputhreads.h>
52 #include <asm/cputable.h>
53 #include <asm/mpic.h>
54 #include <asm/vdso_datapage.h>
55 #ifdef CONFIG_PPC64
56 #include <asm/paca.h>
57 #endif
58 #include <asm/vdso.h>
59 #include <asm/debug.h>
60 #include <asm/cpu_has_feature.h>
61 #include <asm/ftrace.h>
62 #include <asm/kup.h>
63 #include <asm/fadump.h>
64
65 #include <trace/events/ipi.h>
66
67 #ifdef DEBUG
68 #include <asm/udbg.h>
69 #define DBG(fmt...) udbg_printf(fmt)
70 #else
71 #define DBG(fmt...)
72 #endif
73
74 #ifdef CONFIG_HOTPLUG_CPU
75 /* State of each CPU during hotplug phases */
76 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
77 #endif
78
79 struct task_struct *secondary_current;
80 bool has_big_cores __ro_after_init;
81 bool coregroup_enabled __ro_after_init;
82 bool thread_group_shares_l2 __ro_after_init;
83 bool thread_group_shares_l3 __ro_after_init;
84
85 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
86 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
87 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
88 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
89 static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
90
91 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
92 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
93 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
94 EXPORT_SYMBOL_GPL(has_big_cores);
95
96 #define MAX_THREAD_LIST_SIZE 8
97 #define THREAD_GROUP_SHARE_L1 1
98 #define THREAD_GROUP_SHARE_L2_L3 2
99 struct thread_groups {
100 unsigned int property;
101 unsigned int nr_groups;
102 unsigned int threads_per_group;
103 unsigned int thread_list[MAX_THREAD_LIST_SIZE];
104 };
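/*
 * For illustration (values taken from the parse_thread_groups() example
 * below): the property array [1,2,4,8,10,12,14,9,11,13,15] is parsed into
 * property = 1, nr_groups = 2, threads_per_group = 4 and
 * thread_list = {8,10,12,14,9,11,13,15}.
 */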
105
106 /* Maximum number of properties that groups of threads within a core can share */
107 #define MAX_THREAD_GROUP_PROPERTIES 2
108
109 struct thread_groups_list {
110 unsigned int nr_properties;
111 struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
112 };
113
114 static struct thread_groups_list tgl[NR_CPUS] __initdata;
115 /*
 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
118 */
119 DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
120
121 /*
 * On some big-core systems, thread_group_l2_cache_map for each CPU
 * corresponds to the set of its siblings within the core that share the
 * L2 cache.
125 */
126 DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
127
128 /*
129 * On P10, thread_group_l3_cache_map for each CPU is equal to the
130 * thread_group_l2_cache_map
131 */
132 DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
133
134 /* SMP operations for this machine */
135 struct smp_ops_t *smp_ops;
136
137 /* Can't be static due to PowerMac hackery */
138 volatile unsigned int cpu_callin_map[NR_CPUS];
139
140 int smt_enabled_at_boot = 1;
141
142 /*
143 * Returns 1 if the specified cpu should be brought up during boot.
144 * Used to inhibit booting threads if they've been disabled or
145 * limited on the command line
146 */
int smp_generic_cpu_bootable(unsigned int nr)
148 {
149 /* Special case - we inhibit secondary thread startup
150 * during boot if the user requests it.
151 */
152 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
153 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
154 return 0;
155 if (smt_enabled_at_boot
156 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
157 return 0;
158 }
159
160 return 1;
161 }
162
163
164 #ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
166 {
167 if (nr < 0 || nr >= nr_cpu_ids)
168 return -EINVAL;
169
170 /*
171 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
173 * the processor will continue on to secondary_start
174 */
175 if (!paca_ptrs[nr]->cpu_start) {
176 paca_ptrs[nr]->cpu_start = 1;
177 smp_mb();
178 return 0;
179 }
180
181 #ifdef CONFIG_HOTPLUG_CPU
182 /*
183 * Ok it's not there, so it might be soft-unplugged, let's
184 * try to bring it back
185 */
186 generic_set_cpu_up(nr);
187 smp_wmb();
188 smp_send_reschedule(nr);
189 #endif /* CONFIG_HOTPLUG_CPU */
190
191 return 0;
192 }
193 #endif /* CONFIG_PPC64 */
194
static irqreturn_t call_function_action(int irq, void *data)
196 {
197 generic_smp_call_function_interrupt();
198 return IRQ_HANDLED;
199 }
200
static irqreturn_t reschedule_action(int irq, void *data)
202 {
203 scheduler_ipi();
204 return IRQ_HANDLED;
205 }
206
207 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
209 {
210 timer_broadcast_interrupt();
211 return IRQ_HANDLED;
212 }
213 #endif
214
215 #ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
217 {
218 smp_handle_nmi_ipi(get_irq_regs());
219 return IRQ_HANDLED;
220 }
221 #endif
222
223 static irq_handler_t smp_ipi_action[] = {
224 [PPC_MSG_CALL_FUNCTION] = call_function_action,
225 [PPC_MSG_RESCHEDULE] = reschedule_action,
226 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
227 [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
228 #endif
229 #ifdef CONFIG_NMI_IPI
230 [PPC_MSG_NMI_IPI] = nmi_ipi_action,
231 #endif
232 };
233
234 /*
235 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
236 * than going through the call function infrastructure, and strongly
237 * serialized, so it is more appropriate for debugging.
238 */
239 const char *smp_ipi_name[] = {
240 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
241 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
242 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
243 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
244 #endif
245 #ifdef CONFIG_NMI_IPI
246 [PPC_MSG_NMI_IPI] = "nmi ipi",
247 #endif
248 };
249
250 /* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
252 {
253 int err;
254
255 if (msg < 0 || msg > PPC_MSG_NMI_IPI)
256 return -EINVAL;
257 #ifndef CONFIG_NMI_IPI
258 if (msg == PPC_MSG_NMI_IPI)
259 return 1;
260 #endif
261
262 err = request_irq(virq, smp_ipi_action[msg],
263 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
264 smp_ipi_name[msg], NULL);
265 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
266 virq, smp_ipi_name[msg], err);
267
268 return err;
269 }
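/*
 * Illustrative only (the virq base is hypothetical): an interrupt controller
 * exposing one hardware IPI per message could register them as
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(ipi_virq_base + msg, msg);
 *
 * Controllers with fewer IPIs use the muxed path below instead.
 */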
270
271 #ifdef CONFIG_PPC_SMP_MUXED_IPI
272 struct cpu_messages {
273 long messages; /* current messages */
274 };
275 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
276
void smp_muxed_ipi_set_message(int cpu, int msg)
278 {
279 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
280 char *message = (char *)&info->messages;
281
282 /*
283 * Order previous accesses before accesses in the IPI handler.
284 */
285 smp_mb();
286 WRITE_ONCE(message[msg], 1);
287 }
288
void smp_muxed_ipi_message_pass(int cpu, int msg)
290 {
291 smp_muxed_ipi_set_message(cpu, msg);
292
293 /*
294 * cause_ipi functions are required to include a full barrier
295 * before doing whatever causes the IPI.
296 */
297 smp_ops->cause_ipi(cpu);
298 }
299
300 #ifdef __BIG_ENDIAN__
301 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
302 #else
303 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
304 #endif
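/*
 * Each message type occupies one byte of cpu_messages::messages:
 * smp_muxed_ipi_set_message() stores 1 into byte 'msg', and IPI_MESSAGE(msg)
 * is the value of the long with only that byte set, for either endianness.
 * E.g. on 64-bit little-endian, IPI_MESSAGE(PPC_MSG_RESCHEDULE) is 1UL << 8.
 * This lets smp_ipi_demux_relaxed() xchg() the whole word and test each
 * pending message with a single mask.
 */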
305
irqreturn_t smp_ipi_demux(void)
307 {
308 mb(); /* order any irq clear */
309
310 return smp_ipi_demux_relaxed();
311 }
312
313 /* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
315 {
316 struct cpu_messages *info;
317 unsigned long all;
318
319 info = this_cpu_ptr(&ipi_message);
320 do {
321 all = xchg(&info->messages, 0);
322 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
323 /*
324 * Must check for PPC_MSG_RM_HOST_ACTION messages
325 * before PPC_MSG_CALL_FUNCTION messages because when
326 * a VM is destroyed, we call kick_all_cpus_sync()
327 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
328 * messages have completed before we free any VCPUs.
329 */
330 if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
331 kvmppc_xics_ipi_action();
332 #endif
333 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
334 generic_smp_call_function_interrupt();
335 if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
336 scheduler_ipi();
337 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
338 if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
339 timer_broadcast_interrupt();
340 #endif
341 #ifdef CONFIG_NMI_IPI
342 if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
343 nmi_ipi_action(0, NULL);
344 #endif
345 } while (READ_ONCE(info->messages));
346
347 return IRQ_HANDLED;
348 }
349 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
350
static inline void do_message_pass(int cpu, int msg)
352 {
353 if (smp_ops->message_pass)
354 smp_ops->message_pass(cpu, msg);
355 #ifdef CONFIG_PPC_SMP_MUXED_IPI
356 else
357 smp_muxed_ipi_message_pass(cpu, msg);
358 #endif
359 }
360
void arch_smp_send_reschedule(int cpu)
362 {
363 if (likely(smp_ops))
364 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
365 }
366 EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
367
void arch_send_call_function_single_ipi(int cpu)
369 {
370 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
371 }
372
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
374 {
375 unsigned int cpu;
376
377 for_each_cpu(cpu, mask)
378 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
379 }
380
381 #ifdef CONFIG_NMI_IPI
382
383 /*
384 * "NMI IPI" system.
385 *
386 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
387 * a running system. They can be used for crash, debug, halt/reboot, etc.
388 *
389 * The IPI call waits with interrupts disabled until all targets enter the
390 * NMI handler, then returns. Subsequent IPIs can be issued before targets
391 * have returned from their handlers, so there is no guarantee about
392 * concurrency or re-entrancy.
393 *
394 * A new NMI can be issued before all targets exit the handler.
395 *
396 * The IPI call may time out without all targets entering the NMI handler.
397 * In that case, there is some logic to recover (and ignore subsequent
398 * NMI interrupts that may eventually be raised), but the platform interrupt
399 * handler may not be able to distinguish this from other exception causes,
400 * which may cause a crash.
401 */
402
403 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
404 static struct cpumask nmi_ipi_pending_mask;
405 static bool nmi_ipi_busy = false;
406 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
407
noinstr static void nmi_ipi_lock_start(unsigned long *flags)
409 {
410 raw_local_irq_save(*flags);
411 hard_irq_disable();
412 while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
413 raw_local_irq_restore(*flags);
414 spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
415 raw_local_irq_save(*flags);
416 hard_irq_disable();
417 }
418 }
419
noinstr static void nmi_ipi_lock(void)
421 {
422 while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
423 spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
424 }
425
noinstr static void nmi_ipi_unlock(void)
427 {
428 smp_mb();
429 WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
430 raw_atomic_set(&__nmi_ipi_lock, 0);
431 }
432
noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
434 {
435 nmi_ipi_unlock();
436 raw_local_irq_restore(*flags);
437 }
438
439 /*
440 * Platform NMI handler calls this to ack
441 */
noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
443 {
444 void (*fn)(struct pt_regs *) = NULL;
445 unsigned long flags;
446 int me = raw_smp_processor_id();
447 int ret = 0;
448
449 /*
450 * Unexpected NMIs are possible here because the interrupt may not
451 * be able to distinguish NMI IPIs from other types of NMIs, or
452 * because the caller may have timed out.
453 */
454 nmi_ipi_lock_start(&flags);
455 if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
456 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
457 fn = READ_ONCE(nmi_ipi_function);
458 WARN_ON_ONCE(!fn);
459 ret = 1;
460 }
461 nmi_ipi_unlock_end(&flags);
462
463 if (fn)
464 fn(regs);
465
466 return ret;
467 }
468
static void do_smp_send_nmi_ipi(int cpu, bool safe)
470 {
471 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
472 return;
473
474 if (cpu >= 0) {
475 do_message_pass(cpu, PPC_MSG_NMI_IPI);
476 } else {
477 int c;
478
479 for_each_online_cpu(c) {
480 if (c == raw_smp_processor_id())
481 continue;
482 do_message_pass(c, PPC_MSG_NMI_IPI);
483 }
484 }
485 }
486
487 /*
488 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
489 * - fn is the target callback function.
490 * - delay_us > 0 is the delay before giving up waiting for targets to
491 * begin executing the handler, == 0 specifies indefinite delay.
492 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
495 {
496 unsigned long flags;
497 int me = raw_smp_processor_id();
498 int ret = 1;
499
500 BUG_ON(cpu == me);
501 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
502
503 if (unlikely(!smp_ops))
504 return 0;
505
506 nmi_ipi_lock_start(&flags);
507 while (nmi_ipi_busy) {
508 nmi_ipi_unlock_end(&flags);
509 spin_until_cond(!nmi_ipi_busy);
510 nmi_ipi_lock_start(&flags);
511 }
512 nmi_ipi_busy = true;
513 nmi_ipi_function = fn;
514
515 WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
516
517 if (cpu < 0) {
518 /* ALL_OTHERS */
519 cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
520 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
521 } else {
522 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
523 }
524
525 nmi_ipi_unlock();
526
527 /* Interrupts remain hard disabled */
528
529 do_smp_send_nmi_ipi(cpu, safe);
530
531 nmi_ipi_lock();
532 /* nmi_ipi_busy is set here, so unlock/lock is okay */
533 while (!cpumask_empty(&nmi_ipi_pending_mask)) {
534 nmi_ipi_unlock();
535 udelay(1);
536 nmi_ipi_lock();
537 if (delay_us) {
538 delay_us--;
539 if (!delay_us)
540 break;
541 }
542 }
543
544 if (!cpumask_empty(&nmi_ipi_pending_mask)) {
545 /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
546 ret = 0;
547 cpumask_clear(&nmi_ipi_pending_mask);
548 }
549
550 nmi_ipi_function = NULL;
551 nmi_ipi_busy = false;
552
553 nmi_ipi_unlock_end(&flags);
554
555 return ret;
556 }
557
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
559 {
560 return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
561 }
562
int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
564 {
565 return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
566 }
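/*
 * For example, smp_send_debugger_break() below calls
 * smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000),
 * giving all other CPUs up to roughly one second (1000000 us) to enter the
 * NMI handler before the call gives up.
 */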
567 #endif /* CONFIG_NMI_IPI */
568
569 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
571 {
572 unsigned int cpu;
573
574 for_each_cpu(cpu, mask)
575 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
576 }
577 #endif
578
579 #ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
581 {
582 debugger_ipi(regs);
583 }
584
void smp_send_debugger_break(void)
586 {
587 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
588 }
589 #endif
590
591 #ifdef CONFIG_CRASH_DUMP
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
593 {
594 int cpu;
595
596 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
597 if (kdump_in_progress() && crash_wake_offline) {
598 for_each_present_cpu(cpu) {
599 if (cpu_online(cpu))
600 continue;
601 /*
602 * crash_ipi_callback will wait for
603 * all cpus, including offline CPUs.
604 * We don't care about nmi_ipi_function.
605 * Offline cpus will jump straight into
606 * crash_ipi_callback, we can skip the
607 * entire NMI dance and waiting for
608 * cpus to clear pending mask, etc.
609 */
610 do_smp_send_nmi_ipi(cpu, false);
611 }
612 }
613 }
614 #endif
615
void crash_smp_send_stop(void)
617 {
618 static bool stopped = false;
619
620 /*
621 * In case of fadump, register data for all CPUs is captured by f/w
622 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
623 * this rtas call to avoid tricky post processing of those CPUs'
624 * backtraces.
625 */
626 if (should_fadump_crash())
627 return;
628
629 if (stopped)
630 return;
631
632 stopped = true;
633
634 #ifdef CONFIG_CRASH_DUMP
635 if (kexec_crash_image) {
636 crash_kexec_prepare();
637 return;
638 }
639 #endif
640
641 smp_send_stop();
642 }
643
644 #ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
646 {
647 /*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
649 */
650 set_cpu_online(smp_processor_id(), false);
651
652 spin_begin();
653 while (1)
654 spin_cpu_relax();
655 }
656
void smp_send_stop(void)
658 {
659 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
660 }
661
662 #else /* CONFIG_NMI_IPI */
663
static void stop_this_cpu(void *dummy)
665 {
666 hard_irq_disable();
667
668 /*
669 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
670 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
671 * to know other CPUs are offline before it breaks locks to flush
672 * printk buffers, in case we panic()ed while holding the lock.
673 */
674 set_cpu_online(smp_processor_id(), false);
675
676 spin_begin();
677 while (1)
678 spin_cpu_relax();
679 }
680
void smp_send_stop(void)
682 {
683 static bool stopped = false;
684
685 /*
686 * Prevent waiting on csd lock from a previous smp_send_stop.
687 * This is racy, but in general callers try to do the right
688 * thing and only fire off one smp_send_stop (e.g., see
689 * kernel/panic.c)
690 */
691 if (stopped)
692 return;
693
694 stopped = true;
695
696 smp_call_function(stop_this_cpu, NULL, 0);
697 }
698 #endif /* CONFIG_NMI_IPI */
699
700 static struct task_struct *current_set[NR_CPUS];
701
static void smp_store_cpu_info(int id)
703 {
704 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
705 #ifdef CONFIG_PPC_E500
706 per_cpu(next_tlbcam_idx, id)
707 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
708 #endif
709 }
710
711 /*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
715 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
717 {
718 cpumask_set_cpu(i, get_cpumask(j));
719 cpumask_set_cpu(j, get_cpumask(i));
720 }
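/*
 * E.g. set_cpus_related(i, cpu, cpu_sibling_mask) marks 'i' in cpu's sibling
 * mask and 'cpu' in i's sibling mask, keeping the relation symmetric.
 */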
721
722 #ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
725 {
726 cpumask_clear_cpu(i, get_cpumask(j));
727 cpumask_clear_cpu(j, get_cpumask(i));
728 }
729 #endif
730
731 /*
732 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
734 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
				struct cpumask *(*dstmask)(int))
737 {
738 struct cpumask *mask;
739 int k;
740
741 mask = srcmask(j);
742 for_each_cpu(k, srcmask(i))
743 cpumask_or(dstmask(k), dstmask(k), mask);
744
745 if (i == j)
746 return;
747
748 mask = srcmask(i);
749 for_each_cpu(k, srcmask(j))
750 cpumask_or(dstmask(k), dstmask(k), mask);
751 }
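/*
 * E.g. with srcmask == cpu_sibling_mask and dstmask == cpu_core_mask, every
 * sibling of 'i' gets j's siblings OR'ed into its core mask and, when i != j,
 * every sibling of 'j' gets i's siblings OR'ed into its core mask.
 */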
752
753 /*
754 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
755 * property for the CPU device node @dn and stores
756 * the parsed output in the thread_groups_list
757 * structure @tglp.
758 *
759 * @dn: The device node of the CPU device.
760 * @tglp: Pointer to a thread group list structure into which the parsed
761 * output of "ibm,thread-groups" is stored.
762 *
763 * ibm,thread-groups[0..N-1] array defines which group of threads in
764 * the CPU-device node can be grouped together based on the property.
765 *
766 * This array can represent thread groupings for multiple properties.
767 *
768 * ibm,thread-groups[i + 0] tells us the property based on which the
769 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation cache. If
771 * the value is 2, it implies that the threads in the same group share
772 * the same L2 cache.
773 *
774 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
775 * property ibm,thread-groups[i]
776 *
777 * ibm,thread-groups[i+2] tells us the number of threads in each such
778 * group.
779 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
780 *
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
782 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
783 * the grouping.
784 *
785 * Example:
786 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
788 * a) [1,2,4,8,10,12,14,9,11,13,15]
789 * b) [2,2,4,8,10,12,14,9,11,13,15]
790 *
791 * where in,
792 *
 * a) provides information of Property "1" being shared by "2" groups,
 *    each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "1" indicates that the threads in the
 *    group share the L1 cache, translation cache and instruction data
 *    flow.
800 *
801 * b) provides information of Property "2" being shared by "2" groups,
802 * each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
803 * the first group is {8,10,12,14} and the
804 * "ibm,ppc-interrupt-server#s" of the second group is
805 * {9,11,13,15}. Property "2" indicates that the threads in each
806 * group share the L2-cache.
807 *
808 * Returns 0 on success, -EINVAL if the property does not exist,
809 * -ENODATA if property does not have a value, and -EOVERFLOW if the
810 * property data isn't large enough.
811 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups_list *tglp)
814 {
815 unsigned int property_idx = 0;
816 u32 *thread_group_array;
817 size_t total_threads;
818 int ret = 0, count;
819 u32 *thread_list;
820 int i = 0;
821
822 count = of_property_count_u32_elems(dn, "ibm,thread-groups");
823 thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
824 ret = of_property_read_u32_array(dn, "ibm,thread-groups",
825 thread_group_array, count);
826 if (ret)
827 goto out_free;
828
829 while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
830 int j;
831 struct thread_groups *tg = &tglp->property_tgs[property_idx++];
832
833 tg->property = thread_group_array[i];
834 tg->nr_groups = thread_group_array[i + 1];
835 tg->threads_per_group = thread_group_array[i + 2];
836 total_threads = tg->nr_groups * tg->threads_per_group;
837
838 thread_list = &thread_group_array[i + 3];
839
840 for (j = 0; j < total_threads; j++)
841 tg->thread_list[j] = thread_list[j];
842 i = i + 3 + total_threads;
843 }
844
845 tglp->nr_properties = property_idx;
846
847 out_free:
848 kfree(thread_group_array);
849 return ret;
850 }
851
852 /*
853 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
854 * that @cpu belongs to.
855 *
856 * @cpu : The logical CPU whose thread group is being searched.
857 * @tg : The thread-group structure of the CPU node which @cpu belongs
858 * to.
859 *
860 * Returns the index to tg->thread_list that points to the start
861 * of the thread_group that @cpu belongs to.
862 *
863 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
864 * tg->thread_list.
865 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
867 {
868 int hw_cpu_id = get_hard_smp_processor_id(cpu);
869 int i, j;
870
871 for (i = 0; i < tg->nr_groups; i++) {
872 int group_start = i * tg->threads_per_group;
873
874 for (j = 0; j < tg->threads_per_group; j++) {
875 int idx = group_start + j;
876
877 if (tg->thread_list[idx] == hw_cpu_id)
878 return group_start;
879 }
880 }
881
882 return -1;
883 }
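/*
 * Worked example: with nr_groups = 2, threads_per_group = 4 and
 * thread_list = {8,10,12,14,9,11,13,15}, a CPU whose hardware id is 11 sits
 * at index 5, so the function returns 4, the start of its group {9,11,13,15}.
 */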
884
static struct thread_groups *__init get_thread_groups(int cpu,
						       int group_property,
						       int *err)
888 {
889 struct device_node *dn = of_get_cpu_node(cpu, NULL);
890 struct thread_groups_list *cpu_tgl = &tgl[cpu];
891 struct thread_groups *tg = NULL;
892 int i;
893 *err = 0;
894
895 if (!dn) {
896 *err = -ENODATA;
897 return NULL;
898 }
899
900 if (!cpu_tgl->nr_properties) {
901 *err = parse_thread_groups(dn, cpu_tgl);
902 if (*err)
903 goto out;
904 }
905
906 for (i = 0; i < cpu_tgl->nr_properties; i++) {
907 if (cpu_tgl->property_tgs[i].property == group_property) {
908 tg = &cpu_tgl->property_tgs[i];
909 break;
910 }
911 }
912
913 if (!tg)
914 *err = -EINVAL;
915 out:
916 of_node_put(dn);
917 return tg;
918 }
919
static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
					       int cpu, int cpu_group_start)
922 {
923 int first_thread = cpu_first_thread_sibling(cpu);
924 int i;
925
926 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
927
928 for (i = first_thread; i < first_thread + threads_per_core; i++) {
929 int i_group_start = get_cpu_thread_group_start(i, tg);
930
931 if (unlikely(i_group_start == -1)) {
932 WARN_ON_ONCE(1);
933 return -ENODATA;
934 }
935
936 if (i_group_start == cpu_group_start)
937 cpumask_set_cpu(i, *mask);
938 }
939
940 return 0;
941 }
942
static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
946 int cpu_group_start = -1, err = 0;
947 struct thread_groups *tg = NULL;
948 cpumask_var_t *mask = NULL;
949
950 if (cache_property != THREAD_GROUP_SHARE_L1 &&
951 cache_property != THREAD_GROUP_SHARE_L2_L3)
952 return -EINVAL;
953
954 tg = get_thread_groups(cpu, cache_property, &err);
955
956 if (!tg)
957 return err;
958
959 cpu_group_start = get_cpu_thread_group_start(cpu, tg);
960
961 if (unlikely(cpu_group_start == -1)) {
962 WARN_ON_ONCE(1);
963 return -ENODATA;
964 }
965
966 if (cache_property == THREAD_GROUP_SHARE_L1) {
967 mask = &per_cpu(thread_group_l1_cache_map, cpu);
968 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
969 }
970 else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
971 mask = &per_cpu(thread_group_l2_cache_map, cpu);
972 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
973 mask = &per_cpu(thread_group_l3_cache_map, cpu);
974 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
975 }
976
977
978 return 0;
979 }
980
981 static bool shared_caches __ro_after_init;
982
983 #ifdef CONFIG_SCHED_SMT
/* Scheduler flags for the SMT level; adds asymmetric SMT packing when supported */
static int powerpc_smt_flags(void)
986 {
987 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
988
989 if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
990 printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
991 flags |= SD_ASYM_PACKING;
992 }
993 return flags;
994 }
995 #endif
996
997 /*
998 * On shared processor LPARs scheduled on a big core (which has two or more
999 * independent thread groups per core), prefer lower numbered CPUs, so
 * that the workload consolidates onto fewer cores.
1001 */
1002 static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
1003
1004 /*
1005 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1006 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1007 * since the migrated task remains cache hot. We want to take advantage of this
1008 * at the scheduler level so an extra topology level is required.
1009 */
static int powerpc_shared_cache_flags(void)
1011 {
1012 if (static_branch_unlikely(&splpar_asym_pack))
1013 return SD_SHARE_LLC | SD_ASYM_PACKING;
1014
1015 return SD_SHARE_LLC;
1016 }
1017
static int powerpc_shared_proc_flags(void)
1019 {
1020 if (static_branch_unlikely(&splpar_asym_pack))
1021 return SD_ASYM_PACKING;
1022
1023 return 0;
1024 }
1025
1026 /*
 * We can't just pass cpu_l2_cache_mask() directly because it
 * returns a non-const pointer and the compiler barfs on that.
1029 */
static const struct cpumask *shared_cache_mask(int cpu)
1031 {
1032 return per_cpu(cpu_l2_cache_map, cpu);
1033 }
1034
1035 #ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
1037 {
1038 return cpu_smallcore_mask(cpu);
1039 }
1040 #endif
1041
static struct cpumask *cpu_coregroup_mask(int cpu)
1043 {
1044 return per_cpu(cpu_coregroup_map, cpu);
1045 }
1046
static bool has_coregroup_support(void)
1048 {
1049 /* Coregroup identification not available on shared systems */
1050 if (is_shared_processor())
		return false;
1052
1053 return coregroup_enabled;
1054 }
1055
static const struct cpumask *cpu_mc_mask(int cpu)
1057 {
1058 return cpu_coregroup_mask(cpu);
1059 }
1060
static int __init init_big_cores(void)
1062 {
1063 int cpu;
1064
1065 for_each_possible_cpu(cpu) {
1066 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
1067
1068 if (err)
1069 return err;
1070
1071 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1072 GFP_KERNEL,
1073 cpu_to_node(cpu));
1074 }
1075
1076 has_big_cores = true;
1077
1078 for_each_possible_cpu(cpu) {
1079 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
1080
1081 if (err)
1082 return err;
1083 }
1084
1085 thread_group_shares_l2 = true;
1086 thread_group_shares_l3 = true;
1087 pr_debug("L2/L3 cache only shared by the threads in the small core\n");
1088
1089 return 0;
1090 }
1091
void __init smp_prepare_cpus(unsigned int max_cpus)
1093 {
1094 unsigned int cpu, num_threads;
1095
1096 DBG("smp_prepare_cpus\n");
1097
1098 /*
1099 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
1101 */
1102 BUG_ON(boot_cpuid != smp_processor_id());
1103
1104 /* Fixup boot cpu */
1105 smp_store_cpu_info(boot_cpuid);
1106 cpu_callin_map[boot_cpuid] = 1;
1107
1108 for_each_possible_cpu(cpu) {
1109 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1110 GFP_KERNEL, cpu_to_node(cpu));
1111 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1112 GFP_KERNEL, cpu_to_node(cpu));
1113 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1114 GFP_KERNEL, cpu_to_node(cpu));
1115 if (has_coregroup_support())
1116 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1117 GFP_KERNEL, cpu_to_node(cpu));
1118
1119 #ifdef CONFIG_NUMA
1120 /*
1121 * numa_node_id() works after this.
1122 */
1123 if (cpu_present(cpu)) {
1124 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1125 set_cpu_numa_mem(cpu,
1126 local_memory_node(numa_cpu_lookup_table[cpu]));
1127 }
1128 #endif
1129 }
1130
1131 /* Init the cpumasks so the boot CPU is related to itself */
1132 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1133 cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1134 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1135
1136 if (has_coregroup_support())
1137 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1138
1139 init_big_cores();
1140 if (has_big_cores) {
1141 cpumask_set_cpu(boot_cpuid,
1142 cpu_smallcore_mask(boot_cpuid));
1143 }
1144
1145 if (cpu_to_chip_id(boot_cpuid) != -1) {
1146 int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
1147
1148 /*
		 * All threads of a core belong to the same core, so
		 * chip_id_lookup_table will have one entry per core.
		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
		 * other CPU will have one either.
1153 */
1154 chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
1155 if (chip_id_lookup_table)
1156 memset(chip_id_lookup_table, -1, sizeof(int) * idx);
1157 }
1158
1159 if (smp_ops && smp_ops->probe)
1160 smp_ops->probe();
1161
	// Initialise the generic SMT topology support
1163 num_threads = 1;
1164 if (smt_enabled_at_boot)
1165 num_threads = smt_enabled_at_boot;
1166 cpu_smt_set_num_threads(num_threads, threads_per_core);
1167 }
1168
void __init smp_prepare_boot_cpu(void)
1170 {
1171 BUG_ON(smp_processor_id() != boot_cpuid);
1172 #ifdef CONFIG_PPC64
1173 paca_ptrs[boot_cpuid]->__current = current;
1174 #endif
1175 set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1176 current_set[boot_cpuid] = current;
1177 }
1178
1179 #ifdef CONFIG_HOTPLUG_CPU
1180
int generic_cpu_disable(void)
1182 {
1183 unsigned int cpu = smp_processor_id();
1184
1185 if (cpu == boot_cpuid)
1186 return -EBUSY;
1187
1188 set_cpu_online(cpu, false);
1189 #ifdef CONFIG_PPC64
1190 vdso_data->processorCount--;
1191 #endif
1192 /* Update affinity of all IRQs previously aimed at this CPU */
1193 irq_migrate_all_off_this_cpu();
1194
1195 /*
1196 * Depending on the details of the interrupt controller, it's possible
1197 * that one of the interrupts we just migrated away from this CPU is
1198 * actually already pending on this CPU. If we leave it in that state
1199 * the interrupt will never be EOI'ed, and will never fire again. So
1200 * temporarily enable interrupts here, to allow any pending interrupt to
1201 * be received (and EOI'ed), before we take this CPU offline.
1202 */
1203 local_irq_enable();
1204 mdelay(1);
1205 local_irq_disable();
1206
1207 return 0;
1208 }
1209
void generic_cpu_die(unsigned int cpu)
1211 {
1212 int i;
1213
1214 for (i = 0; i < 100; i++) {
1215 smp_rmb();
1216 if (is_cpu_dead(cpu))
1217 return;
1218 msleep(100);
1219 }
1220 printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1221 }
1222
void generic_set_cpu_dead(unsigned int cpu)
1224 {
1225 per_cpu(cpu_state, cpu) = CPU_DEAD;
1226 }
1227
1228 /*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * and generic_cpu_die() would return immediately instead of waiting.
1232 */
void generic_set_cpu_up(unsigned int cpu)
1234 {
1235 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1236 }
1237
int generic_check_cpu_restart(unsigned int cpu)
1239 {
1240 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1241 }
1242
int is_cpu_dead(unsigned int cpu)
1244 {
1245 return per_cpu(cpu_state, cpu) == CPU_DEAD;
1246 }
1247
static bool secondaries_inhibited(void)
1249 {
1250 return kvm_hv_mode_active();
1251 }
1252
1253 #else /* HOTPLUG_CPU */
1254
1255 #define secondaries_inhibited() 0
1256
1257 #endif
1258
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1260 {
1261 #ifdef CONFIG_PPC64
1262 paca_ptrs[cpu]->__current = idle;
1263 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1264 THREAD_SIZE - STACK_FRAME_MIN_SIZE;
1265 #endif
1266 task_thread_info(idle)->cpu = cpu;
1267 secondary_current = current_set[cpu] = idle;
1268 }
1269
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1271 {
1272 const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
1273 const bool booting = system_state < SYSTEM_RUNNING;
1274 const unsigned long hp_spin_ms = 1;
1275 unsigned long deadline;
1276 int rc;
1277 const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;
1278
1279 /*
1280 * Don't allow secondary threads to come online if inhibited
1281 */
1282 if (threads_per_core > 1 && secondaries_inhibited() &&
1283 cpu_thread_in_subcore(cpu))
1284 return -EBUSY;
1285
1286 if (smp_ops == NULL ||
1287 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1288 return -EINVAL;
1289
1290 cpu_idle_thread_init(cpu, tidle);
1291
1292 /*
1293 * The platform might need to allocate resources prior to bringing
1294 * up the CPU
1295 */
1296 if (smp_ops->prepare_cpu) {
1297 rc = smp_ops->prepare_cpu(cpu);
1298 if (rc)
1299 return rc;
1300 }
1301
	/* Make sure callin-map entry is 0 (can be left over from a previous
	 * CPU hotplug).
1304 */
1305 cpu_callin_map[cpu] = 0;
1306
1307 /* The information for processor bringup must
1308 * be written out to main store before we release
1309 * the processor.
1310 */
1311 smp_mb();
1312
1313 /* wake up cpus */
1314 DBG("smp: kicking cpu %d\n", cpu);
1315 rc = smp_ops->kick_cpu(cpu);
1316 if (rc) {
1317 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1318 return rc;
1319 }
1320
1321 /*
1322 * At boot time, simply spin on the callin word until the
1323 * deadline passes.
1324 *
1325 * At run time, spin for an optimistic amount of time to avoid
1326 * sleeping in the common case.
1327 */
1328 deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
1329 spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
1330
1331 if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
1332 const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
1333 const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;
1334
1335 deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
1336 while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
1337 fsleep(sleep_interval_us);
1338 }
1339
1340 if (!cpu_callin_map[cpu]) {
1341 printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1342 return -ENOENT;
1343 }
1344
1345 DBG("Processor %u found.\n", cpu);
1346
1347 if (smp_ops->give_timebase)
1348 smp_ops->give_timebase();
1349
1350 /* Wait until cpu puts itself in the online & active maps */
1351 spin_until_cond(cpu_online(cpu));
1352
1353 return 0;
1354 }
1355
1356 /* Return the value of the reg property corresponding to the given
1357 * logical cpu.
1358 */
int cpu_to_core_id(int cpu)
1360 {
1361 struct device_node *np;
1362 int id = -1;
1363
1364 np = of_get_cpu_node(cpu, NULL);
1365 if (!np)
1366 goto out;
1367
1368 id = of_get_cpu_hwid(np, 0);
1369 out:
1370 of_node_put(np);
1371 return id;
1372 }
1373 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1374
1375 /* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
1377 {
1378 return cpu >> threads_shift;
1379 }
1380 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1381
int cpu_first_thread_of_core(int core)
1383 {
1384 return core << threads_shift;
1385 }
1386 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
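/*
 * E.g. with 8 threads per core (threads_shift == 3),
 * cpu_core_index_of_thread(19) is 2 and cpu_first_thread_of_core(2) is 16.
 */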
1387
1388 /* Must be called when no change can occur to cpu_present_mask,
1389 * i.e. during cpu online or offline.
1390 */
static struct device_node *cpu_to_l2cache(int cpu)
1392 {
1393 struct device_node *np;
1394 struct device_node *cache;
1395
1396 if (!cpu_present(cpu))
1397 return NULL;
1398
1399 np = of_get_cpu_node(cpu, NULL);
1400 if (np == NULL)
1401 return NULL;
1402
1403 cache = of_find_next_cache_node(np);
1404
1405 of_node_put(np);
1406
1407 return cache;
1408 }
1409
static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1411 {
1412 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1413 struct device_node *l2_cache, *np;
1414 int i;
1415
1416 if (has_big_cores)
1417 submask_fn = cpu_smallcore_mask;
1418
1419 /*
1420 * If the threads in a thread-group share L2 cache, then the
1421 * L2-mask can be obtained from thread_group_l2_cache_map.
1422 */
1423 if (thread_group_shares_l2) {
1424 cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1425
1426 for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1427 if (cpu_online(i))
1428 set_cpus_related(i, cpu, cpu_l2_cache_mask);
1429 }
1430
1431 /* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1432 if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1433 !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1434 pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1435 cpu);
1436 }
1437
1438 return true;
1439 }
1440
1441 l2_cache = cpu_to_l2cache(cpu);
1442 if (!l2_cache || !*mask) {
1443 /* Assume only core siblings share cache with this CPU */
1444 for_each_cpu(i, cpu_sibling_mask(cpu))
1445 set_cpus_related(cpu, i, cpu_l2_cache_mask);
1446
1447 return false;
1448 }
1449
1450 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1451
1452 /* Update l2-cache mask with all the CPUs that are part of submask */
1453 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1454
1455 /* Skip all CPUs already part of current CPU l2-cache mask */
1456 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1457
1458 for_each_cpu(i, *mask) {
1459 /*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
1462 */
1463 np = cpu_to_l2cache(i);
1464
1465 /* Skip all CPUs already part of current CPU l2-cache */
1466 if (np == l2_cache) {
1467 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1468 cpumask_andnot(*mask, *mask, submask_fn(i));
1469 } else {
1470 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1471 }
1472
1473 of_node_put(np);
1474 }
1475 of_node_put(l2_cache);
1476
1477 return true;
1478 }
1479
1480 #ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
1482 {
1483 struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1484 int i;
1485
1486 unmap_cpu_from_node(cpu);
1487
1488 if (shared_caches)
1489 mask_fn = cpu_l2_cache_mask;
1490
1491 for_each_cpu(i, mask_fn(cpu)) {
1492 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1493 set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1494 if (has_big_cores)
1495 set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1496 }
1497
1498 for_each_cpu(i, cpu_core_mask(cpu))
1499 set_cpus_unrelated(cpu, i, cpu_core_mask);
1500
1501 if (has_coregroup_support()) {
1502 for_each_cpu(i, cpu_coregroup_mask(cpu))
1503 set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1504 }
1505 }
1506 #endif
1507
static inline void add_cpu_to_smallcore_masks(int cpu)
1509 {
1510 int i;
1511
1512 if (!has_big_cores)
1513 return;
1514
1515 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1516
1517 for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1518 if (cpu_online(i))
1519 set_cpus_related(i, cpu, cpu_smallcore_mask);
1520 }
1521 }
1522
static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1524 {
1525 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1526 int coregroup_id = cpu_to_coregroup_id(cpu);
1527 int i;
1528
1529 if (shared_caches)
1530 submask_fn = cpu_l2_cache_mask;
1531
1532 if (!*mask) {
1533 /* Assume only siblings are part of this CPU's coregroup */
1534 for_each_cpu(i, submask_fn(cpu))
1535 set_cpus_related(cpu, i, cpu_coregroup_mask);
1536
1537 return;
1538 }
1539
1540 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1541
1542 /* Update coregroup mask with all the CPUs that are part of submask */
1543 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1544
1545 /* Skip all CPUs already part of coregroup mask */
1546 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1547
1548 for_each_cpu(i, *mask) {
1549 /* Skip all CPUs not part of this coregroup */
1550 if (coregroup_id == cpu_to_coregroup_id(i)) {
1551 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1552 cpumask_andnot(*mask, *mask, submask_fn(i));
1553 } else {
1554 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1555 }
1556 }
1557 }
1558
static void add_cpu_to_masks(int cpu)
1560 {
1561 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1562 int first_thread = cpu_first_thread_sibling(cpu);
1563 cpumask_var_t mask;
1564 int chip_id = -1;
1565 bool ret;
1566 int i;
1567
1568 /*
1569 * This CPU will not be in the online mask yet so we need to manually
1570 * add it to its own thread sibling mask.
1571 */
1572 map_cpu_to_node(cpu, cpu_to_node(cpu));
1573 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1574 cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1575
1576 for (i = first_thread; i < first_thread + threads_per_core; i++)
1577 if (cpu_online(i))
1578 set_cpus_related(i, cpu, cpu_sibling_mask);
1579
1580 add_cpu_to_smallcore_masks(cpu);
1581
1582 /* In CPU-hotplug path, hence use GFP_ATOMIC */
1583 ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1584 update_mask_by_l2(cpu, &mask);
1585
1586 if (has_coregroup_support())
1587 update_coregroup_mask(cpu, &mask);
1588
1589 if (chip_id_lookup_table && ret)
1590 chip_id = cpu_to_chip_id(cpu);
1591
1592 if (shared_caches)
1593 submask_fn = cpu_l2_cache_mask;
1594
1595 /* Update core_mask with all the CPUs that are part of submask */
1596 or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1597
1598 /* Skip all CPUs already part of current CPU core mask */
1599 cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1600
	/* If chip_id is -1, limit the cpu_core_mask to within the PKG */
1602 if (chip_id == -1)
1603 cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1604
1605 for_each_cpu(i, mask) {
1606 if (chip_id == cpu_to_chip_id(i)) {
1607 or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1608 cpumask_andnot(mask, mask, submask_fn(i));
1609 } else {
1610 cpumask_andnot(mask, mask, cpu_core_mask(i));
1611 }
1612 }
1613
1614 free_cpumask_var(mask);
1615 }
1616
1617 /* Activate a secondary processor. */
1618 __no_stack_protector
void start_secondary(void *unused)
1620 {
1621 unsigned int cpu = raw_smp_processor_id();
1622
1623 /* PPC64 calls setup_kup() in early_setup_secondary() */
1624 if (IS_ENABLED(CONFIG_PPC32))
1625 setup_kup();
1626
1627 mmgrab_lazy_tlb(&init_mm);
1628 current->active_mm = &init_mm;
1629 VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
1630 cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
1631 inc_mm_active_cpus(&init_mm);
1632
1633 smp_store_cpu_info(cpu);
1634 set_dec(tb_ticks_per_jiffy);
1635 rcutree_report_cpu_starting(cpu);
1636 cpu_callin_map[cpu] = 1;
1637
1638 if (smp_ops->setup_cpu)
1639 smp_ops->setup_cpu(cpu);
1640 if (smp_ops->take_timebase)
1641 smp_ops->take_timebase();
1642
1643 secondary_cpu_time_init();
1644
1645 #ifdef CONFIG_PPC64
1646 if (system_state == SYSTEM_RUNNING)
1647 vdso_data->processorCount++;
1648
1649 vdso_getcpu_init();
1650 #endif
1651 set_numa_node(numa_cpu_lookup_table[cpu]);
1652 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1653
1654 /* Update topology CPU masks */
1655 add_cpu_to_masks(cpu);
1656
1657 /*
1658 * Check for any shared caches. Note that this must be done on a
1659 * per-core basis because one core in the pair might be disabled.
1660 */
1661 if (!shared_caches) {
1662 struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1663 struct cpumask *mask = cpu_l2_cache_mask(cpu);
1664
1665 if (has_big_cores)
1666 sibling_mask = cpu_smallcore_mask;
1667
1668 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1669 shared_caches = true;
1670 }
1671
1672 smp_wmb();
1673 notify_cpu_starting(cpu);
1674 set_cpu_online(cpu, true);
1675
1676 boot_init_stack_canary();
1677
1678 local_irq_enable();
1679
1680 /* We can enable ftrace for secondary cpus now */
1681 this_cpu_enable_ftrace();
1682
1683 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1684
1685 BUG();
1686 }
1687
1688 static struct sched_domain_topology_level powerpc_topology[6];
1689
static void __init build_sched_topology(void)
1691 {
1692 int i = 0;
1693
1694 if (is_shared_processor() && has_big_cores)
1695 static_branch_enable(&splpar_asym_pack);
1696
1697 #ifdef CONFIG_SCHED_SMT
1698 if (has_big_cores) {
1699 pr_info("Big cores detected but using small core scheduling\n");
1700 powerpc_topology[i++] = (struct sched_domain_topology_level){
1701 smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1702 };
1703 } else {
1704 powerpc_topology[i++] = (struct sched_domain_topology_level){
1705 cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1706 };
1707 }
1708 #endif
1709 if (shared_caches) {
1710 powerpc_topology[i++] = (struct sched_domain_topology_level){
1711 shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
1712 };
1713 }
1714 if (has_coregroup_support()) {
1715 powerpc_topology[i++] = (struct sched_domain_topology_level){
1716 cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
1717 };
1718 }
1719 powerpc_topology[i++] = (struct sched_domain_topology_level){
1720 cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
1721 };
1722
1723 /* There must be one trailing NULL entry left. */
1724 BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
1725
1726 set_sched_topology(powerpc_topology);
1727 }
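/*
 * For example, on a dedicated big-core system with shared caches and
 * coregroup support the resulting hierarchy is SMT (small-core siblings)
 * -> CACHE -> MC -> PKG; levels whose condition is false are simply omitted.
 */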
1728
void __init smp_cpus_done(unsigned int max_cpus)
1730 {
1731 /*
1732 * We are running pinned to the boot CPU, see rest_init().
1733 */
1734 if (smp_ops && smp_ops->setup_cpu)
1735 smp_ops->setup_cpu(boot_cpuid);
1736
1737 if (smp_ops && smp_ops->bringup_done)
1738 smp_ops->bringup_done();
1739
1740 dump_numa_cpu_topology();
1741 build_sched_topology();
1742 }
1743
1744 /*
1745 * For asym packing, by default lower numbered CPU has higher priority.
1746 * On shared processors, pack to lower numbered core. However avoid moving
1747 * between thread_groups within the same core.
1748 */
int arch_asym_cpu_priority(int cpu)
1750 {
1751 if (static_branch_unlikely(&splpar_asym_pack))
1752 return -cpu / threads_per_core;
1753
1754 return -cpu;
1755 }
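/*
 * E.g. with threads_per_core == 8 and splpar_asym_pack enabled, CPUs 0-7 all
 * report priority 0 and CPUs 8-15 report -1, so load is packed onto the
 * lowest-numbered core without preferring any particular thread within it.
 */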
1756
1757 #ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
1759 {
1760 int cpu = smp_processor_id();
1761 int err;
1762
1763 if (!smp_ops->cpu_disable)
1764 return -ENOSYS;
1765
1766 this_cpu_disable_ftrace();
1767
1768 err = smp_ops->cpu_disable();
1769 if (err)
1770 return err;
1771
1772 /* Update sibling maps */
1773 remove_cpu_from_masks(cpu);
1774
1775 return 0;
1776 }
1777
void __cpu_die(unsigned int cpu)
1779 {
1780 /*
	 * This could perhaps be a generic call in idle_task_dead(), but
	 * that requires testing from all archs, so keep it here for now.
1783 */
1784 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
1785 dec_mm_active_cpus(&init_mm);
1786 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
1787
1788 if (smp_ops->cpu_die)
1789 smp_ops->cpu_die(cpu);
1790 }
1791
void __noreturn arch_cpu_idle_dead(void)
1793 {
1794 /*
1795 * Disable on the down path. This will be re-enabled by
1796 * start_secondary() via start_secondary_resume() below
1797 */
1798 this_cpu_disable_ftrace();
1799
1800 if (smp_ops->cpu_offline_self)
1801 smp_ops->cpu_offline_self();
1802
1803 /* If we return, we re-enter start_secondary */
1804 start_secondary_resume();
1805 }
1806
1807 #endif
1808