xref: /linux/arch/powerpc/kernel/smp.c (revision beb4f4722cf60d9f0803054dec4eb5025f2cf594)
1 /*
2  * SMP support for ppc.
3  *
4  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5  * deal of code from the sparc and intel versions.
6  *
7  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8  *
9  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11  *
12  *      This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17 
18 #undef DEBUG
19 
20 #include <linux/kernel.h>
21 #include <linux/export.h>
22 #include <linux/sched/mm.h>
23 #include <linux/sched/topology.h>
24 #include <linux/smp.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/spinlock.h>
29 #include <linux/cache.h>
30 #include <linux/err.h>
31 #include <linux/device.h>
32 #include <linux/cpu.h>
33 #include <linux/notifier.h>
34 #include <linux/topology.h>
35 #include <linux/profile.h>
36 #include <linux/processor.h>
37 #include <linux/random.h>
38 #include <linux/stackprotector.h>
39 
40 #include <asm/ptrace.h>
41 #include <linux/atomic.h>
42 #include <asm/irq.h>
43 #include <asm/hw_irq.h>
44 #include <asm/kvm_ppc.h>
45 #include <asm/dbell.h>
46 #include <asm/page.h>
47 #include <asm/pgtable.h>
48 #include <asm/prom.h>
49 #include <asm/smp.h>
50 #include <asm/time.h>
51 #include <asm/machdep.h>
52 #include <asm/cputhreads.h>
53 #include <asm/cputable.h>
54 #include <asm/mpic.h>
55 #include <asm/vdso_datapage.h>
56 #ifdef CONFIG_PPC64
57 #include <asm/paca.h>
58 #endif
59 #include <asm/vdso.h>
60 #include <asm/debug.h>
61 #include <asm/kexec.h>
62 #include <asm/asm-prototypes.h>
63 #include <asm/cpu_has_feature.h>
64 #include <asm/ftrace.h>
65 
66 #ifdef DEBUG
67 #include <asm/udbg.h>
68 #define DBG(fmt...) udbg_printf(fmt)
69 #else
70 #define DBG(fmt...)
71 #endif
72 
73 #ifdef CONFIG_HOTPLUG_CPU
74 /* State of each CPU during hotplug phases */
75 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
76 #endif
77 
78 struct thread_info *secondary_ti;
79 bool has_big_cores;
80 
81 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
82 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
83 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
84 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
85 
86 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
87 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
88 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
89 EXPORT_SYMBOL_GPL(has_big_cores);
90 
91 #define MAX_THREAD_LIST_SIZE	8
92 #define THREAD_GROUP_SHARE_L1   1
93 struct thread_groups {
94 	unsigned int property;
95 	unsigned int nr_groups;
96 	unsigned int threads_per_group;
97 	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
98 };
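
/*
 * Example: the worked "ibm,thread-groups" value used in the
 * parse_thread_groups() comment below, [1,2,4,5,6,7,8,9,10,11,12],
 * parses into:
 *
 *	struct thread_groups tg = {
 *		.property          = 1,		THREAD_GROUP_SHARE_L1
 *		.nr_groups         = 2,
 *		.threads_per_group = 4,
 *		.thread_list       = { 5, 6, 7, 8, 9, 10, 11, 12 },
 *	};
 *
 * i.e. two groups of four threads, each group sharing an L1 cache.
 */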
99 
100 /*
101  * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
102  * the set of its siblings that share the L1 cache.
103  */
104 DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
105 
106 /* SMP operations for this machine */
107 struct smp_ops_t *smp_ops;
108 
109 /* Can't be static due to PowerMac hackery */
110 volatile unsigned int cpu_callin_map[NR_CPUS];
111 
112 int smt_enabled_at_boot = 1;
113 
114 /*
115  * Returns 1 if the specified cpu should be brought up during boot.
116  * Used to inhibit booting threads if they've been disabled or
117  * limited on the command line
118  */
119 int smp_generic_cpu_bootable(unsigned int nr)
120 {
121 	/* Special case - we inhibit secondary thread startup
122 	 * during boot if the user requests it.
123 	 */
124 	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
125 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
126 			return 0;
127 		if (smt_enabled_at_boot
128 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
129 			return 0;
130 	}
131 
132 	return 1;
133 }
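
/*
 * Worked example: during boot on an SMT4 system with smt_enabled_at_boot
 * set to 2 (e.g. via the "smt-enabled=" command line option), only the
 * first two threads of each core are bootable. Assuming the usual linear
 * numbering where cpu_thread_in_core(nr) is nr % threads_per_core:
 *
 *	smp_generic_cpu_bootable(8)  -> 1	(core 2, thread 0)
 *	smp_generic_cpu_bootable(9)  -> 1	(core 2, thread 1)
 *	smp_generic_cpu_bootable(10) -> 0	(core 2, thread 2)
 */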
134 
135 
136 #ifdef CONFIG_PPC64
137 int smp_generic_kick_cpu(int nr)
138 {
139 	if (nr < 0 || nr >= nr_cpu_ids)
140 		return -EINVAL;
141 
142 	/*
143 	 * The processor is currently spinning, waiting for the
144 	 * cpu_start field to become non-zero. After we set cpu_start,
145 	 * the processor will continue on to secondary_start.
146 	 */
147 	if (!paca_ptrs[nr]->cpu_start) {
148 		paca_ptrs[nr]->cpu_start = 1;
149 		smp_mb();
150 		return 0;
151 	}
152 
153 #ifdef CONFIG_HOTPLUG_CPU
154 	/*
155 	 * OK, it's not there, so it might be soft-unplugged; let's
156 	 * try to bring it back.
157 	 */
158 	generic_set_cpu_up(nr);
159 	smp_wmb();
160 	smp_send_reschedule(nr);
161 #endif /* CONFIG_HOTPLUG_CPU */
162 
163 	return 0;
164 }
165 #endif /* CONFIG_PPC64 */
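
/*
 * smp_generic_kick_cpu() is normally not called directly but plugged
 * into the platform's smp_ops. A minimal, purely illustrative hookup
 * (field names are those of smp_ops_t as used in this file; my_cause_ipi
 * and my_probe are hypothetical platform functions):
 *
 *	static struct smp_ops_t my_smp_ops = {
 *		.message_pass	= NULL,		NULL selects the muxed IPI path
 *		.cause_ipi	= my_cause_ipi,
 *		.probe		= my_probe,
 *		.kick_cpu	= smp_generic_kick_cpu,
 *	};
 */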
166 
167 static irqreturn_t call_function_action(int irq, void *data)
168 {
169 	generic_smp_call_function_interrupt();
170 	return IRQ_HANDLED;
171 }
172 
173 static irqreturn_t reschedule_action(int irq, void *data)
174 {
175 	scheduler_ipi();
176 	return IRQ_HANDLED;
177 }
178 
179 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
180 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
181 {
182 	timer_broadcast_interrupt();
183 	return IRQ_HANDLED;
184 }
185 #endif
186 
187 #ifdef CONFIG_NMI_IPI
188 static irqreturn_t nmi_ipi_action(int irq, void *data)
189 {
190 	smp_handle_nmi_ipi(get_irq_regs());
191 	return IRQ_HANDLED;
192 }
193 #endif
194 
195 static irq_handler_t smp_ipi_action[] = {
196 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
197 	[PPC_MSG_RESCHEDULE] = reschedule_action,
198 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
199 	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
200 #endif
201 #ifdef CONFIG_NMI_IPI
202 	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
203 #endif
204 };
205 
206 /*
207  * The NMI IPI is a fallback and not truly non-maskable. It is simpler
208  * than going through the call function infrastructure, and strongly
209  * serialized, so it is more appropriate for debugging.
210  */
211 const char *smp_ipi_name[] = {
212 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
213 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
214 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
215 	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
216 #endif
217 #ifdef CONFIG_NMI_IPI
218 	[PPC_MSG_NMI_IPI] = "nmi ipi",
219 #endif
220 };
221 
222 /* optional function to request ipi, for controllers with >= 4 ipis */
223 int smp_request_message_ipi(int virq, int msg)
224 {
225 	int err;
226 
227 	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
228 		return -EINVAL;
229 #ifndef CONFIG_NMI_IPI
230 	if (msg == PPC_MSG_NMI_IPI)
231 		return 1;
232 #endif
233 
234 	err = request_irq(virq, smp_ipi_action[msg],
235 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
236 			  smp_ipi_name[msg], NULL);
237 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
238 		virq, smp_ipi_name[msg], err);
239 
240 	return err;
241 }
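
/*
 * Illustrative use by an interrupt controller driver with one hardware
 * IPI per message (virq_of() is a made-up stand-in for the driver's
 * hwirq-to-virq mapping):
 *
 *	int msg;
 *
 *	for (msg = 0; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(virq_of(msg), msg);
 *
 * Controllers with fewer IPIs instead provide cause_ipi and rely on the
 * muxed IPI code below.
 */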
242 
243 #ifdef CONFIG_PPC_SMP_MUXED_IPI
244 struct cpu_messages {
245 	long messages;			/* current messages */
246 };
247 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
248 
249 void smp_muxed_ipi_set_message(int cpu, int msg)
250 {
251 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
252 	char *message = (char *)&info->messages;
253 
254 	/*
255 	 * Order previous accesses before accesses in the IPI handler.
256 	 */
257 	smp_mb();
258 	message[msg] = 1;
259 }
260 
261 void smp_muxed_ipi_message_pass(int cpu, int msg)
262 {
263 	smp_muxed_ipi_set_message(cpu, msg);
264 
265 	/*
266 	 * cause_ipi functions are required to include a full barrier
267 	 * before doing whatever causes the IPI.
268 	 */
269 	smp_ops->cause_ipi(cpu);
270 }
271 
272 #ifdef __BIG_ENDIAN__
273 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
274 #else
275 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
276 #endif
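
/*
 * Each message type owns one byte lane of the per-cpu "messages" word:
 * smp_muxed_ipi_set_message() posts a message with a plain byte store,
 * and the demux side collects all pending messages with a single xchg().
 * The shifts are arranged so that:
 *
 *	message[msg] = 1	writes byte 'msg' of info->messages
 *	IPI_MESSAGE(msg)	tests the low bit of that same byte
 *
 * e.g. on a 64-bit little-endian kernel, IPI_MESSAGE(1) == 1UL << 8;
 * the big-endian variant mirrors the shift to address the same byte.
 */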
277 
278 irqreturn_t smp_ipi_demux(void)
279 {
280 	mb();	/* order any irq clear */
281 
282 	return smp_ipi_demux_relaxed();
283 }
284 
285 /* sync-free variant. Callers should ensure synchronization */
286 irqreturn_t smp_ipi_demux_relaxed(void)
287 {
288 	struct cpu_messages *info;
289 	unsigned long all;
290 
291 	info = this_cpu_ptr(&ipi_message);
292 	do {
293 		all = xchg(&info->messages, 0);
294 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
295 		/*
296 		 * Must check for PPC_MSG_RM_HOST_ACTION messages
297 		 * before PPC_MSG_CALL_FUNCTION messages because when
298 		 * a VM is destroyed, we call kick_all_cpus_sync()
299 		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
300 		 * messages have completed before we free any VCPUs.
301 		 */
302 		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
303 			kvmppc_xics_ipi_action();
304 #endif
305 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
306 			generic_smp_call_function_interrupt();
307 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
308 			scheduler_ipi();
309 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
310 		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
311 			timer_broadcast_interrupt();
312 #endif
313 #ifdef CONFIG_NMI_IPI
314 		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
315 			nmi_ipi_action(0, NULL);
316 #endif
317 	} while (info->messages);
318 
319 	return IRQ_HANDLED;
320 }
321 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
322 
323 static inline void do_message_pass(int cpu, int msg)
324 {
325 	if (smp_ops->message_pass)
326 		smp_ops->message_pass(cpu, msg);
327 #ifdef CONFIG_PPC_SMP_MUXED_IPI
328 	else
329 		smp_muxed_ipi_message_pass(cpu, msg);
330 #endif
331 }
332 
333 void smp_send_reschedule(int cpu)
334 {
335 	if (likely(smp_ops))
336 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
337 }
338 EXPORT_SYMBOL_GPL(smp_send_reschedule);
339 
340 void arch_send_call_function_single_ipi(int cpu)
341 {
342 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
343 }
344 
345 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
346 {
347 	unsigned int cpu;
348 
349 	for_each_cpu(cpu, mask)
350 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
351 }
352 
353 #ifdef CONFIG_NMI_IPI
354 
355 /*
356  * "NMI IPI" system.
357  *
358  * NMI IPIs may not be recoverable, so should not be used as ongoing part of
359  * a running system. They can be used for crash, debug, halt/reboot, etc.
360  *
361  * The IPI call waits with interrupts disabled until all targets enter the
362  * NMI handler, then returns. Subsequent IPIs can be issued before targets
363  * have returned from their handlers, so there is no guarantee about
364  * concurrency or re-entrancy.
367  *
368  * The IPI call may time out without all targets entering the NMI handler.
369  * In that case, there is some logic to recover (and ignore subsequent
370  * NMI interrupts that may eventually be raised), but the platform interrupt
371  * handler may not be able to distinguish this from other exception causes,
372  * which may cause a crash.
373  */
374 
375 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
376 static struct cpumask nmi_ipi_pending_mask;
377 static bool nmi_ipi_busy = false;
378 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
379 
380 static void nmi_ipi_lock_start(unsigned long *flags)
381 {
382 	raw_local_irq_save(*flags);
383 	hard_irq_disable();
384 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
385 		raw_local_irq_restore(*flags);
386 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
387 		raw_local_irq_save(*flags);
388 		hard_irq_disable();
389 	}
390 }
391 
392 static void nmi_ipi_lock(void)
393 {
394 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
395 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
396 }
397 
398 static void nmi_ipi_unlock(void)
399 {
400 	smp_mb();
401 	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
402 	atomic_set(&__nmi_ipi_lock, 0);
403 }
404 
405 static void nmi_ipi_unlock_end(unsigned long *flags)
406 {
407 	nmi_ipi_unlock();
408 	raw_local_irq_restore(*flags);
409 }
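
/*
 * This lock is a bare atomic rather than a spinlock_t, presumably so it
 * can be taken in NMI context without involving lockdep or irq tracing.
 * Callers bracket accesses to nmi_ipi_pending_mask / nmi_ipi_function /
 * nmi_ipi_busy like this (see smp_handle_nmi_ipi() below):
 *
 *	unsigned long flags;
 *
 *	nmi_ipi_lock_start(&flags);	also hard-disables interrupts
 *	... inspect/update NMI IPI state ...
 *	nmi_ipi_unlock_end(&flags);
 */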
410 
411 /*
412  * The platform NMI handler calls this to ack an NMI IPI.
413  */
414 int smp_handle_nmi_ipi(struct pt_regs *regs)
415 {
416 	void (*fn)(struct pt_regs *) = NULL;
417 	unsigned long flags;
418 	int me = raw_smp_processor_id();
419 	int ret = 0;
420 
421 	/*
422 	 * Unexpected NMIs are possible here because the interrupt handler
423 	 * may not be able to distinguish NMI IPIs from other NMIs, or
424 	 * because the caller may have timed out.
425 	 */
426 	nmi_ipi_lock_start(&flags);
427 	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
428 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
429 		fn = READ_ONCE(nmi_ipi_function);
430 		WARN_ON_ONCE(!fn);
431 		ret = 1;
432 	}
433 	nmi_ipi_unlock_end(&flags);
434 
435 	if (fn)
436 		fn(regs);
437 
438 	return ret;
439 }
440 
441 static void do_smp_send_nmi_ipi(int cpu, bool safe)
442 {
443 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
444 		return;
445 
446 	if (cpu >= 0) {
447 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
448 	} else {
449 		int c;
450 
451 		for_each_online_cpu(c) {
452 			if (c == raw_smp_processor_id())
453 				continue;
454 			do_message_pass(c, PPC_MSG_NMI_IPI);
455 		}
456 	}
457 }
458 
459 /*
460  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
461  * - fn is the target callback function.
462  * - delay_us > 0 is the delay before giving up waiting for targets to
463  *   begin executing the handler, == 0 specifies indefinite delay.
464  */
465 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
466 				u64 delay_us, bool safe)
467 {
468 	unsigned long flags;
469 	int me = raw_smp_processor_id();
470 	int ret = 1;
471 
472 	BUG_ON(cpu == me);
473 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
474 
475 	if (unlikely(!smp_ops))
476 		return 0;
477 
478 	nmi_ipi_lock_start(&flags);
479 	while (nmi_ipi_busy) {
480 		nmi_ipi_unlock_end(&flags);
481 		spin_until_cond(!nmi_ipi_busy);
482 		nmi_ipi_lock_start(&flags);
483 	}
484 	nmi_ipi_busy = true;
485 	nmi_ipi_function = fn;
486 
487 	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
488 
489 	if (cpu < 0) {
490 		/* ALL_OTHERS */
491 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
492 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
493 	} else {
494 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
495 	}
496 
497 	nmi_ipi_unlock();
498 
499 	/* Interrupts remain hard disabled */
500 
501 	do_smp_send_nmi_ipi(cpu, safe);
502 
503 	nmi_ipi_lock();
504 	/* nmi_ipi_busy is set here, so unlock/lock is okay */
505 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
506 		nmi_ipi_unlock();
507 		udelay(1);
508 		nmi_ipi_lock();
509 		if (delay_us) {
510 			delay_us--;
511 			if (!delay_us)
512 				break;
513 		}
514 	}
515 
516 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
517 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
518 		ret = 0;
519 		cpumask_clear(&nmi_ipi_pending_mask);
520 	}
521 
522 	nmi_ipi_function = NULL;
523 	nmi_ipi_busy = false;
524 
525 	nmi_ipi_unlock_end(&flags);
526 
527 	return ret;
528 }
529 
530 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
531 {
532 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
533 }
534 
535 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
536 {
537 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
538 }
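
/*
 * delay_us is decremented roughly once per microsecond of waiting (one
 * udelay(1) per poll of the pending mask), so the callers below that
 * pass 1000000 wait up to about one second for all targets to enter the
 * handler before giving up:
 *
 *	if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, handler, 1000000))
 *		... some targets never checked in ...
 */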
539 #endif /* CONFIG_NMI_IPI */
540 
541 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
542 void tick_broadcast(const struct cpumask *mask)
543 {
544 	unsigned int cpu;
545 
546 	for_each_cpu(cpu, mask)
547 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
548 }
549 #endif
550 
551 #ifdef CONFIG_DEBUGGER
552 void debugger_ipi_callback(struct pt_regs *regs)
553 {
554 	debugger_ipi(regs);
555 }
556 
557 void smp_send_debugger_break(void)
558 {
559 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
560 }
561 #endif
562 
563 #ifdef CONFIG_KEXEC_CORE
564 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
565 {
566 	int cpu;
567 
568 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
569 	if (kdump_in_progress() && crash_wake_offline) {
570 		for_each_present_cpu(cpu) {
571 			if (cpu_online(cpu))
572 				continue;
573 			/*
574 			 * crash_ipi_callback will wait for
575 			 * all cpus, including offline CPUs.
576 			 * We don't care about nmi_ipi_function.
577 			 * Offline cpus will jump straight into
578 			 * crash_ipi_callback, so we can skip the
579 			 * entire NMI dance and the wait for cpus
580 			 * to clear the pending mask, etc.
581 			 */
582 			do_smp_send_nmi_ipi(cpu, false);
583 		}
584 	}
585 }
586 #endif
587 
588 #ifdef CONFIG_NMI_IPI
589 static void nmi_stop_this_cpu(struct pt_regs *regs)
590 {
591 	/*
592 	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
593 	 */
594 	spin_begin();
595 	while (1)
596 		spin_cpu_relax();
597 }
598 
599 void smp_send_stop(void)
600 {
601 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
602 }
603 
604 #else /* CONFIG_NMI_IPI */
605 
606 static void stop_this_cpu(void *dummy)
607 {
608 	hard_irq_disable();
609 	spin_begin();
610 	while (1)
611 		spin_cpu_relax();
612 }
613 
614 void smp_send_stop(void)
615 {
616 	static bool stopped = false;
617 
618 	/*
619 	 * Prevent waiting on csd lock from a previous smp_send_stop.
620 	 * This is racy, but in general callers try to do the right
621 	 * thing and only fire off one smp_send_stop (e.g., see
622 	 * kernel/panic.c)
623 	 */
624 	if (stopped)
625 		return;
626 
627 	stopped = true;
628 
629 	smp_call_function(stop_this_cpu, NULL, 0);
630 }
631 #endif /* CONFIG_NMI_IPI */
632 
633 struct thread_info *current_set[NR_CPUS];
634 
635 static void smp_store_cpu_info(int id)
636 {
637 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
638 #ifdef CONFIG_PPC_FSL_BOOK3E
639 	per_cpu(next_tlbcam_idx, id)
640 		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
641 #endif
642 }
643 
644 /*
645  * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
646  * rather than just passing around the cpumask we pass around a function that
647  * returns that cpumask for the given CPU.
648  */
649 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
650 {
651 	cpumask_set_cpu(i, get_cpumask(j));
652 	cpumask_set_cpu(j, get_cpumask(i));
653 }
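
/*
 * For example, set_cpus_related(4, 5, cpu_sibling_mask) sets CPU 5 in
 * CPU 4's sibling mask and CPU 4 in CPU 5's, keeping the per-cpu masks
 * symmetric.
 */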
654 
655 #ifdef CONFIG_HOTPLUG_CPU
656 static void set_cpus_unrelated(int i, int j,
657 		struct cpumask *(*get_cpumask)(int))
658 {
659 	cpumask_clear_cpu(i, get_cpumask(j));
660 	cpumask_clear_cpu(j, get_cpumask(i));
661 }
662 #endif
663 
664 /*
665  * parse_thread_groups: Parses the "ibm,thread-groups" device tree
666  *                      property for the CPU device node @dn and stores
667  *                      the parsed output in the thread_groups
668  *                      structure @tg if the ibm,thread-groups[0]
669  *                      matches @property.
670  *
671  * @dn: The device node of the CPU device.
672  * @tg: Pointer to a thread group structure into which the parsed
673  *      output of "ibm,thread-groups" is stored.
674  * @property: The property of the thread-group that the caller is
675  *            interested in.
676  *
677  * ibm,thread-groups[0..N-1] array defines which group of threads in
678  * the CPU-device node can be grouped together based on the property.
679  *
680  * ibm,thread-groups[0] tells us the property based on which the
681  * threads are being grouped together. If this value is 1, it implies
682  * that the threads in the same group share the L1 and translation cache.
683  *
684  * ibm,thread-groups[1] tells us how many such thread groups exist.
685  *
686  * ibm,thread-groups[2] tells us the number of threads in each such
687  * group.
688  *
689  * ibm,thread-groups[3..N-1] is the list of threads identified by
690  * "ibm,ppc-interrupt-server#s" arranged as per their membership in
691  * the grouping.
692  *
693  * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
694  * implies that there are 2 groups of 4 threads each, where each group
695  * of threads shares the L1 and translation cache.
696  *
697  * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
698  * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
699  * 11, 12}.
700  *
701  * Returns 0 on success, -EINVAL if the property does not exist,
702  * -ENODATA if property does not have a value, and -EOVERFLOW if the
703  * property data isn't large enough.
704  */
705 static int parse_thread_groups(struct device_node *dn,
706 			       struct thread_groups *tg,
707 			       unsigned int property)
708 {
709 	int i;
710 	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
711 	u32 *thread_list;
712 	size_t total_threads;
713 	int ret;
714 
715 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
716 					 thread_group_array, 3);
717 	if (ret)
718 		return ret;
719 
720 	tg->property = thread_group_array[0];
721 	tg->nr_groups = thread_group_array[1];
722 	tg->threads_per_group = thread_group_array[2];
723 	if (tg->property != property ||
724 	    tg->nr_groups < 1 ||
725 	    tg->threads_per_group < 1)
726 		return -ENODATA;
727 
728 	total_threads = tg->nr_groups * tg->threads_per_group;
729 
730 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
731 					 thread_group_array,
732 					 3 + total_threads);
733 	if (ret)
734 		return ret;
735 
736 	thread_list = &thread_group_array[3];
737 
738 	for (i = 0 ; i < total_threads; i++)
739 		tg->thread_list[i] = thread_list[i];
740 
741 	return 0;
742 }
743 
744 /*
745  * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
746  *                              that @cpu belongs to.
747  *
748  * @cpu : The logical CPU whose thread group is being searched.
749  * @tg : The thread-group structure of the CPU node which @cpu belongs
750  *       to.
751  *
752  * Returns the index into tg->thread_list that points to the start
753  * of the thread_group that @cpu belongs to.
754  *
755  * Returns -1 if cpu doesn't belong to any of the groups pointed to by
756  * tg->thread_list.
757  */
758 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
759 {
760 	int hw_cpu_id = get_hard_smp_processor_id(cpu);
761 	int i, j;
762 
763 	for (i = 0; i < tg->nr_groups; i++) {
764 		int group_start = i * tg->threads_per_group;
765 
766 		for (j = 0; j < tg->threads_per_group; j++) {
767 			int idx = group_start + j;
768 
769 			if (tg->thread_list[idx] == hw_cpu_id)
770 				return group_start;
771 		}
772 	}
773 
774 	return -1;
775 }
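
/*
 * With the example property above (nr_groups = 2, threads_per_group = 4,
 * thread_list = {5,6,7,8,9,10,11,12}), a CPU whose hardware id is 9 is
 * found in the second group, so this returns 4, the index where that
 * group starts in thread_list.
 */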
776 
777 static int init_cpu_l1_cache_map(int cpu)
778 
779 {
780 	struct device_node *dn = of_get_cpu_node(cpu, NULL);
781 	struct thread_groups tg = {.property = 0,
782 				   .nr_groups = 0,
783 				   .threads_per_group = 0};
784 	int first_thread = cpu_first_thread_sibling(cpu);
785 	int i, cpu_group_start = -1, err = 0;
786 
787 	if (!dn)
788 		return -ENODATA;
789 
790 	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
791 	if (err)
792 		goto out;
793 
794 	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
795 				GFP_KERNEL,
796 				cpu_to_node(cpu));
797 
798 	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
799 
800 	if (unlikely(cpu_group_start == -1)) {
801 		WARN_ON_ONCE(1);
802 		err = -ENODATA;
803 		goto out;
804 	}
805 
806 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
807 		int i_group_start = get_cpu_thread_group_start(i, &tg);
808 
809 		if (unlikely(i_group_start == -1)) {
810 			WARN_ON_ONCE(1);
811 			err = -ENODATA;
812 			goto out;
813 		}
814 
815 		if (i_group_start == cpu_group_start)
816 			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
817 	}
818 
819 out:
820 	of_node_put(dn);
821 	return err;
822 }
823 
824 static int init_big_cores(void)
825 {
826 	int cpu;
827 
828 	for_each_possible_cpu(cpu) {
829 		int err = init_cpu_l1_cache_map(cpu);
830 
831 		if (err)
832 			return err;
833 
834 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
835 					GFP_KERNEL,
836 					cpu_to_node(cpu));
837 	}
838 
839 	has_big_cores = true;
840 	return 0;
841 }
842 
843 void __init smp_prepare_cpus(unsigned int max_cpus)
844 {
845 	unsigned int cpu;
846 
847 	DBG("smp_prepare_cpus\n");
848 
849 	/*
850 	 * setup_cpu may need to be called on the boot cpu. We haven't
851 	 * spun any cpus up yet, but let's be paranoid.
852 	 */
853 	BUG_ON(boot_cpuid != smp_processor_id());
854 
855 	/* Fixup boot cpu */
856 	smp_store_cpu_info(boot_cpuid);
857 	cpu_callin_map[boot_cpuid] = 1;
858 
859 	for_each_possible_cpu(cpu) {
860 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
861 					GFP_KERNEL, cpu_to_node(cpu));
862 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
863 					GFP_KERNEL, cpu_to_node(cpu));
864 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
865 					GFP_KERNEL, cpu_to_node(cpu));
866 		/*
867 		 * numa_node_id() works after this.
868 		 */
869 		if (cpu_present(cpu)) {
870 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
871 			set_cpu_numa_mem(cpu,
872 				local_memory_node(numa_cpu_lookup_table[cpu]));
873 		}
874 	}
875 
876 	/* Init the cpumasks so the boot CPU is related to itself */
877 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
878 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
879 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
880 
881 	init_big_cores();
882 	if (has_big_cores) {
883 		cpumask_set_cpu(boot_cpuid,
884 				cpu_smallcore_mask(boot_cpuid));
885 	}
886 
887 	if (smp_ops && smp_ops->probe)
888 		smp_ops->probe();
889 }
890 
891 void smp_prepare_boot_cpu(void)
892 {
893 	BUG_ON(smp_processor_id() != boot_cpuid);
894 #ifdef CONFIG_PPC64
895 	paca_ptrs[boot_cpuid]->__current = current;
896 #endif
897 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
898 	current_set[boot_cpuid] = task_thread_info(current);
899 }
900 
901 #ifdef CONFIG_HOTPLUG_CPU
902 
903 int generic_cpu_disable(void)
904 {
905 	unsigned int cpu = smp_processor_id();
906 
907 	if (cpu == boot_cpuid)
908 		return -EBUSY;
909 
910 	set_cpu_online(cpu, false);
911 #ifdef CONFIG_PPC64
912 	vdso_data->processorCount--;
913 #endif
914 	/* Update affinity of all IRQs previously aimed at this CPU */
915 	irq_migrate_all_off_this_cpu();
916 
917 	/*
918 	 * Depending on the details of the interrupt controller, it's possible
919 	 * that one of the interrupts we just migrated away from this CPU is
920 	 * actually already pending on this CPU. If we leave it in that state
921 	 * the interrupt will never be EOI'ed, and will never fire again. So
922 	 * temporarily enable interrupts here, to allow any pending interrupt to
923 	 * be received (and EOI'ed), before we take this CPU offline.
924 	 */
925 	local_irq_enable();
926 	mdelay(1);
927 	local_irq_disable();
928 
929 	return 0;
930 }
931 
932 void generic_cpu_die(unsigned int cpu)
933 {
934 	int i;
935 
936 	for (i = 0; i < 100; i++) {
937 		smp_rmb();
938 		if (is_cpu_dead(cpu))
939 			return;
940 		msleep(100);
941 	}
942 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
943 }
944 
945 void generic_set_cpu_dead(unsigned int cpu)
946 {
947 	per_cpu(cpu_state, cpu) = CPU_DEAD;
948 }
949 
950 /*
951  * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(); otherwise
952  * it is still CPU_DEAD from the previous generic_set_cpu_dead() call, and
953  * the wait loop in generic_cpu_die() returns immediately.
954  */
955 void generic_set_cpu_up(unsigned int cpu)
956 {
957 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
958 }
959 
960 int generic_check_cpu_restart(unsigned int cpu)
961 {
962 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
963 }
964 
965 int is_cpu_dead(unsigned int cpu)
966 {
967 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
968 }
969 
970 static bool secondaries_inhibited(void)
971 {
972 	return kvm_hv_mode_active();
973 }
974 
975 #else /* HOTPLUG_CPU */
976 
977 #define secondaries_inhibited()		0
978 
979 #endif
980 
981 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
982 {
983 	struct thread_info *ti = task_thread_info(idle);
984 
985 #ifdef CONFIG_PPC64
986 	paca_ptrs[cpu]->__current = idle;
987 	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
988 #endif
989 	ti->cpu = cpu;
990 	secondary_ti = current_set[cpu] = ti;
991 }
992 
993 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
994 {
995 	int rc, c;
996 
997 	/*
998 	 * Don't allow secondary threads to come online if inhibited
999 	 */
1000 	if (threads_per_core > 1 && secondaries_inhibited() &&
1001 	    cpu_thread_in_subcore(cpu))
1002 		return -EBUSY;
1003 
1004 	if (smp_ops == NULL ||
1005 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1006 		return -EINVAL;
1007 
1008 	cpu_idle_thread_init(cpu, tidle);
1009 
1010 	/*
1011 	 * The platform might need to allocate resources prior to bringing
1012 	 * up the CPU
1013 	 */
1014 	if (smp_ops->prepare_cpu) {
1015 		rc = smp_ops->prepare_cpu(cpu);
1016 		if (rc)
1017 			return rc;
1018 	}
1019 
1020 	/* Make sure callin-map entry is 0 (can be left over from a
1021 	 * previous CPU hotplug).
1022 	 */
1023 	cpu_callin_map[cpu] = 0;
1024 
1025 	/* The information for processor bringup must
1026 	 * be written out to main store before we release
1027 	 * the processor.
1028 	 */
1029 	smp_mb();
1030 
1031 	/* wake up cpus */
1032 	DBG("smp: kicking cpu %d\n", cpu);
1033 	rc = smp_ops->kick_cpu(cpu);
1034 	if (rc) {
1035 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1036 		return rc;
1037 	}
1038 
1039 	/*
1040 	 * Wait to see if the cpu made a callin (i.e. is actually up).
1041 	 * The timeout value was found through experimentation.
1042 	 * -- Cort
1043 	 */
1044 	if (system_state < SYSTEM_RUNNING)
1045 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1046 			udelay(100);
1047 #ifdef CONFIG_HOTPLUG_CPU
1048 	else
1049 		/*
1050 		 * CPUs can take much longer to come up in the
1051 		 * hotplug case.  Wait five seconds.
1052 		 */
1053 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1054 			msleep(1);
1055 #endif
1056 
1057 	if (!cpu_callin_map[cpu]) {
1058 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1059 		return -ENOENT;
1060 	}
1061 
1062 	DBG("Processor %u found.\n", cpu);
1063 
1064 	if (smp_ops->give_timebase)
1065 		smp_ops->give_timebase();
1066 
1067 	/* Wait until cpu puts itself in the online & active maps */
1068 	spin_until_cond(cpu_online(cpu));
1069 
1070 	return 0;
1071 }
1072 
1073 /* Return the value of the reg property corresponding to the given
1074  * logical cpu.
1075  */
1076 int cpu_to_core_id(int cpu)
1077 {
1078 	struct device_node *np;
1079 	const __be32 *reg;
1080 	int id = -1;
1081 
1082 	np = of_get_cpu_node(cpu, NULL);
1083 	if (!np)
1084 		goto out;
1085 
1086 	reg = of_get_property(np, "reg", NULL);
1087 	if (!reg)
1088 		goto out;
1089 
1090 	id = be32_to_cpup(reg);
1091 out:
1092 	of_node_put(np);
1093 	return id;
1094 }
1095 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1096 
1097 /* Helper routines for cpu to core mapping */
1098 int cpu_core_index_of_thread(int cpu)
1099 {
1100 	return cpu >> threads_shift;
1101 }
1102 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1103 
1104 int cpu_first_thread_of_core(int core)
1105 {
1106 	return core << threads_shift;
1107 }
1108 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
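
/*
 * Both helpers rely on the linear thread numbering, e.g. with
 * threads_shift = 3 (8 threads per core):
 *
 *	cpu_core_index_of_thread(13)	== 1
 *	cpu_first_thread_of_core(2)	== 16
 */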
1109 
1110 /* Must be called when no change can occur to cpu_present_mask,
1111  * i.e. during cpu online or offline.
1112  */
1113 static struct device_node *cpu_to_l2cache(int cpu)
1114 {
1115 	struct device_node *np;
1116 	struct device_node *cache;
1117 
1118 	if (!cpu_present(cpu))
1119 		return NULL;
1120 
1121 	np = of_get_cpu_node(cpu, NULL);
1122 	if (np == NULL)
1123 		return NULL;
1124 
1125 	cache = of_find_next_cache_node(np);
1126 
1127 	of_node_put(np);
1128 
1129 	return cache;
1130 }
1131 
1132 static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1133 {
1134 	struct device_node *l2_cache, *np;
1135 	int i;
1136 
1137 	l2_cache = cpu_to_l2cache(cpu);
1138 	if (!l2_cache)
1139 		return false;
1140 
1141 	for_each_cpu(i, cpu_online_mask) {
1142 		/*
1143 		 * When updating the masks, the current CPU has not been
1144 		 * marked online yet, but we still need to update the cache masks.
1145 		 */
1146 		np = cpu_to_l2cache(i);
1147 		if (!np)
1148 			continue;
1149 
1150 		if (np == l2_cache)
1151 			set_cpus_related(cpu, i, mask_fn);
1152 
1153 		of_node_put(np);
1154 	}
1155 	of_node_put(l2_cache);
1156 
1157 	return true;
1158 }
1159 
1160 #ifdef CONFIG_HOTPLUG_CPU
1161 static void remove_cpu_from_masks(int cpu)
1162 {
1163 	int i;
1164 
1165 	/* NB: cpu_core_mask is a superset of the others */
1166 	for_each_cpu(i, cpu_core_mask(cpu)) {
1167 		set_cpus_unrelated(cpu, i, cpu_core_mask);
1168 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1169 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1170 		if (has_big_cores)
1171 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1172 	}
1173 }
1174 #endif
1175 
1176 static inline void add_cpu_to_smallcore_masks(int cpu)
1177 {
1178 	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1179 	int i, first_thread = cpu_first_thread_sibling(cpu);
1180 
1181 	if (!has_big_cores)
1182 		return;
1183 
1184 	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1185 
1186 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
1187 		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1188 			set_cpus_related(i, cpu, cpu_smallcore_mask);
1189 	}
1190 }
1191 
1192 static void add_cpu_to_masks(int cpu)
1193 {
1194 	int first_thread = cpu_first_thread_sibling(cpu);
1195 	int chipid = cpu_to_chip_id(cpu);
1196 	int i;
1197 
1198 	/*
1199 	 * This CPU will not be in the online mask yet so we need to manually
1200 	 * add it to its own thread sibling mask.
1201 	 */
1202 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1203 
1204 	for (i = first_thread; i < first_thread + threads_per_core; i++)
1205 		if (cpu_online(i))
1206 			set_cpus_related(i, cpu, cpu_sibling_mask);
1207 
1208 	add_cpu_to_smallcore_masks(cpu);
1209 	/*
1210 	 * Copy the thread sibling mask into the cache sibling mask
1211 	 * and mark any CPUs that share an L2 with this CPU.
1212 	 */
1213 	for_each_cpu(i, cpu_sibling_mask(cpu))
1214 		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1215 	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1216 
1217 	/*
1218 	 * Copy the cache sibling mask into core sibling mask and mark
1219 	 * any CPUs on the same chip as this CPU.
1220 	 */
1221 	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1222 		set_cpus_related(cpu, i, cpu_core_mask);
1223 
1224 	if (chipid == -1)
1225 		return;
1226 
1227 	for_each_cpu(i, cpu_online_mask)
1228 		if (cpu_to_chip_id(i) == chipid)
1229 			set_cpus_related(cpu, i, cpu_core_mask);
1230 }
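
/*
 * After add_cpu_to_masks() the masks nest: cpu_sibling_mask(cpu) is a
 * subset of cpu_l2_cache_mask(cpu), which is a subset of
 * cpu_core_mask(cpu), since each level is seeded from the level below
 * before being widened (see also the superset note in
 * remove_cpu_from_masks() above).
 */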
1231 
1232 static bool shared_caches;
1233 
1234 /* Activate a secondary processor. */
1235 void start_secondary(void *unused)
1236 {
1237 	unsigned int cpu = smp_processor_id();
1238 	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1239 
1240 	mmgrab(&init_mm);
1241 	current->active_mm = &init_mm;
1242 
1243 	smp_store_cpu_info(cpu);
1244 	set_dec(tb_ticks_per_jiffy);
1245 	preempt_disable();
1246 	cpu_callin_map[cpu] = 1;
1247 
1248 	if (smp_ops->setup_cpu)
1249 		smp_ops->setup_cpu(cpu);
1250 	if (smp_ops->take_timebase)
1251 		smp_ops->take_timebase();
1252 
1253 	secondary_cpu_time_init();
1254 
1255 #ifdef CONFIG_PPC64
1256 	if (system_state == SYSTEM_RUNNING)
1257 		vdso_data->processorCount++;
1258 
1259 	vdso_getcpu_init();
1260 #endif
1261 	/* Update topology CPU masks */
1262 	add_cpu_to_masks(cpu);
1263 
1264 	if (has_big_cores)
1265 		sibling_mask = cpu_smallcore_mask;
1266 	/*
1267 	 * Check for any shared caches. Note that this must be done on a
1268 	 * per-core basis because one core in the pair might be disabled.
1269 	 */
1270 	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1271 		shared_caches = true;
1272 
1273 	set_numa_node(numa_cpu_lookup_table[cpu]);
1274 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1275 
1276 	smp_wmb();
1277 	notify_cpu_starting(cpu);
1278 	set_cpu_online(cpu, true);
1279 
1280 	boot_init_stack_canary();
1281 
1282 	local_irq_enable();
1283 
1284 	/* We can enable ftrace for secondary cpus now */
1285 	this_cpu_enable_ftrace();
1286 
1287 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1288 
1289 	BUG();
1290 }
1291 
1292 int setup_profiling_timer(unsigned int multiplier)
1293 {
1294 	return 0;
1295 }
1296 
1297 #ifdef CONFIG_SCHED_SMT
1298 /* cpumask of CPUs with asymmetric SMT dependency */
1299 static int powerpc_smt_flags(void)
1300 {
1301 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1302 
1303 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1304 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1305 		flags |= SD_ASYM_PACKING;
1306 	}
1307 	return flags;
1308 }
1309 #endif
1310 
1311 static struct sched_domain_topology_level powerpc_topology[] = {
1312 #ifdef CONFIG_SCHED_SMT
1313 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1314 #endif
1315 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1316 	{ NULL, },
1317 };
1318 
1319 /*
1320  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1321  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1322  * since the migrated task remains cache hot. We want to take advantage of this
1323  * at the scheduler level so an extra topology level is required.
1324  */
1325 static int powerpc_shared_cache_flags(void)
1326 {
1327 	return SD_SHARE_PKG_RESOURCES;
1328 }
1329 
1330 /*
1331  * We can't just pass cpu_l2_cache_mask() directly because
1332  * it returns a non-const pointer and the compiler barfs on that.
1333  */
1334 static const struct cpumask *shared_cache_mask(int cpu)
1335 {
1336 	return cpu_l2_cache_mask(cpu);
1337 }
1338 
1339 #ifdef CONFIG_SCHED_SMT
1340 static const struct cpumask *smallcore_smt_mask(int cpu)
1341 {
1342 	return cpu_smallcore_mask(cpu);
1343 }
1344 #endif
1345 
1346 static struct sched_domain_topology_level power9_topology[] = {
1347 #ifdef CONFIG_SCHED_SMT
1348 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1349 #endif
1350 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1351 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1352 	{ NULL, },
1353 };
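
/*
 * On a P9-style system the resulting scheduler domain hierarchy is,
 * from smallest to largest:
 *
 *	SMT	threads of one (small) core	cpu_smt_mask
 *	CACHE	the core pair sharing an L2	shared_cache_mask
 *	DIE	the remaining CPUs		cpu_cpu_mask
 *
 * The default powerpc_topology above is identical minus the CACHE
 * level.
 */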
1354 
1355 void __init smp_cpus_done(unsigned int max_cpus)
1356 {
1357 	/*
1358 	 * We are running pinned to the boot CPU, see rest_init().
1359 	 */
1360 	if (smp_ops && smp_ops->setup_cpu)
1361 		smp_ops->setup_cpu(boot_cpuid);
1362 
1363 	if (smp_ops && smp_ops->bringup_done)
1364 		smp_ops->bringup_done();
1365 
1366 	/*
1367 	 * On a shared LPAR, associativity needs to be requested.
1368 	 * Hence, get numa topology before dumping cpu topology
1369 	 */
1370 	shared_proc_topology_init();
1371 	dump_numa_cpu_topology();
1372 
1373 #ifdef CONFIG_SCHED_SMT
1374 	if (has_big_cores) {
1375 		pr_info("Using small cores at SMT level\n");
1376 		power9_topology[0].mask = smallcore_smt_mask;
1377 		powerpc_topology[0].mask = smallcore_smt_mask;
1378 	}
1379 #endif
1380 	/*
1381 	 * If any CPU detects that it's sharing a cache with another CPU then
1382 	 * use the deeper topology that is aware of this sharing.
1383 	 */
1384 	if (shared_caches) {
1385 		pr_info("Using shared cache scheduler topology\n");
1386 		set_sched_topology(power9_topology);
1387 	} else {
1388 		pr_info("Using standard scheduler topology\n");
1389 		set_sched_topology(powerpc_topology);
1390 	}
1391 }
1392 
1393 #ifdef CONFIG_HOTPLUG_CPU
1394 int __cpu_disable(void)
1395 {
1396 	int cpu = smp_processor_id();
1397 	int err;
1398 
1399 	if (!smp_ops->cpu_disable)
1400 		return -ENOSYS;
1401 
1402 	this_cpu_disable_ftrace();
1403 
1404 	err = smp_ops->cpu_disable();
1405 	if (err)
1406 		return err;
1407 
1408 	/* Update sibling maps */
1409 	remove_cpu_from_masks(cpu);
1410 
1411 	return 0;
1412 }
1413 
1414 void __cpu_die(unsigned int cpu)
1415 {
1416 	if (smp_ops->cpu_die)
1417 		smp_ops->cpu_die(cpu);
1418 }
1419 
1420 void cpu_die(void)
1421 {
1422 	/*
1423 	 * Disable ftrace on the down path. It will be re-enabled by
1424 	 * start_secondary() via start_secondary_resume() below.
1425 	 */
1426 	this_cpu_disable_ftrace();
1427 
1428 	if (ppc_md.cpu_die)
1429 		ppc_md.cpu_die();
1430 
1431 	/* If we return, we re-enter start_secondary */
1432 	start_secondary_resume();
1433 }
1434 
1435 #endif
1436