xref: /linux/arch/mips/kernel/smp-cps.c (revision c02ce1735b150cf7c3b43790b48e23dcd17c0d46)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/regdef.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

#define BEV_VEC_SIZE	0x500
#define BEV_VEC_ALIGN	0x1000

enum label_id {
	label_not_nmi = 1,
};

UASM_L_LA(_not_nmi)

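/*
 * core_power tracks which cores are currently powered up, core_entry_reg
 * holds the value programmed into the GCR reset/BEV base registers and
 * cps_vec_pa is the physical address of the BEV vector region allocated
 * by allocate_cps_vecs().
 */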
static DECLARE_BITMAP(core_power, NR_CPUS);
static uint32_t core_entry_reg;
static phys_addr_t cps_vec_pa;

struct core_boot_config *mips_cps_core_bootcfg;

static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}

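/*
 * Build the entry stub run from the BEV vector by a core coming out of
 * reset: check Status.NMI (an NMI also arrives here), stage the address
 * of nmi_handler in k0, set Cause.IV & a known-good Status value, then
 * jump to mips_cps_core_boot() through an uncached address with the
 * current CCA in a0 and the GCR base in a1.
 */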
static void __init *mips_cps_build_core_entry(void *addr)
{
	extern void (*nmi_handler)(void);
	u32 *p = addr;
	u32 val;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

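	/* Reset & NMI both enter here; Status.NMI tells them apart */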
	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
	UASM_i_LA(&p, GPR_T9, ST0_NMI);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);

	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
	uasm_i_nop(&p);
	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);

	uasm_l_not_nmi(&l, p);

	val = CAUSEF_IV;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
#else
	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
#endif
	uasm_i_jr(&p, GPR_T9);
	uasm_i_nop(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

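/*
 * Allocate the BEV vector region: preferably low enough to be reached
 * through KSEG1, otherwise (with a 64-bit CM) anywhere in the low 4GB
 * with CM_GCR_Cx_RESET_BASE_MODE set in the reset base value.
 */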
static int __init allocate_cps_vecs(void)
{
	/* Try to allocate the vector low enough to be reachable via KSEG1 */
	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
						0x0, CSEGX_SIZE - 1);

	if (cps_vec_pa)
		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;

	if (!cps_vec_pa && mips_cm_is64) {
		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
							0x0, SZ_4G - 1);
		if (cps_vec_pa)
			core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
					CM_GCR_Cx_RESET_BASE_MODE;
	}

	if (!cps_vec_pa)
		return -ENOMEM;

	return 0;
}

static void __init setup_cps_vecs(void)
{
	void *cps_vec;

	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
	mips_cps_build_core_entry(cps_vec);

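	/*
	 * Copy the kernel's exception handlers to the architectural BEV
	 * offsets: 0x200 TLB refill, 0x280 XTLB refill, 0x300 cache error,
	 * 0x380 general, 0x400 interrupt (Cause.IV=1) & 0x480 EJTAG debug.
	 */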
	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);

	/* Make sure no stale prefetched data remains in the caches */
	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa),
			       CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
	__sync();
}

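/*
 * smp_setup operation: probe the cluster/core/VPE topology from the CM,
 * record it in cpu_data, mark the CPUs of cluster 0 possible & present,
 * initialise core 0 and set up the boot vectors.
 */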
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (allocate_cps_vecs())
		pr_err("Failed to allocate CPS vectors\n");

	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
		write_gcr_bev_base(core_entry_reg);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

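/*
 * prepare_cpus operation: sanity-check the entry vector & CCA, install
 * the boot vectors and allocate the per-core & per-VPE boot configuration
 * structures; on failure fall back to a single CPU.
 */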
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;

	mips_mt_set_cpuoptions();

	if (!core_entry_reg) {
		pr_err("core_entry address unsuitable, disabling smp-cps\n");
		goto err_out;
	}

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/*
	 * Limit to a single core & warn the user if the CCA or dcache
	 * aliasing prevents multi-core SMP.
	 */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	setup_cps_vecs();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

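/*
 * Power up & reset a core. With a CPC the core is put through a reset
 * command and its sequencer polled until it reaches coherent execution
 * (U6); without one the core is simply released from reset via the GCRs.
 */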
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	write_gcr_co_reset_base(core_entry_reg);

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

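/*
 * Cross-called on a CPU within the target core: boot the sibling VPEs
 * that have been marked in the core's vpe_mask.
 */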
static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

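	/*
	 * On VP systems, make sure the VP fetches from our entry vector by
	 * (re)programming the target core's reset base.
	 */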
	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		write_gcr_co_reset_base(core_entry_reg);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
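	/* Arm the count/compare timer roughly eight jiffies into the future */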
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};

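/*
 * Take the calling CPU down, either by halting its TC/VP so the core can
 * keep running other VPEs (CPU_DEATH_HALT) or by entering the power-gated
 * idle state (CPU_DEATH_POWER).
 */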
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

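/*
 * cpu_disable operation: refuse unless the core can be power-gated, then
 * clear this VPE from its core's vpe_mask, mark the CPU offline and
 * migrate its interrupts away.
 */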
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;

void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

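/*
 * Cross-called on a sibling of the dying CPU: spin until the dying TC
 * has set its TCHalt.H bit, ie. has actually halted.
 */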
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

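/* Nothing to do here; the actual waiting happens in cps_cleanup_dead_cpu() */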
static void cps_cpu_die(unsigned int cpu) { }

static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/*
	 * Now wait for the CPU to actually offline. Without this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC_CORE
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;

	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
733