// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/regdef.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

#define BEV_VEC_SIZE	0x500
#define BEV_VEC_ALIGN	0x1000

enum label_id {
	label_not_nmi = 1,
};

UASM_L_LA(_not_nmi)

static DECLARE_BITMAP(core_power, NR_CPUS);
static u64 core_entry_reg;
static phys_addr_t cps_vec_pa;

struct core_boot_config *mips_cps_core_bootcfg;

static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}

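/*
 * Use uasm to generate the small stub placed at the BEV reset vector for
 * secondary cores: it inspects Status.NMI, sets Cause.IV, writes a known
 * Status value (CU0 | CU1 | BEV, plus KX on 64-bit), then passes the boot
 * CCA and the GCR base in a0/a1 and jumps to mips_cps_core_boot through an
 * uncached address.
 */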
static void __init *mips_cps_build_core_entry(void *addr)
{
	extern void (*nmi_handler)(void);
	u32 *p = addr;
	u32 val;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
	UASM_i_LA(&p, GPR_T9, ST0_NMI);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);

	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
	uasm_i_nop(&p);
	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);

	uasm_l_not_nmi(&l, p);

	val = CAUSEF_IV;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
#else
	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
#endif
	uasm_i_jr(&p, GPR_T9);
	uasm_i_nop(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

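/*
 * Probe whether the CM implements the full 47-bit BEVEXCBASE field of
 * GCR_Cx_RESET64_BASE, by writing the field's mask and reading it back.
 */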
static bool __init check_64bit_reset(void)
{
	bool cx_64bit_reset = false;

	mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE);
	if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) ==
	    CM_GCR_Cx_RESET64_BASE_BEVEXCBASE)
		cx_64bit_reset = true;
	mips_cm_unlock_other();

	return cx_64bit_reset;
}

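/*
 * Reserve memory for the BEV vector area. A KSEG1-addressable region is
 * preferred; with a 64-bit CM the allocation may instead come from below
 * 4GB, or from anywhere if the full 47-bit BEVEXCBASE field is implemented.
 * core_entry_reg is set to the value to be programmed into the cores'
 * reset base registers.
 */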
static int __init allocate_cps_vecs(void)
{
	/* Try to allocate in KSEG1 first */
	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
						0x0, CSEGX_SIZE - 1);

	if (cps_vec_pa)
		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;

	if (!cps_vec_pa && mips_cm_is64) {
		phys_addr_t end;

		if (check_64bit_reset()) {
			pr_info("VP Local Reset Exception Base supports 47-bit addresses\n");
			end = MEMBLOCK_ALLOC_ANYWHERE;
		} else {
			end = SZ_4G - 1;
		}
		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end);
		if (cps_vec_pa) {
			if (check_64bit_reset())
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) |
					CM_GCR_Cx_RESET_BASE_MODE;
			else
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
					CM_GCR_Cx_RESET_BASE_MODE;
		}
	}

	if (!cps_vec_pa)
		return -ENOMEM;

	return 0;
}

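/*
 * Populate the reserved vector area: generate the reset entry code at
 * offset 0, copy the exception stubs to their architectural offsets, then
 * invalidate the affected cache lines so that other cores fetch the new
 * code from memory.
 */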
static void __init setup_cps_vecs(void)
{
	void *cps_vec;

	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
	mips_cps_build_core_entry(cps_vec);

	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);

	/* Make sure no prefetched data in cache */
	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
	__sync();
}

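/*
 * The plat_smp_ops .smp_setup callback: detect the cluster/core/VPE
 * topology, mark the VPEs of cluster 0 possible & present, switch to a
 * coherent CCA, join core 0 to the coherent domain and reserve the BEV
 * vector area.
 */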
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (allocate_cps_vecs())
		pr_err("Failed to allocate CPS vectors\n");

	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
		write_gcr_bev_base(core_entry_reg);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

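/*
 * The plat_smp_ops .prepare_cpus callback: check that the CCA and cache
 * configuration permit multi-core SMP (falling back to a single core
 * otherwise), install the BEV vectors and allocate the per-core/per-VPE
 * boot configuration consumed by mips_cps_boot_vpes().
 */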
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;

	mips_mt_set_cpuoptions();

	if (!core_entry_reg) {
		pr_err("core_entry address unsuitable, disabling smp-cps\n");
		goto err_out;
	}

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	setup_cps_vecs();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

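/*
 * Power up & reset a currently powered-down core: point its reset vector at
 * the CPS entry code, grant it GCR access and, where a CPC is present,
 * issue a reset command and wait for the core to reach the coherent
 * execution (U6) sequencer state.
 */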
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	if (mips_cm_is64)
		write_gcr_co_reset64_base(core_entry_reg);
	else
		write_gcr_co_reset_base(core_entry_reg);

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	if (mips_cm_revision() < CM_REV_CM3)
		set_gcr_access(1 << core);
	else
		set_gcr_access_cm3(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

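/*
 * The plat_smp_ops .boot_secondary callback: record the target VPE's entry
 * point, stack & gp in its boot config, then start it either by powering up
 * its core, by asking an online sibling CPU to run mips_cps_boot_vpes(), or
 * by booting it directly when it shares a core with the current CPU.
 */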
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		if (mips_cm_is64)
			write_gcr_co_reset64_base(core_entry_reg);
		else
			write_gcr_co_reset_base(core_entry_reg);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};

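/*
 * Take the calling CPU out of service, either by halting just this TC/VP
 * (CPU_DEATH_HALT) or by power gating its whole core through the CPS power
 * management code (CPU_DEATH_POWER).
 */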
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;

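/*
 * Final code run by a CPU that has been taken offline: choose between
 * halting only this VPE (when a sibling VPE in the same core remains
 * online) and powering down the whole core, report the CPU dead to the
 * hotplug core, then shut down. This function must not return.
 */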
void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

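/*
 * Run on a sibling CPU of the dying CPU: spin until the dying CPU's TC has
 * set TCHalt.H, i.e. until it has really halted.
 */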
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu) { }

static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/*
	 * Now wait for the CPU to actually offline. Without this wait, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC_CORE
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}