xref: /linux/arch/mips/kernel/smp-cps.c (revision 7a9b709e7cc5ce1ffb84ce07bf6d157e1de758df)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/regdef.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

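/*
 * The BEV vector block: the reset/NMI entry stub built by
 * mips_cps_build_core_entry() sits at offset 0 and the exception vectors
 * copied in by setup_cps_vecs() end at 0x480 + 0x80 = 0x500. The 4KiB
 * alignment matches the BEVEXCBASE field of the GCR reset base registers,
 * which holds address bits 31:12 (47:12 in the 64-bit register).
 */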
#define BEV_VEC_SIZE	0x500
#define BEV_VEC_ALIGN	0x1000

enum label_id {
	label_not_nmi = 1,
};

UASM_L_LA(_not_nmi)

static u64 core_entry_reg;
static phys_addr_t cps_vec_pa;

struct cluster_boot_config *mips_cps_cluster_bootcfg;

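/*
 * Power up the CM of another cluster (CM 3.5 onwards, where each cluster
 * has its own CM & CPC reachable through the redirect/"other" register
 * block). Sequencer state U5 is the state in which the cluster's CM is up
 * and running, so seeing it means there is nothing left to do.
 */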
static void power_up_other_cluster(unsigned int cluster)
{
	u32 stat, seq_state;
	unsigned int timeout;

	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	stat = read_cpc_co_stat_conf();
	mips_cm_unlock_other();

	seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
	seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
	if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
		return;

	/* Set endianness & power up the CM */
	mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
	write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
	write_cpc_redir_pwrup_ctl(1);
	mips_cm_unlock_other();

	/* Wait for the CM to start up */
	timeout = 1000;
	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	while (1) {
		stat = read_cpc_co_stat_conf();
		seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
		seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
		if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
			break;

		if (timeout) {
			mdelay(1);
			timeout--;
		} else {
			pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
				cluster, stat);
			mdelay(1000);
		}
	}

	mips_cm_unlock_other();
}

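/* Number of VPs (VPEs) to bring up in a core, capped by smp_max_threads. */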
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}

static void __init *mips_cps_build_core_entry(void *addr)
{
	extern void (*nmi_handler)(void);
	u32 *p = addr;
	u32 val;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

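	/*
	 * This stub runs both after reset and on an NMI, which share the
	 * boot exception vector; Status.NMI tells the two cases apart.
	 */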
	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
	UASM_i_LA(&p, GPR_T9, ST0_NMI);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);

	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
	uasm_i_nop(&p);
	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);

	uasm_l_not_nmi(&l, p);

	val = CAUSEF_IV;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);
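	/*
	 * Pass the CCA in use and the GCR base to mips_cps_core_boot(), and
	 * jump to it through an uncached address: the incoming core's caches
	 * have not been initialised yet.
	 */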
	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
#else
	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
#endif
	uasm_i_jr(&p, GPR_T9);
	uasm_i_nop(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

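/*
 * Probe whether the 64-bit reset base register implements the full 47-bit
 * BEVEXCBASE field by writing all-ones to it and reading it back.
 */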
static bool __init check_64bit_reset(void)
{
	bool cx_64bit_reset = false;

	mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE);
	if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) ==
	    CM_GCR_Cx_RESET64_BASE_BEVEXCBASE)
		cx_64bit_reset = true;
	mips_cm_unlock_other();

	return cx_64bit_reset;
}

static int __init allocate_cps_vecs(void)
{
	/* Try to allocate in KSEG1 first */
	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
						0x0, CSEGX_SIZE - 1);

	if (cps_vec_pa)
		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;

	if (!cps_vec_pa && mips_cm_is64) {
		phys_addr_t end;

		if (check_64bit_reset()) {
			pr_info("VP Local Reset Exception Base supports 47-bit addresses\n");
			end = MEMBLOCK_ALLOC_ANYWHERE;
		} else {
			end = SZ_4G - 1;
		}
		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end);
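		/*
		 * Outside the legacy KSEG1 window the base is programmed with
		 * the MODE bit set, which (going by the field names) selects
		 * the non-legacy reset vector mode where BEVEXCBASE supplies
		 * the full address rather than the 0xBFC00000 boot region.
		 */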
		if (cps_vec_pa) {
			if (check_64bit_reset())
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) |
					CM_GCR_Cx_RESET_BASE_MODE;
			else
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
					CM_GCR_Cx_RESET_BASE_MODE;
		}
	}

	if (!cps_vec_pa)
		return -ENOMEM;

	return 0;
}

static void __init setup_cps_vecs(void)
{
	void *cps_vec;

	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
	mips_cps_build_core_entry(cps_vec);

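	/*
	 * Copy the canned handlers from cps-vec.S to the offsets the
	 * architecture defines for Status.BEV=1 operation: TLB refill at
	 * +0x200, XTLB refill at +0x280, cache error at +0x300, general
	 * exception at +0x380, interrupt (Cause.IV=1) at +0x400 and EJTAG
	 * debug at +0x480.
	 */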
	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);

	/* Make sure no prefetched data in cache */
	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
	__sync();
}

static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		if (mips_cm_revision() >= CM_REV_CM3_5)
			power_up_other_cluster(cl);

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (allocate_cps_vecs())
		pr_err("Failed to allocate CPS vectors\n");

	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
		write_gcr_bev_base(core_entry_reg);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned int nclusters, ncores, core_vpes, c, cl, cca;
	bool cca_unsuitable, cores_limited;
	struct cluster_boot_config *cluster_bootcfg;
	struct core_boot_config *core_bootcfg;

	mips_mt_set_cpuoptions();

	if (!core_entry_reg) {
		pr_err("core_entry address unsuitable, disabling smp-cps\n");
		goto err_out;
	}

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	setup_cps_vecs();

	/* Allocate cluster boot configuration structs */
	nclusters = mips_cps_numclusters();
	mips_cps_cluster_bootcfg = kcalloc(nclusters,
					   sizeof(*mips_cps_cluster_bootcfg),
					   GFP_KERNEL);
	if (!mips_cps_cluster_bootcfg)
		goto err_out;

	if (nclusters > 1)
		mips_cm_update_property();

	for (cl = 0; cl < nclusters; cl++) {
		/* Allocate core boot configuration structs */
		ncores = mips_cps_numcores(cl);
		core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg),
					GFP_KERNEL);
		if (!core_bootcfg)
			goto err_out;
		mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;

		mips_cps_cluster_bootcfg[cl].core_power =
			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
				GFP_KERNEL);
		if (!mips_cps_cluster_bootcfg[cl].core_power)
			goto err_out;

		/* Allocate VPE boot configuration structs */
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);
			core_bootcfg[c].vpe_config = kcalloc(core_vpes,
					sizeof(*core_bootcfg[c].vpe_config),
					GFP_KERNEL);
			if (!core_bootcfg[c].vpe_config)
				goto err_out;
		}
	}

	/* Mark this CPU as powered up & booted */
	cl = cpu_cluster(&current_cpu_data);
	c = cpu_core(&current_cpu_data);
	cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
	core_bootcfg = &cluster_bootcfg->core_config[c];
	bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
	atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_cluster_bootcfg) {
		for (cl = 0; cl < nclusters; cl++) {
			cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
			if (!cluster_bootcfg->core_config)
				continue;
			ncores = mips_cps_numcores(cl);
			for (c = 0; c < ncores; c++) {
				core_bootcfg = &cluster_bootcfg->core_config[c];
				kfree(core_bootcfg->vpe_config);
			}
			kfree(cluster_bootcfg->core_power);
			kfree(cluster_bootcfg->core_config);
		}
		kfree(mips_cps_cluster_bootcfg);
		mips_cps_cluster_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

static void init_cluster_l2(void)
{
	u32 l2_cfg, l2sm_cop, result;

	while (!mips_cm_is_l2_hci_broken) {
		l2_cfg = read_gcr_redir_l2_ram_config();

		/* If HCI is not supported, use the state machine below */
		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
			break;
		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
			break;

		/* If the HCI_DONE bit is set, we're finished */
		if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
			return;
	}

	l2sm_cop = read_gcr_redir_l2sm_cop();
	if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
		 "L2 init not supported on this system yet"))
		return;

	/* Clear L2 tag registers */
	write_gcr_redir_l2_tag_state(0);
	write_gcr_redir_l2_ecc(0);

	/* Ensure the L2 tag writes complete before the state machine starts */
	mb();

	/* Wait for the L2 state machine to be idle */
	do {
		l2sm_cop = read_gcr_redir_l2sm_cop();
	} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);

	/* Start a store tag operation */
	l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
	l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
	l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
	write_gcr_redir_l2sm_cop(l2sm_cop);

	/* Ensure the state machine starts before we poll for completion */
	mb();

	/* Wait for the operation to be complete */
	do {
		l2sm_cop = read_gcr_redir_l2sm_cop();
		result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
		result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
	} while (!result);

	WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
	     "L2 state machine failed cache init with error %u\n", result);
}

static void boot_core(unsigned int cluster, unsigned int core,
		      unsigned int vpe_id)
{
	struct cluster_boot_config *cluster_cfg;
	u32 access, stat, seq_state;
	unsigned int timeout, ncores;

	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
	ncores = mips_cps_numcores(cluster);

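	/*
	 * Bringing up the first CPU in a powered-down cluster: power the
	 * cluster up, initialise its L2 and mirror this cluster's GCR/CPC/GIC
	 * base and L2/ECC configuration into it via the CM redirect block.
	 */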
	if ((cluster != cpu_cluster(&current_cpu_data)) &&
	    bitmap_empty(cluster_cfg->core_power, ncores)) {
		power_up_other_cluster(cluster);

		mips_cm_lock_other(cluster, core, 0,
				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);

		/* Ensure cluster GCRs are where we expect */
		write_gcr_redir_base(read_gcr_base());
		write_gcr_redir_cpc_base(read_gcr_cpc_base());
		write_gcr_redir_gic_base(read_gcr_gic_base());

		init_cluster_l2();

		/* Mirror L2 configuration */
		write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
		write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
		write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());

		/* Mirror ECC/parity setup */
		write_gcr_redir_err_control(read_gcr_err_control());

		/* Set BEV base */
		write_gcr_redir_bev_base(core_entry_reg);

		mips_cm_unlock_other();
	}

	if (cluster != cpu_cluster(&current_cpu_data)) {
		mips_cm_lock_other(cluster, core, 0,
				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);

		/* Ensure the core can access the GCRs */
		access = read_gcr_redir_access();
		access |= BIT(core);
		write_gcr_redir_access(access);

		mips_cm_unlock_other();
	} else {
		/* Ensure the core can access the GCRs */
		access = read_gcr_access();
		access |= BIT(core);
		write_gcr_access(access);
	}

	/* Select the appropriate core */
	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	if (mips_cm_is64)
		write_gcr_co_reset64_base(core_entry_reg);
	else
		write_gcr_co_reset_base(core_entry_reg);

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	if (mips_cm_revision() < CM_REV_CM3)
		set_gcr_access(1 << core);
	else
		set_gcr_access_cm3(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(cluster_cfg->core_power, core, 1);

	/*
	 * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
	 * the cluster do (eg. if they're all removed via hotplug).
	 */
	if (mips_cm_revision() >= CM_REV_CM3_5) {
		mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
		write_cpc_redir_pwrup_ctl(0);
		mips_cm_unlock_other();
	}
}

static void remote_vpe_boot(void *dummy)
{
	unsigned int cluster = cpu_cluster(&current_cpu_data);
	unsigned core = cpu_core(&current_cpu_data);
	struct cluster_boot_config *cluster_cfg =
		&mips_cps_cluster_bootcfg[cluster];
	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct cluster_boot_config *cluster_cfg =
		&mips_cps_cluster_bootcfg[cluster];
	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

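	/*
	 * Flag this VPE as pending in the core's vpe_mask so that
	 * mips_cps_boot_vpes() knows it should be started.
	 */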
	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, cluster_cfg->core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(cluster, core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(cluster, core, vpe_id,
				   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		if (mips_cm_is64)
			write_gcr_co_reset64_base(core_entry_reg);
		else
			write_gcr_co_reset_base(core_entry_reg);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

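	/*
	 * Unmask the hardware interrupt lines (IP2-IP7) through which the GIC
	 * delivers interrupts. In EIC mode (cpu_has_veic) the IM bits are
	 * repurposed as an interrupt priority level, so clear them instead.
	 */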
	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
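	/* Arm the CP0 count/compare timer roughly eight jiffies out. */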
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

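/*
 * HALT stops just the dying TC/VP and leaves its core powered, for use when
 * a sibling VPE remains online; POWER gates the whole core through the CPS
 * power domains.
 */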
enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};

static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct cluster_boot_config *cluster_cfg;
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)];
	core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;

void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

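/*
 * Runs on a sibling CPU that has register access to the dying TC: spin
 * until that TC's TCHALT_H bit reads back set, i.e. the halt has taken
 * effect.
 */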
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

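/* Nothing to do here; the wait for the CPU to die is in cps_cleanup_dead_cpu(). */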
static void cps_cpu_die(unsigned int cpu) { }

static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;
	struct cluster_boot_config *cluster_cfg;

	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(cluster_cfg->core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC_CORE
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
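
/*
 * Typical use (a sketch, not taken from this file): platform setup code
 * tries these ops first and falls back to a uniprocessor setup when no CM
 * is present, e.g.:
 *
 *	if (!register_cps_smp_ops())
 *		return;
 *	register_up_smp_ops();
 */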