// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/regdef.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

#define BEV_VEC_SIZE	0x500
#define BEV_VEC_ALIGN	0x1000

enum label_id {
	label_not_nmi = 1,
};

UASM_L_LA(_not_nmi)

static u64 core_entry_reg;
static phys_addr_t cps_vec_pa;

struct cluster_boot_config *mips_cps_cluster_bootcfg;
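
/*
 * Power up the CM of another cluster & wait for it to become ready.
 * The U5 sequencer state is taken here to mean the cluster is powered
 * up & clocked; if it is already there this is a no-op.
 */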
static void power_up_other_cluster(unsigned int cluster)
{
	u32 stat, seq_state;
	unsigned int timeout;

	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	stat = read_cpc_co_stat_conf();
	mips_cm_unlock_other();

	seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
	seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
	if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
		return;

	/* Set endianness & power up the CM */
	mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
	write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
	write_cpc_redir_pwrup_ctl(1);
	mips_cm_unlock_other();

	/* Wait for the CM to start up */
	timeout = 1000;
	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	while (1) {
		stat = read_cpc_co_stat_conf();
		seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
		seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
		if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
			break;

		if (timeout) {
			mdelay(1);
			timeout--;
		} else {
			pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
				cluster, stat);
			mdelay(1000);
		}
	}

	mips_cm_unlock_other();
}
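
/* Number of VPEs to bring up in a core, capped by smp_max_threads */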
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}
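
/*
 * Generate the BEV entry code run by secondary cores out of reset,
 * using uasm. The generated code checks Status.NMI to tell NMIs apart
 * from regular resets, sets up Cause.IV & a known Status value, then
 * jumps to mips_cps_core_boot through an uncached address with the
 * current CCA in a0 & the GCR base in a1.
 */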
static void __init *mips_cps_build_core_entry(void *addr)
{
	extern void (*nmi_handler)(void);
	u32 *p = addr;
	u32 val;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
	UASM_i_LA(&p, GPR_T9, ST0_NMI);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);

	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
	uasm_i_nop(&p);
	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);

	uasm_l_not_nmi(&l, p);

	val = CAUSEF_IV;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
#else
	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
#endif
	uasm_i_jr(&p, GPR_T9);
	uasm_i_nop(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
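
/*
 * Probe whether the full 64-bit BEVEXCBASE field of the cluster-other
 * GCR_Cx_RESET64_BASE register is writable, ie. whether reset vectors
 * may be placed above the 32-bit boundary.
 */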
static bool __init check_64bit_reset(void)
{
	bool cx_64bit_reset = false;

	mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE);
	if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) ==
	    CM_GCR_Cx_RESET64_BASE_BEVEXCBASE)
		cx_64bit_reset = true;
	mips_cm_unlock_other();

	return cx_64bit_reset;
}
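
/*
 * Allocate memory for the BEV vectors, preferring physical memory
 * reachable through KSEG1. If that fails & the CM implements 64-bit
 * registers, fall back to any address the reset base register can
 * encode & record the value to program into GCR_Cx_RESET_BASE.
 */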
static int __init allocate_cps_vecs(void)
{
	/* Try to allocate in KSEG1 first */
	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
					       0x0, CSEGX_SIZE - 1);

	if (cps_vec_pa)
		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;

	if (!cps_vec_pa && mips_cm_is64) {
		phys_addr_t end;

		if (check_64bit_reset()) {
			pr_info("VP Local Reset Exception Base support 47 bits address\n");
			end = MEMBLOCK_ALLOC_ANYWHERE;
		} else {
			end = SZ_4G - 1;
		}
		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end);
		if (cps_vec_pa) {
			if (check_64bit_reset())
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) |
						 CM_GCR_Cx_RESET_BASE_MODE;
			else
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
						 CM_GCR_Cx_RESET_BASE_MODE;
		}
	}

	if (!cps_vec_pa)
		return -ENOMEM;

	return 0;
}
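
/*
 * Copy the core entry & exception handler code to the allocated BEV
 * vector area at the standard BEV offsets, then invalidate the
 * affected cache lines so secondary cores fetch the new code from
 * memory rather than stale cache contents.
 */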
static void __init setup_cps_vecs(void)
{
	void *cps_vec;

	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
	mips_cps_build_core_entry(cps_vec);

	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);

	/* Make sure no prefetched data in cache */
	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa),
			       CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
	__sync();
}
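
/*
 * Detect & record the cluster/core/VPE topology, mark the
 * corresponding CPUs possible & present, then perform the early setup
 * (CCA, core 0 init, coherence, BEV vectors) needed before secondary
 * CPUs can be brought up.
 */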
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		if (mips_cm_revision() >= CM_REV_CM3_5)
			power_up_other_cluster(cl);

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;
			cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask);

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (allocate_cps_vecs())
		pr_err("Failed to allocate CPS vectors\n");

	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
		write_gcr_bev_base(core_entry_reg);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
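
/*
 * Skip delay recalibration where possible: assuming all CPUs within a
 * cluster run at the same rate, reuse the udelay_val already
 * calibrated by the first CPU online in this cluster.
 */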
unsigned long calibrate_delay_is_known(void)
{
	int first_cpu_cluster = 0;

	/* The calibration has to be done on the primary CPU of the cluster */
	if (mips_cps_first_online_in_cluster(&first_cpu_cluster))
		return 0;

	return cpu_data[first_cpu_cluster].udelay_val;
}
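
/*
 * Prepare for SMP bringup: check the CCA allows multi-core operation,
 * install the BEV vectors & allocate the per-cluster, per-core &
 * per-VPE boot configuration used by boot_core() &
 * mips_cps_boot_vpes().
 */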
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca;
	bool cca_unsuitable, cores_limited;
	struct cluster_boot_config *cluster_bootcfg;
	struct core_boot_config *core_bootcfg;

	mips_mt_set_cpuoptions();

	if (!core_entry_reg) {
		pr_err("core_entry address unsuitable, disabling smp-cps\n");
		goto err_out;
	}

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	setup_cps_vecs();

	/* Allocate cluster boot configuration structs */
	nclusters = mips_cps_numclusters();
	mips_cps_cluster_bootcfg = kzalloc_objs(*mips_cps_cluster_bootcfg,
						nclusters);
	if (!mips_cps_cluster_bootcfg)
		goto err_out;

	if (nclusters > 1)
		mips_cm_update_property();

	for (cl = 0; cl < nclusters; cl++) {
		/* Allocate core boot configuration structs */
		ncores = mips_cps_numcores(cl);
		core_bootcfg = kzalloc_objs(*core_bootcfg, ncores);
		if (!core_bootcfg)
			goto err_out;
		mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;

		mips_cps_cluster_bootcfg[cl].core_power =
			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
				GFP_KERNEL);
		if (!mips_cps_cluster_bootcfg[cl].core_power)
			goto err_out;

		/* Allocate VPE boot configuration structs */
		for (c = 0; c < ncores; c++) {
			int v;

			core_vpes = core_vpe_count(cl, c);
			core_bootcfg[c].vpe_config = kzalloc_objs(*core_bootcfg[c].vpe_config,
								  core_vpes);
			for (v = 0; v < core_vpes; v++)
				cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask);
			if (!core_bootcfg[c].vpe_config)
				goto err_out;
		}
	}

	/* Mark this CPU as powered up & booted */
	cl = cpu_cluster(&current_cpu_data);
	c = cpu_core(&current_cpu_data);
	cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
	cpu_smt_set_num_threads(core_vpes, core_vpes);
	core_bootcfg = &cluster_bootcfg->core_config[c];
	bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
	atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_cluster_bootcfg) {
		for (cl = 0; cl < nclusters; cl++) {
			cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
			ncores = mips_cps_numcores(cl);
			for (c = 0; c < ncores; c++) {
				core_bootcfg = &cluster_bootcfg->core_config[c];
				kfree(core_bootcfg->vpe_config);
			}
			kfree(mips_cps_cluster_bootcfg[cl].core_config);
		}
		kfree(mips_cps_cluster_bootcfg);
		mips_cps_cluster_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
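
/*
 * Initialise the L2 cache of a cluster being powered up. If hardware
 * cache initialisation (HCI) is present & has completed there is
 * nothing to do; otherwise fall back to the L2 state machine (L2SM)
 * to store tags to every line.
 */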
static void init_cluster_l2(void)
{
	u32 l2_cfg, l2sm_cop, result;

	while (!mips_cm_is_l2_hci_broken) {
		l2_cfg = read_gcr_redir_l2_ram_config();

		/* If HCI is not supported, use the state machine below */
		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
			break;
		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
			break;

		/* If the HCI_DONE bit is set, we're finished */
		if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
			return;
	}

	l2sm_cop = read_gcr_redir_l2sm_cop();
	if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
		 "L2 init not supported on this system yet"))
		return;

	/* Clear L2 tag registers */
	write_gcr_redir_l2_tag_state(0);
	write_gcr_redir_l2_ecc(0);

	/* Ensure the L2 tag writes complete before the state machine starts */
	mb();

	/* Wait for the L2 state machine to be idle */
	do {
		l2sm_cop = read_gcr_redir_l2sm_cop();
	} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);

	/* Start a store tag operation */
	l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
	l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
	l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
	write_gcr_redir_l2sm_cop(l2sm_cop);

	/* Ensure the state machine starts before we poll for completion */
	mb();

	/* Wait for the operation to be complete */
	do {
		l2sm_cop = read_gcr_redir_l2sm_cop();
		result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
		result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
	} while (!result);

	WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
	     "L2 state machine failed cache init with error %u\n", result);
}
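
/*
 * Boot a core, powering up & configuring its cluster first if needed.
 * The core is granted GCR access, pointed at our entry code & released
 * from reset via the CPC, then polled until it reaches the U6
 * (coherent execution) sequencer state.
 */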
static void boot_core(unsigned int cluster, unsigned int core,
		      unsigned int vpe_id)
{
	struct cluster_boot_config *cluster_cfg;
	u32 access, stat, seq_state;
	unsigned int timeout, ncores;

	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
	ncores = mips_cps_numcores(cluster);

	if ((cluster != cpu_cluster(&current_cpu_data)) &&
	    bitmap_empty(cluster_cfg->core_power, ncores)) {
		power_up_other_cluster(cluster);

		mips_cm_lock_other(cluster, core, 0,
				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);

		/* Ensure cluster GCRs are where we expect */
		write_gcr_redir_base(read_gcr_base());
		write_gcr_redir_cpc_base(read_gcr_cpc_base());
		write_gcr_redir_gic_base(read_gcr_gic_base());

		init_cluster_l2();

		/* Mirror L2 configuration */
		write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
		write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
		write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());

		/* Mirror ECC/parity setup */
		write_gcr_redir_err_control(read_gcr_err_control());

		/* Set BEV base */
		write_gcr_redir_bev_base(core_entry_reg);

		mips_cm_unlock_other();
	}

	if (cluster != cpu_cluster(&current_cpu_data)) {
		mips_cm_lock_other(cluster, core, 0,
				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);

		/* Ensure the core can access the GCRs */
		access = read_gcr_redir_access();
		access |= BIT(core);
		write_gcr_redir_access(access);

		mips_cm_unlock_other();
	} else {
		/* Ensure the core can access the GCRs */
		access = read_gcr_access();
		access |= BIT(core);
		write_gcr_access(access);
	}

	/* Select the appropriate core */
	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	if (mips_cm_is64)
		write_gcr_co_reset64_base(core_entry_reg);
	else
		write_gcr_co_reset_base(core_entry_reg);

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	if (mips_cm_revision() < CM_REV_CM3)
		set_gcr_access(1 << core);
	else
		set_gcr_access_cm3(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(cluster_cfg->core_power, core, 1);

	/*
	 * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
	 * the cluster do (eg. if they're all removed via hotplug).
	 */
	if (mips_cm_revision() >= CM_REV_CM3_5) {
		mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
		write_cpc_redir_pwrup_ctl(0);
		mips_cm_unlock_other();
	}
}
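
/*
 * smp_call_function_single() callback: runs on a CPU sharing a core
 * with the target VPE, from where the per-core registers needed to
 * start it are accessible.
 */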
static void remote_vpe_boot(void *dummy)
{
	unsigned int cluster = cpu_cluster(&current_cpu_data);
	unsigned core = cpu_core(&current_cpu_data);
	struct cluster_boot_config *cluster_cfg =
		&mips_cps_cluster_bootcfg[cluster];
	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
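
/*
 * Bring up a secondary CPU. Its entry PC, stack & gp are recorded in
 * the VPE boot config, then depending on its location we either boot
 * the whole (powered down) core, ask an online sibling to start the
 * VPE, or start it directly from this core.
 */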
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct cluster_boot_config *cluster_cfg =
		&mips_cps_cluster_bootcfg[cluster];
	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, cluster_cfg->core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(cluster, core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(cluster, core, vpe_id,
				   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		if (mips_cm_is64)
			write_gcr_co_reset64_base(core_entry_reg);
		else
			write_gcr_co_reset_base(core_entry_reg);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}
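
/*
 * Early init on a freshly started CPU: run one TC per VPE, check the
 * GIC agrees with our VP ID calculation & unmask the hardware
 * interrupt lines (unless an EIC is in use).
 */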
static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};
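
/*
 * Take the calling CPU out of service, either by halting just its
 * TC/VP (CPU_DEATH_HALT) or by power gating the whole core
 * (CPU_DEATH_POWER).
 */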
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct cluster_boot_config *cluster_cfg;
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)];
	core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;
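
/*
 * The final code run by a CPU going offline. If a sibling VPE remains
 * online then only this TC/VP is halted, leaving the core powered up;
 * otherwise the whole core is powered down.
 */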
void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
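
/*
 * Poll, from a CPU within the same core, until the dying CPU's TC
 * reports itself halted. Interrupts are disabled around each
 * settc()/read pair so the TC selection isn't disturbed mid-sequence.
 */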
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu) { }
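
/*
 * Run on a surviving CPU to wait until the dead CPU has really
 * stopped: its core powered down (or clock gated, eg. when a JTAG
 * probe blocks power-down), its TC halted, or its VP no longer
 * running.
 */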
static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;
	struct cluster_boot_config *cluster_cfg;

	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];

	/*
	 * Now wait for the CPU to actually offline. Without doing this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(cluster_cfg->core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup = cps_smp_setup,
	.prepare_cpus = cps_prepare_cpus,
	.boot_secondary = cps_boot_secondary,
	.init_secondary = cps_init_secondary,
	.smp_finish = cps_smp_finish,
	.send_ipi_single = mips_smp_send_ipi_single,
	.send_ipi_mask = mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = cps_cpu_disable,
	.cpu_die = cps_cpu_die,
	.cleanup_dead_cpu = cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC_CORE
	.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;

	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}