Lines Matching +full:state +full:- +full:labels
1 // SPDX-License-Identifier: GPL-2.0-or-later
19 #include <asm/mips-cps.h>
22 #include <asm/pm-cps.h>
26 #include <asm/smp-cps.h>
78 timeout--; in power_up_other_cluster()
99 struct uasm_label labels[2]; in mips_cps_build_core_entry() local
101 struct uasm_label *l = labels; in mips_cps_build_core_entry()
104 memset(labels, 0, sizeof(labels)); in mips_cps_build_core_entry()
136 uasm_resolve_relocs(relocs, labels); in mips_cps_build_core_entry()
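The mips_cps_build_core_entry() fragments above follow the usual MIPS uasm pattern: zero a small table of labels and relocations, emit instructions into a buffer, then patch every recorded branch target in one pass. A minimal sketch of that shape, assuming the types and uasm_resolve_relocs()/uasm_i_nop() helpers from asm/uasm.h and omitting the real instruction sequence:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/uasm.h>

static u32 entry_code[32];	/* illustrative buffer for the generated code */

static void build_entry_sketch(void)
{
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	u32 *p = entry_code;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* emit a trivial instruction so the sketch generates something */
	uasm_i_nop(&p);

	/*
	 * Real code uses uasm_il_*() branch emitters (which append entries
	 * to relocs) and uasm_l_*() helpers (which bind labels to the
	 * current position) before resolving.
	 */
	uasm_resolve_relocs(relocs, labels);	/* patch the recorded branches */
}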
159 0x0, CSEGX_SIZE - 1); in allocate_cps_vecs()
172 end = SZ_4G - 1; in allocate_cps_vecs()
186 return -ENOMEM; in allocate_cps_vecs()
241 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { in cps_smp_setup()
278 /* If we have an FPU, enroll ourselves in the FPU-full mask */ in cps_smp_setup()
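The "enroll ourselves in the FPU-full mask" comment appears in both cps_smp_setup() and cps_smp_finish(); the idea is plain conditional cpumask membership. A hedged sketch follows, where fpu_full_mask is a hypothetical stand-in for the mask the MT/FPU-affinity code actually keeps (mt_fpu_cpumask, to the best of my recollection):

#include <linux/cpumask.h>
#include <asm/cpu-features.h>

static struct cpumask fpu_full_mask;	/* hypothetical stand-in mask */

/* Record that this CPU's VPE has usable FPU hardware. */
static void enroll_fpu_cpu(unsigned int cpu)
{
	if (cpu_has_fpu)
		cpumask_set_cpu(cpu, &fpu_full_mask);
}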
305 pr_err("core_entry address unsuitable, disabling smp-cps\n"); in cps_prepare_cpus()
309 /* Detect whether the CCA is unsuited to multi-core SMP */ in cps_prepare_cpus()
314 /* The CCA is coherent, multi-core is fine */ in cps_prepare_cpus()
319 /* CCA is not coherent, multi-core is not usable */ in cps_prepare_cpus()
323 /* Warn the user if the CCA prevents multi-core */ in cps_prepare_cpus()
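The comment run above gates multi-core bring-up on the kernel's cache coherency attribute (CCA). A sketch of that decision, assuming read_c0_config() and CONF_CM_CMASK from asm/mipsregs.h; the specific encodings treated as coherent (0x4 and 0x5) are my assumption of what the driver accepts:

#include <asm/mipsregs.h>

static bool kernel_cca_is_coherent(void)
{
	unsigned int cca = read_c0_config() & CONF_CM_CMASK;

	switch (cca) {
	case 0x4:	/* coherent, write-back, exclusive */
	case 0x5:	/* coherent, write-back */
		return true;
	default:
		return false;
	}
}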
387 core_bootcfg = &cluster_bootcfg->core_config[c]; in cps_prepare_cpus()
388 bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1); in cps_prepare_cpus()
389 atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data)); in cps_prepare_cpus()
399 core_bootcfg = &cluster_bootcfg->core_config[c]; in cps_prepare_cpus()
400 kfree(core_bootcfg->vpe_config); in cps_prepare_cpus()
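The cps_prepare_cpus() fragments manipulate a small hierarchy of boot-configuration records: a per-cluster core_power bitmap and core_config array, a per-core atomic vpe_mask plus vpe_config array, and per-VPE pc/sp/gp slots. The outline below (struct names suffixed _sketch, allocation and sizing omitted) only makes those relationships explicit; the real definitions presumably live in asm/smp-cps.h, which the file includes above:

#include <linux/atomic.h>
#include <linux/bitmap.h>

struct vpe_boot_config_sketch {
	unsigned long pc;	/* entry point for the VPE */
	unsigned long sp;	/* initial stack pointer */
	unsigned long gp;	/* initial global pointer */
};

struct core_boot_config_sketch {
	atomic_t vpe_mask;	/* bit n set: VPE n is running or being booted */
	struct vpe_boot_config_sketch *vpe_config;
};

struct cluster_boot_config_sketch {
	unsigned long *core_power;	/* bit n set: core n is powered up */
	struct core_boot_config_sketch *core_config;
};

/* What the boot CPU records for itself, mirroring the fragments above. */
static void mark_boot_cpu(struct cluster_boot_config_sketch *cl,
			  unsigned int core, unsigned int vpe)
{
	bitmap_set(cl->core_power, core, 1);
	atomic_set(&cl->core_config[core].vpe_mask, 1 << vpe);
}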
423 /* If HCI is not supported, use the state machine below */ in init_cluster_l2()
443 /* Ensure the L2 tag writes complete before the state machine starts */ in init_cluster_l2()
446 /* Wait for the L2 state machine to be idle */ in init_cluster_l2()
457 /* Ensure the state machine starts before we poll for completion */ in init_cluster_l2()
468 "L2 state machine failed cache init with error %u\n", result); in init_cluster_l2()
482 bitmap_empty(cluster_cfg->core_power, ncores)) { in boot_core()
577 timeout--; in boot_core()
596 bitmap_set(cluster_cfg->core_power, core, 1); in boot_core()
615 struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; in remote_vpe_boot()
627 struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; in cps_boot_secondary()
628 struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; in cps_boot_secondary()
632 vpe_cfg->pc = (unsigned long)&smp_bootstrap; in cps_boot_secondary()
633 vpe_cfg->sp = __KSTK_TOS(idle); in cps_boot_secondary()
634 vpe_cfg->gp = (unsigned long)task_thread_info(idle); in cps_boot_secondary()
636 atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); in cps_boot_secondary()
640 if (!test_bit(core, cluster_cfg->core_power)) { in cps_boot_secondary()
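cps_boot_secondary() records where the new VPE should start (smp_bootstrap), where its stack lives (the top of the idle task's kernel stack via __KSTK_TOS()), and what to load into $gp (the idle task's thread_info), then advertises the VPE in vpe_mask. Whether the whole core must be powered up first or only the extra VPE started depends on the core_power bit. A condensed, hedged restatement, reusing the illustrative _sketch structs from earlier:

#include <linux/atomic.h>
#include <linux/bitops.h>

static void boot_secondary_sketch(struct cluster_boot_config_sketch *cl,
				  unsigned int core, unsigned int vpe,
				  unsigned long pc, unsigned long sp,
				  unsigned long gp)
{
	struct vpe_boot_config_sketch *cfg = &cl->core_config[core].vpe_config[vpe];

	cfg->pc = pc;
	cfg->sp = sp;
	cfg->gp = gp;

	/* make this VPE visible to whoever brings the core online */
	atomic_or(1 << vpe, &cl->core_config[core].vpe_mask);

	if (!test_bit(core, cl->core_power)) {
		/* core is powered down: power it up (boot_core() in the real code) */
	} else {
		/* core already running: only the target VPE needs starting */
	}
}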
690 /* Disable MT - we only want to run 1 TC per VPE */ in cps_init_secondary()
721 /* If we have an FPU, enroll ourselves in the FPU-full mask */ in cps_smp_finish()
789 return -EINVAL; in cps_cpu_disable()
792 core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)]; in cps_cpu_disable()
793 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); in cps_cpu_disable()
872 * - Onlining the CPU again. in cps_cleanup_dead_cpu()
873 * - Powering down the core if another VPE within it is offlined. in cps_cleanup_dead_cpu()
874 * - A sibling VPE entering a non-coherent state. in cps_cleanup_dead_cpu()
876 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing in cps_cleanup_dead_cpu()
882 * state, the latter happening when a JTAG probe is connected in cps_cleanup_dead_cpu()
902 * now we don't really know what state it's in. It's in cps_cleanup_dead_cpu()
912 "CPU%u hasn't powered down, seq. state %u\n", in cps_cleanup_dead_cpu()
918 bitmap_clear(cluster_cfg->core_power, core, 1); in cps_cleanup_dead_cpu()
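Once an offlined VPE's core has fully powered down (or the wait gives up, as the warning above records), the core is dropped from core_power so that the next onlining of any of its VPEs takes the full power-up path. A short hedged sketch of that bookkeeping, again reusing the illustrative struct from earlier:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void retire_core(struct cluster_boot_config_sketch *cl, unsigned int core,
			bool powered_down, unsigned int last_seq_state)
{
	if (!powered_down)
		pr_warn("core %u hasn't powered down, seq. state %u\n",
			core, last_seq_state);

	/* the next boot of a VPE on this core will repower it from scratch */
	bitmap_clear(cl->core_power, core, 1);
}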
968 return -ENODEV; in register_cps_smp_ops()
971 /* check we have a GIC - we need one for IPIs */ in register_cps_smp_ops()
974 return -ENODEV; in register_cps_smp_ops()
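register_cps_smp_ops() bails out early when the platform lacks the pieces CPS SMP depends on; per the fragments, a missing GIC is fatal because IPIs are delivered through it. A sketch of that gating, assuming the mips_cm_present() and mips_gic_present() helpers from asm/mips-cm.h and asm/mips-gic.h:

#include <linux/errno.h>
#include <asm/mips-cm.h>
#include <asm/mips-gic.h>

static int cps_prereqs_sketch(void)
{
	if (!mips_cm_present())		/* no Coherence Manager: not a CPS system */
		return -ENODEV;

	if (!mips_gic_present())	/* no GIC: no way to send IPIs */
		return -ENODEV;

	return 0;
}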