Lines Matching +full:per +full:- +full:cpu +full:- +full:cluster (hits in arch/mips/kernel/smp-cps.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
7 #include <linux/cpu.h>
19 #include <asm/mips-cps.h>
22 #include <asm/pm-cps.h>
26 #include <asm/smp-cps.h>
44 static void power_up_other_cluster(unsigned int cluster) in power_up_other_cluster() argument
49 mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, in power_up_other_cluster()
60 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); in power_up_other_cluster()
67 mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, in power_up_other_cluster()
78 timeout--; in power_up_other_cluster()
80 pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n", in power_up_other_cluster()
81 cluster, stat); in power_up_other_cluster()
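
The power_up_other_cluster() hits above show the pattern used to bring up another cluster's Coherence Manager: lock access to the remote cluster's CM blocks, then poll a status word (STAT_CONF) with a decrementing timeout, warning if it still has not reported powered-up. Below is a minimal userspace sketch of that bounded-poll-then-warn pattern; read_stat() and the "up" value 1 are hypothetical stand-ins rather than the real GCR/CPC encodings, and delays and retry policy are omitted.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for reading the remote cluster's status word. */
static unsigned int read_stat(void)
{
	static unsigned int polls;

	return ++polls >= 4 ? 1u : 0u;	/* pretend power-up completes on the 4th poll */
}

/* Bounded poll: succeed as soon as the status reads "up", warn on timeout. */
static bool wait_cluster_up(unsigned int cluster)
{
	unsigned int timeout = 1000;
	unsigned int stat = 0;

	while (timeout--) {
		stat = read_stat();
		if (stat == 1u)
			return true;
	}
	fprintf(stderr, "Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
		cluster, stat);
	return false;
}

int main(void)
{
	printf("cluster 1 powered up: %d\n", wait_cluster_up(1));
	return 0;
}
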
89 static unsigned __init core_vpe_count(unsigned int cluster, unsigned core) in core_vpe_count() argument
91 return min(smp_max_threads, mips_cps_numvps(cluster, core)); in core_vpe_count()
159 0x0, CSEGX_SIZE - 1); in allocate_cps_vecs()
172 end = SZ_4G - 1; in allocate_cps_vecs()
186 return -ENOMEM; in allocate_cps_vecs()
236 /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ in cps_smp_setup()
241 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { in cps_smp_setup()
254 /* Indicate present CPUs (CPU being synonymous with VPE) */ in cps_smp_setup()
278 /* If we have an FPU, enroll ourselves in the FPU-full mask */ in cps_smp_setup()
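
The cps_smp_setup() hits sketch how logical CPUs are enumerated: each core contributes core_vpe_count() VPEs (capped by smp_max_threads), cluster 0 core 0's VPE count becomes smp_num_siblings, and enumeration stops once NR_CPUS logical CPUs have been assigned, each VPE counting as one present CPU. A toy enumeration under those rules follows; the 2 x 2 x 3 topology and the NR_CPUS value are made up for illustration.

#include <stdio.h>

#define NR_CPUS 8	/* hypothetical cap, playing the role of the kernel's NR_CPUS */

/* Made-up topology: every core reports 3 VPEs. */
static unsigned int core_vpe_count(unsigned int cluster, unsigned int core)
{
	(void)cluster;
	(void)core;
	return 3;	/* kernel: min(smp_max_threads, mips_cps_numvps(cluster, core)) */
}

int main(void)
{
	unsigned int nvpes = 0, smp_num_siblings = 0;

	for (unsigned int cl = 0; cl < 2; cl++) {
		for (unsigned int c = 0; c < 2; c++) {
			unsigned int core_vpes = core_vpe_count(cl, c);

			/* Siblings taken from cluster 0 core 0, as in the listing. */
			if (cl == 0 && c == 0)
				smp_num_siblings = core_vpes;

			/* Each VPE is one logical CPU; stop at NR_CPUS. */
			for (unsigned int v = 0; v < core_vpes && nvpes < NR_CPUS; v++, nvpes++)
				printf("CPU%u = cluster %u core %u vpe %u\n",
				       nvpes, cl, c, v);
		}
	}
	printf("present CPUs: %u, smp_num_siblings: %u\n", nvpes, smp_num_siblings);
	return 0;
}
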
288 /* The calibration has to be done on the primary CPU of the cluster */ in calibrate_delay_is_known()
305 pr_err("core_entry address unsuitable, disabling smp-cps\n"); in cps_prepare_cpus()
309 /* Detect whether the CCA is unsuited to multi-core SMP */ in cps_prepare_cpus()
314 /* The CCA is coherent, multi-core is fine */ in cps_prepare_cpus()
319 /* CCA is not coherent, multi-core is not usable */ in cps_prepare_cpus()
323 /* Warn the user if the CCA prevents multi-core */ in cps_prepare_cpus()
342 /* Allocate cluster boot configuration structs */ in cps_prepare_cpus()
382 /* Mark this CPU as powered up & booted */ in cps_prepare_cpus()
387 core_bootcfg = &cluster_bootcfg->core_config[c]; in cps_prepare_cpus()
388 bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1); in cps_prepare_cpus()
389 atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data)); in cps_prepare_cpus()
399 core_bootcfg = &cluster_bootcfg->core_config[c]; in cps_prepare_cpus()
400 kfree(core_bootcfg->vpe_config); in cps_prepare_cpus()
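
The cps_prepare_cpus() hits allocate one boot-configuration record per cluster, each holding a core_power bitmap plus per-core records (core_config[c]) that in turn carry a vpe_mask and per-VPE entries (vpe_config), which the error path frees again. The following is a simplified sketch of that cluster -> core -> VPE nesting; the field names follow the accesses visible in the listing, but the authoritative definitions live in the MIPS smp-cps headers, and plain C types and allocators stand in for the kernel's atomic_t, bitmaps, and kcalloc/kfree.

#include <stdio.h>
#include <stdlib.h>

/* Per-VPE boot state: where the VPE should start executing. */
struct vpe_boot_config {
	unsigned long pc, sp, gp;
};

/* Per-core boot state: which VPEs to boot and their entry configs. */
struct core_boot_config {
	unsigned long vpe_mask;			/* atomic_t in the kernel */
	struct vpe_boot_config *vpe_config;	/* one entry per VPE */
};

/* Per-cluster boot state: powered cores and their per-core configs. */
struct cluster_boot_config {
	unsigned long *core_power;		/* bitmap, one bit per core */
	struct core_boot_config *core_config;	/* one entry per core */
};

int main(void)
{
	unsigned int ncores = 2, nvpes = 2;
	struct cluster_boot_config cl;

	cl.core_power = calloc(1, sizeof(*cl.core_power));
	cl.core_config = calloc(ncores, sizeof(*cl.core_config));
	for (unsigned int c = 0; c < ncores; c++)
		cl.core_config[c].vpe_config =
			calloc(nvpes, sizeof(*cl.core_config[c].vpe_config));

	printf("cluster 0, core 0, vpe 1 pc=%#lx\n",
	       cl.core_config[0].vpe_config[1].pc);

	/* Teardown mirrors the error path's kfree(core_bootcfg->vpe_config). */
	for (unsigned int c = 0; c < ncores; c++)
		free(cl.core_config[c].vpe_config);
	free(cl.core_config);
	free(cl.core_power);
	return 0;
}
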
471 static void boot_core(unsigned int cluster, unsigned int core, in boot_core() argument
478 cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; in boot_core()
479 ncores = mips_cps_numcores(cluster); in boot_core()
481 if ((cluster != cpu_cluster(&current_cpu_data)) && in boot_core()
482 bitmap_empty(cluster_cfg->core_power, ncores)) { in boot_core()
483 power_up_other_cluster(cluster); in boot_core()
485 mips_cm_lock_other(cluster, core, 0, in boot_core()
488 /* Ensure cluster GCRs are where we expect */ in boot_core()
509 if (cluster != cpu_cluster(&current_cpu_data)) { in boot_core()
510 mips_cm_lock_other(cluster, core, 0, in boot_core()
527 mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); in boot_core()
577 timeout--; in boot_core()
596 bitmap_set(cluster_cfg->core_power, core, 1); in boot_core()
600 	 * the cluster do (eg. if they're all removed via hotplug). in boot_core()
603 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); in boot_core()
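
In the boot_core() hits, a remote cluster is powered up only when its core_power bitmap is still empty, that is, when the first core of that cluster is being booted, and the core's bit is set once it is running. Here is a small sketch of that decision, with a plain unsigned long standing in for the kernel's bitmap helpers.

#include <stdbool.h>
#include <stdio.h>

struct cluster_state {
	unsigned long core_power;	/* bit n set => core n is powered */
};

/* Power the remote cluster up only for its first booting core. */
static bool needs_cluster_power_up(unsigned int target_cluster,
				   unsigned int current_cluster,
				   const struct cluster_state *cl)
{
	return target_cluster != current_cluster && cl->core_power == 0;
}

int main(void)
{
	struct cluster_state cl1 = { .core_power = 0 };

	printf("boot cluster1/core0: power up cluster? %d\n",
	       needs_cluster_power_up(1, 0, &cl1));
	cl1.core_power |= 1UL << 0;	/* mirrors bitmap_set(core_power, core, 1) */
	printf("boot cluster1/core1: power up cluster? %d\n",
	       needs_cluster_power_up(1, 0, &cl1));
	return 0;
}
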
611 unsigned int cluster = cpu_cluster(&current_cpu_data); in remote_vpe_boot() local
614 &mips_cps_cluster_bootcfg[cluster]; in remote_vpe_boot()
615 struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; in remote_vpe_boot()
620 static int cps_boot_secondary(int cpu, struct task_struct *idle) in cps_boot_secondary() argument
622 unsigned int cluster = cpu_cluster(&cpu_data[cpu]); in cps_boot_secondary() local
623 unsigned core = cpu_core(&cpu_data[cpu]); in cps_boot_secondary()
624 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); in cps_boot_secondary()
626 &mips_cps_cluster_bootcfg[cluster]; in cps_boot_secondary()
627 struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; in cps_boot_secondary()
628 struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; in cps_boot_secondary()
632 vpe_cfg->pc = (unsigned long)&smp_bootstrap; in cps_boot_secondary()
633 vpe_cfg->sp = __KSTK_TOS(idle); in cps_boot_secondary()
634 vpe_cfg->gp = (unsigned long)task_thread_info(idle); in cps_boot_secondary()
636 atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); in cps_boot_secondary()
640 if (!test_bit(core, cluster_cfg->core_power)) { in cps_boot_secondary()
642 boot_core(cluster, core, vpe_id); in cps_boot_secondary()
647 mips_cm_lock_other(cluster, core, vpe_id, in cps_boot_secondary()
656 if (!cpus_are_siblings(cpu, smp_processor_id())) { in cps_boot_secondary()
659 if (!cpus_are_siblings(cpu, remote)) in cps_boot_secondary()
665 pr_crit("No online CPU in core %u to start CPU%d\n", in cps_boot_secondary()
666 core, cpu); in cps_boot_secondary()
673 panic("Failed to call remote CPU\n"); in cps_boot_secondary()
690 /* Disable MT - we only want to run 1 TC per VPE */ in cps_init_secondary()
721 /* If we have an FPU, enroll ourselves in the FPU-full mask */ in cps_smp_finish()
738 unsigned int cpu, core, vpe_id; in cps_shutdown_this_cpu() local
740 cpu = smp_processor_id(); in cps_shutdown_this_cpu()
741 core = cpu_core(&cpu_data[cpu]); in cps_shutdown_this_cpu()
744 vpe_id = cpu_vpe_id(&cpu_data[cpu]); in cps_shutdown_this_cpu()
784 unsigned cpu = smp_processor_id(); in cps_cpu_disable() local
789 return -EINVAL; in cps_cpu_disable()
792 core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)]; in cps_cpu_disable()
793 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); in cps_cpu_disable()
795 set_cpu_online(cpu, false); in cps_cpu_disable()
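
cps_boot_secondary() marks the target VPE in its core's vpe_mask with atomic_or(1 << vpe_id), and cps_cpu_disable() removes the same bit with atomic_sub() of that power of two before marking the CPU offline. A C11 sketch of that bookkeeping follows, with stdatomic standing in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>

/* Bit n of vpe_mask set => VPE n of this core should be running. */
static atomic_uint vpe_mask;

static void vpe_online(unsigned int vpe_id)
{
	atomic_fetch_or(&vpe_mask, 1u << vpe_id);	/* like atomic_or() */
}

static void vpe_offline(unsigned int vpe_id)
{
	/* Subtracting 1 << vpe_id clears the bit, provided it was set. */
	atomic_fetch_sub(&vpe_mask, 1u << vpe_id);	/* like atomic_sub() */
}

int main(void)
{
	vpe_online(0);
	vpe_online(1);
	vpe_offline(0);
	printf("vpe_mask = 0x%x\n", atomic_load(&vpe_mask));	/* prints 0x2 */
	return 0;
}
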
807 unsigned int cpu; in play_dead() local
811 cpu = smp_processor_id(); in play_dead()
814 pr_debug("CPU%d going offline\n", cpu); in play_dead()
819 if (!cpus_are_siblings(cpu, cpu_death_sibling)) in play_dead()
836 panic("Failed to offline CPU %u", cpu); in play_dead()
841 unsigned cpu = (unsigned long)ptr_cpu; in wait_for_sibling_halt() local
842 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); in wait_for_sibling_halt()
854 static void cps_cpu_die(unsigned int cpu) { } in cps_cpu_die() argument
856 static void cps_cleanup_dead_cpu(unsigned cpu) in cps_cleanup_dead_cpu() argument
858 unsigned int cluster = cpu_cluster(&cpu_data[cpu]); in cps_cleanup_dead_cpu() local
859 unsigned core = cpu_core(&cpu_data[cpu]); in cps_cleanup_dead_cpu()
860 unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); in cps_cleanup_dead_cpu()
866 cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; in cps_cleanup_dead_cpu()
869 * Now wait for the CPU to actually offline. Without doing this that in cps_cleanup_dead_cpu()
872 * - Onlining the CPU again. in cps_cleanup_dead_cpu()
873 * - Powering down the core if another VPE within it is offlined. in cps_cleanup_dead_cpu()
874 * - A sibling VPE entering a non-coherent state. in cps_cleanup_dead_cpu()
876 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing in cps_cleanup_dead_cpu()
912 "CPU%u hasn't powered down, seq. state %u\n", in cps_cleanup_dead_cpu()
913 cpu, stat)) in cps_cleanup_dead_cpu()
918 bitmap_clear(cluster_cfg->core_power, core, 1); in cps_cleanup_dead_cpu()
921 * Have a CPU with access to the offlined CPUs registers wait in cps_cleanup_dead_cpu()
926 (void *)(unsigned long)cpu, 1); in cps_cleanup_dead_cpu()
928 panic("Failed to call remote sibling CPU\n"); in cps_cleanup_dead_cpu()
968 return -ENODEV; in register_cps_smp_ops()
971 /* check we have a GIC - we need one for IPIs */ in register_cps_smp_ops()
974 return -ENODEV; in register_cps_smp_ops()