| /linux/arch/arm/common/ |
| H A D | mcpm_entry.c | 34 static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_going_down() argument 36 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down() 37 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down() 47 static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_down() argument 50 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down() 51 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down() 63 static void __mcpm_outbound_leave_critical(unsigned int cluster, int state) in __mcpm_outbound_leave_critical() argument 66 mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical() 67 sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical() 82 static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) in __mcpm_outbound_enter_critical() argument [all …]
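These helpers all follow one pattern: write the new per-CPU (or per-cluster) state into the mcpm_sync structure, then push the write out with sync_cache_w() so CPUs that may be running with caches or coherency disabled still observe it. A minimal sketch of that pattern, using simplified stand-in types rather than the real mcpm_sync_struct layout from <asm/mcpm.h>:

    /* Hedged sketch only: the demo_* names, illustrative state values and the
     * 4-CPU / 2-cluster sizes are stand-ins; the real code uses mcpm_sync and
     * sync_cache_w().
     */
    enum demo_cpu_state {
            DEMO_CPU_DOWN, DEMO_CPU_COMING_UP, DEMO_CPU_UP, DEMO_CPU_GOING_DOWN,
    };

    struct demo_cpu_sync     { int cpu; };             /* per-CPU state word     */
    struct demo_cluster_sync {
            int cluster;                               /* cluster state word     */
            struct demo_cpu_sync cpus[4];
    };
    static struct demo_cluster_sync demo_sync[2];

    static void demo_sync_cache_w(void *p)
    {
            /* stand-in for sync_cache_w(): clean the cache line holding *p to
             * the point of coherency so non-coherent observers see the update */
    }

    static void demo_cpu_going_down(unsigned int cpu, unsigned int cluster)
    {
            demo_sync[cluster].cpus[cpu].cpu = DEMO_CPU_GOING_DOWN; /* 1. state */
            demo_sync_cache_w(&demo_sync[cluster].cpus[cpu].cpu);   /* 2. flush */
    }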
|
| H A D | mcpm_head.S | 56 ubfx r10, r0, #8, #8 @ r10 = cluster 88 mla r8, r0, r10, r8 @ r8 = sync cluster base 96 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN 100 mla r11, r0, r10, r11 @ r11 = cluster first man lock 106 bne mcpm_setup_wait @ wait for cluster setup if so 109 cmp r0, #CLUSTER_UP @ cluster already up? 110 bne mcpm_setup @ if not, set up the cluster 120 @ Signal that the cluster is being brought up: 125 @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this 128 @ Wait for any previously-pending cluster teardown operations to abort [all …]
|
| H A D | bL_switcher_dummy_if.c | 22 unsigned int cpu, cluster; in bL_switcher_write() local 40 cluster = val[2] - '0'; in bL_switcher_write() 41 ret = bL_switch_request(cpu, cluster); in bL_switcher_write()
|
| /linux/arch/arm/mach-sunxi/ |
| H A D | mc_smp.c | 87 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster) in sunxi_core_is_cortex_a15() argument 90 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; in sunxi_core_is_cortex_a15() 105 __func__, cluster, core); in sunxi_core_is_cortex_a15() 115 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster, in sunxi_cpu_power_switch_set() argument 121 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set() 125 cluster, cpu); in sunxi_cpu_power_switch_set() 129 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set() 131 writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set() 133 writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set() 135 writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set() [all …]
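The writes of 0xff, 0xfe, 0xf8 and 0xf0 to PRCM_PWR_SWITCH_REG form a staged power-switch sequence: each successive value clears more switch bits, so the core's power rail is brought up in steps rather than all at once. A hedged sketch of that staging; the register pointer and the 10 microsecond settle time are assumptions, and the snippet above is truncated before the sequence finishes:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Step values taken from the snippet above; later steps are elided there. */
    static const u32 demo_pwr_switch_steps[] = { 0xff, 0xfe, 0xf8, 0xf0 };

    static void demo_power_switch_on(void __iomem *sw_reg)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(demo_pwr_switch_steps); i++) {
                    writel(demo_pwr_switch_steps[i], sw_reg);
                    udelay(10);     /* assumed settle time between steps */
            }
    }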
|
| /linux/arch/arm/mach-versatile/ |
| H A D | tc2_pm.c | 46 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerup() argument 48 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerup() 49 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) in tc2_pm_cpu_powerup() 51 ve_spc_set_resume_addr(cluster, cpu, in tc2_pm_cpu_powerup() 53 ve_spc_cpu_wakeup_irq(cluster, cpu, true); in tc2_pm_cpu_powerup() 57 static int tc2_pm_cluster_powerup(unsigned int cluster) in tc2_pm_cluster_powerup() argument 59 pr_debug("%s: cluster %u\n", __func__, cluster); in tc2_pm_cluster_powerup() 60 if (cluster >= TC2_CLUSTERS) in tc2_pm_cluster_powerup() 62 ve_spc_powerdown(cluster, false); in tc2_pm_cluster_powerup() 66 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerdown_prepare() argument [all …]
|
| H A D | spc.h | 13 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); 14 void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); 15 void ve_spc_powerdown(u32 cluster, bool enable); 16 int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
|
| /linux/fs/ocfs2/cluster/ |
| H A D | nodemanager.c | 50 struct o2nm_cluster *cluster = o2nm_single_cluster; in o2nm_configured_node_map() local 52 BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); in o2nm_configured_node_map() 54 if (cluster == NULL) in o2nm_configured_node_map() 57 read_lock(&cluster->cl_nodes_lock); in o2nm_configured_node_map() 58 bitmap_copy(map, cluster->cl_nodes_bitmap, O2NM_MAX_NODES); in o2nm_configured_node_map() 59 read_unlock(&cluster->cl_nodes_lock); in o2nm_configured_node_map() 65 static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, in o2nm_node_ip_tree_lookup() argument 70 struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; in o2nm_node_ip_tree_lookup() 103 struct o2nm_cluster *cluster = o2nm_single_cluster; in o2nm_get_node_by_ip() local 105 if (cluster == NULL) in o2nm_get_node_by_ip() [all …]
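o2nm_configured_node_map() shows the snapshot pattern used throughout this file: hold the cluster's reader lock only long enough to copy the membership bitmap, then let callers work on the copy. A hedged, simplified restatement of that pattern; the demo_* names and node count stand in for struct o2nm_cluster and O2NM_MAX_NODES:

    #include <linux/bitmap.h>
    #include <linux/spinlock.h>

    #define DEMO_MAX_NODES 255              /* stand-in for O2NM_MAX_NODES */

    struct demo_cluster {
            rwlock_t nodes_lock;
            DECLARE_BITMAP(nodes_bitmap, DEMO_MAX_NODES);
    };

    static void demo_configured_node_map(struct demo_cluster *cluster,
                                         unsigned long *map)
    {
            read_lock(&cluster->nodes_lock);
            bitmap_copy(map, cluster->nodes_bitmap, DEMO_MAX_NODES);
            read_unlock(&cluster->nodes_lock);
    }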
|
| /linux/arch/arm/mach-hisi/ |
| H A D | platmcpm.c | 71 static bool hip04_cluster_is_down(unsigned int cluster) in hip04_cluster_is_down() argument 76 if (hip04_cpu_table[cluster][i]) in hip04_cluster_is_down() 81 static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) in hip04_set_snoop_filter() argument 89 data |= 1 << cluster; in hip04_set_snoop_filter() 91 data &= ~(1 << cluster); in hip04_set_snoop_filter() 100 unsigned int mpidr, cpu, cluster; in hip04_boot_secondary() local 106 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in hip04_boot_secondary() 110 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) in hip04_boot_secondary() 115 if (hip04_cpu_table[cluster][cpu]) in hip04_boot_secondary() 118 sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster); in hip04_boot_secondary() [all …]
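hip04_boot_secondary() recovers the (cpu, cluster) pair from the booting CPU's MPIDR: affinity level 0 (bits [7:0]) is the CPU within its cluster, level 1 (bits [15:8]) is the cluster, the same field mcpm_head.S extracts above with "ubfx r10, r0, #8, #8". A small standalone illustration of that decoding; the macro below is a simplified stand-in for MPIDR_AFFINITY_LEVEL():

    #include <stdio.h>

    #define DEMO_MPIDR_AFF(mpidr, level)  (((mpidr) >> ((level) * 8)) & 0xff)

    int main(void)
    {
            unsigned int mpidr = 0x80000102;                  /* illustrative */
            unsigned int cpu     = DEMO_MPIDR_AFF(mpidr, 0);  /* -> 2 */
            unsigned int cluster = DEMO_MPIDR_AFF(mpidr, 1);  /* -> 1 */

            printf("cpu %u cluster %u\n", cpu, cluster);
            return 0;
    }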
|
| /linux/arch/arm/mach-exynos/ |
| H A D | mcpm-exynos.c | 57 static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) in exynos_cpu_powerup() argument 59 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); in exynos_cpu_powerup() 62 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in exynos_cpu_powerup() 64 cluster >= EXYNOS5420_NR_CLUSTERS) in exynos_cpu_powerup() 76 if (cluster && in exynos_cpu_powerup() 77 cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { in exynos_cpu_powerup() 93 cpu, cluster); in exynos_cpu_powerup() 106 static int exynos_cluster_powerup(unsigned int cluster) in exynos_cluster_powerup() argument 108 pr_debug("%s: cluster %u\n", __func__, cluster); in exynos_cluster_powerup() 109 if (cluster >= EXYNOS5420_NR_CLUSTERS) in exynos_cluster_powerup() [all …]
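exynos_cpu_powerup() converts the (cpu, cluster) pair into a single power-domain index with cpu + cluster * EXYNOS5420_CPUS_PER_CLUSTER, after bounds-checking both values. A compact restatement of that mapping, assuming the 5420's two clusters of four CPUs:

    #define DEMO_CPUS_PER_CLUSTER   4
    #define DEMO_NR_CLUSTERS        2

    /* Returns 0..7 for valid input, -1 otherwise (the driver returns -EINVAL). */
    static int demo_cpunr(unsigned int cpu, unsigned int cluster)
    {
            if (cpu >= DEMO_CPUS_PER_CLUSTER || cluster >= DEMO_NR_CLUSTERS)
                    return -1;
            return cpu + cluster * DEMO_CPUS_PER_CLUSTER;   /* e.g. (3, 1) -> 7 */
    }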
|
| H A D | platsmp.c | 139 void exynos_cluster_power_down(int cluster) in exynos_cluster_power_down() argument 141 pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster)); in exynos_cluster_power_down() 148 void exynos_cluster_power_up(int cluster) in exynos_cluster_power_up() argument 151 EXYNOS_COMMON_CONFIGURATION(cluster)); in exynos_cluster_power_up() 159 int exynos_cluster_power_state(int cluster) in exynos_cluster_power_state() argument 161 return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) & in exynos_cluster_power_state()
|
| /linux/drivers/remoteproc/ |
| H A D | mtk_scp.c | 71 struct mtk_scp_of_cluster *scp_cluster = scp->cluster; in scp_wdt_handler() 176 val = readl(scp->cluster->reg_base + MT8183_SW_RSTN); in mt8183_scp_reset_assert() 178 writel(val, scp->cluster->reg_base + MT8183_SW_RSTN); in mt8183_scp_reset_assert() 185 val = readl(scp->cluster->reg_base + MT8183_SW_RSTN); in mt8183_scp_reset_deassert() 187 writel(val, scp->cluster->reg_base + MT8183_SW_RSTN); in mt8183_scp_reset_deassert() 192 writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET); in mt8192_scp_reset_assert() 197 writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR); in mt8192_scp_reset_deassert() 202 writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET); in mt8195_scp_c1_reset_assert() 207 writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR); in mt8195_scp_c1_reset_deassert() 214 scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST); in mt8183_scp_irq_handler() [all …]
|
| H A D | xlnx_r5_remoteproc.c | 1034 static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster) in zynqmp_r5_get_tcm_node_from_dt() argument 1046 for (i = 0; i < cluster->core_count; i++) { in zynqmp_r5_get_tcm_node_from_dt() 1047 r5_core = cluster->r5_cores[i]; in zynqmp_r5_get_tcm_node_from_dt() 1141 static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster) in zynqmp_r5_get_tcm_node() argument 1144 struct device *dev = cluster->dev; in zynqmp_r5_get_tcm_node() 1149 if (cluster->mode == SPLIT_MODE) { in zynqmp_r5_get_tcm_node() 1158 tcm_bank_count = tcm_bank_count / cluster->core_count; in zynqmp_r5_get_tcm_node() 1166 for (i = 0; i < cluster->core_count; i++) { in zynqmp_r5_get_tcm_node() 1167 r5_core = cluster->r5_cores[i]; in zynqmp_r5_get_tcm_node() 1201 static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, in zynqmp_r5_core_init() argument [all …]
|
| /linux/arch/arm/include/asm/ |
| H A D | mcpm.h | 44 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); 51 void mcpm_set_early_poke(unsigned cpu, unsigned cluster, 84 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); 132 int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); 219 int (*cpu_powerup)(unsigned int cpu, unsigned int cluster); 220 int (*cluster_powerup)(unsigned int cluster); 221 void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster); 222 void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster); 223 void (*cluster_powerdown_prepare)(unsigned int cluster); 226 void (*cpu_is_up)(unsigned int cpu, unsigned int cluster); [all …]
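mcpm.h is the interface the SoC back-ends elsewhere in this listing (tc2_pm.c, mcpm-exynos.c, mc_smp.c) implement: they fill in a struct mcpm_platform_ops and register it with the MCPM core. A hedged outline of such a back-end; the ops fields come from the declarations above, while the empty bodies and init wiring are placeholders rather than a working power-controller driver:

    #include <linux/init.h>
    #include <asm/mcpm.h>

    static int demo_cpu_powerup(unsigned int cpu, unsigned int cluster)
    {
            /* program the power controller to turn the CPU on and arm its
             * wake-up source; compare tc2_pm_cpu_powerup() above            */
            return 0;
    }

    static int demo_cluster_powerup(unsigned int cluster)
    {
            /* bring up the cluster rail / L2 before its first CPU starts */
            return 0;
    }

    static const struct mcpm_platform_ops demo_ops = {
            .cpu_powerup            = demo_cpu_powerup,
            .cluster_powerup        = demo_cluster_powerup,
            /* .cpu_powerdown_prepare, .cluster_powerdown_prepare, ... */
    };

    static int __init demo_mcpm_init(void)
    {
            /* mcpm_platform_register() is declared alongside the ops structure
             * in mcpm.h; only one back-end can be registered at a time        */
            return mcpm_platform_register(&demo_ops);
    }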
|
| /linux/Documentation/arch/arm/ |
| H A D | cluster-pm-race-avoidance.rst | 6 cluster setup and teardown operations and to manage hardware coherency 29 cluster-level operations are only performed when it is truly safe to do 34 are not immediately enabled when a cluster powers up. Since enabling or 38 power-down and power-up at the cluster level. 48 Each cluster and CPU is assigned a state, as follows: 67 The CPU or cluster is not coherent, and is either powered off or 71 The CPU or cluster has committed to moving to the UP state. 76 The CPU or cluster is active and coherent at the hardware 81 The CPU or cluster has committed to moving to the DOWN 89 Each cluster is also assigned a state, but it is necessary to split the [all …]
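The visible part of this document describes four states that apply to both CPUs and clusters (down, committed to coming up, up and coherent, committed to going down); the truncated remainder goes on to split the cluster's state further. Restated as a C enum for quick reference; the authoritative names and encodings live in arch/arm/include/asm/mcpm.h (CPU_DOWN, CPU_GOING_DOWN and friends, as used by mcpm_entry.c above), so the values here are illustrative only:

    enum demo_mcpm_state {
            DEMO_STATE_DOWN,        /* not coherent; powered off or about to be  */
            DEMO_STATE_COMING_UP,   /* committed to moving to UP                 */
            DEMO_STATE_UP,          /* active and coherent at the hardware level */
            DEMO_STATE_GOING_DOWN,  /* committed to moving to DOWN               */
    };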
|
| /linux/Documentation/ABI/testing/ |
| H A D | sysfs-ocfs2 | 14 covers how ocfs2 uses distributed locking between cluster 18 cluster nodes can interoperate if they have an identical 34 the available plugins to support ocfs2 cluster operation. 35 A cluster plugin is required to use ocfs2 in a cluster. 38 * 'o2cb' - The classic o2cb cluster stack that ocfs2 has 40 * 'user' - A plugin supporting userspace cluster software 54 cluster plugin is currently in use by the filesystem. 62 the cluster stack in use. The contents may change 63 when all filesystems are unmounted and the cluster stack 71 of current ocfs2 cluster stack. This value is set by [all …]
|
| /linux/arch/mips/kernel/ |
| H A D | smp-cps.c | 44 static void power_up_other_cluster(unsigned int cluster) in power_up_other_cluster() argument 49 mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, in power_up_other_cluster() 60 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); in power_up_other_cluster() 67 mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, in power_up_other_cluster() 81 cluster, stat); in power_up_other_cluster() 89 static unsigned __init core_vpe_count(unsigned int cluster, unsigned core) in core_vpe_count() argument 91 return min(smp_max_threads, mips_cps_numvps(cluster, core)); in core_vpe_count() 471 static void boot_core(unsigned int cluster, unsigned int core, in boot_core() argument 478 cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; in boot_core() 479 ncores = mips_cps_numcores(cluster); in boot_core() [all …]
|
| /linux/fs/fat/ |
| H A D | cache.c | 220 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) in fat_get_cluster() argument 239 if (cluster == 0) in fat_get_cluster() 242 if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) { in fat_get_cluster() 251 while (*fclus < cluster) { in fat_get_cluster() 286 static int fat_bmap_cluster(struct inode *inode, int cluster) in fat_bmap_cluster() argument 294 ret = fat_get_cluster(inode, cluster, &fclus, &dclus); in fat_bmap_cluster() 311 int cluster, offset; in fat_get_mapped_cluster() local 313 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_get_mapped_cluster() 315 cluster = fat_bmap_cluster(inode, cluster); in fat_get_mapped_cluster() 316 if (cluster < 0) in fat_get_mapped_cluster() [all …]
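fat_get_mapped_cluster() first converts the file-relative sector into a file-relative cluster index with a right shift by (cluster_bits - s_blocksize_bits); fat_bmap_cluster() and fat_get_cluster() then walk the FAT chain to find the matching on-disk cluster. A worked example of the shift, assuming 512-byte blocks and 4 KiB clusters:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cluster_bits   = 12;   /* 4 KiB clusters   */
            unsigned int blocksize_bits = 9;    /* 512-byte blocks  */
            unsigned long sector = 40;

            /* 2^(12-9) = 8 sectors per cluster, so sector 40 -> cluster 5 */
            unsigned long cluster = sector >> (cluster_bits - blocksize_bits);
            printf("sector %lu -> file cluster %lu\n", sector, cluster);
            return 0;
    }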
|
| /linux/arch/arm64/boot/dts/apple/ |
| H A D | t600x-dieX.dtsi | 10 …compatible = "apple,t6000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq"; 16 …compatible = "apple,t6000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq"; 22 …compatible = "apple,t6000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
|
| /linux/arch/arm/mach-milbeaut/ |
| H A D | platsmp.c | 25 unsigned int mpidr, cpu, cluster; in m10v_boot_secondary() local 32 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in m10v_boot_secondary() 38 __func__, cpu, l_cpu, cluster); in m10v_boot_secondary() 48 unsigned int mpidr, cpu, cluster; in m10v_smp_init() local 61 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in m10v_smp_init() 62 pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster); in m10v_smp_init()
|
| /linux/arch/x86/kernel/apic/ |
| H A D | x2apic_cluster.c | 105 static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster) in prefill_clustermask() argument 113 if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster) in prefill_clustermask() 124 static int alloc_clustermask(unsigned int cpu, u32 cluster, int node) in alloc_clustermask() argument 149 if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) { in alloc_clustermask() 171 prefill_clustermask(cmsk, cpu, cluster); in alloc_clustermask() 179 u32 cluster = apic_cluster(phys_apicid); in x2apic_prepare_cpu() local 180 u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf)); in x2apic_prepare_cpu() 185 if (alloc_clustermask(cpu, cluster, node) < 0) in x2apic_prepare_cpu()
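x2apic_prepare_cpu() builds the CPU's logical x2APIC ID from its physical one: the cluster number (the physical ID with its low four bits dropped) goes into bits [31:16], and a one-hot bit for the CPU's position within its 16-CPU cluster goes into bits [15:0]. A worked example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t phys_apicid = 0x23;                    /* example ID        */
            uint32_t cluster = phys_apicid >> 4;            /* apic_cluster(): 2 */
            uint32_t logical = (cluster << 16) | (1u << (phys_apicid & 0xf));

            printf("phys 0x%x -> logical 0x%x\n", phys_apicid, logical); /* 0x20008 */
            return 0;
    }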
|
| /linux/Documentation/mm/ |
| H A D | swap-table.rst | 9 The swap table implements the swap cache as a per-cluster array of swap cache values. 45 in a swap in or swap out path. We should already have the swap cluster, 48 With a per-cluster array storing the swap cache values of that cluster, 49 swap cache lookup within the cluster can be a very simple array lookup. 51 We give such a per-cluster swap cache value array a name: the swap table. 54 PTE. The size of a swap table for one swap cluster typically matches a PTE 63 Swap table modification requires taking the cluster lock. If a folio 65 locked prior to the cluster lock. After adding or removing is done, the
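The document's point is that once each swap cluster carries its own array of swap cache values, finding the cache entry for a slot becomes a plain array index under the cluster lock. A hedged sketch of that shape; the demo_* names and the 512-slot size (one PTE page of 8-byte entries on a 64-bit machine, per the text above) are illustrative, not the real definitions:

    #define DEMO_SLOTS_PER_CLUSTER  512

    struct demo_swap_cluster {
            /* one swap-table value per slot: a folio pointer, a shadow, ... */
            unsigned long table[DEMO_SLOTS_PER_CLUSTER];
    };

    static unsigned long demo_swap_cache_lookup(const struct demo_swap_cluster *ci,
                                                unsigned long offset)
    {
            /* caller holds the cluster lock, as the document requires */
            return ci->table[offset % DEMO_SLOTS_PER_CLUSTER];
    }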
|
| /linux/arch/alpha/kernel/ |
| H A D | setup.c | 241 for ((_cluster) = (memdesc)->cluster, (i) = 0; \ 288 struct memclust_struct * cluster; in setup_memory() local 297 for_each_mem_cluster(memdesc, cluster, i) { in setup_memory() 301 i, cluster->usage, cluster->start_pfn, in setup_memory() 302 cluster->start_pfn + cluster->numpages); in setup_memory() 304 end = cluster->start_pfn + cluster->numpages; in setup_memory() 308 memblock_add(PFN_PHYS(cluster->start_pfn), in setup_memory() 309 cluster->numpages << PAGE_SHIFT); in setup_memory() 314 if (cluster->usage & 3) in setup_memory() 315 memblock_reserve(PFN_PHYS(cluster->start_pfn), in setup_memory() [all …]
|
| /linux/fs/gfs2/ |
| H A D | Kconfig | 10 A cluster filesystem. 12 Allows a cluster of computers to simultaneously use a block device 18 machine show up immediately on all other machines in the cluster. 20 To use the GFS2 filesystem in a cluster, you will need to enable 22 be found here: http://sources.redhat.com/cluster 36 in a cluster environment.
|
| /linux/tools/perf/util/ |
| H A D | cpumap.c | 241 else if (a->cluster != b->cluster) in aggr_cpu_id__cmp() 242 return a->cluster - b->cluster; in aggr_cpu_id__cmp() 339 int cluster = cpu__get_cluster_id(cpu); in aggr_cpu_id__cluster() local 343 if (cluster < 0) in aggr_cpu_id__cluster() 344 cluster = 0; in aggr_cpu_id__cluster() 350 id.cluster = cluster; in aggr_cpu_id__cluster() 739 a->cluster == b->cluster && in aggr_cpu_id__equal() 752 a->cluster == -1 && in aggr_cpu_id__is_empty() 766 .cluster = -1, in aggr_cpu_id__empty()
|
| /linux/Documentation/driver-api/md/ |
| H A D | md-cluster.rst | 5 The cluster MD is a shared-device RAID for a cluster; it supports 12 Separate write-intent-bitmaps are used for each cluster node. 45 node joins the cluster, it acquires the lock in PW mode and it stays 46 so during the lifetime the node is part of the cluster. The lock 55 joins the cluster. 128 The DLM LVB is used to communicate between the nodes of the cluster. There 145 acknowledged by all nodes in the cluster. The BAST of the resource 216 When a node fails, the DLM informs the cluster with the slot 217 number. The node starts a cluster recovery thread. The cluster 285 There are 17 call-backs which the md core can make to the cluster [all …]
|