
Searched full:cluster (Results 1 – 25 of 656) sorted by relevance


/linux/arch/arm/common/
mcpm_entry.c
3 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
24 * see Documentation/arch/arm/cluster-pm-race-avoidance.rst.
34 static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_going_down() argument
36 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
37 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
42 * cluster can be torn down without disrupting this CPU.
47 static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_down() argument
50 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
51 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
56 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
[all …]
mcpm_head.S
3 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
8 * Refer to Documentation/arch/arm/cluster-pm-race-avoidance.rst
28 1903: .asciz " cluster"
56 ubfx r10, r0, #8, #8 @ r10 = cluster
88 mla r8, r0, r10, r8 @ r8 = sync cluster base
96 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
100 mla r11, r0, r10, r11 @ r11 = cluster first man lock
106 bne mcpm_setup_wait @ wait for cluster setup if so
109 cmp r0, #CLUSTER_UP @ cluster already up?
110 bne mcpm_setup @ if not, set up the cluster
[all …]
bL_switcher.c
3 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
118 * with the cluster number.
141 * bL_switch_to - Switch to a specific cluster for the current CPU
142 * @new_cluster_id: the ID of the cluster to switch to.
273 int cluster; in bL_switcher_thread() local
288 cluster = t->wanted_cluster; in bL_switcher_thread()
295 if (cluster != -1) { in bL_switcher_thread()
296 bL_switch_to(cluster); in bL_switcher_thread()
321 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
325 * @new_cluster_id: the ID of the cluster to switch to.
[all …]
/linux/arch/arm/mach-sunxi/
mc_smp.c
11 * Cluster cache enable trampoline code adapted from MCPM framework
87 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster) in sunxi_core_is_cortex_a15() argument
90 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; in sunxi_core_is_cortex_a15()
102 * would be mid way in a core or cluster power sequence. in sunxi_core_is_cortex_a15()
104 pr_err("%s: Couldn't get CPU cluster %u core %u device node\n", in sunxi_core_is_cortex_a15()
105 __func__, cluster, core); in sunxi_core_is_cortex_a15()
115 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster, in sunxi_cpu_power_switch_set() argument
121 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
124 pr_debug("power clamp for cluster %u cpu %u already open\n", in sunxi_cpu_power_switch_set()
125 cluster, cpu); in sunxi_cpu_power_switch_set()
[all …]
/linux/arch/arm/mach-versatile/
tc2_pm.c
46 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerup() argument
48 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerup()
49 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) in tc2_pm_cpu_powerup()
51 ve_spc_set_resume_addr(cluster, cpu, in tc2_pm_cpu_powerup()
53 ve_spc_cpu_wakeup_irq(cluster, cpu, true); in tc2_pm_cpu_powerup()
57 static int tc2_pm_cluster_powerup(unsigned int cluster) in tc2_pm_cluster_powerup() argument
59 pr_debug("%s: cluster %u\n", __func__, cluster); in tc2_pm_cluster_powerup()
60 if (cluster >= TC2_CLUSTERS) in tc2_pm_cluster_powerup()
62 ve_spc_powerdown(cluster, false); in tc2_pm_cluster_powerup()
66 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerdown_prepare() argument
[all …]
spc.c
50 /* SPC CPU/cluster reset statue */
71 /* TC2 static dual-cluster configuration */
97 * A15s cluster identifier
111 static inline bool cluster_is_a15(u32 cluster) in cluster_is_a15() argument
113 return cluster == info->a15_clusid; in cluster_is_a15()
142 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
150 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) in ve_spc_cpu_wakeup_irq() argument
154 if (cluster >= MAX_CLUSTERS) in ve_spc_cpu_wakeup_irq()
159 if (!cluster_is_a15(cluster)) in ve_spc_cpu_wakeup_irq()
175 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
[all …]
/linux/Documentation/arch/arm/
cluster-pm-race-avoidance.rst
2 Cluster-wide Power-up/power-down race avoidance algorithm
6 cluster setup and teardown operations and to manage hardware coherency
29 cluster-level operations are only performed when it is truly safe to do
34 are not immediately enabled when a cluster powers up. Since enabling or
38 power-down and power-up at the cluster level.
48 Each cluster and CPU is assigned a state, as follows:
67 The CPU or cluster is not coherent, and is either powered off or
71 The CPU or cluster has committed to moving to the UP state.
76 The CPU or cluster is active and coherent at the hardware
81 The CPU or cluster has committed to moving to the DOWN
[all …]
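
The state/transition model quoted above is what the mcpm_entry.c and mcpm_head.S hits earlier in these results implement: each CPU and each cluster owns a small state word that is written and then explicitly cleaned to the point of coherency, so that another CPU whose caches are still off observes a consistent value. The following is a minimal sketch of that idea only, not the kernel's actual definitions (the state values and the clean_to_poc() helper, standing in for the kernel's sync_cache_w(), are assumptions):

    /* Simplified per-CPU / per-cluster state words, as in the race-avoidance doc. */
    #define MAX_CPUS_PER_CLUSTER 4

    enum cpu_state     { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };
    enum cluster_state { CLUSTER_DOWN, CLUSTER_UP, CLUSTER_GOING_DOWN };

    struct sync_struct {
        volatile int cpu[MAX_CPUS_PER_CLUSTER];  /* one state word per CPU */
        volatile int cluster;                    /* cluster-wide state word */
    };

    /* Stand-in for the kernel's sync_cache_w(): clean the line to the PoC. */
    extern void clean_to_poc(volatile void *addr);

    static void cpu_going_down(struct sync_struct *s, unsigned int cpu)
    {
        s->cpu[cpu] = CPU_GOING_DOWN;
        clean_to_poc(&s->cpu[cpu]);  /* make the store visible to cache-off observers */
    }
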
/linux/fs/ocfs2/cluster/
nodemanager.c
18 * cluster active at a time. Changing this will require trickling
19 * cluster references throughout where nodes are looked up */
49 struct o2nm_cluster *cluster = o2nm_single_cluster; in o2nm_configured_node_map() local
51 BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); in o2nm_configured_node_map()
53 if (cluster == NULL) in o2nm_configured_node_map()
56 read_lock(&cluster->cl_nodes_lock); in o2nm_configured_node_map()
57 bitmap_copy(map, cluster->cl_nodes_bitmap, O2NM_MAX_NODES); in o2nm_configured_node_map()
58 read_unlock(&cluster->cl_nodes_lock); in o2nm_configured_node_map()
64 static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, in o2nm_node_ip_tree_lookup() argument
69 struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; in o2nm_node_ip_tree_lookup()
[all …]
/linux/arch/arm/mach-exynos/
mcpm-exynos.c
57 static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) in exynos_cpu_powerup() argument
59 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); in exynos_cpu_powerup()
62 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in exynos_cpu_powerup()
64 cluster >= EXYNOS5420_NR_CLUSTERS) in exynos_cpu_powerup()
71 * This assumes the cluster number of the big cores(Cortex A15) in exynos_cpu_powerup()
76 if (cluster && in exynos_cpu_powerup()
77 cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { in exynos_cpu_powerup()
92 pr_err("cpu %u cluster %u powerup failed\n", in exynos_cpu_powerup()
93 cpu, cluster); in exynos_cpu_powerup()
106 static int exynos_cluster_powerup(unsigned int cluster) in exynos_cluster_powerup() argument
[all …]
platsmp.c
136 * exynos_cluster_power_down() - power down the specified cluster
137 * @cluster: the cluster to power down
139 void exynos_cluster_power_down(int cluster) in exynos_cluster_power_down() argument
141 pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster)); in exynos_cluster_power_down()
145 * exynos_cluster_power_up() - power up the specified cluster
146 * @cluster: the cluster to power up
148 void exynos_cluster_power_up(int cluster) in exynos_cluster_power_up() argument
151 EXYNOS_COMMON_CONFIGURATION(cluster)); in exynos_cluster_power_up()
155 * exynos_cluster_power_state() - returns the power state of the cluster
156 * @cluster: the cluster to retrieve the power state from
[all …]
/linux/arch/arm/include/asm/
mcpm.h
13 * Maximum number of possible clusters / CPUs per cluster.
39 * This is used to indicate where the given CPU from given cluster should
44 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
51 void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
55 * CPU/cluster power operations API for higher subsystems to use.
66 * mcpm_cpu_power_up - make given CPU in given cluster runable
68 * @cpu: CPU number within given cluster
69 * @cluster: cluster number for the CPU
71 * The identified CPU is brought out of reset. If the cluster was powered
73 * in the cluster run, and ensuring appropriate cluster setup.
[all …]
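
Taken together, the kerneldoc above describes a two-step bring-up: record where the CPU should jump once it leaves reset, then ask the MCPM backend to power the CPU (and, if necessary, its whole cluster) up. A hedged sketch of how a platform SMP hook might string those two calls together (the hook name and the secondary_startup entry symbol are placeholders, not taken from these results):

    #include <asm/mcpm.h>   /* mcpm_set_entry_vector(), mcpm_cpu_power_up() */

    extern void secondary_startup(void);   /* platform/arch entry point, assumed here */

    static int plat_boot_secondary(unsigned int cpu, unsigned int cluster)
    {
        /* Tell MCPM where this CPU should jump once it comes out of reset. */
        mcpm_set_entry_vector(cpu, cluster, secondary_startup);

        /* Bring the CPU out of reset; powers the cluster up first if it was down. */
        return mcpm_cpu_power_up(cpu, cluster);
    }
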
/linux/arch/mips/include/asm/
mips-cps.h
124 * mips_cps_cluster_config - return (GCR|CPC)_CONFIG from a cluster
125 * @cluster: the ID of the cluster whose config we want
127 * Read the value of GCR_CONFIG (or its CPC_CONFIG mirror) from a @cluster.
131 static inline uint64_t mips_cps_cluster_config(unsigned int cluster) in mips_cps_cluster_config() argument
139 * within this cluster. in mips_cps_cluster_config()
141 WARN_ON(cluster != 0); in mips_cps_cluster_config()
149 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); in mips_cps_cluster_config()
158 * mips_cps_numcores - return the number of cores present in a cluster
159 * @cluster: the ID of the cluster whose core count we want
164 static inline unsigned int mips_cps_numcores(unsigned int cluster) in mips_cps_numcores() argument
[all …]
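
These accessors hide the lock-other/redirect dance needed to read another cluster's GCR (or CPC) registers, so a caller only names the cluster it cares about. A small, hedged usage sketch for cluster 0, the only cluster most systems have; the wrapper function is an illustration, not kernel code:

    #include <linux/printk.h>   /* pr_info() */
    #include <asm/mips-cps.h>   /* mips_cps_numcores(), mips_cps_cluster_config() */

    static void report_cluster0(void)
    {
        unsigned int ncores = mips_cps_numcores(0);   /* cores in cluster 0 */
        uint64_t cfg = mips_cps_cluster_config(0);    /* raw GCR/CPC CONFIG value */

        pr_info("cluster 0: %u cores, config %#llx\n",
                ncores, (unsigned long long)cfg);
    }
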
/linux/arch/arm/mach-hisi/
platmcpm.c
71 static bool hip04_cluster_is_down(unsigned int cluster) in hip04_cluster_is_down() argument
76 if (hip04_cpu_table[cluster][i]) in hip04_cluster_is_down()
81 static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) in hip04_set_snoop_filter() argument
89 data |= 1 << cluster; in hip04_set_snoop_filter()
91 data &= ~(1 << cluster); in hip04_set_snoop_filter()
100 unsigned int mpidr, cpu, cluster; in hip04_boot_secondary() local
106 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in hip04_boot_secondary()
110 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) in hip04_boot_secondary()
115 if (hip04_cpu_table[cluster][cpu]) in hip04_boot_secondary()
118 sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster); in hip04_boot_secondary()
[all …]
/linux/Documentation/ABI/testing/
sysfs-ocfs2
14 covers how ocfs2 uses distributed locking between cluster
18 cluster nodes can interoperate if they have an identical
34 the available plugins to support ocfs2 cluster operation.
35 A cluster plugin is required to use ocfs2 in a cluster.
38 * 'o2cb' - The classic o2cb cluster stack that ocfs2 has
40 * 'user' - A plugin supporting userspace cluster software
54 cluster plugin is currently in use by the filesystem.
62 the cluster stack in use. The contents may change
63 when all filesystems are unmounted and the cluster stack
71 of current ocfs2 cluster stack. This value is set by
[all …]
/linux/arch/x86/kernel/apic/
x2apic_cluster.c
56 /* Collapse cpus in a cluster so a single IPI per cluster is sent */ in __x2apic_send_IPI_mask()
68 /* Remove cluster CPUs from tmpmask */ in __x2apic_send_IPI_mask()
105 static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster) in prefill_clustermask() argument
113 if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster) in prefill_clustermask()
124 static int alloc_clustermask(unsigned int cpu, u32 cluster, int node) in alloc_clustermask() argument
130 * At boot time, the CPU present mask is stable. The cluster mask is in alloc_clustermask()
131 * allocated for the first CPU in the cluster and propagated to all in alloc_clustermask()
132 * present siblings in the cluster. If the cluster mask is already set in alloc_clustermask()
144 * any more) to find any existing cluster mask. in alloc_clustermask()
149 if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) { in alloc_clustermask()
[all …]
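
The pattern those comments describe is "allocate the per-cluster mask once, when the first CPU of that cluster shows up, and let every sibling point at the same object". A deliberately simplified, user-space style illustration of just that allocation rule (none of this is the kernel code; the sizes are arbitrary):

    #include <stdlib.h>

    #define MAX_CLUSTERS  64
    #define BITS_PER_MASK 256

    static unsigned long *cluster_mask[MAX_CLUSTERS];  /* one shared mask per cluster */

    static unsigned long *get_cluster_mask(unsigned int cluster)
    {
        /* The first CPU of this cluster to arrive allocates the mask... */
        if (!cluster_mask[cluster])
            cluster_mask[cluster] = calloc(BITS_PER_MASK / 8, 1);

        /* ...and every later sibling simply reuses the same pointer. */
        return cluster_mask[cluster];
    }
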
/linux/Documentation/devicetree/bindings/cpufreq/
apple,cluster-cpufreq.yaml
4 $id: http://devicetree.org/schemas/cpufreq/apple,cluster-cpufreq.yaml#
7 title: Apple SoC cluster cpufreq device
13 Apple SoCs (e.g. M1) have a per-cpu-cluster DVFS controller that is part of
14 the cluster management register block. This binding uses the standard
23 - apple,t8103-cluster-cpufreq
24 - apple,t8112-cluster-cpufreq
25 - const: apple,cluster-cpufreq
27 - const: apple,t6000-cluster-cpufreq
28 - const: apple,t8103-cluster-cpufreq
29 - const: apple,cluster-cpufreq
[all …]
/linux/drivers/clk/mvebu/
ap-cpu-clk.c
127 * struct ap806_clk: CPU cluster clock controller instance
128 * @cluster: Cluster clock controller index
129 * @clk_name: Cluster clock controller name
130 * @dev : Cluster clock device
131 * @hw: HW specific structure of Cluster clock controller
135 unsigned int cluster; member
151 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_recalc_rate()
167 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_set_rate()
169 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_set_rate()
171 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_set_rate()
[all …]
/linux/fs/fat/
cache.c
7 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
21 int fcluster; /* cluster number in the file. */
22 int dcluster; /* cluster number on disk. */
123 /* Find the same part as "new" in cluster-chain. */ in fat_cache_merge()
225 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) in fat_get_cluster() argument
240 "%s: invalid start cluster (i_pos %lld, start %08x)", in fat_get_cluster()
244 if (cluster == 0) in fat_get_cluster()
247 if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) { in fat_get_cluster()
256 while (*fclus < cluster) { in fat_get_cluster()
257 /* prevent the infinite loop of cluster chain */ in fat_get_cluster()
[all …]
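
The (fcluster, dcluster) pair in the cache entry is the whole trick: a cluster index relative to the start of the file and the on-disk cluster it maps to, so repeated lookups do not have to rewalk the chain from the beginning. Stripped of the cache and of the on-disk FAT access, the walk itself looks roughly like the sketch below (the FAT is modelled as a plain next[] array; the end-of-chain marker and loop bound are placeholders):

    /* Map file-relative cluster 'want' to its on-disk cluster by walking the chain. */
    #define ENT_EOF       (-1)       /* placeholder end-of-chain marker */
    #define MAX_CHAIN_LEN (1 << 22)  /* guard against circular (corrupted) chains */

    static int walk_chain(const int *next, int start, int want, int *dclus)
    {
        int fclus = 0, cur = start;

        while (fclus < want) {
            cur = next[cur];         /* follow the chain one cluster at a time */
            fclus++;
            if (cur == ENT_EOF)
                return -1;           /* chain is shorter than the requested index */
            if (fclus > MAX_CHAIN_LEN)
                return -1;           /* too long: assume a corrupted, circular chain */
        }

        *dclus = cur;                /* on-disk cluster backing file cluster 'want' */
        return 0;
    }
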
/linux/drivers/cpuidle/
cpuidle-big_little.c
32 * cluster state since, when all CPUs in a cluster hit it, the cluster
35 * There is no notion of cluster states in the menu governor, so CPUs
36 * have to define CPU states where possibly the cluster will be shutdown
38 * at random times; however the cluster state provides target_residency
39 * values as if all CPUs in a cluster enter the state at once; this is
46 * current cluster operating point. It is the time it takes to get the CPU
47 * up and running when the CPU is powered up on cluster wake-up from shutdown.
51 * target_residency: it is the minimum amount of time the cluster has
52 * to be down to break even in terms of power consumption. cluster
70 .desc = "ARM little-cluster power down",
[all …]
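
In the driver this policy is expressed through the usual struct cpuidle_state fields: exit_latency for the worst-case wake-up cost at the current cluster operating point, and target_residency for the minimum down time that makes the shutdown worthwhile. A hedged sketch of such a state entry follows; the numeric values and the enter callback are placeholders, not the driver's actual ones:

    #include <linux/cpuidle.h>

    /* Placeholder enter callback: a real driver suspends the CPU/cluster here. */
    static int bl_powerdown_enter(struct cpuidle_device *dev,
                                  struct cpuidle_driver *drv, int idx)
    {
        return idx;
    }

    static struct cpuidle_state little_cluster_powerdown = {
        .name             = "C1",
        .desc             = "ARM little-cluster power down",
        .exit_latency     = 700,                      /* us, placeholder: wake-up cost */
        .target_residency = 2500,                     /* us, placeholder: break-even time */
        .flags            = CPUIDLE_FLAG_TIMER_STOP,  /* local timer stops in this state */
        .enter            = bl_powerdown_enter,       /* hypothetical enter callback */
    };
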
/linux/tools/perf/pmu-events/arch/s390/cf_z14/
extended.json
132 "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
133 …1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache wit…
139 "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
140 … Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
146 "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
147 …ata cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache wit…
153 "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
154 …ta cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache wit…
160 "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
161 …he Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
[all …]
/linux/drivers/remoteproc/
xlnx_r5_remoteproc.c
33 * settings for RPU cluster mode which
34 * reflects possible values of xlnx,cluster-mode dt-property
122 /* In lockstep mode cluster uses each 64KB TCM from second core as well */
162 * @dev: r5f subsystem cluster device node
163 * @mode: cluster mode of type zynqmp_r5_cluster_mode
164 * @core_count: number of r5 cores used for this cluster mode
1015 static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster) in zynqmp_r5_get_tcm_node_from_dt() argument
1027 for (i = 0; i < cluster->core_count; i++) { in zynqmp_r5_get_tcm_node_from_dt()
1028 r5_core = cluster->r5_cores[i]; in zynqmp_r5_get_tcm_node_from_dt()
1118 * @cluster: pointer to zynqmp_r5_cluster type object
[all …]
/linux/tools/perf/pmu-events/arch/s390/cf_z15/
extended.json
132 "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
133 …1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache wit…
139 "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
140 … Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
146 "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
147 …ata cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache wit…
153 "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
154 …ta cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache wit…
160 "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
161 …he Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
[all …]
/linux/fs/ext4/
extents_status.h
88 * Pending cluster reservations for bigalloc file systems
90 * A cluster with a pending reservation is a logical cluster shared by at
93 * reservation is said to be pending because a cluster reservation would
94 * have to be taken in the event all blocks in the cluster shared with
98 * The set of pending cluster reservations is an auxiliary data structure
99 * used with the extents status tree to implement reserved cluster/block
101 * records all pending cluster reservations.
106 * reserved cluster count if it results in the removal of all delayed
107 * and unwritten extents (blocks) from a cluster that is not shared with a
109 * whether the cluster is shared can be done by searching for a pending
[all …]
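
Reduced to its core, the rule in that comment is per-cluster accounting: a cluster keeps its reservation as long as any delayed block still maps into it or any already-allocated (written or unwritten) block shares it, and the reserved count only drops when the last such block goes away. A deliberately naive, non-ext4 illustration of that bookkeeping:

    /* Toy model: one counter pair per cluster, nothing like ext4's rbtree-based code. */
    struct cluster_acct {
        unsigned int delayed;    /* delayed-allocation blocks mapped into the cluster */
        unsigned int allocated;  /* written or unwritten blocks sharing the cluster */
    };

    /* Drop one delayed block; return 1 if the cluster's reservation can be released. */
    static int drop_delayed_block(struct cluster_acct *c, unsigned long *reserved)
    {
        if (c->delayed == 0)
            return 0;                 /* nothing delayed left in this cluster */

        c->delayed--;
        if (c->delayed == 0 && c->allocated == 0) {
            (*reserved)--;            /* no user left: give the reservation back */
            return 1;
        }
        return 0;
    }
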
/linux/Documentation/driver-api/md/
md-cluster.rst
2 MD Cluster
5 The cluster MD is a shared-device RAID for a cluster, it supports
12 Separate write-intent-bitmaps are used for each cluster node.
45 node joins the cluster, it acquires the lock in PW mode and it stays
46 so during the lifetime the node is part of the cluster. The lock
55 joins the cluster.
128 The DLM LVB is used to communicate within nodes of the cluster. There
145 acknowledged by all nodes in the cluster. The BAST of the resource
216 When a node fails, the DLM informs the cluster with the slot
217 number. The node starts a cluster recovery thread. The cluster
[all …]
/linux/tools/perf/pmu-events/arch/x86/icelakex/
other.json
113 …t, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are con…
123 … on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
153 …t, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are con…
163 …et, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are con…
203 … on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
213 … on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
243 …t, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are con…
253 …et, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are con…
283 … on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
293 … on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
[all …]
