Lines Matching defs:cpu
3 * Arch-specific CPU topology information
12 #include <linux/cpu.h>
67 int cpu;
78 for_each_cpu(cpu, cpus) {
79 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
83 rcu_assign_pointer(per_cpu(sft_data, cpu), data);
84 cpumask_set_cpu(cpu, &scale_freq_counters_mask);
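These are the guts of topology_set_scale_freq_source(): each CPU in the mask gets an RCU-published per-cpu pointer to the caller's scale_freq_data, and the CPU is flagged in scale_freq_counters_mask. The rcu_dereference() at line 79 peeks at the current owner first, so arch-provided counters are never displaced by a lesser source. A minimal registration sketch, assuming the usual include/linux/arch_topology.h API and a hypothetical driver callback:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    /* Hypothetical per-CPU hook: called on the local CPU to publish its
     * current/max frequency ratio from hardware counters. */
    static void my_set_freq_scale(void)
    {
    }

    static struct scale_freq_data my_sfd = {
    	.source		= SCALE_FREQ_SOURCE_ARCH,
    	.set_freq_scale	= my_set_freq_scale,
    };

    static int __init my_counters_init(void)
    {
    	/* Claim frequency-scale accounting for all possible CPUs. */
    	topology_set_scale_freq_source(&my_sfd, cpu_possible_mask);
    	return 0;
    }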
98 int cpu;
102 for_each_cpu(cpu, cpus) {
103 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
106 rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
107 cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
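topology_clear_scale_freq_source() is the inverse: lines 102-107 drop the per-cpu pointer and clear the mask bit, but (in the elided check between lines 103 and 106) only when the registered entry belongs to the source being cleared. Continuing the sketch above, teardown would be:

    	/* Remove only our SCALE_FREQ_SOURCE_ARCH registration. */
    	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH,
    					 cpu_possible_mask);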
160 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
162 per_cpu(cpu_scale, cpu) = capacity;
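topology_set_cpu_scale() is the single writer of the per-cpu cpu_scale value that topology_get_cpu_scale() (and hence arch_scale_cpu_capacity() on arch_topology users) reads back. A trivial, hypothetical use:

    	/* Pin CPU 4 at full scheduler capacity and read it back the way
    	 * the scheduler does (SCHED_CAPACITY_SCALE == 1024). */
    	topology_set_cpu_scale(4, SCHED_CAPACITY_SCALE);
    	pr_info("cpu4 capacity=%lu\n", arch_scale_cpu_capacity(4));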
186 int cpu;
188 cpu = cpumask_first(cpus);
189 max_capacity = arch_scale_cpu_capacity(cpu);
190 max_freq = arch_scale_freq_ref(cpu);
203 trace_hw_pressure_update(cpu, pressure);
205 for_each_cpu(cpu, cpus)
206 WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
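Lines 186-206 are topology_update_hw_pressure(): the first CPU of the mask (line 188) supplies the reference capacity and frequency, the capped frequency is converted into lost capacity, and the delta is fanned out to every CPU in the mask (lines 205-206). Worked example: with max_capacity = 1024, max_freq = 2000000 kHz and a cap of 1500000 kHz, capacity = 1024 * 1500000 / 2000000 = 768, so the recorded pressure is 1024 - 768 = 256. A cooling driver throttling a cpufreq policy would report it as:

    	/* Hypothetical throttle notification: firmware capped this
    	 * policy's CPUs at capped_khz. */
    	topology_update_hw_pressure(policy->related_cpus, capped_khz);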
214 struct cpu *cpu = container_of(dev, struct cpu, dev);
216 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
224 static int cpu_capacity_sysctl_add(unsigned int cpu)
226 struct device *cpu_dev = get_cpu_device(cpu);
236 static int cpu_capacity_sysctl_remove(unsigned int cpu)
238 struct device *cpu_dev = get_cpu_device(cpu);
250 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
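Lines 224-250 tie the per-CPU cpu_capacity sysfs attribute to hotplug: cpu_capacity_sysctl_add()/remove() create and delete the file on the CPU's device as it comes and goes, and line 250 registers both through a dynamic AP-online hotplug state, so add() also runs once for every CPU already online. A lightly abridged sketch of the registration:

    static int register_cpu_capacity_sysctl(void)
    {
    	int ret;

    	/* Dynamic state: returns the allocated state number on success,
    	 * a negative errno on failure. */
    	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
    				cpu_capacity_sysctl_add,
    				cpu_capacity_sysctl_remove);

    	return ret < 0 ? ret : 0;
    }
    subsys_initcall(register_cpu_capacity_sysctl);

The value then shows up as /sys/devices/system/cpu/cpuN/cpu_capacity, backed by the show routine at lines 214-216.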
290 int cpu;
296 for_each_possible_cpu(cpu) {
297 capacity = raw_capacity[cpu] *
298 (per_cpu(capacity_freq_ref, cpu) ?: 1);
303 for_each_possible_cpu(cpu) {
304 capacity = raw_capacity[cpu] *
305 (per_cpu(capacity_freq_ref, cpu) ?: 1);
308 topology_set_cpu_scale(cpu, capacity);
310 cpu, topology_get_cpu_scale(cpu));
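This is topology_normalize_cpu_scale() (lines 290-310): both loops compute the same product, raw DMIPS/MHz capacity times the per-CPU reference frequency (the '?: 1' at lines 298/305 keeps CPUs with no known frequency from zeroing the product). The first pass finds the maximum product, capacity_scale; the second rescales every product so the fastest CPU lands exactly at SCHED_CAPACITY_SCALE. Worked example, with illustrative raw capacities {578, 1024} and illustrative reference frequencies {1500000, 1800000} kHz: the products are {867000000, 1843200000}, the big CPU normalizes to 1024, and the little one to (867000000 << 10) / 1843200000 ≈ 481.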
314 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
336 raw_capacity[cpu] = cpu_capacity;
338 cpu_node, raw_capacity[cpu]);
348 per_cpu(capacity_freq_ref, cpu) =
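topology_parse_cpu_capacity() (line 314) fills raw_capacity[cpu] from the DT property capacity-dmips-mhz, and line 348 records the CPU's boot-time clock rate (read from its clock provider and converted to kHz) into capacity_freq_ref as the pre-cpufreq reference for the normalization above. For illustration, a big.LITTLE DT might mark its little cores with capacity-dmips-mhz = <578> and its big cores with <1024>, matching the worked example above.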
365 void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
376 int cpu;
386 for_each_possible_cpu(cpu) {
387 if (!cppc_get_perf_caps(cpu, &perf_caps) &&
390 raw_capacity[cpu] = perf_caps.highest_perf;
391 capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
393 per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
396 cpu, raw_capacity[cpu]);
400 pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
405 for_each_possible_cpu(cpu) {
406 freq_inv_set_max_ratio(cpu,
407 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
409 capacity = raw_capacity[cpu];
412 topology_set_cpu_scale(cpu, capacity);
414 cpu, topology_get_cpu_scale(cpu));
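topology_init_cpu_capacity_cppc() (lines 376-414) performs the same two-pass normalization, but sources raw capacity from each CPU's ACPI CPPC highest_perf (line 390) and derives capacity_freq_ref from it via cppc_perf_to_khz() (line 393) before seeding the per-CPU ratio through freq_inv_set_max_ratio() (lines 406-407). Worked example: highest_perf values {300, 1000} give capacity_scale = 1000, so the slower CPU scales to (300 << 10) / 1000 = 307.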
440 int cpu;
445 pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
451 for_each_cpu(cpu, policy->related_cpus) {
452 per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
453 freq_inv_set_max_ratio(cpu,
454 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
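init_cpu_capacity_callback() (lines 440-454) runs from a cpufreq policy notifier: once a policy appears for a set of CPUs, capacity_freq_ref is upgraded to policy->cpuinfo.max_freq, which is in kHz, and the ratio is re-seeded via freq_inv_set_max_ratio(), which expects Hz, hence the HZ_PER_KHZ multiply. E.g. a 2 GHz CPU reports max_freq = 2000000, and 2000000 * HZ_PER_KHZ = 2000000000 Hz is what the arch ratio setup sees.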
480 * information is not needed for cpu capacity initialization.
517 * This function returns the logical CPU number of the node.
519 * (1) logical CPU number, which is >= 0.
528 int cpu;
530 of_parse_phandle(node, "cpu", 0);
535 cpu = of_cpu_node_to_id(cpu_node);
536 if (cpu >= 0)
537 topology_parse_cpu_capacity(cpu_node, cpu);
539 pr_info("CPU node for %pOF exists, but the possible cpu range is: %*pbl\n",
542 return cpu;
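get_cpu_for_node() resolves a cpu-map entry to a logical CPU: line 530 follows the node's "cpu" phandle, line 535 maps the referenced /cpus node to a logical id, and a valid id additionally triggers capacity parsing (line 537). Condensed, the shape is roughly:

    	struct device_node *cpu_node = of_parse_phandle(node, "cpu", 0);

    	if (!cpu_node)
    		return -1;			/* node absent from the DT */

    	cpu = of_cpu_node_to_id(cpu_node);	/* logical id, or -ENODEV */
    	if (cpu >= 0)
    		topology_parse_cpu_capacity(cpu_node, cpu);

    	of_node_put(cpu_node);
    	return cpu;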
551 int cpu;
562 cpu = get_cpu_for_node(t);
563 if (cpu >= 0) {
564 cpu_topology[cpu].package_id = package_id;
565 cpu_topology[cpu].cluster_id = cluster_id;
566 cpu_topology[cpu].core_id = core_id;
567 cpu_topology[cpu].thread_id = i;
568 } else if (cpu != -ENODEV) {
577 cpu = get_cpu_for_node(core);
578 if (cpu >= 0) {
585 cpu_topology[cpu].package_id = package_id;
586 cpu_topology[cpu].cluster_id = cluster_id;
587 cpu_topology[cpu].core_id = core_id;
588 } else if (leaf && cpu != -ENODEV) {
641 pr_err("%pOF: cpu-map children should be clusters\n", c);
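parse_core() (lines 562-588) fills one cpu_topology[] slot per thread node, or one per leaf core when there are no threads, while parse_cluster() rejects non-cluster children of cpu-map (line 641). For a hypothetical cpu-map with two clusters of two leaf cores each, the filled-in table would look schematically like:

    	/* Illustrative outcome only; ids follow parse order, and
    	 * thread_id stays -1 on non-SMT systems. */
    	cpu_topology[0] = (struct cpu_topology){ .package_id = 0, .cluster_id = 0, .core_id = 0, .thread_id = -1 };
    	cpu_topology[1] = (struct cpu_topology){ .package_id = 0, .cluster_id = 0, .core_id = 1, .thread_id = -1 };
    	cpu_topology[2] = (struct cpu_topology){ .package_id = 0, .cluster_id = 1, .core_id = 0, .thread_id = -1 };
    	cpu_topology[3] = (struct cpu_topology){ .package_id = 0, .cluster_id = 1, .core_id = 1, .thread_id = -1 };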
706 int cpu;
716 * When topology is provided, cpu-map is essentially a root
720 of_get_child_by_name(cn, "cpu-map");
735 for_each_possible_cpu(cpu)
736 if (cpu_topology[cpu].package_id < 0) {
745 * cpu topology table
750 const struct cpumask *cpu_coregroup_mask(int cpu)
752 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
755 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
757 core_mask = &cpu_topology[cpu].core_sibling;
760 if (last_level_cache_is_valid(cpu)) {
761 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
762 core_mask = &cpu_topology[cpu].llc_sibling;
766 * For systems with no shared cpu-side LLC but with clusters defined,
771 cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
772 core_mask = &cpu_topology[cpu].cluster_sibling;
777 const struct cpumask *cpu_clustergroup_mask(int cpu)
783 if (cpumask_subset(cpu_coregroup_mask(cpu),
784 &cpu_topology[cpu].cluster_sibling))
785 return topology_sibling_cpumask(cpu);
787 return &cpu_topology[cpu].cluster_sibling;
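cpu_coregroup_mask() (lines 750-772) starts from the NUMA node span and successively narrows it: prefer the core siblings when they are a subset (lines 755-757), then a valid shared-LLC mask (lines 760-762), and finally, on systems whose LLC sits beyond the interconnect but which define clusters, the cluster span (lines 771-772). cpu_clustergroup_mask() (lines 777-787) returns the cluster siblings, degrading to the SMT sibling mask when the cluster would span at least as many CPUs as the core group (lines 783-785), since a scheduling level that does not shrink would be degenerate. These two callbacks are what the scheduler's default topology table plugs in; abridged from kernel/sched/topology.c:

    	/* default_topology[] levels that consume these masks. */
    	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
    	{ cpu_coregroup_mask,    cpu_core_flags,    SD_INIT_NAME(MC)  },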
793 int cpu, ret;
800 for_each_online_cpu(cpu) {
801 cpu_topo = &cpu_topology[cpu];
803 if (last_level_cache_is_shared(cpu, cpuid)) {
804 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
812 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
818 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
826 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
830 static void clear_cpu_topology(int cpu)
832 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
835 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
838 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
841 cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
843 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
848 unsigned int cpu;
850 for_each_possible_cpu(cpu) {
851 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
858 clear_cpu_topology(cpu);
862 void remove_cpu_topology(unsigned int cpu)
866 for_each_cpu(sibling, topology_core_cpumask(cpu))
867 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
868 for_each_cpu(sibling, topology_sibling_cpumask(cpu))
869 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
870 for_each_cpu(sibling, topology_cluster_cpumask(cpu))
871 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
872 for_each_cpu(sibling, topology_llc_cpumask(cpu))
873 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
875 clear_cpu_topology(cpu);
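remove_cpu_topology() (lines 862-875) is the hot-unplug inverse of update_siblings_masks() (lines 793-826): the departing CPU is struck from every sibling's core, thread, cluster and LLC mask, after which clear_cpu_topology() resets the CPU's own masks to self-only, ready for a later re-plug. On arm64, for instance, this runs from the __cpu_disable() path in arch/arm64/kernel/smp.c before the CPU is torn down.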
886 int cpu, ret;
902 for_each_possible_cpu(cpu) {
903 ret = fetch_cache_info(cpu);