Lines Matching +full:1 +full:- +full:cpu (drivers/base/arch_topology.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * Arch specific cpu topology information
12 #include <linux/cpu.h>
31 DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
66 int cpu; in topology_set_scale_freq_source() local
77 for_each_cpu(cpu, cpus) { in topology_set_scale_freq_source()
78 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_set_scale_freq_source()
81 if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) { in topology_set_scale_freq_source()
82 rcu_assign_pointer(per_cpu(sft_data, cpu), data); in topology_set_scale_freq_source()
83 cpumask_set_cpu(cpu, &scale_freq_counters_mask); in topology_set_scale_freq_source()
97 int cpu; in topology_clear_scale_freq_source() local
101 for_each_cpu(cpu, cpus) { in topology_clear_scale_freq_source()
102 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_clear_scale_freq_source()
104 if (sfd && sfd->source == source) { in topology_clear_scale_freq_source()
105 rcu_assign_pointer(per_cpu(sft_data, cpu), NULL); in topology_clear_scale_freq_source()
106 cpumask_clear_cpu(cpu, &scale_freq_counters_mask); in topology_clear_scale_freq_source()
114 * use-after-free races. in topology_clear_scale_freq_source()
127 sfd->set_freq_scale(); in topology_scale_freq_tick()
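A minimal sketch of how an architecture driver with hardware activity counters plugs into the lines above; struct scale_freq_data, SCALE_FREQ_SOURCE_ARCH and topology_set_scale_freq_source() come from <linux/arch_topology.h>, while my_set_freq_scale(), my_sfd and counters_init() are hypothetical names:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    static void my_set_freq_scale(void)
    {
        /*
         * Runs from topology_scale_freq_tick() on every CPU set in
         * scale_freq_counters_mask: read the activity counters and publish
         * the delivered/maximum frequency ratio (e.g. via the per-CPU
         * arch_freq_scale variable).
         */
    }

    static struct scale_freq_data my_sfd = {
        .source         = SCALE_FREQ_SOURCE_ARCH,
        .set_freq_scale = my_set_freq_scale,
    };

    static void counters_init(const struct cpumask *counter_cpus)
    {
        /* An ARCH source takes precedence over the cpufreq-based one. */
        topology_set_scale_freq_source(&my_sfd, counter_cpus);
    }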
159 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) in topology_set_cpu_scale() argument
161 per_cpu(cpu_scale, cpu) = capacity; in topology_set_cpu_scale()
167 * topology_update_hw_pressure() - Update HW pressure for CPUs
173 * operating on stale data when hot-plug is used for some CPUs. The
185 int cpu; in topology_update_hw_pressure() local
187 cpu = cpumask_first(cpus); in topology_update_hw_pressure()
188 max_capacity = arch_scale_cpu_capacity(cpu); in topology_update_hw_pressure()
189 max_freq = arch_scale_freq_ref(cpu); in topology_update_hw_pressure()
200 pressure = max_capacity - capacity; in topology_update_hw_pressure()
202 trace_hw_pressure_update(cpu, pressure); in topology_update_hw_pressure()
204 for_each_cpu(cpu, cpus) in topology_update_hw_pressure()
205 WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure); in topology_update_hw_pressure()
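The arithmetic behind the pressure value written above, with made-up numbers: the first CPU's full capacity is scaled in proportion to the capped frequency, and the pressure is the capacity lost to the cap (mult_frac() is from <linux/math.h>):

    /* Illustrative values only. */
    unsigned long max_capacity = 1024;    /* arch_scale_cpu_capacity(cpu) */
    unsigned long max_freq     = 2000000; /* arch_scale_freq_ref(cpu), kHz */
    unsigned long capped_freq  = 1500000; /* thermal/firmware cap, kHz */

    /* Boot-time case: no cap reported yet, so no pressure. */
    unsigned long capacity = (capped_freq >= max_freq) ? max_capacity :
            mult_frac(max_capacity, capped_freq, max_freq);
    unsigned long pressure = max_capacity - capacity;  /* 1024 - 768 = 256 */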
213 struct cpu *cpu = container_of(dev, struct cpu, dev); in cpu_capacity_show() local
215 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
223 static int cpu_capacity_sysctl_add(unsigned int cpu) in cpu_capacity_sysctl_add() argument
225 struct device *cpu_dev = get_cpu_device(cpu); in cpu_capacity_sysctl_add()
228 return -ENOENT; in cpu_capacity_sysctl_add()
235 static int cpu_capacity_sysctl_remove(unsigned int cpu) in cpu_capacity_sysctl_remove() argument
237 struct device *cpu_dev = get_cpu_device(cpu); in cpu_capacity_sysctl_remove()
240 return -ENOENT; in cpu_capacity_sysctl_remove()
249 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity", in register_cpu_capacity_sysctl()
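The matched lines only show fragments of the sysfs/hotplug glue; a sketch of the pattern they imply (the device_create_file()/device_remove_file() calls and the attribute declaration are assumptions, not lines matched from the file):

    static DEVICE_ATTR_RO(cpu_capacity);   /* backed by cpu_capacity_show() */

    static int cpu_capacity_sysctl_add(unsigned int cpu)
    {
        struct device *cpu_dev = get_cpu_device(cpu);

        if (!cpu_dev)
            return -ENOENT;

        return device_create_file(cpu_dev, &dev_attr_cpu_capacity);
    }

    static int cpu_capacity_sysctl_remove(unsigned int cpu)
    {
        struct device *cpu_dev = get_cpu_device(cpu);

        if (!cpu_dev)
            return -ENOENT;

        device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
        return 0;
    }

    static int register_cpu_capacity_sysctl(void)
    {
        /* The callbacks run for each CPU as it comes online/goes offline. */
        cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
                          cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
        return 0;
    }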
269 update_topology = 1; in update_topology_flags_workfn()
289 int cpu; in topology_normalize_cpu_scale() local
294 capacity_scale = 1; in topology_normalize_cpu_scale()
295 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
296 capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); in topology_normalize_cpu_scale()
301 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
302 capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); in topology_normalize_cpu_scale()
305 topology_set_cpu_scale(cpu, capacity); in topology_normalize_cpu_scale()
306 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", in topology_normalize_cpu_scale()
307 cpu, topology_get_cpu_scale(cpu)); in topology_normalize_cpu_scale()
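Worked example of the normalization above, with made-up capacity-dmips-mhz and frequency values: the largest raw_capacity * capacity_freq_ref product becomes capacity_scale, so that CPU lands exactly at SCHED_CAPACITY_SCALE (1024) and the others are scaled relative to it:

    /* Illustrative values only. */
    u64 big    = 1024ULL * 2800000; /* capacity-dmips-mhz * max freq (kHz) */
    u64 little =  512ULL * 1800000;
    u64 capacity_scale = max(big, little);

    unsigned long big_cap =
        div64_u64(big << SCHED_CAPACITY_SHIFT, capacity_scale);    /* 1024 */
    unsigned long little_cap =
        div64_u64(little << SCHED_CAPACITY_SHIFT, capacity_scale); /* ~329 */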
311 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) in topology_parse_cpu_capacity() argument
321 ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", in topology_parse_cpu_capacity()
333 raw_capacity[cpu] = cpu_capacity; in topology_parse_cpu_capacity()
335 cpu_node, raw_capacity[cpu]); in topology_parse_cpu_capacity()
338 * Update capacity_freq_ref for calculating early boot CPU capacities. in topology_parse_cpu_capacity()
339 * For non-clk CPU DVFS mechanism, there's no way to get the in topology_parse_cpu_capacity()
345 per_cpu(capacity_freq_ref, cpu) = in topology_parse_cpu_capacity()
362 void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) in freq_inv_set_max_ratio() argument
373 int cpu; in topology_init_cpu_capacity_cppc() local
383 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
384 if (!cppc_get_perf_caps(cpu, &perf_caps) && in topology_init_cpu_capacity_cppc()
387 raw_capacity[cpu] = perf_caps.highest_perf; in topology_init_cpu_capacity_cppc()
388 capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
390 per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
392 pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", in topology_init_cpu_capacity_cppc()
393 cpu, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
397 pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); in topology_init_cpu_capacity_cppc()
402 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
403 freq_inv_set_max_ratio(cpu, in topology_init_cpu_capacity_cppc()
404 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in topology_init_cpu_capacity_cppc()
406 capacity = raw_capacity[cpu]; in topology_init_cpu_capacity_cppc()
409 topology_set_cpu_scale(cpu, capacity); in topology_init_cpu_capacity_cppc()
410 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", in topology_init_cpu_capacity_cppc()
411 cpu, topology_get_cpu_scale(cpu)); in topology_init_cpu_capacity_cppc()
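Same normalization for the ACPI CPPC path, except the scale is the largest highest_perf value and the frequency side is handled separately through cppc_perf_to_khz() and the frequency-invariance ratio. With illustrative numbers:

    /*
     * Illustrative numbers only: highest_perf = 300 on the big cores and
     * 180 on the little ones gives capacity_scale = 300, so
     *   big:    (300 << SCHED_CAPACITY_SHIFT) / 300 = 1024
     *   little: (180 << SCHED_CAPACITY_SHIFT) / 300 =  614
     */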
437 int cpu; in init_cpu_capacity_callback() local
442 pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", in init_cpu_capacity_callback()
443 cpumask_pr_args(policy->related_cpus), in init_cpu_capacity_callback()
446 cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); in init_cpu_capacity_callback()
448 for_each_cpu(cpu, policy->related_cpus) { in init_cpu_capacity_callback()
449 per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; in init_cpu_capacity_callback()
450 freq_inv_set_max_ratio(cpu, in init_cpu_capacity_callback()
451 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in init_cpu_capacity_callback()
476 * On ACPI-based systems skip registering cpufreq notifier as cpufreq in register_cpufreq_notifier()
477 * information is not needed for cpu capacity initialization. in register_cpufreq_notifier()
480 return -EINVAL; in register_cpufreq_notifier()
483 return -ENOMEM; in register_cpufreq_notifier()
510	 * This function returns the logical CPU number of the node.
512	 * (1) logical CPU number which is >= 0.
513	 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
514 * there is no possible logical CPU in the kernel to match. This happens
516 * CPU nodes in DT. We need to just ignore this case.
517 * (3) -1 if the node does not exist in the device tree
521 int cpu; in get_cpu_for_node() local
523 of_parse_phandle(node, "cpu", 0); in get_cpu_for_node()
526 return -1; in get_cpu_for_node()
528 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
529 if (cpu >= 0) in get_cpu_for_node()
530 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
532 pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n", in get_cpu_for_node()
535 return cpu; in get_cpu_for_node()
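A sketch of how a caller is expected to handle the three return values documented above (it mirrors what parse_core() below does; the surrounding names are illustrative):

    int cpu = get_cpu_for_node(node);

    if (cpu >= 0) {
        /* A possible logical CPU: go on and record its topology IDs. */
    } else if (cpu == -ENODEV) {
        /* Valid DT node but no matching possible CPU: silently skip it. */
    } else {
        /* cpu == -1: the node has no usable "cpu" phandle, treat as an error. */
        return -EINVAL;
    }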
544 int cpu; in parse_core() local
555 cpu = get_cpu_for_node(t); in parse_core()
556 if (cpu >= 0) { in parse_core()
557 cpu_topology[cpu].package_id = package_id; in parse_core()
558 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
559 cpu_topology[cpu].core_id = core_id; in parse_core()
560 cpu_topology[cpu].thread_id = i; in parse_core()
561 } else if (cpu != -ENODEV) { in parse_core()
562 pr_err("%pOF: Can't get CPU for thread\n", t); in parse_core()
563 return -EINVAL; in parse_core()
566 } while (1); in parse_core()
568 cpu = get_cpu_for_node(core); in parse_core()
569 if (cpu >= 0) { in parse_core()
571 pr_err("%pOF: Core has both threads and CPU\n", in parse_core()
573 return -EINVAL; in parse_core()
576 cpu_topology[cpu].package_id = package_id; in parse_core()
577 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
578 cpu_topology[cpu].core_id = core_id; in parse_core()
579 } else if (leaf && cpu != -ENODEV) { in parse_core()
580 pr_err("%pOF: Can't get CPU for leaf core\n", core); in parse_core()
581 return -EINVAL; in parse_core()
611 ret = parse_cluster(c, package_id, i, depth + 1); in parse_cluster()
617 } while (1); in parse_cluster()
632 pr_err("%pOF: cpu-map children should be clusters\n", c); in parse_cluster()
633 return -EINVAL; in parse_cluster()
641 pr_err("%pOF: Non-leaf cluster with core %s\n", in parse_cluster()
643 return -EINVAL; in parse_cluster()
647 } while (1); in parse_cluster()
670 ret = parse_cluster(c, package_id, -1, 0); in parse_socket()
675 } while (1); in parse_socket()
678 ret = parse_cluster(socket, 0, -1, 0); in parse_socket()
686 int cpu; in parse_dt_topology() local
691 pr_err("No CPU information found in DT\n"); in parse_dt_topology()
696 * When topology is provided cpu-map is essentially a root in parse_dt_topology()
700 of_get_child_by_name(cn, "cpu-map"); in parse_dt_topology()
715 for_each_possible_cpu(cpu) in parse_dt_topology()
716 if (cpu_topology[cpu].package_id < 0) { in parse_dt_topology()
717 return -EINVAL; in parse_dt_topology()
725 * cpu topology table
730 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
732 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
735 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
737 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
740 if (last_level_cache_is_valid(cpu)) { in cpu_coregroup_mask()
741 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
742 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
746 * For systems with no shared cpu-side LLC but with clusters defined, in cpu_coregroup_mask()
751 cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) in cpu_coregroup_mask()
752 core_mask = &cpu_topology[cpu].cluster_sibling; in cpu_coregroup_mask()
757 const struct cpumask *cpu_clustergroup_mask(int cpu) in cpu_clustergroup_mask() argument
763 if (cpumask_subset(cpu_coregroup_mask(cpu), in cpu_clustergroup_mask()
764 &cpu_topology[cpu].cluster_sibling)) in cpu_clustergroup_mask()
765 return topology_sibling_cpumask(cpu); in cpu_clustergroup_mask()
767 return &cpu_topology[cpu].cluster_sibling; in cpu_clustergroup_mask()
773 int cpu, ret; in update_siblings_masks() local
776 if (ret && ret != -ENOENT) in update_siblings_masks()
780 for_each_online_cpu(cpu) { in update_siblings_masks()
781 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
783 if (last_level_cache_is_shared(cpu, cpuid)) { in update_siblings_masks()
784 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
785 cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); in update_siblings_masks()
788 if (cpuid_topo->package_id != cpu_topo->package_id) in update_siblings_masks()
791 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); in update_siblings_masks()
792 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
794 if (cpuid_topo->cluster_id != cpu_topo->cluster_id) in update_siblings_masks()
797 if (cpuid_topo->cluster_id >= 0) { in update_siblings_masks()
798 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
799 cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling); in update_siblings_masks()
802 if (cpuid_topo->core_id != cpu_topo->core_id) in update_siblings_masks()
805 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); in update_siblings_masks()
806 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
810 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
812 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
814 cpumask_clear(&cpu_topo->llc_sibling); in clear_cpu_topology()
815 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
817 cpumask_clear(&cpu_topo->cluster_sibling); in clear_cpu_topology()
818 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
820 cpumask_clear(&cpu_topo->core_sibling); in clear_cpu_topology()
821 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
822 cpumask_clear(&cpu_topo->thread_sibling); in clear_cpu_topology()
823 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
828 unsigned int cpu; in reset_cpu_topology() local
830 for_each_possible_cpu(cpu) { in reset_cpu_topology()
831 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
833 cpu_topo->thread_id = -1; in reset_cpu_topology()
834 cpu_topo->core_id = -1; in reset_cpu_topology()
835 cpu_topo->cluster_id = -1; in reset_cpu_topology()
836 cpu_topo->package_id = -1; in reset_cpu_topology()
838 clear_cpu_topology(cpu); in reset_cpu_topology()
842 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
846 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
847 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
848 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
849 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
850 for_each_cpu(sibling, topology_cluster_cpumask(cpu)) in remove_cpu_topology()
851 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); in remove_cpu_topology()
852 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
853 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
855 clear_cpu_topology(cpu); in remove_cpu_topology()
866 int cpu, ret; in init_cpu_topology() local
877 * arch-specific early cache level detection a chance to run. in init_cpu_topology()
882 for_each_possible_cpu(cpu) { in init_cpu_topology()
883 ret = fetch_cache_info(cpu); in init_cpu_topology()
886 else if (ret != -ENOENT) in init_cpu_topology()
896 if (cpuid_topo->package_id != -1) in store_cpu_topology()
899 cpuid_topo->thread_id = -1; in store_cpu_topology()
900 cpuid_topo->core_id = cpuid; in store_cpu_topology()
901 cpuid_topo->package_id = cpu_to_node(cpuid); in store_cpu_topology()
903 pr_debug("CPU%u: package %d core %d thread %d\n", in store_cpu_topology()
904 cpuid, cpuid_topo->package_id, cpuid_topo->core_id, in store_cpu_topology()
905 cpuid_topo->thread_id); in store_cpu_topology()