xref: /linux/drivers/base/arch_topology.c (revision c89756bcf406af313d191cfe3709e7c175c5b0cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Arch specific cpu topology information
4  *
5  * Copyright (C) 2016, ARM Ltd.
6  * Written by: Juri Lelli, ARM Ltd.
7  */
8 
9 #include <linux/acpi.h>
10 #include <linux/cacheinfo.h>
11 #include <linux/cleanup.h>
12 #include <linux/cpu.h>
13 #include <linux/cpufreq.h>
14 #include <linux/cpu_smt.h>
15 #include <linux/device.h>
16 #include <linux/of.h>
17 #include <linux/slab.h>
18 #include <linux/sched/topology.h>
19 #include <linux/cpuset.h>
20 #include <linux/cpumask.h>
21 #include <linux/init.h>
22 #include <linux/rcupdate.h>
23 #include <linux/sched.h>
24 #include <linux/units.h>
25 
26 #define CREATE_TRACE_POINTS
27 #include <trace/events/hw_pressure.h>
28 
29 static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
30 static struct cpumask scale_freq_counters_mask;
31 static bool scale_freq_invariant;
32 DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
33 EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
34 
35 static bool supports_scale_freq_counters(const struct cpumask *cpus)
36 {
37 	return cpumask_subset(cpus, &scale_freq_counters_mask);
38 }
39 
40 bool topology_scale_freq_invariant(void)
41 {
42 	return cpufreq_supports_freq_invariance() ||
43 	       supports_scale_freq_counters(cpu_online_mask);
44 }
45 
46 static void update_scale_freq_invariant(bool status)
47 {
48 	if (scale_freq_invariant == status)
49 		return;
50 
51 	/*
52 	 * Task scheduler behavior depends on frequency invariance support,
53 	 * either cpufreq or counter driven. If the support status changes as
54 	 * a result of counter initialisation and use, retrigger the build of
55 	 * scheduling domains to ensure the information is propagated properly.
56 	 */
57 	if (topology_scale_freq_invariant() == status) {
58 		scale_freq_invariant = status;
59 		rebuild_sched_domains_energy();
60 	}
61 }
62 
63 void topology_set_scale_freq_source(struct scale_freq_data *data,
64 				    const struct cpumask *cpus)
65 {
66 	struct scale_freq_data *sfd;
67 	int cpu;
68 
69 	/*
70 	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
71 	 * supported by cpufreq.
72 	 */
73 	if (cpumask_empty(&scale_freq_counters_mask))
74 		scale_freq_invariant = topology_scale_freq_invariant();
75 
76 	rcu_read_lock();
77 
78 	for_each_cpu(cpu, cpus) {
79 		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
80 
81 		/* Use ARCH provided counters whenever possible */
82 		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
83 			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
84 			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
85 		}
86 	}
87 
88 	rcu_read_unlock();
89 
90 	update_scale_freq_invariant(true);
91 }
92 EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
93 
94 void topology_clear_scale_freq_source(enum scale_freq_source source,
95 				      const struct cpumask *cpus)
96 {
97 	struct scale_freq_data *sfd;
98 	int cpu;
99 
100 	rcu_read_lock();
101 
102 	for_each_cpu(cpu, cpus) {
103 		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
104 
105 		if (sfd && sfd->source == source) {
106 			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
107 			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
108 		}
109 	}
110 
111 	rcu_read_unlock();
112 
113 	/*
114 	 * Make sure all references to previous sft_data are dropped to avoid
115 	 * use-after-free races.
116 	 */
117 	synchronize_rcu();
118 
119 	update_scale_freq_invariant(false);
120 }
121 EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
122 
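/*
 * Called on the local CPU from the architecture's scheduler-tick hook. If a
 * counter-based scale-freq driver is registered for this CPU, let it refresh
 * the frequency scale factor.
 */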
123 void topology_scale_freq_tick(void)
124 {
125 	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
126 
127 	if (sfd)
128 		sfd->set_freq_scale();
129 }
130 
131 DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
132 EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
133 
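/*
 * Called via arch_set_freq_scale() when cpufreq changes the frequency of
 * @cpus: convert cur_freq/max_freq into a SCHED_CAPACITY_SCALE-relative
 * factor, unless counter-based frequency invariance handles these CPUs.
 */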
134 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
135 			     unsigned long max_freq)
136 {
137 	unsigned long scale;
138 	int i;
139 
140 	if (WARN_ON_ONCE(!cur_freq || !max_freq))
141 		return;
142 
143 	/*
144 	 * If the use of counters for FIE is enabled, just return as we don't
145 	 * want to update the scale factor with information from CPUFREQ.
146 	 * Instead the scale factor will be updated from arch_scale_freq_tick.
147 	 */
148 	if (supports_scale_freq_counters(cpus))
149 		return;
150 
151 	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
152 
153 	for_each_cpu(i, cpus)
154 		per_cpu(arch_freq_scale, i) = scale;
155 }
156 
157 DEFINE_PER_CPU(unsigned long, hw_pressure);
158 
159 /**
160  * topology_update_hw_pressure() - Update HW pressure for CPUs
161  * @cpus        : The related CPUs for which capacity has been reduced
162  * @capped_freq : The maximum allowed frequency that CPUs can run at
163  *
164  * Update the value of HW pressure for all @cpus in the mask. The
165  * cpumask should include all (online+offline) affected CPUs, to avoid
166  * operating on stale data when hot-plug is used for some CPUs. The
167  * @capped_freq reflects the currently allowed max CPU frequency due to
168  * HW capping. It might also be a boost frequency value, which is bigger
169  * than the internal 'capacity_freq_ref' max frequency. In such a case the
170  * pressure value should simply be removed, since this is an indication that
171  * there is no HW throttling. The @capped_freq must be provided in kHz.
172  */
173 void topology_update_hw_pressure(const struct cpumask *cpus,
174 				      unsigned long capped_freq)
175 {
176 	unsigned long max_capacity, capacity, pressure;
177 	u32 max_freq;
178 	int cpu;
179 
180 	cpu = cpumask_first(cpus);
181 	max_capacity = arch_scale_cpu_capacity(cpu);
182 	max_freq = arch_scale_freq_ref(cpu);
183 
184 	/*
185 	 * Properly handle boost frequencies, which should simply clear
186 	 * the HW pressure value.
187 	 */
188 	if (max_freq <= capped_freq)
189 		capacity = max_capacity;
190 	else
191 		capacity = mult_frac(max_capacity, capped_freq, max_freq);
192 
193 	pressure = max_capacity - capacity;
194 
195 	trace_hw_pressure_update(cpu, pressure);
196 
197 	for_each_cpu(cpu, cpus)
198 		WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
199 }
200 EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
201 
202 static void update_topology_flags_workfn(struct work_struct *work);
203 static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
204 
205 static int update_topology;
206 
207 int topology_update_cpu_topology(void)
208 {
209 	return update_topology;
210 }
211 
212 /*
213  * Updating the sched_domains can't be done directly from cpufreq callbacks
214  * due to locking, so queue the work for later.
215  */
216 static void update_topology_flags_workfn(struct work_struct *work)
217 {
218 	update_topology = 1;
219 	rebuild_sched_domains();
220 	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
221 	update_topology = 0;
222 }
223 
224 static u32 *raw_capacity;
225 
226 static int free_raw_capacity(void)
227 {
228 	kfree(raw_capacity);
229 	raw_capacity = NULL;
230 
231 	return 0;
232 }
233 
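/*
 * Normalize the raw DT/CPPC capacities: scale each CPU's raw capacity by its
 * reference frequency and map the largest resulting value across all possible
 * CPUs to SCHED_CAPACITY_SCALE.
 */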
234 void topology_normalize_cpu_scale(void)
235 {
236 	u64 capacity;
237 	u64 capacity_scale;
238 	int cpu;
239 
240 	if (!raw_capacity)
241 		return;
242 
243 	capacity_scale = 1;
244 	for_each_possible_cpu(cpu) {
245 		capacity = raw_capacity[cpu] *
246 			   (per_cpu(capacity_freq_ref, cpu) ?: 1);
247 		capacity_scale = max(capacity, capacity_scale);
248 	}
249 
250 	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
251 	for_each_possible_cpu(cpu) {
252 		capacity = raw_capacity[cpu] *
253 			   (per_cpu(capacity_freq_ref, cpu) ?: 1);
254 		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
255 			capacity_scale);
256 		topology_set_cpu_scale(cpu, capacity);
257 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
258 			cpu, topology_get_cpu_scale(cpu));
259 	}
260 }
261 
262 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
263 {
264 	struct clk *cpu_clk;
265 	static bool cap_parsing_failed;
266 	int ret;
267 	u32 cpu_capacity;
268 
269 	if (cap_parsing_failed)
270 		return false;
271 
272 	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
273 				   &cpu_capacity);
274 	if (!ret) {
275 		if (!raw_capacity) {
276 			raw_capacity = kcalloc(num_possible_cpus(),
277 					       sizeof(*raw_capacity),
278 					       GFP_KERNEL);
279 			if (!raw_capacity) {
280 				cap_parsing_failed = true;
281 				return false;
282 			}
283 		}
284 		raw_capacity[cpu] = cpu_capacity;
285 		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
286 			cpu_node, raw_capacity[cpu]);
287 
288 		/*
289 		 * Update capacity_freq_ref for calculating early boot CPU capacities.
290 		 * For non-clk CPU DVFS mechanisms, there's no way to get the
291 		 * frequency value now, so assume the CPUs are running at the same
292 		 * frequency (by keeping the initial capacity_freq_ref value).
293 		 */
294 		cpu_clk = of_clk_get(cpu_node, 0);
295 		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
296 			per_cpu(capacity_freq_ref, cpu) =
297 				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
298 			clk_put(cpu_clk);
299 		}
300 	} else {
301 		if (raw_capacity) {
302 			pr_err("cpu_capacity: missing %pOF raw capacity\n",
303 				cpu_node);
304 			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
305 		}
306 		cap_parsing_failed = true;
307 		free_raw_capacity();
308 	}
309 
310 	return !ret;
311 }
312 
313 void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
314 {
315 }
316 
317 #ifdef CONFIG_ACPI_CPPC_LIB
318 #include <acpi/cppc_acpi.h>
319 
320 static inline void topology_init_cpu_capacity_cppc(void)
321 {
322 	u64 capacity, capacity_scale = 0;
323 	struct cppc_perf_caps perf_caps;
324 	int cpu;
325 
326 	if (likely(!acpi_cpc_valid()))
327 		return;
328 
329 	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
330 			       GFP_KERNEL);
331 	if (!raw_capacity)
332 		return;
333 
334 	for_each_possible_cpu(cpu) {
335 		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
336 		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
337 		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
338 			raw_capacity[cpu] = perf_caps.highest_perf;
339 			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
340 
341 			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
342 
343 			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
344 				 cpu, raw_capacity[cpu]);
345 			continue;
346 		}
347 
348 		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
349 		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
350 		goto exit;
351 	}
352 
353 	for_each_possible_cpu(cpu) {
354 		freq_inv_set_max_ratio(cpu,
355 				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
356 
357 		capacity = raw_capacity[cpu];
358 		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
359 				     capacity_scale);
360 		topology_set_cpu_scale(cpu, capacity);
361 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
362 			cpu, topology_get_cpu_scale(cpu));
363 	}
364 
365 	schedule_work(&update_topology_flags_work);
366 	pr_debug("cpu_capacity: cpu_capacity initialization done\n");
367 
368 exit:
369 	free_raw_capacity();
370 }
371 void acpi_processor_init_invariance_cppc(void)
372 {
373 	topology_init_cpu_capacity_cppc();
374 }
375 #endif
376 
377 #ifdef CONFIG_CPU_FREQ
378 static cpumask_var_t cpus_to_visit;
379 static void parsing_done_workfn(struct work_struct *work);
380 static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
381 
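/*
 * cpufreq policy notifier: record each CPU's maximum frequency in
 * capacity_freq_ref and, once every possible CPU has been covered by a
 * policy, normalize the raw capacities and unregister the notifier.
 */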
382 static int
383 init_cpu_capacity_callback(struct notifier_block *nb,
384 			   unsigned long val,
385 			   void *data)
386 {
387 	struct cpufreq_policy *policy = data;
388 	int cpu;
389 
390 	if (val != CPUFREQ_CREATE_POLICY)
391 		return 0;
392 
393 	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
394 		 cpumask_pr_args(policy->related_cpus),
395 		 cpumask_pr_args(cpus_to_visit));
396 
397 	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
398 
399 	for_each_cpu(cpu, policy->related_cpus) {
400 		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
401 		freq_inv_set_max_ratio(cpu,
402 				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
403 	}
404 
405 	if (cpumask_empty(cpus_to_visit)) {
406 		if (raw_capacity) {
407 			topology_normalize_cpu_scale();
408 			schedule_work(&update_topology_flags_work);
409 			free_raw_capacity();
410 		}
411 		pr_debug("cpu_capacity: parsing done\n");
412 		schedule_work(&parsing_done_work);
413 	}
414 
415 	return 0;
416 }
417 
418 static struct notifier_block init_cpu_capacity_notifier = {
419 	.notifier_call = init_cpu_capacity_callback,
420 };
421 
422 static int __init register_cpufreq_notifier(void)
423 {
424 	int ret;
425 
426 	/*
427 	 * On ACPI-based systems, skip registering the cpufreq notifier, as
428 	 * cpufreq information is not needed for CPU capacity initialization.
429 	 */
430 	if (!acpi_disabled)
431 		return -EINVAL;
432 
433 	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
434 		return -ENOMEM;
435 
436 	cpumask_copy(cpus_to_visit, cpu_possible_mask);
437 
438 	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
439 					CPUFREQ_POLICY_NOTIFIER);
440 
441 	if (ret)
442 		free_cpumask_var(cpus_to_visit);
443 
444 	return ret;
445 }
446 core_initcall(register_cpufreq_notifier);
447 
448 static void parsing_done_workfn(struct work_struct *work)
449 {
450 	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
451 					 CPUFREQ_POLICY_NOTIFIER);
452 	free_cpumask_var(cpus_to_visit);
453 }
454 
455 #else
456 core_initcall(free_raw_capacity);
457 #endif
458 
459 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
460 
461 /* Maximum SMT threads per core seen while parsing; used to enable SMT control */
462 static unsigned int max_smt_thread_num = 1;
463 
464 /*
465  * This function returns the logical CPU number of the node.
466  * There are basically three kinds of return values:
467  * (1) logical CPU number, which is >= 0.
468  * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
469  * there is no possible logical CPU in the kernel to match. This happens
470  * when CONFIG_NR_CPUS is configured to be smaller than the number of
471  * CPU nodes in the DT. We just need to ignore this case.
472  * (3) -1 if the node does not exist in the device tree.
473  */
474 static int __init get_cpu_for_node(struct device_node *node)
475 {
476 	int cpu;
477 	struct device_node *cpu_node __free(device_node) =
478 		of_parse_phandle(node, "cpu", 0);
479 
480 	if (!cpu_node)
481 		return -1;
482 
483 	cpu = of_cpu_node_to_id(cpu_node);
484 	if (cpu >= 0)
485 		topology_parse_cpu_capacity(cpu_node, cpu);
486 	else
487 		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
488 			cpu_node, cpumask_pr_args(cpu_possible_mask));
489 
490 	return cpu;
491 }
492 
493 static int __init parse_core(struct device_node *core, int package_id,
494 			     int cluster_id, int core_id)
495 {
496 	char name[20];
497 	bool leaf = true;
498 	int i = 0;
499 	int cpu;
500 
501 	do {
502 		snprintf(name, sizeof(name), "thread%d", i);
503 		struct device_node *t __free(device_node) =
504 			of_get_child_by_name(core, name);
505 
506 		if (!t)
507 			break;
508 
509 		leaf = false;
510 		cpu = get_cpu_for_node(t);
511 		if (cpu >= 0) {
512 			cpu_topology[cpu].package_id = package_id;
513 			cpu_topology[cpu].cluster_id = cluster_id;
514 			cpu_topology[cpu].core_id = core_id;
515 			cpu_topology[cpu].thread_id = i;
516 		} else if (cpu != -ENODEV) {
517 			pr_err("%pOF: Can't get CPU for thread\n", t);
518 			return -EINVAL;
519 		}
520 		i++;
521 	} while (1);
522 
523 	max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
524 
525 	cpu = get_cpu_for_node(core);
526 	if (cpu >= 0) {
527 		if (!leaf) {
528 			pr_err("%pOF: Core has both threads and CPU\n",
529 			       core);
530 			return -EINVAL;
531 		}
532 
533 		cpu_topology[cpu].package_id = package_id;
534 		cpu_topology[cpu].cluster_id = cluster_id;
535 		cpu_topology[cpu].core_id = core_id;
536 	} else if (leaf && cpu != -ENODEV) {
537 		pr_err("%pOF: Can't get CPU for leaf core\n", core);
538 		return -EINVAL;
539 	}
540 
541 	return 0;
542 }
543 
544 static int __init parse_cluster(struct device_node *cluster, int package_id,
545 				int cluster_id, int depth)
546 {
547 	char name[20];
548 	bool leaf = true;
549 	bool has_cores = false;
550 	int core_id = 0;
551 	int i, ret;
552 
553 	/*
554 	 * First check for child clusters; we currently ignore any
555 	 * information about the nesting of clusters and present the
556 	 * scheduler with a flat list of them.
557 	 */
558 	i = 0;
559 	do {
560 		snprintf(name, sizeof(name), "cluster%d", i);
561 		struct device_node *c __free(device_node) =
562 			of_get_child_by_name(cluster, name);
563 
564 		if (!c)
565 			break;
566 
567 		leaf = false;
568 		ret = parse_cluster(c, package_id, i, depth + 1);
569 		if (depth > 0)
570 			pr_warn("Topology for clusters of clusters not yet supported\n");
571 		if (ret != 0)
572 			return ret;
573 		i++;
574 	} while (1);
575 
576 	/* Now check for cores */
577 	i = 0;
578 	do {
579 		snprintf(name, sizeof(name), "core%d", i);
580 		struct device_node *c __free(device_node) =
581 			of_get_child_by_name(cluster, name);
582 
583 		if (!c)
584 			break;
585 
586 		has_cores = true;
587 
588 		if (depth == 0) {
589 			pr_err("%pOF: cpu-map children should be clusters\n", c);
590 			return -EINVAL;
591 		}
592 
593 		if (leaf) {
594 			ret = parse_core(c, package_id, cluster_id, core_id++);
595 			if (ret != 0)
596 				return ret;
597 		} else {
598 			pr_err("%pOF: Non-leaf cluster with core %s\n",
599 			       cluster, name);
600 			return -EINVAL;
601 		}
602 
603 		i++;
604 	} while (1);
605 
606 	if (leaf && !has_cores)
607 		pr_warn("%pOF: empty cluster\n", cluster);
608 
609 	return 0;
610 }
611 
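/*
 * Parse the socket%d children of the cpu-map node; if none are present,
 * treat the cpu-map node itself as a single implicit socket.
 */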
612 static int __init parse_socket(struct device_node *socket)
613 {
614 	char name[20];
615 	bool has_socket = false;
616 	int package_id = 0, ret;
617 
618 	do {
619 		snprintf(name, sizeof(name), "socket%d", package_id);
620 		struct device_node *c __free(device_node) =
621 			of_get_child_by_name(socket, name);
622 
623 		if (!c)
624 			break;
625 
626 		has_socket = true;
627 		ret = parse_cluster(c, package_id, -1, 0);
628 		if (ret != 0)
629 			return ret;
630 
631 		package_id++;
632 	} while (1);
633 
634 	if (!has_socket)
635 		ret = parse_cluster(socket, 0, -1, 0);
636 
637 	/*
638 	 * Reset max_smt_thread_num to 1 on failure. On failure we need to
639 	 * notify the framework that SMT is not supported, but
640 	 * max_smt_thread_num may already have been set to the SMT thread count
641 	 * of the cores that were successfully parsed.
642 	 */
643 	if (ret)
644 		max_smt_thread_num = 1;
645 
646 	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
647 
648 	return ret;
649 }
650 
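/*
 * Walk /cpus/cpu-map, parse the socket/cluster/core/thread hierarchy and
 * normalize the CPU capacities parsed along the way.
 */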
651 static int __init parse_dt_topology(void)
652 {
653 	int ret = 0;
654 	int cpu;
655 	struct device_node *cn __free(device_node) =
656 		of_find_node_by_path("/cpus");
657 
658 	if (!cn) {
659 		pr_err("No CPU information found in DT\n");
660 		return 0;
661 	}
662 
663 	/*
664 	 * When topology is provided, cpu-map is essentially a root
665 	 * cluster with restricted subnodes.
666 	 */
667 	struct device_node *map __free(device_node) =
668 		of_get_child_by_name(cn, "cpu-map");
669 
670 	if (!map)
671 		return ret;
672 
673 	ret = parse_socket(map);
674 	if (ret != 0)
675 		return ret;
676 
677 	topology_normalize_cpu_scale();
678 
679 	/*
680 	 * Check that all cores are in the topology; the SMP code will
681 	 * only mark cores described in the DT as possible.
682 	 */
683 	for_each_possible_cpu(cpu)
684 		if (cpu_topology[cpu].package_id < 0) {
685 			return -EINVAL;
686 		}
687 
688 	return ret;
689 }
690 #endif
691 
692 /*
693  * cpu topology table
694  */
695 struct cpu_topology cpu_topology[NR_CPUS];
696 EXPORT_SYMBOL_GPL(cpu_topology);
697 
698 const struct cpumask *cpu_coregroup_mask(int cpu)
699 {
700 	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
701 
702 	/* Find the smallest of NUMA, core or LLC siblings */
703 	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
704 		/* not NUMA in package, let's use the package siblings */
705 		core_mask = &cpu_topology[cpu].core_sibling;
706 	}
707 
708 	if (last_level_cache_is_valid(cpu)) {
709 		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
710 			core_mask = &cpu_topology[cpu].llc_sibling;
711 	}
712 
713 	/*
714 	 * For systems with no shared cpu-side LLC but with clusters defined,
715 	 * extend core_mask to cluster_siblings. The sched domain builder will
716 	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
717 	 */
718 	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
719 	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
720 		core_mask = &cpu_topology[cpu].cluster_sibling;
721 
722 	return core_mask;
723 }
724 
725 const struct cpumask *cpu_clustergroup_mask(int cpu)
726 {
727 	/*
728 	 * Prevent cpu_clustergroup_mask() from spanning the same or more CPUs
729 	 * than cpu_coregroup_mask().
730 	 */
731 	if (cpumask_subset(cpu_coregroup_mask(cpu),
732 			   &cpu_topology[cpu].cluster_sibling))
733 		return topology_sibling_cpumask(cpu);
734 
735 	return &cpu_topology[cpu].cluster_sibling;
736 }
737 
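/*
 * Rebuild the LLC/core/cluster/thread sibling masks of @cpuid against all
 * online CPUs, typically when @cpuid is brought online.
 */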
738 void update_siblings_masks(unsigned int cpuid)
739 {
740 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
741 	int cpu, ret;
742 
743 	ret = detect_cache_attributes(cpuid);
744 	if (ret && ret != -ENOENT)
745 		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
746 
747 	/* update core and thread sibling masks */
748 	for_each_online_cpu(cpu) {
749 		cpu_topo = &cpu_topology[cpu];
750 
751 		if (last_level_cache_is_shared(cpu, cpuid)) {
752 			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
753 			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
754 		}
755 
756 		if (cpuid_topo->package_id != cpu_topo->package_id)
757 			continue;
758 
759 		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
760 		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
761 
762 		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
763 			continue;
764 
765 		if (cpuid_topo->cluster_id >= 0) {
766 			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
767 			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
768 		}
769 
770 		if (cpuid_topo->core_id != cpu_topo->core_id)
771 			continue;
772 
773 		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
774 		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
775 	}
776 }
777 
778 static void clear_cpu_topology(int cpu)
779 {
780 	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
781 
782 	cpumask_clear(&cpu_topo->llc_sibling);
783 	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
784 
785 	cpumask_clear(&cpu_topo->cluster_sibling);
786 	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
787 
788 	cpumask_clear(&cpu_topo->core_sibling);
789 	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
790 	cpumask_clear(&cpu_topo->thread_sibling);
791 	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
792 }
793 
794 void __init reset_cpu_topology(void)
795 {
796 	unsigned int cpu;
797 
798 	for_each_possible_cpu(cpu) {
799 		struct cpu_topology *cpu_topo = &cpu_topology[cpu];
800 
801 		cpu_topo->thread_id = -1;
802 		cpu_topo->core_id = -1;
803 		cpu_topo->cluster_id = -1;
804 		cpu_topo->package_id = -1;
805 
806 		clear_cpu_topology(cpu);
807 	}
808 }
809 
810 void remove_cpu_topology(unsigned int cpu)
811 {
812 	int sibling;
813 
814 	for_each_cpu(sibling, topology_core_cpumask(cpu))
815 		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
816 	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
817 		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
818 	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
819 		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
820 	for_each_cpu(sibling, topology_llc_cpumask(cpu))
821 		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
822 
823 	clear_cpu_topology(cpu);
824 }
825 
826 __weak int __init parse_acpi_topology(void)
827 {
828 	return 0;
829 }
830 
831 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
832 void __init init_cpu_topology(void)
833 {
834 	int cpu, ret;
835 
836 	reset_cpu_topology();
837 	ret = parse_acpi_topology();
838 	if (!ret)
839 		ret = of_have_populated_dt() && parse_dt_topology();
840 
841 	if (ret) {
842 		/*
843 		 * Discard anything that was parsed if we hit an error so we
844 		 * don't use partial information. But do not return yet to give
845 		 * arch-specific early cache level detection a chance to run.
846 		 */
847 		reset_cpu_topology();
848 	}
849 
850 	for_each_possible_cpu(cpu) {
851 		ret = fetch_cache_info(cpu);
852 		if (!ret)
853 			continue;
854 		else if (ret != -ENOENT)
855 			pr_err("Early cacheinfo failed, ret = %d\n", ret);
856 		return;
857 	}
858 }
859 
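/*
 * If the topology of @cpuid was not set up by DT/ACPI, fall back to treating
 * it as a single-threaded core in the package given by its NUMA node, then
 * update the sibling masks.
 */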
860 void store_cpu_topology(unsigned int cpuid)
861 {
862 	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
863 
864 	if (cpuid_topo->package_id != -1)
865 		goto topology_populated;
866 
867 	cpuid_topo->thread_id = -1;
868 	cpuid_topo->core_id = cpuid;
869 	cpuid_topo->package_id = cpu_to_node(cpuid);
870 
871 	pr_debug("CPU%u: package %d core %d thread %d\n",
872 		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
873 		 cpuid_topo->thread_id);
874 
875 topology_populated:
876 	update_siblings_masks(cpuid);
877 }
878 #endif
879