// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hw_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}
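
/*
 * Illustrative sketch only (not part of the original sources): an
 * architecture driver that owns activity counters would register them
 * roughly as follows, assuming a hypothetical amu_set_freq_scale()
 * callback that computes the delivered/maximum frequency ratio and
 * stores it in this CPU's arch_freq_scale:
 *
 *	static struct scale_freq_data amu_sfd = {
 *		.source		= SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale	= amu_set_freq_scale,
 *	};
 *
 *	topology_set_scale_freq_source(&amu_sfd, cpu_present_mask);
 *
 * topology_scale_freq_tick() above is called from the scheduler tick and
 * invokes the registered callback on each CPU in scale_freq_counters_mask.
 */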

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}
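
/*
 * Worked example of the computation above: with SCHED_CAPACITY_SHIFT == 10
 * (SCHED_CAPACITY_SCALE == 1024), a CPU currently running at
 * cur_freq = 1800000 kHz out of max_freq = 2400000 kHz gets
 *
 *	scale = (1800000 << 10) / 2400000 = 768
 *
 * i.e. the scheduler sees it as delivering 768/1024 (75%) of its maximum
 * capacity per tick. The frequencies are hypothetical; only the formula
 * comes from topology_set_freq_scale() above.
 */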

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, hw_pressure);

/**
 * topology_update_hw_pressure() - Update HW pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of HW pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed maximum CPU frequency due to
 * HW capping. It may also be a boost frequency value, which is bigger
 * than the internal 'capacity_freq_ref' max frequency. In that case the
 * pressure value is simply cleared, since this indicates that there is no
 * HW throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_hw_pressure(const struct cpumask *cpus,
				 unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Handle boost frequencies properly: they simply clear the HW
	 * pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	pressure = max_capacity - capacity;

	trace_hw_pressure_update(cpu, pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
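
/*
 * Worked example of the pressure computation above, with hypothetical
 * numbers: max_capacity = 1024, max_freq = 2000000 kHz and a thermal cap
 * of capped_freq = 1500000 kHz give
 *
 *	capacity = 1024 * 1500000 / 2000000 = 768
 *	pressure = 1024 - 768 = 256
 *
 * whereas any capped_freq >= max_freq (e.g. a boost frequency) gives
 * pressure = 0, i.e. no HW throttling is reported.
 */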

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}
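
/*
 * Worked example of the normalization above, with two hypothetical CPU
 * types: a little CPU with capacity-dmips-mhz = 512 and
 * capacity_freq_ref = 1800000 kHz, and a big CPU with
 * capacity-dmips-mhz = 1024 and capacity_freq_ref = 2400000 kHz:
 *
 *	capacity_scale = 1024 * 2400000 = 2457600000
 *	little: (512 * 1800000 << 10) / 2457600000 = 384
 *	big:    (1024 * 2400000 << 10) / 2457600000 = 1024
 *
 * The biggest CPU always ends up at SCHED_CAPACITY_SCALE (1024) and the
 * others are expressed relative to it.
 */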

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU
		 * capacities. For CPUs whose DVFS is not clk-based there is
		 * no way to get the frequency value now, so assume they are
		 * all running at the same frequency (by keeping the initial
		 * capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
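
/*
 * Illustrative device tree fragment (assumed, not taken from these
 * sources) for the property parsed above:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		reg = <0x0>;
 *		capacity-dmips-mhz = <578>;
 *	};
 *
 * The raw value is only meaningful relative to the values of the other
 * CPUs; topology_normalize_cpu_scale() rescales it into the 0..1024
 * capacity range.
 */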

void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering a cpufreq notifier, as
	 * cpufreq information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif
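
/*
 * The parsers below walk the /cpus/cpu-map device tree node. A minimal
 * illustrative layout (hypothetical, matching the "socket%d"/"cluster%d"/
 * "core%d"/"thread%d" node names used below) for one socket with two
 * clusters of two cores each:
 *
 *	cpu-map {
 *		socket0 {
 *			cluster0 {
 *				core0 { cpu = <&cpu0>; };
 *				core1 { cpu = <&cpu1>; };
 *			};
 *			cluster1 {
 *				core0 { cpu = <&cpu2>; };
 *				core1 { cpu = <&cpu3>; };
 *			};
 *		};
 *	};
 *
 * SMT systems additionally nest thread0, thread1, ... nodes under each
 * core node.
 */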

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 *     there is no possible logical CPU in the kernel to match. This happens
 *     when CONFIG_NR_CPUS is configured to be smaller than the number of
 *     CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	int cpu;
	struct device_node *cpu_node __free(device_node) =
		of_parse_phandle(node, "cpu", 0);

	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		struct device_node *t __free(device_node) =
			of_get_child_by_name(core, name);

		if (!t)
			break;

		leaf = false;
		cpu = get_cpu_for_node(t);
		if (cpu >= 0) {
			cpu_topology[cpu].package_id = package_id;
			cpu_topology[cpu].cluster_id = cluster_id;
			cpu_topology[cpu].core_id = core_id;
			cpu_topology[cpu].thread_id = i;
		} else if (cpu != -ENODEV) {
			pr_err("%pOF: Can't get CPU for thread\n", t);
			return -EINVAL;
		}
		i++;
	} while (1);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		leaf = false;
		ret = parse_cluster(c, package_id, i, depth + 1);
		if (depth > 0)
			pr_warn("Topology for clusters of clusters not yet supported\n");
		if (ret != 0)
			return ret;
		i++;
	} while (1);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(cluster, name);

		if (!c)
			break;

		has_cores = true;

		if (depth == 0) {
			pr_err("%pOF: cpu-map children should be clusters\n", c);
			return -EINVAL;
		}

		if (leaf) {
			ret = parse_core(c, package_id, cluster_id, core_id++);
			if (ret != 0)
				return ret;
		} else {
			pr_err("%pOF: Non-leaf cluster with core %s\n",
			       cluster, name);
			return -EINVAL;
		}

		i++;
	} while (1);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		struct device_node *c __free(device_node) =
			of_get_child_by_name(socket, name);

		if (!c)
			break;

		has_socket = true;
		ret = parse_cluster(c, package_id, -1, 0);
		if (ret != 0)
			return ret;

		package_id++;
	} while (1);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}

static int __init parse_dt_topology(void)
{
	int ret = 0;
	int cpu;
	struct device_node *cn __free(device_node) =
		of_find_node_by_path("/cpus");

	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided, cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	struct device_node *map __free(device_node) =
		of_get_child_by_name(cn, "cpu-map");

	if (!map)
		return ret;

	ret = parse_socket(map);
	if (ret != 0)
		return ret;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0)
			return -EINVAL;

	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Prevent cpu_clustergroup_mask() from spanning more CPUs than, or
	 * the same CPUs as, cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif