1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Scheduler topology setup/handling methods 4 */ 5 6 #include <linux/sched/isolation.h> 7 #include <linux/sched/clock.h> 8 #include <linux/bsearch.h> 9 #include "sched.h" 10 11 DEFINE_MUTEX(sched_domains_mutex); 12 void sched_domains_mutex_lock(void) 13 { 14 mutex_lock(&sched_domains_mutex); 15 } 16 void sched_domains_mutex_unlock(void) 17 { 18 mutex_unlock(&sched_domains_mutex); 19 } 20 21 /* Protected by sched_domains_mutex: */ 22 static cpumask_var_t sched_domains_tmpmask; 23 static cpumask_var_t sched_domains_tmpmask2; 24 25 static int __init sched_debug_setup(char *str) 26 { 27 sched_debug_verbose = true; 28 29 return 0; 30 } 31 early_param("sched_verbose", sched_debug_setup); 32 33 static inline bool sched_debug(void) 34 { 35 return sched_debug_verbose; 36 } 37 38 #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name }, 39 const struct sd_flag_debug sd_flag_debug[] = { 40 #include <linux/sched/sd_flags.h> 41 }; 42 #undef SD_FLAG 43 44 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 45 struct cpumask *groupmask) 46 { 47 struct sched_group *group = sd->groups; 48 unsigned long flags = sd->flags; 49 unsigned int idx; 50 51 cpumask_clear(groupmask); 52 53 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); 54 printk(KERN_CONT "span=%*pbl level=%s\n", 55 cpumask_pr_args(sched_domain_span(sd)), sd->name); 56 57 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 58 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 59 } 60 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { 61 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 62 } 63 64 for_each_set_bit(idx, &flags, __SD_FLAG_CNT) { 65 unsigned int flag = BIT(idx); 66 unsigned int meta_flags = sd_flag_debug[idx].meta_flags; 67 68 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && 69 !(sd->child->flags & flag)) 70 printk(KERN_ERR "ERROR: flag %s set here 
but not in child\n", 71 sd_flag_debug[idx].name); 72 73 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && 74 !(sd->parent->flags & flag)) 75 printk(KERN_ERR "ERROR: flag %s set here but not in parent\n", 76 sd_flag_debug[idx].name); 77 } 78 79 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 80 do { 81 if (!group) { 82 printk("\n"); 83 printk(KERN_ERR "ERROR: group is NULL\n"); 84 break; 85 } 86 87 if (cpumask_empty(sched_group_span(group))) { 88 printk(KERN_CONT "\n"); 89 printk(KERN_ERR "ERROR: empty group\n"); 90 break; 91 } 92 93 if (!(sd->flags & SD_NUMA) && 94 cpumask_intersects(groupmask, sched_group_span(group))) { 95 printk(KERN_CONT "\n"); 96 printk(KERN_ERR "ERROR: repeated CPUs\n"); 97 break; 98 } 99 100 cpumask_or(groupmask, groupmask, sched_group_span(group)); 101 102 printk(KERN_CONT " %d:{ span=%*pbl", 103 group->sgc->id, 104 cpumask_pr_args(sched_group_span(group))); 105 106 if ((sd->flags & SD_NUMA) && 107 !cpumask_equal(group_balance_mask(group), sched_group_span(group))) { 108 printk(KERN_CONT " mask=%*pbl", 109 cpumask_pr_args(group_balance_mask(group))); 110 } 111 112 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) 113 printk(KERN_CONT " cap=%lu", group->sgc->capacity); 114 115 if (group == sd->groups && sd->child && 116 !cpumask_equal(sched_domain_span(sd->child), 117 sched_group_span(group))) { 118 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); 119 } 120 121 printk(KERN_CONT " }"); 122 123 group = group->next; 124 125 if (group != sd->groups) 126 printk(KERN_CONT ","); 127 128 } while (group != sd->groups); 129 printk(KERN_CONT "\n"); 130 131 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 132 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 133 134 if (sd->parent && 135 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 136 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); 137 return 0; 138 } 139 140 static void sched_domain_debug(struct sched_domain *sd, int 
cpu) 141 { 142 int level = 0; 143 144 if (!sched_debug_verbose) 145 return; 146 147 if (!sd) { 148 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 149 return; 150 } 151 152 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); 153 154 for (;;) { 155 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 156 break; 157 level++; 158 sd = sd->parent; 159 if (!sd) 160 break; 161 } 162 } 163 164 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */ 165 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) | 166 static const unsigned int SD_DEGENERATE_GROUPS_MASK = 167 #include <linux/sched/sd_flags.h> 168 0; 169 #undef SD_FLAG 170 171 static int sd_degenerate(struct sched_domain *sd) 172 { 173 if (cpumask_weight(sched_domain_span(sd)) == 1) 174 return 1; 175 176 /* Following flags need at least 2 groups */ 177 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && 178 (sd->groups != sd->groups->next)) 179 return 0; 180 181 /* Following flags don't use groups */ 182 if (sd->flags & (SD_WAKE_AFFINE)) 183 return 0; 184 185 return 1; 186 } 187 188 static int 189 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 190 { 191 unsigned long cflags = sd->flags, pflags = parent->flags; 192 193 if (sd_degenerate(parent)) 194 return 1; 195 196 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 197 return 0; 198 199 /* Flags needing groups don't count if only 1 group in parent */ 200 if (parent->groups == parent->groups->next) 201 pflags &= ~SD_DEGENERATE_GROUPS_MASK; 202 203 if (~cflags & pflags) 204 return 0; 205 206 return 1; 207 } 208 209 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 210 DEFINE_STATIC_KEY_FALSE(sched_energy_present); 211 static unsigned int sysctl_sched_energy_aware = 1; 212 static DEFINE_MUTEX(sched_energy_mutex); 213 static bool sched_energy_update; 214 215 static bool sched_is_eas_possible(const struct cpumask *cpu_mask) 216 { 217 
bool any_asym_capacity = false; 218 int i; 219 220 /* EAS is enabled for asymmetric CPU capacity topologies. */ 221 for_each_cpu(i, cpu_mask) { 222 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, i))) { 223 any_asym_capacity = true; 224 break; 225 } 226 } 227 if (!any_asym_capacity) { 228 if (sched_debug()) { 229 pr_info("rd %*pbl: Checking EAS, CPUs do not have asymmetric capacities\n", 230 cpumask_pr_args(cpu_mask)); 231 } 232 return false; 233 } 234 235 /* EAS definitely does *not* handle SMT */ 236 if (sched_smt_active()) { 237 if (sched_debug()) { 238 pr_info("rd %*pbl: Checking EAS, SMT is not supported\n", 239 cpumask_pr_args(cpu_mask)); 240 } 241 return false; 242 } 243 244 if (!arch_scale_freq_invariant()) { 245 if (sched_debug()) { 246 pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported", 247 cpumask_pr_args(cpu_mask)); 248 } 249 return false; 250 } 251 252 if (!cpufreq_ready_for_eas(cpu_mask)) { 253 if (sched_debug()) { 254 pr_info("rd %*pbl: Checking EAS: cpufreq is not ready\n", 255 cpumask_pr_args(cpu_mask)); 256 } 257 return false; 258 } 259 260 return true; 261 } 262 263 void rebuild_sched_domains_energy(void) 264 { 265 mutex_lock(&sched_energy_mutex); 266 sched_energy_update = true; 267 rebuild_sched_domains(); 268 sched_energy_update = false; 269 mutex_unlock(&sched_energy_mutex); 270 } 271 272 #ifdef CONFIG_PROC_SYSCTL 273 static int sched_energy_aware_handler(const struct ctl_table *table, int write, 274 void *buffer, size_t *lenp, loff_t *ppos) 275 { 276 int ret; 277 278 if (write && !capable(CAP_SYS_ADMIN)) 279 return -EPERM; 280 281 if (!sched_is_eas_possible(cpu_active_mask)) { 282 if (write) { 283 return -EOPNOTSUPP; 284 } else { 285 *lenp = 0; 286 return 0; 287 } 288 } 289 290 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 291 if (!ret && write) { 292 if (sysctl_sched_energy_aware != sched_energy_enabled()) 293 rebuild_sched_domains_energy(); 294 } 295 296 return ret; 297 } 298 299 static 
const struct ctl_table sched_energy_aware_sysctls[] = { 300 { 301 .procname = "sched_energy_aware", 302 .data = &sysctl_sched_energy_aware, 303 .maxlen = sizeof(unsigned int), 304 .mode = 0644, 305 .proc_handler = sched_energy_aware_handler, 306 .extra1 = SYSCTL_ZERO, 307 .extra2 = SYSCTL_ONE, 308 }, 309 }; 310 311 static int __init sched_energy_aware_sysctl_init(void) 312 { 313 register_sysctl_init("kernel", sched_energy_aware_sysctls); 314 return 0; 315 } 316 317 late_initcall(sched_energy_aware_sysctl_init); 318 #endif /* CONFIG_PROC_SYSCTL */ 319 320 static void free_pd(struct perf_domain *pd) 321 { 322 struct perf_domain *tmp; 323 324 while (pd) { 325 tmp = pd->next; 326 kfree(pd); 327 pd = tmp; 328 } 329 } 330 331 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) 332 { 333 while (pd) { 334 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) 335 return pd; 336 pd = pd->next; 337 } 338 339 return NULL; 340 } 341 342 static struct perf_domain *pd_init(int cpu) 343 { 344 struct em_perf_domain *obj = em_cpu_get(cpu); 345 struct perf_domain *pd; 346 347 if (!obj) { 348 if (sched_debug()) 349 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); 350 return NULL; 351 } 352 353 pd = kzalloc_obj(*pd); 354 if (!pd) 355 return NULL; 356 pd->em_pd = obj; 357 358 return pd; 359 } 360 361 static void perf_domain_debug(const struct cpumask *cpu_map, 362 struct perf_domain *pd) 363 { 364 if (!sched_debug() || !pd) 365 return; 366 367 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); 368 369 while (pd) { 370 printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }", 371 cpumask_first(perf_domain_span(pd)), 372 cpumask_pr_args(perf_domain_span(pd)), 373 em_pd_nr_perf_states(pd->em_pd)); 374 pd = pd->next; 375 } 376 377 printk(KERN_CONT "\n"); 378 } 379 380 static void destroy_perf_domain_rcu(struct rcu_head *rp) 381 { 382 struct perf_domain *pd; 383 384 pd = container_of(rp, struct perf_domain, rcu); 385 free_pd(pd); 386 } 387 388 static void 
sched_energy_set(bool has_eas) 389 { 390 if (!has_eas && sched_energy_enabled()) { 391 if (sched_debug()) 392 pr_info("%s: stopping EAS\n", __func__); 393 static_branch_disable_cpuslocked(&sched_energy_present); 394 } else if (has_eas && !sched_energy_enabled()) { 395 if (sched_debug()) 396 pr_info("%s: starting EAS\n", __func__); 397 static_branch_enable_cpuslocked(&sched_energy_present); 398 } 399 } 400 401 /* 402 * EAS can be used on a root domain if it meets all the following conditions: 403 * 1. an Energy Model (EM) is available; 404 * 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy. 405 * 3. no SMT is detected. 406 * 4. schedutil is driving the frequency of all CPUs of the rd; 407 * 5. frequency invariance support is present; 408 */ 409 static bool build_perf_domains(const struct cpumask *cpu_map) 410 { 411 int i; 412 struct perf_domain *pd = NULL, *tmp; 413 int cpu = cpumask_first(cpu_map); 414 struct root_domain *rd = cpu_rq(cpu)->rd; 415 416 if (!sysctl_sched_energy_aware) 417 goto free; 418 419 if (!sched_is_eas_possible(cpu_map)) 420 goto free; 421 422 for_each_cpu(i, cpu_map) { 423 /* Skip already covered CPUs. */ 424 if (find_pd(pd, i)) 425 continue; 426 427 /* Create the new pd and add it to the local list. */ 428 tmp = pd_init(i); 429 if (!tmp) 430 goto free; 431 tmp->next = pd; 432 pd = tmp; 433 } 434 435 perf_domain_debug(cpu_map, pd); 436 437 /* Attach the new list of performance domains to the root domain. 
*/ 438 tmp = rd->pd; 439 rcu_assign_pointer(rd->pd, pd); 440 if (tmp) 441 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); 442 443 return !!pd; 444 445 free: 446 free_pd(pd); 447 tmp = rd->pd; 448 rcu_assign_pointer(rd->pd, NULL); 449 if (tmp) 450 call_rcu(&tmp->rcu, destroy_perf_domain_rcu); 451 452 return false; 453 } 454 #else /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL): */ 455 static void free_pd(struct perf_domain *pd) { } 456 #endif /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 457 458 static void free_rootdomain(struct rcu_head *rcu) 459 { 460 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 461 462 cpupri_cleanup(&rd->cpupri); 463 cpudl_cleanup(&rd->cpudl); 464 free_cpumask_var(rd->dlo_mask); 465 free_cpumask_var(rd->rto_mask); 466 free_cpumask_var(rd->online); 467 free_cpumask_var(rd->span); 468 free_pd(rd->pd); 469 kfree(rd); 470 } 471 472 void rq_attach_root(struct rq *rq, struct root_domain *rd) 473 { 474 struct root_domain *old_rd = NULL; 475 struct rq_flags rf; 476 477 rq_lock_irqsave(rq, &rf); 478 479 if (rq->rd) { 480 old_rd = rq->rd; 481 482 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 483 set_rq_offline(rq); 484 485 cpumask_clear_cpu(rq->cpu, old_rd->span); 486 487 /* 488 * If we don't want to free the old_rd yet then 489 * set old_rd to NULL to skip the freeing later 490 * in this function: 491 */ 492 if (!atomic_dec_and_test(&old_rd->refcount)) 493 old_rd = NULL; 494 } 495 496 atomic_inc(&rd->refcount); 497 rq->rd = rd; 498 499 cpumask_set_cpu(rq->cpu, rd->span); 500 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 501 set_rq_online(rq); 502 503 /* 504 * Because the rq is not a task, dl_add_task_root_domain() did not 505 * move the fair server bw to the rd if it already started. 506 * Add it now. 
507 */ 508 if (rq->fair_server.dl_server) 509 __dl_server_attach_root(&rq->fair_server, rq); 510 511 #ifdef CONFIG_SCHED_CLASS_EXT 512 if (rq->ext_server.dl_server) 513 __dl_server_attach_root(&rq->ext_server, rq); 514 #endif 515 516 rq_unlock_irqrestore(rq, &rf); 517 518 if (old_rd) 519 call_rcu(&old_rd->rcu, free_rootdomain); 520 } 521 522 void sched_get_rd(struct root_domain *rd) 523 { 524 atomic_inc(&rd->refcount); 525 } 526 527 void sched_put_rd(struct root_domain *rd) 528 { 529 if (!atomic_dec_and_test(&rd->refcount)) 530 return; 531 532 call_rcu(&rd->rcu, free_rootdomain); 533 } 534 535 static int init_rootdomain(struct root_domain *rd) 536 { 537 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 538 goto out; 539 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 540 goto free_span; 541 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 542 goto free_online; 543 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 544 goto free_dlo_mask; 545 546 #ifdef HAVE_RT_PUSH_IPI 547 rd->rto_cpu = -1; 548 raw_spin_lock_init(&rd->rto_lock); 549 rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); 550 #endif 551 552 rd->visit_cookie = 0; 553 init_dl_bw(&rd->dl_bw); 554 if (cpudl_init(&rd->cpudl) != 0) 555 goto free_rto_mask; 556 557 if (cpupri_init(&rd->cpupri) != 0) 558 goto free_cpudl; 559 return 0; 560 561 free_cpudl: 562 cpudl_cleanup(&rd->cpudl); 563 free_rto_mask: 564 free_cpumask_var(rd->rto_mask); 565 free_dlo_mask: 566 free_cpumask_var(rd->dlo_mask); 567 free_online: 568 free_cpumask_var(rd->online); 569 free_span: 570 free_cpumask_var(rd->span); 571 out: 572 return -ENOMEM; 573 } 574 575 /* 576 * By default the system creates a single root-domain with all CPUs as 577 * members (mimicking the global state we have today). 
578 */ 579 struct root_domain def_root_domain; 580 581 void __init init_defrootdomain(void) 582 { 583 init_rootdomain(&def_root_domain); 584 585 atomic_set(&def_root_domain.refcount, 1); 586 } 587 588 static struct root_domain *alloc_rootdomain(void) 589 { 590 struct root_domain *rd; 591 592 rd = kzalloc_obj(*rd); 593 if (!rd) 594 return NULL; 595 596 if (init_rootdomain(rd) != 0) { 597 kfree(rd); 598 return NULL; 599 } 600 601 return rd; 602 } 603 604 static void free_sched_groups(struct sched_group *sg, int free_sgc) 605 { 606 struct sched_group *tmp, *first; 607 608 if (!sg) 609 return; 610 611 first = sg; 612 do { 613 tmp = sg->next; 614 615 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 616 kfree(sg->sgc); 617 618 if (atomic_dec_and_test(&sg->ref)) 619 kfree(sg); 620 sg = tmp; 621 } while (sg != first); 622 } 623 624 static void destroy_sched_domain(struct sched_domain *sd) 625 { 626 /* 627 * A normal sched domain may have multiple group references, an 628 * overlapping domain, having private groups, only one. Iterate, 629 * dropping group/capacity references, freeing where none remain. 630 */ 631 free_sched_groups(sd->groups, 1); 632 633 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) 634 kfree(sd->shared); 635 kfree(sd); 636 } 637 638 static void destroy_sched_domains_rcu(struct rcu_head *rcu) 639 { 640 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 641 642 while (sd) { 643 struct sched_domain *parent = sd->parent; 644 destroy_sched_domain(sd); 645 sd = parent; 646 } 647 } 648 649 static void destroy_sched_domains(struct sched_domain *sd) 650 { 651 if (sd) 652 call_rcu(&sd->rcu, destroy_sched_domains_rcu); 653 } 654 655 /* 656 * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set 657 * (Last Level Cache Domain) for this allows us to avoid some pointer chasing 658 * select_idle_sibling(). 
659 * 660 * Also keep a unique ID per domain (we use the first CPU number in the cpumask 661 * of the domain), this allows us to quickly tell if two CPUs are in the same 662 * cache domain, see cpus_share_cache(). 663 */ 664 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); 665 DEFINE_PER_CPU(int, sd_llc_size); 666 DEFINE_PER_CPU(int, sd_llc_id); 667 DEFINE_PER_CPU(int, sd_share_id); 668 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 669 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); 670 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 671 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 672 673 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); 674 DEFINE_STATIC_KEY_FALSE(sched_cluster_active); 675 676 static void update_top_cache_domain(int cpu) 677 { 678 struct sched_domain_shared *sds = NULL; 679 struct sched_domain *sd; 680 int id = cpu; 681 int size = 1; 682 683 sd = highest_flag_domain(cpu, SD_SHARE_LLC); 684 if (sd) { 685 id = cpumask_first(sched_domain_span(sd)); 686 size = cpumask_weight(sched_domain_span(sd)); 687 688 /* If sd_llc exists, sd_llc_shared should exist too. */ 689 WARN_ON_ONCE(!sd->shared); 690 sds = sd->shared; 691 } 692 693 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 694 per_cpu(sd_llc_size, cpu) = size; 695 per_cpu(sd_llc_id, cpu) = id; 696 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); 697 698 sd = lowest_flag_domain(cpu, SD_CLUSTER); 699 if (sd) 700 id = cpumask_first(sched_domain_span(sd)); 701 702 /* 703 * This assignment should be placed after the sd_llc_id as 704 * we want this id equals to cluster id on cluster machines 705 * but equals to LLC id on non-Cluster machines. 
706 */ 707 per_cpu(sd_share_id, cpu) = id; 708 709 sd = lowest_flag_domain(cpu, SD_NUMA); 710 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 711 712 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 713 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); 714 715 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL); 716 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); 717 } 718 719 /* 720 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 721 * hold the hotplug lock. 722 */ 723 static void 724 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 725 { 726 struct rq *rq = cpu_rq(cpu); 727 struct sched_domain *tmp; 728 729 /* Remove the sched domains which do not contribute to scheduling. */ 730 for (tmp = sd; tmp; ) { 731 struct sched_domain *parent = tmp->parent; 732 if (!parent) 733 break; 734 735 if (sd_parent_degenerate(tmp, parent)) { 736 tmp->parent = parent->parent; 737 738 /* Pick reference to parent->shared. */ 739 if (parent->shared) { 740 WARN_ON_ONCE(tmp->shared); 741 tmp->shared = parent->shared; 742 parent->shared = NULL; 743 } 744 745 if (parent->parent) { 746 parent->parent->child = tmp; 747 parent->parent->groups->flags = tmp->flags; 748 } 749 750 /* 751 * Transfer SD_PREFER_SIBLING down in case of a 752 * degenerate parent; the spans match for this 753 * so the property transfers. 754 */ 755 if (parent->flags & SD_PREFER_SIBLING) 756 tmp->flags |= SD_PREFER_SIBLING; 757 destroy_sched_domain(parent); 758 } else 759 tmp = tmp->parent; 760 } 761 762 if (sd && sd_degenerate(sd)) { 763 tmp = sd; 764 sd = sd->parent; 765 destroy_sched_domain(tmp); 766 if (sd) { 767 struct sched_group *sg = sd->groups; 768 769 /* 770 * sched groups hold the flags of the child sched 771 * domain for convenience. Clear such flags since 772 * the child is being destroyed. 
773 */ 774 do { 775 sg->flags = 0; 776 } while (sg != sd->groups); 777 778 sd->child = NULL; 779 } 780 } 781 782 sched_domain_debug(sd, cpu); 783 784 rq_attach_root(rq, rd); 785 tmp = rq->sd; 786 rcu_assign_pointer(rq->sd, sd); 787 dirty_sched_domain_sysctl(cpu); 788 destroy_sched_domains(tmp); 789 790 update_top_cache_domain(cpu); 791 } 792 793 struct s_data { 794 struct sched_domain_shared * __percpu *sds; 795 struct sched_domain * __percpu *sd; 796 struct root_domain *rd; 797 }; 798 799 enum s_alloc { 800 sa_rootdomain, 801 sa_sd, 802 sa_sd_shared, 803 sa_sd_storage, 804 sa_none, 805 }; 806 807 /* 808 * Return the canonical balance CPU for this group, this is the first CPU 809 * of this group that's also in the balance mask. 810 * 811 * The balance mask are all those CPUs that could actually end up at this 812 * group. See build_balance_mask(). 813 * 814 * Also see should_we_balance(). 815 */ 816 int group_balance_cpu(struct sched_group *sg) 817 { 818 return cpumask_first(group_balance_mask(sg)); 819 } 820 821 822 /* 823 * NUMA topology (first read the regular topology blurb below) 824 * 825 * Given a node-distance table, for example: 826 * 827 * node 0 1 2 3 828 * 0: 10 20 30 20 829 * 1: 20 10 20 30 830 * 2: 30 20 10 20 831 * 3: 20 30 20 10 832 * 833 * which represents a 4 node ring topology like: 834 * 835 * 0 ----- 1 836 * | | 837 * | | 838 * | | 839 * 3 ----- 2 840 * 841 * We want to construct domains and groups to represent this. The way we go 842 * about doing this is to build the domains on 'hops'. For each NUMA level we 843 * construct the mask of all nodes reachable in @level hops. 
844 * 845 * For the above NUMA topology that gives 3 levels: 846 * 847 * NUMA-2 0-3 0-3 0-3 0-3 848 * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2} 849 * 850 * NUMA-1 0-1,3 0-2 1-3 0,2-3 851 * groups: {0},{1},{3} {0},{1},{2} {1},{2},{3} {0},{2},{3} 852 * 853 * NUMA-0 0 1 2 3 854 * 855 * 856 * As can be seen; things don't nicely line up as with the regular topology. 857 * When we iterate a domain in child domain chunks some nodes can be 858 * represented multiple times -- hence the "overlap" naming for this part of 859 * the topology. 860 * 861 * In order to minimize this overlap, we only build enough groups to cover the 862 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3. 863 * 864 * Because: 865 * 866 * - the first group of each domain is its child domain; this 867 * gets us the first 0-1,3 868 * - the only uncovered node is 2, who's child domain is 1-3. 869 * 870 * However, because of the overlap, computing a unique CPU for each group is 871 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both 872 * groups include the CPUs of Node-0, while those CPUs would not in fact ever 873 * end up at those groups (they would end up in group: 0-1,3). 874 * 875 * To correct this we have to introduce the group balance mask. This mask 876 * will contain those CPUs in the group that can reach this group given the 877 * (child) domain tree. 878 * 879 * With this we can once again compute balance_cpu and sched_group_capacity 880 * relations. 881 * 882 * XXX include words on how balance_cpu is unique and therefore can be 883 * used for sched_group_capacity links. 
884 * 885 * 886 * Another 'interesting' topology is: 887 * 888 * node 0 1 2 3 889 * 0: 10 20 20 30 890 * 1: 20 10 20 20 891 * 2: 20 20 10 20 892 * 3: 30 20 20 10 893 * 894 * Which looks a little like: 895 * 896 * 0 ----- 1 897 * | / | 898 * | / | 899 * | / | 900 * 2 ----- 3 901 * 902 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3 903 * are not. 904 * 905 * This leads to a few particularly weird cases where the sched_domain's are 906 * not of the same number for each CPU. Consider: 907 * 908 * NUMA-2 0-3 0-3 909 * groups: {0-2},{1-3} {1-3},{0-2} 910 * 911 * NUMA-1 0-2 0-3 0-3 1-3 912 * 913 * NUMA-0 0 1 2 3 914 * 915 */ 916 917 918 /* 919 * Build the balance mask; it contains only those CPUs that can arrive at this 920 * group and should be considered to continue balancing. 921 * 922 * We do this during the group creation pass, therefore the group information 923 * isn't complete yet, however since each group represents a (child) domain we 924 * can fully construct this using the sched_domain bits (which are already 925 * complete). 926 */ 927 static void 928 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) 929 { 930 const struct cpumask *sg_span = sched_group_span(sg); 931 struct sd_data *sdd = sd->private; 932 struct sched_domain *sibling; 933 int i; 934 935 cpumask_clear(mask); 936 937 for_each_cpu(i, sg_span) { 938 sibling = *per_cpu_ptr(sdd->sd, i); 939 940 /* 941 * Can happen in the asymmetric case, where these siblings are 942 * unused. The mask will not be empty because those CPUs that 943 * do have the top domain _should_ span the domain. 
944 */ 945 if (!sibling->child) 946 continue; 947 948 /* If we would not end up here, we can't continue from here */ 949 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) 950 continue; 951 952 cpumask_set_cpu(i, mask); 953 } 954 955 /* We must not have empty masks here */ 956 WARN_ON_ONCE(cpumask_empty(mask)); 957 } 958 959 /* 960 * XXX: This creates per-node group entries; since the load-balancer will 961 * immediately access remote memory to construct this group's load-balance 962 * statistics having the groups node local is of dubious benefit. 963 */ 964 static struct sched_group * 965 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) 966 { 967 struct sched_group *sg; 968 struct cpumask *sg_span; 969 970 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 971 GFP_KERNEL, cpu_to_node(cpu)); 972 973 if (!sg) 974 return NULL; 975 976 sg_span = sched_group_span(sg); 977 if (sd->child) { 978 cpumask_copy(sg_span, sched_domain_span(sd->child)); 979 sg->flags = sd->child->flags; 980 } else { 981 cpumask_copy(sg_span, sched_domain_span(sd)); 982 } 983 984 atomic_inc(&sg->ref); 985 return sg; 986 } 987 988 static void init_overlap_sched_group(struct sched_domain *sd, 989 struct sched_group *sg) 990 { 991 struct cpumask *mask = sched_domains_tmpmask2; 992 struct sd_data *sdd = sd->private; 993 struct cpumask *sg_span; 994 int cpu; 995 996 build_balance_mask(sd, sg, mask); 997 cpu = cpumask_first(mask); 998 999 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 1000 if (atomic_inc_return(&sg->sgc->ref) == 1) 1001 cpumask_copy(group_balance_mask(sg), mask); 1002 else 1003 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); 1004 1005 /* 1006 * Initialize sgc->capacity such that even if we mess up the 1007 * domains and no possible iteration will get us here, we won't 1008 * die on a /0 trap. 
1009 */ 1010 sg_span = sched_group_span(sg); 1011 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 1012 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 1013 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 1014 } 1015 1016 static struct sched_domain * 1017 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling) 1018 { 1019 /* 1020 * The proper descendant would be the one whose child won't span out 1021 * of sd 1022 */ 1023 while (sibling->child && 1024 !cpumask_subset(sched_domain_span(sibling->child), 1025 sched_domain_span(sd))) 1026 sibling = sibling->child; 1027 1028 /* 1029 * As we are referencing sgc across different topology level, we need 1030 * to go down to skip those sched_domains which don't contribute to 1031 * scheduling because they will be degenerated in cpu_attach_domain 1032 */ 1033 while (sibling->child && 1034 cpumask_equal(sched_domain_span(sibling->child), 1035 sched_domain_span(sibling))) 1036 sibling = sibling->child; 1037 1038 return sibling; 1039 } 1040 1041 static int 1042 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 1043 { 1044 struct sched_group *first = NULL, *last = NULL, *sg; 1045 const struct cpumask *span = sched_domain_span(sd); 1046 struct cpumask *covered = sched_domains_tmpmask; 1047 struct sd_data *sdd = sd->private; 1048 struct sched_domain *sibling; 1049 int i; 1050 1051 cpumask_clear(covered); 1052 1053 for_each_cpu_wrap(i, span, cpu) { 1054 struct cpumask *sg_span; 1055 1056 if (cpumask_test_cpu(i, covered)) 1057 continue; 1058 1059 sibling = *per_cpu_ptr(sdd->sd, i); 1060 1061 /* 1062 * Asymmetric node setups can result in situations where the 1063 * domain tree is of unequal depth, make sure to skip domains 1064 * that already cover the entire range. 1065 * 1066 * In that case build_sched_domains() will have terminated the 1067 * iteration early and our sibling sd spans will be empty. 1068 * Domains should always include the CPU they're built on, so 1069 * check that. 
1070 */ 1071 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 1072 continue; 1073 1074 /* 1075 * Usually we build sched_group by sibling's child sched_domain 1076 * But for machines whose NUMA diameter are 3 or above, we move 1077 * to build sched_group by sibling's proper descendant's child 1078 * domain because sibling's child sched_domain will span out of 1079 * the sched_domain being built as below. 1080 * 1081 * Smallest diameter=3 topology is: 1082 * 1083 * node 0 1 2 3 1084 * 0: 10 20 30 40 1085 * 1: 20 10 20 30 1086 * 2: 30 20 10 20 1087 * 3: 40 30 20 10 1088 * 1089 * 0 --- 1 --- 2 --- 3 1090 * 1091 * NUMA-3 0-3 N/A N/A 0-3 1092 * groups: {0-2},{1-3} {1-3},{0-2} 1093 * 1094 * NUMA-2 0-2 0-3 0-3 1-3 1095 * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2} 1096 * 1097 * NUMA-1 0-1 0-2 1-3 2-3 1098 * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2} 1099 * 1100 * NUMA-0 0 1 2 3 1101 * 1102 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the 1103 * group span isn't a subset of the domain span. 1104 */ 1105 if (sibling->child && 1106 !cpumask_subset(sched_domain_span(sibling->child), span)) 1107 sibling = find_descended_sibling(sd, sibling); 1108 1109 sg = build_group_from_child_sched_domain(sibling, cpu); 1110 if (!sg) 1111 goto fail; 1112 1113 sg_span = sched_group_span(sg); 1114 cpumask_or(covered, covered, sg_span); 1115 1116 init_overlap_sched_group(sibling, sg); 1117 1118 if (!first) 1119 first = sg; 1120 if (last) 1121 last->next = sg; 1122 last = sg; 1123 last->next = first; 1124 } 1125 sd->groups = first; 1126 1127 return 0; 1128 1129 fail: 1130 free_sched_groups(first, 0); 1131 1132 return -ENOMEM; 1133 } 1134 1135 1136 /* 1137 * Package topology (also see the load-balance blurb in fair.c) 1138 * 1139 * The scheduler builds a tree structure to represent a number of important 1140 * topology features. 
By default (default_topology[]) these include: 1141 * 1142 * - Simultaneous multithreading (SMT) 1143 * - Multi-Core Cache (MC) 1144 * - Package (PKG) 1145 * 1146 * Where the last one more or less denotes everything up to a NUMA node. 1147 * 1148 * The tree consists of 3 primary data structures: 1149 * 1150 * sched_domain -> sched_group -> sched_group_capacity 1151 * ^ ^ ^ ^ 1152 * `-' `-' 1153 * 1154 * The sched_domains are per-CPU and have a two way link (parent & child) and 1155 * denote the ever growing mask of CPUs belonging to that level of topology. 1156 * 1157 * Each sched_domain has a circular (double) linked list of sched_group's, each 1158 * denoting the domains of the level below (or individual CPUs in case of the 1159 * first domain level). The sched_group linked by a sched_domain includes the 1160 * CPU of that sched_domain [*]. 1161 * 1162 * Take for instance a 2 threaded, 2 core, 2 cache cluster part: 1163 * 1164 * CPU 0 1 2 3 4 5 6 7 1165 * 1166 * PKG [ ] 1167 * MC [ ] [ ] 1168 * SMT [ ] [ ] [ ] [ ] 1169 * 1170 * - or - 1171 * 1172 * PKG 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7 1173 * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7 1174 * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7 1175 * 1176 * CPU 0 1 2 3 4 5 6 7 1177 * 1178 * One way to think about it is: sched_domain moves you up and down among these 1179 * topology levels, while sched_group moves you sideways through it, at child 1180 * domain granularity. 1181 * 1182 * sched_group_capacity ensures each unique sched_group has shared storage. 1183 * 1184 * There are two related construction problems, both require a CPU that 1185 * uniquely identify each group (for a given domain): 1186 * 1187 * - The first is the balance_cpu (see should_we_balance() and the 1188 * load-balance blurb in fair.c); for each group we only want 1 CPU to 1189 * continue balancing at a higher domain. 1190 * 1191 * - The second is the sched_group_capacity; we want all identical groups 1192 * to share a single sched_group_capacity. 
 *
 * Since these topologies are exclusive by construction, it is
 * impossible for an SMT thread to belong to multiple cores, and cores to
 * be part of multiple caches. There is a very clear and unique location
 * for each CPU in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set all 1s; all CPUs in a group will end up at _that_
 * group), we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */

/*
 * Return the canonical sched_group for @cpu at the topology level described
 * by @sdd, initializing its span, balance mask and default capacity the
 * first time it is seen. Subsequent visits only bump the reference counts
 * (consumed later by claim_allocations()) and return the already
 * initialized group.
 */
static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;
	struct sched_group *sg;
	bool already_visited;

	/*
	 * All CPUs of a child domain map to the same group; use the first
	 * CPU of the child span as that group's canonical representative.
	 */
	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	sg = *per_cpu_ptr(sdd->sg, cpu);
	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

	/* Increase refcounts for claim_allocations: */
	already_visited = atomic_inc_return(&sg->ref) > 1;
	/* sgc visits should follow a similar trend as sg */
	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));

	/* If we have already visited that group, it's already initialized. */
	if (already_visited)
		return sg;

	if (child) {
		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
		sg->flags = child->flags;
	} else {
		/* Lowest level: the group is the single CPU itself. */
		cpumask_set_cpu(cpu, sched_group_span(sg));
		cpumask_set_cpu(cpu, group_balance_mask(sg));
	}

	/* Nominal capacity; refined later by update_group_capacity(). */
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

	return sg;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, will set each group's ->cpumask correctly,
 * and will initialize their ->sgc.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	/*
	 * Walk the span starting at @cpu so that the local group comes
	 * first; 'covered' ensures each group is linked exactly once.
	 */
	for_each_cpu_wrap(i, span, cpu) {
		struct sched_group *sg;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = get_group(i, sdd);

		cpumask_or(covered, covered, sched_group_span(sg));

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	/* Close the circular list. */
	last->next = first;
	sd->groups = first;

	return 0;
}

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be same
 * unless there are asymmetries in the topology. If there are asymmetries,
 * group having more cpu_capacity will pickup more load compared to the
 * group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;
	struct cpumask *mask = sched_domains_tmpmask2;

	WARN_ON(!sg);

	do {
		int cpu, cores = 0, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_span(sg));

		/*
		 * Count cores: strip each visited CPU's SMT siblings from
		 * the working mask so every core is counted once.
		 */
		cpumask_copy(mask, sched_group_span(sg));
		for_each_cpu(cpu, mask) {
			cores++;
#ifdef CONFIG_SCHED_SMT
			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
#endif
		}
		sg->cores = cores;

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		/* Highest-priority CPU of the group per sched_asym_prefer(). */
		for_each_cpu(cpu, sched_group_span(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	/* Only the balance CPU of the local group updates capacities. */
	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/* Update the "asym_prefer_cpu" when arch_asym_cpu_priority() changes. */
void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
{
	int asym_prefer_cpu = cpu;
	struct sched_domain *sd;

	guard(rcu)();

	for_each_domain(cpu, sd) {
		struct sched_group *sg;
		int group_cpu;

		if (!(sd->flags & SD_ASYM_PACKING))
			continue;

		/*
		 * Groups of overlapping domain are replicated per NUMA
		 * node and will require updating "asym_prefer_cpu" on
		 * each local copy.
		 *
		 * If you are hitting this warning, consider moving
		 * "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu"
		 * which is shared by all the overlapping groups.
		 */
		WARN_ON_ONCE(sd->flags & SD_NUMA);

		sg = sd->groups;
		if (cpu != sg->asym_prefer_cpu) {
			/*
			 * Since the parent is a superset of the current group,
			 * if the cpu is not the "asym_prefer_cpu" at the
			 * current level, it cannot be the preferred CPU at a
			 * higher levels either.
			 */
			if (!sched_asym_prefer(cpu, sg->asym_prefer_cpu))
				return;

			WRITE_ONCE(sg->asym_prefer_cpu, cpu);
			continue;
		}

		/* Ranking has improved; CPU is still the preferred one. */
		if (new_prio >= old_prio)
			continue;

		/* Priority dropped: re-scan the group for a new preferred CPU. */
		for_each_cpu(group_cpu, sched_group_span(sg)) {
			if (sched_asym_prefer(group_cpu, asym_prefer_cpu))
				asym_prefer_cpu = group_cpu;
		}

		WRITE_ONCE(sg->asym_prefer_cpu, asym_prefer_cpu);
	}
}

/*
 * Set of available CPUs grouped by their corresponding capacities
 * Each list entry contains a CPU mask reflecting CPUs that share the same
 * capacity.
 * The lifespan of data is unlimited.
 */
LIST_HEAD(asym_cap_list);

/*
 * Verify whether there is any CPU capacity asymmetry in a given sched domain.
 * Provides sd_flags reflecting the asymmetry scope.
 */
static inline int
asym_cpu_capacity_classify(const struct cpumask *sd_span,
			   const struct cpumask *cpu_map)
{
	struct asym_cap_data *entry;
	int count = 0, miss = 0;

	/*
	 * Count how many unique CPU capacities this domain spans across
	 * (compare sched_domain CPUs mask with ones representing available
	 * CPUs capacities). Take into account CPUs that might be offline:
	 * skip those.
	 */
	list_for_each_entry(entry, &asym_cap_list, link) {
		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
			++count;
		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
			++miss;
	}

	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));

	/* No asymmetry detected */
	if (count < 2)
		return 0;
	/* Some of the available CPU capacity values have not been detected */
	if (miss)
		return SD_ASYM_CPUCAPACITY;

	/* Full asymmetry */
	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;

}

/* RCU callback: release an asym_cap_data entry after the grace period. */
static void free_asym_cap_entry(struct rcu_head *head)
{
	struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu);
	kfree(entry);
}

static inline void asym_cpu_capacity_update_data(int cpu)
{
	unsigned long capacity = arch_scale_cpu_capacity(cpu);
	struct asym_cap_data *insert_entry = NULL;
	struct asym_cap_data *entry;

	/*
	 * Search if capacity already exists. If not, track the entry
	 * after which we should insert to keep the list ordered descending.
	 */
	list_for_each_entry(entry, &asym_cap_list, link) {
		if (capacity == entry->capacity)
			goto done;
		else if (!insert_entry && capacity > entry->capacity)
			insert_entry = list_prev_entry(entry, link);
	}

	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
		return;
	entry->capacity = capacity;

	/* If NULL then the new capacity is the smallest, add last. */
	if (!insert_entry)
		list_add_tail_rcu(&entry->link, &asym_cap_list);
	else
		list_add_rcu(&entry->link, &insert_entry->link);
done:
	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
}

/*
 * Build-up/update list of CPUs grouped by their capacities
 * An update requires explicit request to rebuild sched domains
 * with state indicating CPU topology changes.
 */
static void asym_cpu_capacity_scan(void)
{
	struct asym_cap_data *entry, *next;
	int cpu;

	list_for_each_entry(entry, &asym_cap_list, link)
		cpumask_clear(cpu_capacity_span(entry));

	/* Re-populate from possible CPUs that take part in load balancing. */
	for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
		asym_cpu_capacity_update_data(cpu);

	/* Retire entries whose capacity is no longer represented. */
	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
		if (cpumask_empty(cpu_capacity_span(entry))) {
			list_del_rcu(&entry->link);
			call_rcu(&entry->rcu, free_asym_cap_entry);
		}
	}

	/*
	 * Only one capacity value has been detected i.e. this system is symmetric.
	 * No need to keep this data around.
	 */
	if (list_is_singular(&asym_cap_list)) {
		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
		list_del_rcu(&entry->link);
		call_rcu(&entry->rcu, free_asym_cap_entry);
	}
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

/* Parse the "relax_domain_level=" boot parameter. */
static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

/*
 * Apply the requested (or default) relax_domain_level to @sd: levels at or
 * above the request have wake/newidle balancing disabled.
 */
static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;

	if (sd->level >= request) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __sds_free(struct s_data *d, const struct cpumask *cpu_map);
static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map);

/*
 * Tear down the allocations made by __visit_domain_allocation_hell() up to
 * (and including) stage @what; the cases deliberately fall through so a
 * later stage also frees everything from the earlier stages.
 */
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		fallthrough;
	case sa_sd:
		free_percpu(d->sd);
		fallthrough;
	case sa_sd_shared:
		__sds_free(d, cpu_map);
		fallthrough;
	case sa_sd_storage:
		__sdt_free(cpu_map);
		fallthrough;
	case sa_none:
		break;
	}
}

/*
 * Allocate everything build_sched_domains() needs. Returns the highest
 * stage successfully reached; anything short of sa_rootdomain signals a
 * failure, and the return value tells __free_domain_allocs() how much to
 * unwind.
 */
static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	if (__sds_alloc(d, cpu_map))
		return sa_sd_shared;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_shared;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;

	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct s_data *d)
{
	struct sched_domain *sd;

	if (atomic_read(&(*per_cpu_ptr(d->sds, cpu))->ref))
		*per_cpu_ptr(d->sds, cpu) = NULL;

	for (sd = *per_cpu_ptr(d->sd, cpu); sd; sd = sd->parent) {
		struct sd_data *sdd = sd->private;

		WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
		*per_cpu_ptr(sdd->sd, cpu) = NULL;

		/* sg/sgc are shared: claim only if referenced. */
		if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
			*per_cpu_ptr(sdd->sg, cpu) = NULL;

		if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
			*per_cpu_ptr(sdd->sgc, cpu) = NULL;
	}
}

#ifdef CONFIG_NUMA
enum numa_topology_type sched_numa_topology_type;

/*
 * sched_domains_numa_distance is derived from sched_numa_node_distance
 * and provides a simplified view of NUMA distances used specifically
 * for building NUMA scheduling domains.
 */
static int sched_domains_numa_levels;
static int sched_numa_node_levels;

int sched_max_numa_distance;
static int *sched_domains_numa_distance;
static int *sched_numa_node_distance;
static struct cpumask ***sched_domains_numa_masks;
#endif /* CONFIG_NUMA */

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function. For details, see include/linux/sched/sd_flags.h.
 *
 * SD_SHARE_CPUCAPACITY
 * SD_SHARE_LLC
 * SD_CLUSTER
 * SD_NUMA
 *
 * Odd one out, which beside describing the topology has a quirk also
 * prescribes the desired behaviour that goes along with it:
 *
 * SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_CLUSTER		|	\
	 SD_SHARE_LLC		|	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING)

/*
 * Initialize the per-CPU sched_domain for topology level @tl and @cpu:
 * compute its span from @cpu_map and the level's mask, validate the
 * level-provided topology flags, then translate those topology properties
 * into balancing behaviour. Returns the initialized domain.
 */
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;
	struct cpumask *sd_span;
	u64 now = sched_clock();

	sd_span = sched_domain_span(sd);
	cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
	sd_weight = cpumask_weight(sd_span);
	sd_id = cpumask_first(sd_span);

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	/* Reject (and mask off) flags a topology level may not set. */
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= TOPOLOGY_SD_FLAGS;
	sd_flags |= asym_cpu_capacity_classify(sd_span, cpu_map);

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 16,
		.imbalance_pct		= 117,

		.cache_nice_tries	= 0,

		/* 1*/0* make the default flag set explicit and greppable. */
		.flags			= 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_LLC
					| 0*SD_SERIALIZE
					| 1*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,

		/* 50% success rate */
		.newidle_call		= 512,
		.newidle_success	= 256,
		.newidle_ratio		= 512,
		.newidle_stamp		= now,

		.max_newidle_lb_cost	= 0,
		.last_decay_max_lb_cost	= jiffies,
		.child			= child,
		.name			= tl->name,
	};

	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
		  "CPU capacity asymmetry not supported on SMT\n");

	/*
	 * Convert topological properties into behaviour.
	 */
	/* Don't attempt to spread across CPUs of different capacities. */
	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
		sd->child->flags &= ~SD_PREFER_SIBLING;

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->imbalance_pct = 110;

	} else if (sd->flags & SD_SHARE_LLC) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;

		sd->flags &= ~SD_PREFER_SIBLING;
		sd->flags |= SD_SERIALIZE;
		/* Distant nodes: migrating tasks there is rarely worth it. */
		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif /* CONFIG_NUMA */
	} else {
		sd->cache_nice_tries = 1;
	}

	sd->private = sdd;

	return sd;
}

#ifdef CONFIG_SCHED_SMT
int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}

const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
{
	return cpu_smt_mask(cpu);
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
int cpu_cluster_flags(void)
{
	return SD_CLUSTER | SD_SHARE_LLC;
}

const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)
{
	return cpu_clustergroup_mask(cpu);
}
#endif

#ifdef CONFIG_SCHED_MC
int cpu_core_flags(void)
{
	return SD_SHARE_LLC;
}

const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu)
{
	return cpu_coregroup_mask(cpu);
}
#endif

const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
{
	return cpu_node_mask(cpu);
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
#endif

#ifdef CONFIG_SCHED_CLUSTER
	SDTL_INIT(tl_cls_mask, cpu_cluster_flags, CLS),
#endif

#ifdef CONFIG_SCHED_MC
	SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
#endif
	SDTL_INIT(tl_pkg_mask, NULL, PKG),
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;
static struct sched_domain_topology_level *sched_domain_topology_saved;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

/* Replace the topology table; only valid before SMP scheduling starts. */
void __init set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
	sched_domain_topology_saved = NULL;
}

#ifdef CONFIG_NUMA
static int cpu_numa_flags(void)
{
	return SD_NUMA;
}

static const struct cpumask *sd_numa_mask(struct sched_domain_topology_level *tl, int cpu)
{
	return sched_domains_numa_masks[tl->numa_level][cpu_to_node(cpu)];
}

/*
 * Print @str and the node distance table once; subsequent calls are
 * no-ops. CPU-less node distances are shown parenthesized.
 */
static void sched_numa_warn(const char *str)
{
	static int done = false;	/* NOTE(review): bool literal in an int - consider 'static bool' */
	int i,j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING " ");
		for (j = 0; j < nr_node_ids; j++) {
			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
				printk(KERN_CONT "(%02d) ", node_distance(i,j));
			else
				printk(KERN_CONT " %02d ", node_distance(i,j));
		}
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

/* Return true iff @distance occurs in the recorded NUMA distance table. */
bool find_numa_distance(int distance)
{
	bool found = false;
	int i, *distances;

	/* The local distance is always present. */
	if (distance == node_distance(0, 0))
		return true;

	rcu_read_lock();
	distances = rcu_dereference(sched_numa_node_distance);
	if (!distances)
		goto unlock;
	for (i = 0; i < sched_numa_node_levels; i++) {
		if (distances[i] == distance) {
			found = true;
			break;
		}
	}
unlock:
	rcu_read_unlock();

	return found;
}

/* Iterate all nodes with CPUs except @nbut. */
#define for_each_cpu_node_but(n, nbut)		\
	for_each_node_state(n, N_CPU)		\
		if (n == nbut)			\
			continue;		\
		else

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(int offline_node)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 2) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_cpu_node_but(a, offline_node) {
		for_each_cpu_node_but(b, offline_node) {
			/* Find two nodes furthest removed from each other. */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_cpu_node_but(c, offline_node) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}

	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
	sched_numa_topology_type = NUMA_DIRECT;
}


#define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)

/*
 * An architecture could modify its NUMA distance, to change
 * grouping of NUMA nodes and number of NUMA levels when creating
 * NUMA level sched domains.
 *
 * A NUMA level is created for each unique
 * arch_sched_node_distance.
 */
static int numa_node_dist(int i, int j)
{
	return node_distance(i, j);
}

int arch_sched_node_distance(int from, int to)
	__weak __alias(numa_node_dist);

/* True when the architecture overrides the weak default above. */
static bool modified_sched_node_distance(void)
{
	return numa_node_dist != arch_sched_node_distance;
}

/*
 * Collect the sorted set of unique NUMA distances produced by @n_dist,
 * skipping @offline_node. On success *@dist holds a kzalloc'ed ascending
 * array (ownership passes to the caller) and *@levels its length.
 * Returns 0, -ENOMEM, or -EINVAL for an out-of-range distance.
 */
static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
				  int **dist, int *levels)
{
	unsigned long *distance_map __free(bitmap) = NULL;
	int nr_levels = 0;
	int i, j;
	int *distances;

	/*
	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 */
	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
	if (!distance_map)
		return -ENOMEM;

	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
	for_each_cpu_node_but(i, offline_node) {
		for_each_cpu_node_but(j, offline_node) {
			int distance = n_dist(i, j);

			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
				sched_numa_warn("Invalid distance value range");
				return -EINVAL;
			}

			bitmap_set(distance_map, distance, 1);
		}
	}
	/*
	 * We can now figure out how many unique distance values there are and
	 * allocate memory accordingly.
	 */
	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);

	distances = kzalloc_objs(int, nr_levels);
	if (!distances)
		return -ENOMEM;

	/* Walk set bits in ascending order; bit index == distance value. */
	for (i = 0, j = 0; i < nr_levels; i++, j++) {
		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
		distances[i] = j;
	}
	*dist = distances;
	*levels = nr_levels;

	return 0;
}

/*
 * Build the NUMA view of the topology: record distance tables, construct
 * the per-level/per-node hop masks, and extend the topology table with one
 * NODE level plus one level per non-local unique distance. @offline_node
 * (or NUMA_NO_NODE) is excluded from the scan.
 */
void sched_init_numa(int offline_node)
{
	struct sched_domain_topology_level *tl;
	int nr_levels, nr_node_levels;
	int i, j;
	int *distances, *domain_distances;
	struct cpumask ***masks;

	/* Record the NUMA distances from SLIT table */
	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
				   &nr_node_levels))
		return;

	/* Record modified NUMA distances for building sched domains */
	if (modified_sched_node_distance()) {
		if (sched_record_numa_dist(offline_node, arch_sched_node_distance,
					   &domain_distances, &nr_levels)) {
			kfree(distances);
			return;
		}
	} else {
		/* No override: both views share one array. */
		domain_distances = distances;
		nr_levels = nr_node_levels;
	}
	rcu_assign_pointer(sched_numa_node_distance, distances);
	WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);
	WRITE_ONCE(sched_numa_node_levels, nr_node_levels);

	/*
	 * 'nr_levels' contains the number of unique distances
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain fewer than 'nr_levels' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'nr_levels' at the end of this function.
	 */
	rcu_assign_pointer(sched_domains_numa_distance, domain_distances);

	sched_domains_numa_levels = 0;

	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
	if (!masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 *
	 * NOTE(review): the early returns below leave partially built
	 * 'masks' unfreed on allocation failure -- TODO confirm this is an
	 * accepted boot-time trade-off.
	 */
	for (i = 0; i < nr_levels; i++) {
		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!masks[i])
			return;

		for_each_cpu_node_but(j, offline_node) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			int k;

			if (!mask)
				return;

			masks[i][j] = mask;

			for_each_cpu_node_but(k, offline_node) {
				if (sched_debug() &&
				    (arch_sched_node_distance(j, k) !=
				     arch_sched_node_distance(k, j)))
					sched_numa_warn("Node-distance not symmetric");

				if (arch_sched_node_distance(j, k) >
				    sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}
	rcu_assign_pointer(sched_domains_numa_masks, masks);

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + nr_levels + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * Add the NUMA identity distance, aka single NODE.
	 */
	tl[i++] = SDTL_INIT(sd_numa_mask, NULL, NODE);

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 1; j < nr_levels; i++, j++) {
		tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
		tl[i].numa_level = j;
	}

	sched_domain_topology_saved = sched_domain_topology;
	sched_domain_topology = tl;

	sched_domains_numa_levels = nr_levels;

	init_numa_topology_type(offline_node);
}


/*
 * Undo sched_init_numa(): unpublish the RCU-visible tables, wait one grace
 * period, free them, and restore the saved topology table.
 */
static void sched_reset_numa(void)
{
	int nr_levels, *distances, *dom_distances = NULL;
	struct cpumask ***masks;

	nr_levels = sched_domains_numa_levels;
	sched_numa_node_levels = 0;
	sched_domains_numa_levels = 0;
	sched_max_numa_distance = 0;
	sched_numa_topology_type = NUMA_DIRECT;
	distances = sched_numa_node_distance;
	/* Free the domain view separately only if it was a distinct array. */
	if (sched_numa_node_distance != sched_domains_numa_distance)
		dom_distances = sched_domains_numa_distance;
	rcu_assign_pointer(sched_numa_node_distance, NULL);
	rcu_assign_pointer(sched_domains_numa_distance, NULL);
	masks = sched_domains_numa_masks;
	rcu_assign_pointer(sched_domains_numa_masks, NULL);
	if (distances || masks) {
		int i, j;

		synchronize_rcu();
		kfree(distances);
		kfree(dom_distances);
		for (i = 0; i < nr_levels && masks; i++) {
			if (!masks[i])
				continue;
			for_each_node(j)
				kfree(masks[i][j]);
			kfree(masks[i]);
		}
		kfree(masks);
	}
	if (sched_domain_topology_saved) {
		kfree(sched_domain_topology);
		sched_domain_topology = sched_domain_topology_saved;
		sched_domain_topology_saved = NULL;
	}
}

/*
 * Call with hotplug lock held
 */
void sched_update_numa(int cpu, bool online)
{
	int node;

	node = cpu_to_node(cpu);
	/*
	 * Scheduler NUMA topology is updated when the first CPU of a
	 * node is onlined or the last CPU of a node is offlined.
	 */
	if (cpumask_weight(cpumask_of_node(node)) != 1)
		return;

	sched_reset_numa();
	sched_init_numa(online ? NUMA_NO_NODE : node);
}

/* Add an onlined @cpu to every hop mask that should contain it. */
void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (!node_state(j, N_CPU))
				continue;

			/* Set ourselves in the remote node's masks */
			if (arch_sched_node_distance(j, node) <=
			    sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

/* Remove an offlined @cpu from all hop masks. */
void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (sched_domains_numa_masks[i][j])
				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

/*
 * sched_numa_find_closest() - given the NUMA topology, find the cpu
 * closest to @cpu from @cpumask.
 * cpumask: cpumask to find a cpu from
 * cpu: cpu to be close to
 *
 * returns: cpu, or nr_cpu_ids when nothing found.
 */
int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
	struct cpumask ***masks;

	rcu_read_lock();
	masks = rcu_dereference(sched_domains_numa_masks);
	if (!masks)
		goto unlock;
	/* Widen the search hop by hop until a matching CPU is found. */
	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (!masks[i][j])
			break;
		cpu = cpumask_any_and_distribute(cpus, masks[i][j]);
		if (cpu < nr_cpu_ids) {
			found = cpu;
			break;
		}
	}
unlock:
	rcu_read_unlock();

	return found;
}

/* Search key for hop_cmp(); 'w' carries a result out of the comparator. */
struct __cmp_key {
	const struct cpumask *cpus;
	struct cpumask ***masks;
	int node;
	int cpu;
	int w;
};

/*
 * bsearch() comparator locating the first hop level whose mask contains
 * more than k->cpu CPUs of k->cpus; on a match it stores the previous
 * level's weight in k->w for the caller.
 */
static int hop_cmp(const void *a, const void *b)
{
	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
	struct __cmp_key *k = (struct __cmp_key *)a;

	/* Not enough CPUs at this hop yet: search higher levels. */
	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
		return 1;

	/* First level: nothing below it, so it is the answer. */
	if (b == k->masks) {
		k->w = 0;
		return 0;
	}

	prev_hop = *((struct cpumask ***)b - 1);
	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
	if (k->w <= k->cpu)
		return 0;

	return -1;
}

/**
 * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth closest CPU
 *                             from @cpus to @cpu, taking into account distance
 *                             from a given @node.
 * @cpus: cpumask to find a cpu from
 * @cpu: CPU to start searching
 * @node: NUMA node to order CPUs by distance
 *
 * Return: cpu, or nr_cpu_ids when nothing found.
2309 */ 2310 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) 2311 { 2312 struct __cmp_key k = { .cpus = cpus, .cpu = cpu }; 2313 struct cpumask ***hop_masks; 2314 int hop, ret = nr_cpu_ids; 2315 2316 if (node == NUMA_NO_NODE) 2317 return cpumask_nth_and(cpu, cpus, cpu_online_mask); 2318 2319 rcu_read_lock(); 2320 2321 /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ 2322 node = numa_nearest_node(node, N_CPU); 2323 k.node = node; 2324 2325 k.masks = rcu_dereference(sched_domains_numa_masks); 2326 if (!k.masks) 2327 goto unlock; 2328 2329 hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp); 2330 if (!hop_masks) 2331 goto unlock; 2332 hop = hop_masks - k.masks; 2333 2334 ret = hop ? 2335 cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) : 2336 cpumask_nth_and(cpu, cpus, k.masks[0][node]); 2337 unlock: 2338 rcu_read_unlock(); 2339 return ret; 2340 } 2341 EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu); 2342 2343 /** 2344 * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from 2345 * @node 2346 * @node: The node to count hops from. 2347 * @hops: Include CPUs up to that many hops away. 0 means local node. 2348 * 2349 * Return: On success, a pointer to a cpumask of CPUs at most @hops away from 2350 * @node, an error value otherwise. 2351 * 2352 * Requires rcu_lock to be held. Returned cpumask is only valid within that 2353 * read-side section, copy it if required beyond that. 2354 * 2355 * Note that not all hops are equal in distance; see sched_init_numa() for how 2356 * distances and masks are handled. 2357 * Also note that this is a reflection of sched_domains_numa_masks, which may change 2358 * during the lifetime of the system (offline nodes are taken out of the masks). 
2359 */ 2360 const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops) 2361 { 2362 struct cpumask ***masks; 2363 2364 if (node >= nr_node_ids || hops >= sched_domains_numa_levels) 2365 return ERR_PTR(-EINVAL); 2366 2367 masks = rcu_dereference(sched_domains_numa_masks); 2368 if (!masks) 2369 return ERR_PTR(-EBUSY); 2370 2371 return masks[hops][node]; 2372 } 2373 EXPORT_SYMBOL_GPL(sched_numa_hop_mask); 2374 2375 #endif /* CONFIG_NUMA */ 2376 2377 static int __sdt_alloc(const struct cpumask *cpu_map) 2378 { 2379 struct sched_domain_topology_level *tl; 2380 int j; 2381 2382 for_each_sd_topology(tl) { 2383 struct sd_data *sdd = &tl->data; 2384 2385 sdd->sd = alloc_percpu(struct sched_domain *); 2386 if (!sdd->sd) 2387 return -ENOMEM; 2388 2389 sdd->sg = alloc_percpu(struct sched_group *); 2390 if (!sdd->sg) 2391 return -ENOMEM; 2392 2393 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 2394 if (!sdd->sgc) 2395 return -ENOMEM; 2396 2397 for_each_cpu(j, cpu_map) { 2398 struct sched_domain *sd; 2399 struct sched_group *sg; 2400 struct sched_group_capacity *sgc; 2401 2402 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 2403 GFP_KERNEL, cpu_to_node(j)); 2404 if (!sd) 2405 return -ENOMEM; 2406 2407 *per_cpu_ptr(sdd->sd, j) = sd; 2408 2409 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 2410 GFP_KERNEL, cpu_to_node(j)); 2411 if (!sg) 2412 return -ENOMEM; 2413 2414 sg->next = sg; 2415 2416 *per_cpu_ptr(sdd->sg, j) = sg; 2417 2418 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 2419 GFP_KERNEL, cpu_to_node(j)); 2420 if (!sgc) 2421 return -ENOMEM; 2422 2423 sgc->id = j; 2424 2425 *per_cpu_ptr(sdd->sgc, j) = sgc; 2426 } 2427 } 2428 2429 return 0; 2430 } 2431 2432 static void __sdt_free(const struct cpumask *cpu_map) 2433 { 2434 struct sched_domain_topology_level *tl; 2435 int j; 2436 2437 for_each_sd_topology(tl) { 2438 struct sd_data *sdd = &tl->data; 2439 2440 for_each_cpu(j, cpu_map) { 2441 
struct sched_domain *sd; 2442 2443 if (sdd->sd) { 2444 sd = *per_cpu_ptr(sdd->sd, j); 2445 if (sd && (sd->flags & SD_NUMA)) 2446 free_sched_groups(sd->groups, 0); 2447 kfree(*per_cpu_ptr(sdd->sd, j)); 2448 } 2449 2450 if (sdd->sg) 2451 kfree(*per_cpu_ptr(sdd->sg, j)); 2452 if (sdd->sgc) 2453 kfree(*per_cpu_ptr(sdd->sgc, j)); 2454 } 2455 free_percpu(sdd->sd); 2456 sdd->sd = NULL; 2457 free_percpu(sdd->sg); 2458 sdd->sg = NULL; 2459 free_percpu(sdd->sgc); 2460 sdd->sgc = NULL; 2461 } 2462 } 2463 2464 static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map) 2465 { 2466 int j; 2467 2468 d->sds = alloc_percpu(struct sched_domain_shared *); 2469 if (!d->sds) 2470 return -ENOMEM; 2471 2472 for_each_cpu(j, cpu_map) { 2473 struct sched_domain_shared *sds; 2474 2475 sds = kzalloc_node(sizeof(struct sched_domain_shared), 2476 GFP_KERNEL, cpu_to_node(j)); 2477 if (!sds) 2478 return -ENOMEM; 2479 2480 *per_cpu_ptr(d->sds, j) = sds; 2481 } 2482 2483 return 0; 2484 } 2485 2486 static void __sds_free(struct s_data *d, const struct cpumask *cpu_map) 2487 { 2488 int j; 2489 2490 if (!d->sds) 2491 return; 2492 2493 for_each_cpu(j, cpu_map) 2494 kfree(*per_cpu_ptr(d->sds, j)); 2495 2496 free_percpu(d->sds); 2497 d->sds = NULL; 2498 } 2499 2500 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 2501 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 2502 struct sched_domain *child, int cpu) 2503 { 2504 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); 2505 2506 if (child) { 2507 sd->level = child->level + 1; 2508 sched_domain_level_max = max(sched_domain_level_max, sd->level); 2509 child->parent = sd; 2510 2511 if (!cpumask_subset(sched_domain_span(child), 2512 sched_domain_span(sd))) { 2513 pr_err("BUG: arch topology borken\n"); 2514 pr_err(" the %s domain not a subset of the %s domain\n", 2515 child->name, sd->name); 2516 /* Fixup, ensure @sd has at least @child CPUs. 
*/ 2517 cpumask_or(sched_domain_span(sd), 2518 sched_domain_span(sd), 2519 sched_domain_span(child)); 2520 } 2521 2522 } 2523 set_domain_attribute(sd, attr); 2524 2525 return sd; 2526 } 2527 2528 /* 2529 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for 2530 * any two given CPUs on non-NUMA topology levels. 2531 */ 2532 static bool topology_span_sane(const struct cpumask *cpu_map) 2533 { 2534 struct sched_domain_topology_level *tl; 2535 struct cpumask *covered, *id_seen; 2536 int cpu; 2537 2538 lockdep_assert_held(&sched_domains_mutex); 2539 covered = sched_domains_tmpmask; 2540 id_seen = sched_domains_tmpmask2; 2541 2542 for_each_sd_topology(tl) { 2543 int tl_common_flags = 0; 2544 2545 if (tl->sd_flags) 2546 tl_common_flags = (*tl->sd_flags)(); 2547 2548 /* NUMA levels are allowed to overlap */ 2549 if (tl_common_flags & SD_NUMA) 2550 continue; 2551 2552 cpumask_clear(covered); 2553 cpumask_clear(id_seen); 2554 2555 /* 2556 * Non-NUMA levels cannot partially overlap - they must be either 2557 * completely equal or completely disjoint. Otherwise we can end up 2558 * breaking the sched_group lists - i.e. a later get_group() pass 2559 * breaks the linking done for an earlier span. 
2560 */ 2561 for_each_cpu(cpu, cpu_map) { 2562 const struct cpumask *tl_cpu_mask = tl->mask(tl, cpu); 2563 int id; 2564 2565 /* lowest bit set in this mask is used as a unique id */ 2566 id = cpumask_first(tl_cpu_mask); 2567 2568 if (cpumask_test_cpu(id, id_seen)) { 2569 /* First CPU has already been seen, ensure identical spans */ 2570 if (!cpumask_equal(tl->mask(tl, id), tl_cpu_mask)) 2571 return false; 2572 } else { 2573 /* First CPU hasn't been seen before, ensure it's a completely new span */ 2574 if (cpumask_intersects(tl_cpu_mask, covered)) 2575 return false; 2576 2577 cpumask_or(covered, covered, tl_cpu_mask); 2578 cpumask_set_cpu(id, id_seen); 2579 } 2580 } 2581 } 2582 return true; 2583 } 2584 2585 /* 2586 * Calculate an allowed NUMA imbalance such that LLCs do not get 2587 * imbalanced. 2588 */ 2589 static void adjust_numa_imbalance(struct sched_domain *sd_llc) 2590 { 2591 struct sched_domain *parent; 2592 unsigned int imb_span = 1; 2593 unsigned int imb = 0; 2594 unsigned int nr_llcs; 2595 2596 WARN_ON(!(sd_llc->flags & SD_SHARE_LLC)); 2597 WARN_ON(!sd_llc->parent); 2598 2599 /* 2600 * For a single LLC per node, allow an 2601 * imbalance up to 12.5% of the node. This is 2602 * arbitrary cutoff based two factors -- SMT and 2603 * memory channels. For SMT-2, the intent is to 2604 * avoid premature sharing of HT resources but 2605 * SMT-4 or SMT-8 *may* benefit from a different 2606 * cutoff. For memory channels, this is a very 2607 * rough estimate of how many channels may be 2608 * active and is based on recent CPUs with 2609 * many cores. 2610 * 2611 * For multiple LLCs, allow an imbalance 2612 * until multiple tasks would share an LLC 2613 * on one node while LLCs on another node 2614 * remain idle. This assumes that there are 2615 * enough logical CPUs per LLC to avoid SMT 2616 * factors and that there is a correlation 2617 * between LLCs and memory channels. 
2618 */ 2619 nr_llcs = sd_llc->parent->span_weight / sd_llc->span_weight; 2620 if (nr_llcs == 1) 2621 imb = sd_llc->parent->span_weight >> 3; 2622 else 2623 imb = nr_llcs; 2624 2625 imb = max(1U, imb); 2626 sd_llc->parent->imb_numa_nr = imb; 2627 2628 /* 2629 * Set span based on the first NUMA domain. 2630 * 2631 * NUMA systems always add a NODE domain before 2632 * iterating the NUMA domains. Since this is before 2633 * degeneration, start from sd_llc's parent's 2634 * parent which is the lowest an SD_NUMA domain can 2635 * be relative to sd_llc. 2636 */ 2637 parent = sd_llc->parent->parent; 2638 while (parent && !(parent->flags & SD_NUMA)) 2639 parent = parent->parent; 2640 2641 imb_span = parent ? parent->span_weight : sd_llc->parent->span_weight; 2642 2643 /* Update the upper remainder of the topology */ 2644 parent = sd_llc->parent; 2645 while (parent) { 2646 int factor = max(1U, (parent->span_weight / imb_span)); 2647 2648 parent->imb_numa_nr = imb * factor; 2649 parent = parent->parent; 2650 } 2651 } 2652 2653 /* 2654 * Build sched domains for a given set of CPUs and attach the sched domains 2655 * to the individual CPUs 2656 */ 2657 static int 2658 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 2659 { 2660 enum s_alloc alloc_state = sa_none; 2661 struct sched_domain *sd; 2662 struct s_data d; 2663 struct rq *rq = NULL; 2664 int i, ret = -ENOMEM; 2665 bool has_asym = false; 2666 bool has_cluster = false; 2667 2668 if (WARN_ON(cpumask_empty(cpu_map))) 2669 goto error; 2670 2671 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 2672 if (alloc_state != sa_rootdomain) 2673 goto error; 2674 2675 /* Set up domains for CPUs specified by the cpu_map: */ 2676 for_each_cpu(i, cpu_map) { 2677 struct sched_domain_topology_level *tl; 2678 2679 sd = NULL; 2680 for_each_sd_topology(tl) { 2681 2682 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 2683 2684 has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; 2685 2686 if (tl == 
sched_domain_topology) 2687 *per_cpu_ptr(d.sd, i) = sd; 2688 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 2689 break; 2690 } 2691 } 2692 2693 if (WARN_ON(!topology_span_sane(cpu_map))) 2694 goto error; 2695 2696 /* Build the groups for the domains */ 2697 for_each_cpu(i, cpu_map) { 2698 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2699 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 2700 if (sd->flags & SD_NUMA) { 2701 if (build_overlap_sched_groups(sd, i)) 2702 goto error; 2703 } else { 2704 if (build_sched_groups(sd, i)) 2705 goto error; 2706 } 2707 } 2708 } 2709 2710 for_each_cpu(i, cpu_map) { 2711 sd = *per_cpu_ptr(d.sd, i); 2712 if (!sd) 2713 continue; 2714 2715 /* First, find the topmost SD_SHARE_LLC domain */ 2716 while (sd->parent && (sd->parent->flags & SD_SHARE_LLC)) 2717 sd = sd->parent; 2718 2719 if (sd->flags & SD_SHARE_LLC) { 2720 int sd_id = cpumask_first(sched_domain_span(sd)); 2721 2722 sd->shared = *per_cpu_ptr(d.sds, sd_id); 2723 atomic_set(&sd->shared->nr_busy_cpus, sd->span_weight); 2724 atomic_inc(&sd->shared->ref); 2725 2726 /* 2727 * In presence of higher domains, adjust the 2728 * NUMA imbalance stats for the hierarchy. 
2729 */ 2730 if (IS_ENABLED(CONFIG_NUMA) && sd->parent) 2731 adjust_numa_imbalance(sd); 2732 } 2733 } 2734 2735 /* Calculate CPU capacity for physical packages and nodes */ 2736 for (i = nr_cpumask_bits-1; i >= 0; i--) { 2737 if (!cpumask_test_cpu(i, cpu_map)) 2738 continue; 2739 2740 claim_allocations(i, &d); 2741 2742 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) 2743 init_sched_groups_capacity(i, sd); 2744 } 2745 2746 /* Attach the domains */ 2747 rcu_read_lock(); 2748 for_each_cpu(i, cpu_map) { 2749 rq = cpu_rq(i); 2750 sd = *per_cpu_ptr(d.sd, i); 2751 2752 cpu_attach_domain(sd, d.rd, i); 2753 2754 if (lowest_flag_domain(i, SD_CLUSTER)) 2755 has_cluster = true; 2756 } 2757 rcu_read_unlock(); 2758 2759 if (has_asym) 2760 static_branch_inc_cpuslocked(&sched_asym_cpucapacity); 2761 2762 if (has_cluster) 2763 static_branch_inc_cpuslocked(&sched_cluster_active); 2764 2765 if (rq && sched_debug_verbose) 2766 pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map)); 2767 2768 ret = 0; 2769 error: 2770 __free_domain_allocs(&d, alloc_state, cpu_map); 2771 2772 return ret; 2773 } 2774 2775 /* Current sched domains: */ 2776 static cpumask_var_t *doms_cur; 2777 2778 /* Number of sched domains in 'doms_cur': */ 2779 static int ndoms_cur; 2780 2781 /* Attributes of custom domains in 'doms_cur' */ 2782 static struct sched_domain_attr *dattr_cur; 2783 2784 /* 2785 * Special case: If a kmalloc() of a doms_cur partition (array of 2786 * cpumask) fails, then fallback to a single sched domain, 2787 * as determined by the single cpumask fallback_doms. 2788 */ 2789 static cpumask_var_t fallback_doms; 2790 2791 /* 2792 * arch_update_cpu_topology lets virtualized architectures update the 2793 * CPU core maps. It is supposed to return 1 if the topology changed 2794 * or 0 if it stayed the same. 
2795 */ 2796 int __weak arch_update_cpu_topology(void) 2797 { 2798 return 0; 2799 } 2800 2801 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 2802 { 2803 int i; 2804 cpumask_var_t *doms; 2805 2806 doms = kmalloc_objs(*doms, ndoms); 2807 if (!doms) 2808 return NULL; 2809 for (i = 0; i < ndoms; i++) { 2810 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 2811 free_sched_domains(doms, i); 2812 return NULL; 2813 } 2814 } 2815 return doms; 2816 } 2817 2818 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 2819 { 2820 unsigned int i; 2821 for (i = 0; i < ndoms; i++) 2822 free_cpumask_var(doms[i]); 2823 kfree(doms); 2824 } 2825 2826 /* 2827 * Set up scheduler domains and groups. For now this just excludes isolated 2828 * CPUs, but could be used to exclude other special cases in the future. 2829 */ 2830 int __init sched_init_domains(const struct cpumask *cpu_map) 2831 { 2832 int err; 2833 2834 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL); 2835 zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL); 2836 zalloc_cpumask_var(&fallback_doms, GFP_KERNEL); 2837 2838 arch_update_cpu_topology(); 2839 asym_cpu_capacity_scan(); 2840 ndoms_cur = 1; 2841 doms_cur = alloc_sched_domains(ndoms_cur); 2842 if (!doms_cur) 2843 doms_cur = &fallback_doms; 2844 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); 2845 err = build_sched_domains(doms_cur[0], NULL); 2846 2847 return err; 2848 } 2849 2850 /* 2851 * Detach sched domains from a group of CPUs specified in cpu_map 2852 * These CPUs will now be attached to the NULL domain 2853 */ 2854 static void detach_destroy_domains(const struct cpumask *cpu_map) 2855 { 2856 unsigned int cpu = cpumask_any(cpu_map); 2857 int i; 2858 2859 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) 2860 static_branch_dec_cpuslocked(&sched_asym_cpucapacity); 2861 2862 if (static_branch_unlikely(&sched_cluster_active)) 2863 static_branch_dec_cpuslocked(&sched_cluster_active); 2864 2865 rcu_read_lock(); 
2866 for_each_cpu(i, cpu_map) 2867 cpu_attach_domain(NULL, &def_root_domain, i); 2868 rcu_read_unlock(); 2869 } 2870 2871 /* handle null as "default" */ 2872 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 2873 struct sched_domain_attr *new, int idx_new) 2874 { 2875 struct sched_domain_attr tmp; 2876 2877 /* Fast path: */ 2878 if (!new && !cur) 2879 return 1; 2880 2881 tmp = SD_ATTR_INIT; 2882 2883 return !memcmp(cur ? (cur + idx_cur) : &tmp, 2884 new ? (new + idx_new) : &tmp, 2885 sizeof(struct sched_domain_attr)); 2886 } 2887 2888 /* 2889 * Partition sched domains as specified by the 'ndoms_new' 2890 * cpumasks in the array doms_new[] of cpumasks. This compares 2891 * doms_new[] to the current sched domain partitioning, doms_cur[]. 2892 * It destroys each deleted domain and builds each new domain. 2893 * 2894 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 2895 * The masks don't intersect (don't overlap.) We should setup one 2896 * sched domain for each mask. CPUs not in any of the cpumasks will 2897 * not be load balanced. If the same cpumask appears both in the 2898 * current 'doms_cur' domains and in the new 'doms_new', we can leave 2899 * it as it is. 2900 * 2901 * The passed in 'doms_new' should be allocated using 2902 * alloc_sched_domains. This routine takes ownership of it and will 2903 * free_sched_domains it when done with it. If the caller failed the 2904 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 2905 * and partition_sched_domains() will fallback to the single partition 2906 * 'fallback_doms', it also forces the domains to be rebuilt. 2907 * 2908 * If doms_new == NULL it will be replaced with cpu_online_mask. 2909 * ndoms_new == 0 is a special case for destroying existing domains, 2910 * and it will not create the default domain. 
2911 * 2912 * Call with hotplug lock and sched_domains_mutex held 2913 */ 2914 static void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], 2915 struct sched_domain_attr *dattr_new) 2916 { 2917 bool __maybe_unused has_eas = false; 2918 int i, j, n; 2919 int new_topology; 2920 2921 lockdep_assert_held(&sched_domains_mutex); 2922 2923 /* Let the architecture update CPU core mappings: */ 2924 new_topology = arch_update_cpu_topology(); 2925 /* Trigger rebuilding CPU capacity asymmetry data */ 2926 if (new_topology) 2927 asym_cpu_capacity_scan(); 2928 2929 if (!doms_new) { 2930 WARN_ON_ONCE(dattr_new); 2931 n = 0; 2932 doms_new = alloc_sched_domains(1); 2933 if (doms_new) { 2934 n = 1; 2935 cpumask_and(doms_new[0], cpu_active_mask, 2936 housekeeping_cpumask(HK_TYPE_DOMAIN)); 2937 } 2938 } else { 2939 n = ndoms_new; 2940 } 2941 2942 /* Destroy deleted domains: */ 2943 for (i = 0; i < ndoms_cur; i++) { 2944 for (j = 0; j < n && !new_topology; j++) { 2945 if (cpumask_equal(doms_cur[i], doms_new[j]) && 2946 dattrs_equal(dattr_cur, i, dattr_new, j)) 2947 goto match1; 2948 } 2949 /* No match - a current sched domain not in new doms_new[] */ 2950 detach_destroy_domains(doms_cur[i]); 2951 match1: 2952 ; 2953 } 2954 2955 n = ndoms_cur; 2956 if (!doms_new) { 2957 n = 0; 2958 doms_new = &fallback_doms; 2959 cpumask_and(doms_new[0], cpu_active_mask, 2960 housekeeping_cpumask(HK_TYPE_DOMAIN)); 2961 } 2962 2963 /* Build new domains: */ 2964 for (i = 0; i < ndoms_new; i++) { 2965 for (j = 0; j < n && !new_topology; j++) { 2966 if (cpumask_equal(doms_new[i], doms_cur[j]) && 2967 dattrs_equal(dattr_new, i, dattr_cur, j)) 2968 goto match2; 2969 } 2970 /* No match - add a new doms_new */ 2971 build_sched_domains(doms_new[i], dattr_new ? 
dattr_new + i : NULL); 2972 match2: 2973 ; 2974 } 2975 2976 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2977 /* Build perf domains: */ 2978 for (i = 0; i < ndoms_new; i++) { 2979 for (j = 0; j < n && !sched_energy_update; j++) { 2980 if (cpumask_equal(doms_new[i], doms_cur[j]) && 2981 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { 2982 has_eas = true; 2983 goto match3; 2984 } 2985 } 2986 /* No match - add perf domains for a new rd */ 2987 has_eas |= build_perf_domains(doms_new[i]); 2988 match3: 2989 ; 2990 } 2991 sched_energy_set(has_eas); 2992 #endif 2993 2994 /* Remember the new sched domains: */ 2995 if (doms_cur != &fallback_doms) 2996 free_sched_domains(doms_cur, ndoms_cur); 2997 2998 kfree(dattr_cur); 2999 doms_cur = doms_new; 3000 dattr_cur = dattr_new; 3001 ndoms_cur = ndoms_new; 3002 3003 update_sched_domain_debugfs(); 3004 dl_rebuild_rd_accounting(); 3005 } 3006 3007 /* 3008 * Call with hotplug lock held 3009 */ 3010 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 3011 struct sched_domain_attr *dattr_new) 3012 { 3013 sched_domains_mutex_lock(); 3014 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 3015 sched_domains_mutex_unlock(); 3016 } 3017