// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */

#include <linux/bsearch.h>

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_verbose = true;

	return 0;
}
early_param("sched_verbose", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_verbose;
}

#define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
const struct sd_flag_debug sd_flag_debug[] = {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	unsigned long flags = sd->flags;
	unsigned int idx;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		unsigned int flag = BIT(idx);
		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;

		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
		    !(sd->child->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
			       sd_flag_debug[idx].name);

		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
		    !(sd->parent->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
			       sd_flag_debug[idx].name);
	}

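	/* Walk the circular group list, printing each group and flagging inconsistencies. */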
	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (cpumask_empty(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

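/*
 * Print every level of the sched_domain hierarchy attached to @cpu, walking
 * up the ->parent chain; a no-op unless sched_debug_verbose is set.
 */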
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_verbose)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_verbose 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

/* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
static const unsigned int SD_DEGENERATE_GROUPS_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
	    (sd->groups != sd->groups->next))
		return 0;

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next)
		pflags &= ~SD_DEGENERATE_GROUPS_MASK;

	if (~cflags & pflags)
		return 0;

	return 1;
}

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
static unsigned int sysctl_sched_energy_aware = 1;
static DEFINE_MUTEX(sched_energy_mutex);
static bool sched_energy_update;

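/*
 * Check whether EAS can be used for the CPUs in @cpu_mask: there must be an
 * asymmetric CPU capacity topology, SMT must not be active, frequency
 * invariance must be supported, and schedutil must be the cpufreq governor
 * on every CPU.
 */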
static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
{
	bool any_asym_capacity = false;
	struct cpufreq_policy *policy;
	struct cpufreq_governor *gov;
	int i;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	for_each_cpu(i, cpu_mask) {
		if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, i))) {
			any_asym_capacity = true;
			break;
		}
	}
	if (!any_asym_capacity) {
		if (sched_debug()) {
			pr_info("rd %*pbl: Checking EAS, CPUs do not have asymmetric capacities\n",
				cpumask_pr_args(cpu_mask));
		}
		return false;
	}

	/* EAS definitely does *not* handle SMT */
	if (sched_smt_active()) {
		if (sched_debug()) {
			pr_info("rd %*pbl: Checking EAS, SMT is not supported\n",
				cpumask_pr_args(cpu_mask));
		}
		return false;
	}

	if (!arch_scale_freq_invariant()) {
		if (sched_debug()) {
			pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported",
				cpumask_pr_args(cpu_mask));
		}
		return false;
	}

	/* Do not attempt EAS if schedutil is not being used. */
	for_each_cpu(i, cpu_mask) {
		policy = cpufreq_cpu_get(i);
		if (!policy) {
			if (sched_debug()) {
				pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d",
					cpumask_pr_args(cpu_mask), i);
			}
			return false;
		}
		gov = policy->governor;
		cpufreq_cpu_put(policy);
		if (gov != &schedutil_gov) {
			if (sched_debug()) {
				pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n",
					cpumask_pr_args(cpu_mask));
			}
			return false;
		}
	}

	return true;
}

void rebuild_sched_domains_energy(void)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}

#ifdef CONFIG_PROC_SYSCTL
static int sched_energy_aware_handler(const struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, state;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!sched_is_eas_possible(cpu_active_mask)) {
		if (write) {
			return -EOPNOTSUPP;
		} else {
			*lenp = 0;
			return 0;
		}
	}

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		state = static_branch_unlikely(&sched_energy_present);
		if (state != sysctl_sched_energy_aware)
			rebuild_sched_domains_energy();
	}

	return ret;
}

static struct ctl_table sched_energy_aware_sysctls[] = {
	{
		.procname	= "sched_energy_aware",
		.data		= &sysctl_sched_energy_aware,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_energy_aware_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init sched_energy_aware_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_energy_aware_sysctls);
	return 0;
}

late_initcall(sched_energy_aware_sysctl_init);
#endif

static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
			      struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
				cpumask_first(perf_domain_span(pd)),
				cpumask_pr_args(perf_domain_span(pd)),
				em_pd_nr_perf_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

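/*
 * Flip the sched_energy_present static key to match @has_eas, logging the
 * transition when verbose scheduler debugging is enabled.
 */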
static void sched_energy_set(bool has_eas)
{
	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: stopping EAS\n", __func__);
		static_branch_disable_cpuslocked(&sched_energy_present);
	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: starting EAS\n", __func__);
		static_branch_enable_cpuslocked(&sched_energy_present);
	}
}

/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. no SMT is detected;
 *    4. schedutil is driving the frequency of all CPUs of the rd;
 *    5. frequency invariance support is present.
 */
static bool build_perf_domains(const struct cpumask *cpu_map)
{
	int i;
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;

	if (!sysctl_sched_energy_aware)
		goto free;

	if (!sched_is_eas_possible(cpu_map))
		goto free;

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return !!pd;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	free_pd(rd->pd);
	kfree(rd);
}

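/*
 * Attach @rq to root domain @rd, releasing the reference on @rq's previous
 * root domain; the old root domain is freed via RCU once its last reference
 * is dropped.
 */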
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	rq_unlock_irqrestore(rq, &rf);

	if (old_rd)
		call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu(&rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
#endif

	rd->visit_gen = 0;
	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void __init init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

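/*
 * Walk the circular list of groups starting at @sg, dropping a reference on
 * each group (and, if @free_sgc, on its sched_group_capacity) and freeing
 * whatever reaches a zero refcount.
 */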
static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references, an
	 * overlapping domain, having private groups, only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set
 * (Last Level Cache Domain), as this allows us to avoid some pointer chasing
 * in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in the cpumask
 * of the domain); this allows us to quickly tell if two CPUs are in the same
 * cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(int, sd_share_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);

DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_cluster_active);

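/*
 * Update this CPU's cached domain pointers and IDs (sd_llc, sd_llc_size,
 * sd_llc_id, sd_share_id, sd_numa, sd_asym_packing, sd_asym_cpucapacity)
 * from the sched_domain hierarchy currently attached to it.
 */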
static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_LLC);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_CLUSTER);
	if (sd)
		id = cpumask_first(sched_domain_span(sd));

	/*
	 * This assignment should be placed after the sd_llc_id as
	 * we want this id to equal the cluster id on cluster machines
	 * but the LLC id on non-cluster machines.
	 */
	per_cpu(sd_share_id, cpu) = id;

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;

			if (parent->parent) {
				parent->parent->child = tmp;
				parent->parent->groups->flags = tmp->flags;
			}

			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd) {
			struct sched_group *sg = sd->groups;

			/*
			 * sched groups hold the flags of the child sched
			 * domain for convenience. Clear such flags since
			 * the child is being destroyed.
			 */
			do {
				sg->flags = 0;
			} while (sg != sd->groups);

			sd->child = NULL;
		}
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask is all those CPUs that could actually end up at this
 * group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 *
 * As can be seen; things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric: nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the number of
 * sched_domains is not the same for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3		0-3		1-3
 *
 * NUMA-0	0		1		2		3
 *
 */


/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}

/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics having the groups node local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child) {
		cpumask_copy(sg_span, sched_domain_span(sd->child));
		sg->flags = sd->child->flags;
	} else {
		cpumask_copy(sg_span, sched_domain_span(sd));
	}

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first(mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}

static struct sched_domain *
find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
{
	/*
	 * The proper descendant would be the one whose child won't span out
	 * of sd
	 */
	while (sibling->child &&
	       !cpumask_subset(sched_domain_span(sibling->child),
			       sched_domain_span(sd)))
		sibling = sibling->child;

	/*
	 * As we are referencing sgc across different topology levels, we need
	 * to go down to skip those sched_domains which don't contribute to
	 * scheduling because they will be degenerated in cpu_attach_domain()
	 */
	while (sibling->child &&
	       cpumask_equal(sched_domain_span(sibling->child),
			     sched_domain_span(sibling)))
		sibling = sibling->child;

	return sibling;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		/*
		 * Usually we build the sched_group from the sibling's child
		 * sched_domain. But for machines whose NUMA diameter is 3 or
		 * above, we move to build the sched_group from the sibling's
		 * proper descendant's child domain, because the sibling's
		 * child sched_domain would span out of the sched_domain
		 * being built, as below.
		 *
		 * Smallest diameter=3 topology is:
		 *
		 *   node   0   1   2   3
		 *     0:  10  20  30  40
		 *     1:  20  10  20  30
		 *     2:  30  20  10  20
		 *     3:  40  30  20  10
		 *
		 *   0 --- 1 --- 2 --- 3
		 *
		 * NUMA-3	0-3		N/A		N/A		0-3
		 *  groups:	{0-2},{1-3}					{1-3},{0-2}
		 *
		 * NUMA-2	0-2		0-3		0-3		1-3
		 *  groups:	{0-1},{1-3}	{0-2},{2-3}	{1-3},{0-1}	{2-3},{0-2}
		 *
		 * NUMA-1	0-1		0-2		1-3		2-3
		 *  groups:	{0},{1}		{1},{2},{0}	{2},{3},{1}	{3},{2}
		 *
		 * NUMA-0	0		1		2		3
		 *
		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
		 * group span isn't a subset of the domain span.
		 */
		if (sibling->child &&
		    !cpumask_subset(sched_domain_span(sibling->child), span))
			sibling = find_descended_sibling(sd, sibling);

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sibling, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (PKG)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * PKG  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * PKG  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * Since these topologies are exclusive by construction, that is, it is
 * impossible for an SMT thread to belong to multiple cores, and cores to
 * be part of multiple caches, there is a very clear and unique location
 * for each CPU in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set all 1s; all CPUs in a group will end up at _that_
 * group), we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */

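/*
 * Return the sched_group representing @cpu's child domain within the domain
 * described by @sdd; on the first visit its span and balance mask are
 * initialized from the child domain (or just @cpu at the lowest level), its
 * flags copied from the child, and its capacity set to defaults.
 */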
There is a very clear and unique location 118835a566e6SPeter Zijlstra * for each CPU in the hierarchy. 118935a566e6SPeter Zijlstra * 119035a566e6SPeter Zijlstra * Therefore computing a unique CPU for each group is trivial (the iteration 119135a566e6SPeter Zijlstra * mask is redundant and set all 1s; all CPUs in a group will end up at _that_ 119235a566e6SPeter Zijlstra * group), we can simply pick the first CPU in each group. 119335a566e6SPeter Zijlstra * 119435a566e6SPeter Zijlstra * 119535a566e6SPeter Zijlstra * [*] in other words, the first group of each domain is its child domain. 119635a566e6SPeter Zijlstra */ 119735a566e6SPeter Zijlstra 11980c0e776aSPeter Zijlstra static struct sched_group *get_group(int cpu, struct sd_data *sdd) 1199f2cb1360SIngo Molnar { 1200f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1201f2cb1360SIngo Molnar struct sched_domain *child = sd->child; 12020c0e776aSPeter Zijlstra struct sched_group *sg; 120367d4f6ffSValentin Schneider bool already_visited; 1204f2cb1360SIngo Molnar 1205f2cb1360SIngo Molnar if (child) 1206f2cb1360SIngo Molnar cpu = cpumask_first(sched_domain_span(child)); 1207f2cb1360SIngo Molnar 12080c0e776aSPeter Zijlstra sg = *per_cpu_ptr(sdd->sg, cpu); 12090c0e776aSPeter Zijlstra sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 1210f2cb1360SIngo Molnar 121167d4f6ffSValentin Schneider /* Increase refcounts for claim_allocations: */ 121267d4f6ffSValentin Schneider already_visited = atomic_inc_return(&sg->ref) > 1; 121367d4f6ffSValentin Schneider /* sgc visits should follow a similar trend as sg */ 121467d4f6ffSValentin Schneider WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); 121567d4f6ffSValentin Schneider 121667d4f6ffSValentin Schneider /* If we have already visited that group, it's already initialized. */ 121767d4f6ffSValentin Schneider if (already_visited) 121867d4f6ffSValentin Schneider return sg; 12190c0e776aSPeter Zijlstra 12200c0e776aSPeter Zijlstra if (child) { 1221ae4df9d6SPeter Zijlstra cpumask_copy(sched_group_span(sg), sched_domain_span(child)); 1222ae4df9d6SPeter Zijlstra cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); 122316d364baSRicardo Neri sg->flags = child->flags; 12240c0e776aSPeter Zijlstra } else { 1225ae4df9d6SPeter Zijlstra cpumask_set_cpu(cpu, sched_group_span(sg)); 1226e5c14b1fSPeter Zijlstra cpumask_set_cpu(cpu, group_balance_mask(sg)); 1227f2cb1360SIngo Molnar } 1228f2cb1360SIngo Molnar 1229ae4df9d6SPeter Zijlstra sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); 12300c0e776aSPeter Zijlstra sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 1231e3d6d0cbSMorten Rasmussen sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 12320c0e776aSPeter Zijlstra 12330c0e776aSPeter Zijlstra return sg; 1234f2cb1360SIngo Molnar } 1235f2cb1360SIngo Molnar 1236f2cb1360SIngo Molnar /* 1237f2cb1360SIngo Molnar * build_sched_groups will build a circular linked list of the groups 1238d8743230SValentin Schneider * covered by the given span, will set each group's ->cpumask correctly, 1239d8743230SValentin Schneider * and will initialize their ->sgc. 
1240f2cb1360SIngo Molnar * 1241f2cb1360SIngo Molnar * Assumes the sched_domain tree is fully constructed 1242f2cb1360SIngo Molnar */ 1243f2cb1360SIngo Molnar static int 1244f2cb1360SIngo Molnar build_sched_groups(struct sched_domain *sd, int cpu) 1245f2cb1360SIngo Molnar { 1246f2cb1360SIngo Molnar struct sched_group *first = NULL, *last = NULL; 1247f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 1248f2cb1360SIngo Molnar const struct cpumask *span = sched_domain_span(sd); 1249f2cb1360SIngo Molnar struct cpumask *covered; 1250f2cb1360SIngo Molnar int i; 1251f2cb1360SIngo Molnar 1252f2cb1360SIngo Molnar lockdep_assert_held(&sched_domains_mutex); 1253f2cb1360SIngo Molnar covered = sched_domains_tmpmask; 1254f2cb1360SIngo Molnar 1255f2cb1360SIngo Molnar cpumask_clear(covered); 1256f2cb1360SIngo Molnar 12570c0e776aSPeter Zijlstra for_each_cpu_wrap(i, span, cpu) { 1258f2cb1360SIngo Molnar struct sched_group *sg; 1259f2cb1360SIngo Molnar 1260f2cb1360SIngo Molnar if (cpumask_test_cpu(i, covered)) 1261f2cb1360SIngo Molnar continue; 1262f2cb1360SIngo Molnar 12630c0e776aSPeter Zijlstra sg = get_group(i, sdd); 1264f2cb1360SIngo Molnar 1265ae4df9d6SPeter Zijlstra cpumask_or(covered, covered, sched_group_span(sg)); 1266f2cb1360SIngo Molnar 1267f2cb1360SIngo Molnar if (!first) 1268f2cb1360SIngo Molnar first = sg; 1269f2cb1360SIngo Molnar if (last) 1270f2cb1360SIngo Molnar last->next = sg; 1271f2cb1360SIngo Molnar last = sg; 1272f2cb1360SIngo Molnar } 1273f2cb1360SIngo Molnar last->next = first; 12740c0e776aSPeter Zijlstra sd->groups = first; 1275f2cb1360SIngo Molnar 1276f2cb1360SIngo Molnar return 0; 1277f2cb1360SIngo Molnar } 1278f2cb1360SIngo Molnar 1279f2cb1360SIngo Molnar /* 1280f2cb1360SIngo Molnar * Initialize sched groups cpu_capacity. 1281f2cb1360SIngo Molnar * 1282f2cb1360SIngo Molnar * cpu_capacity indicates the capacity of a sched group, which is used while 1283f2cb1360SIngo Molnar * distributing the load between different sched groups in a sched domain. 1284f2cb1360SIngo Molnar * Typically cpu_capacity for all the groups in a sched domain will be the same 1285f2cb1360SIngo Molnar * unless there are asymmetries in the topology. If there are asymmetries, the 1286f2cb1360SIngo Molnar * group having more cpu_capacity will pick up more load compared to the 1287f2cb1360SIngo Molnar * group having less cpu_capacity.
1288f2cb1360SIngo Molnar */ 1289f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 1290f2cb1360SIngo Molnar { 1291f2cb1360SIngo Molnar struct sched_group *sg = sd->groups; 1292d24cb0d9STim C Chen struct cpumask *mask = sched_domains_tmpmask2; 1293f2cb1360SIngo Molnar 1294f2cb1360SIngo Molnar WARN_ON(!sg); 1295f2cb1360SIngo Molnar 1296f2cb1360SIngo Molnar do { 1297d24cb0d9STim C Chen int cpu, cores = 0, max_cpu = -1; 1298f2cb1360SIngo Molnar 1299ae4df9d6SPeter Zijlstra sg->group_weight = cpumask_weight(sched_group_span(sg)); 1300f2cb1360SIngo Molnar 1301d24cb0d9STim C Chen cpumask_copy(mask, sched_group_span(sg)); 1302d24cb0d9STim C Chen for_each_cpu(cpu, mask) { 1303d24cb0d9STim C Chen cores++; 1304d24cb0d9STim C Chen #ifdef CONFIG_SCHED_SMT 1305d24cb0d9STim C Chen cpumask_andnot(mask, mask, cpu_smt_mask(cpu)); 1306d24cb0d9STim C Chen #endif 1307d24cb0d9STim C Chen } 1308d24cb0d9STim C Chen sg->cores = cores; 1309d24cb0d9STim C Chen 1310f2cb1360SIngo Molnar if (!(sd->flags & SD_ASYM_PACKING)) 1311f2cb1360SIngo Molnar goto next; 1312f2cb1360SIngo Molnar 1313ae4df9d6SPeter Zijlstra for_each_cpu(cpu, sched_group_span(sg)) { 1314f2cb1360SIngo Molnar if (max_cpu < 0) 1315f2cb1360SIngo Molnar max_cpu = cpu; 1316f2cb1360SIngo Molnar else if (sched_asym_prefer(cpu, max_cpu)) 1317f2cb1360SIngo Molnar max_cpu = cpu; 1318f2cb1360SIngo Molnar } 1319f2cb1360SIngo Molnar sg->asym_prefer_cpu = max_cpu; 1320f2cb1360SIngo Molnar 1321f2cb1360SIngo Molnar next: 1322f2cb1360SIngo Molnar sg = sg->next; 1323f2cb1360SIngo Molnar } while (sg != sd->groups); 1324f2cb1360SIngo Molnar 1325f2cb1360SIngo Molnar if (cpu != group_balance_cpu(sg)) 1326f2cb1360SIngo Molnar return; 1327f2cb1360SIngo Molnar 1328f2cb1360SIngo Molnar update_group_capacity(sd, cpu); 1329f2cb1360SIngo Molnar } 1330f2cb1360SIngo Molnar 1331f2cb1360SIngo Molnar /* 1332c744dc4aSBeata Michalska * Set of available CPUs grouped by their corresponding capacities 1333c744dc4aSBeata Michalska * Each list entry contains a CPU mask reflecting CPUs that share the same 1334c744dc4aSBeata Michalska * capacity. 1335c744dc4aSBeata Michalska * The lifespan of data is unlimited. 1336c744dc4aSBeata Michalska */ 133777222b0dSQais Yousef LIST_HEAD(asym_cap_list); 1338c744dc4aSBeata Michalska 1339c744dc4aSBeata Michalska /* 1340c744dc4aSBeata Michalska * Verify whether there is any CPU capacity asymmetry in a given sched domain. 1341c744dc4aSBeata Michalska * Provides sd_flags reflecting the asymmetry scope. 1342c744dc4aSBeata Michalska */ 1343c744dc4aSBeata Michalska static inline int 1344c744dc4aSBeata Michalska asym_cpu_capacity_classify(const struct cpumask *sd_span, 1345c744dc4aSBeata Michalska const struct cpumask *cpu_map) 1346c744dc4aSBeata Michalska { 1347c744dc4aSBeata Michalska struct asym_cap_data *entry; 1348c744dc4aSBeata Michalska int count = 0, miss = 0; 1349c744dc4aSBeata Michalska 1350c744dc4aSBeata Michalska /* 1351c744dc4aSBeata Michalska * Count how many unique CPU capacities this domain spans across 1352c744dc4aSBeata Michalska * (compare sched_domain CPUs mask with ones representing available 1353c744dc4aSBeata Michalska * CPUs capacities). Take into account CPUs that might be offline: 1354c744dc4aSBeata Michalska * skip those. 
1355c744dc4aSBeata Michalska */ 1356c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) { 1357c744dc4aSBeata Michalska if (cpumask_intersects(sd_span, cpu_capacity_span(entry))) 1358c744dc4aSBeata Michalska ++count; 1359c744dc4aSBeata Michalska else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) 1360c744dc4aSBeata Michalska ++miss; 1361c744dc4aSBeata Michalska } 1362c744dc4aSBeata Michalska 1363c744dc4aSBeata Michalska WARN_ON_ONCE(!count && !list_empty(&asym_cap_list)); 1364c744dc4aSBeata Michalska 1365c744dc4aSBeata Michalska /* No asymmetry detected */ 1366c744dc4aSBeata Michalska if (count < 2) 1367c744dc4aSBeata Michalska return 0; 1368c744dc4aSBeata Michalska /* Some of the available CPU capacity values have not been detected */ 1369c744dc4aSBeata Michalska if (miss) 1370c744dc4aSBeata Michalska return SD_ASYM_CPUCAPACITY; 1371c744dc4aSBeata Michalska 1372c744dc4aSBeata Michalska /* Full asymmetry */ 1373c744dc4aSBeata Michalska return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL; 1374c744dc4aSBeata Michalska 1375c744dc4aSBeata Michalska } 1376c744dc4aSBeata Michalska 137777222b0dSQais Yousef static void free_asym_cap_entry(struct rcu_head *head) 137877222b0dSQais Yousef { 137977222b0dSQais Yousef struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu); 138077222b0dSQais Yousef kfree(entry); 138177222b0dSQais Yousef } 138277222b0dSQais Yousef 1383c744dc4aSBeata Michalska static inline void asym_cpu_capacity_update_data(int cpu) 1384c744dc4aSBeata Michalska { 1385c744dc4aSBeata Michalska unsigned long capacity = arch_scale_cpu_capacity(cpu); 138677222b0dSQais Yousef struct asym_cap_data *insert_entry = NULL; 138777222b0dSQais Yousef struct asym_cap_data *entry; 1388c744dc4aSBeata Michalska 138977222b0dSQais Yousef /* 139077222b0dSQais Yousef * Search if this capacity already exists. If not, track the entry 1391402de7fcSIngo Molnar * after which we should insert it, to keep the list sorted in descending order. 139277222b0dSQais Yousef */ 1393c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) { 1394c744dc4aSBeata Michalska if (capacity == entry->capacity) 1395c744dc4aSBeata Michalska goto done; 139677222b0dSQais Yousef else if (!insert_entry && capacity > entry->capacity) 139777222b0dSQais Yousef insert_entry = list_prev_entry(entry, link); 1398c744dc4aSBeata Michalska } 1399c744dc4aSBeata Michalska 1400c744dc4aSBeata Michalska entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL); 1401c744dc4aSBeata Michalska if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n")) 1402c744dc4aSBeata Michalska return; 1403c744dc4aSBeata Michalska entry->capacity = capacity; 140477222b0dSQais Yousef 140577222b0dSQais Yousef /* If NULL then the new capacity is the smallest, add last. */ 140677222b0dSQais Yousef if (!insert_entry) 140777222b0dSQais Yousef list_add_tail_rcu(&entry->link, &asym_cap_list); 140877222b0dSQais Yousef else 140977222b0dSQais Yousef list_add_rcu(&entry->link, &insert_entry->link); 1410c744dc4aSBeata Michalska done: 1411c744dc4aSBeata Michalska __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); 1412c744dc4aSBeata Michalska } 1413c744dc4aSBeata Michalska 1414c744dc4aSBeata Michalska /* 1415c744dc4aSBeata Michalska * Build up/update the list of CPUs grouped by their capacities. 1416c744dc4aSBeata Michalska * An update requires an explicit request to rebuild sched domains 1417c744dc4aSBeata Michalska * with state indicating CPU topology changes.
1418c744dc4aSBeata Michalska */ 1419c744dc4aSBeata Michalska static void asym_cpu_capacity_scan(void) 1420c744dc4aSBeata Michalska { 1421c744dc4aSBeata Michalska struct asym_cap_data *entry, *next; 1422c744dc4aSBeata Michalska int cpu; 1423c744dc4aSBeata Michalska 1424c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) 1425c744dc4aSBeata Michalska cpumask_clear(cpu_capacity_span(entry)); 1426c744dc4aSBeata Michalska 142704d4e665SFrederic Weisbecker for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) 1428c744dc4aSBeata Michalska asym_cpu_capacity_update_data(cpu); 1429c744dc4aSBeata Michalska 1430c744dc4aSBeata Michalska list_for_each_entry_safe(entry, next, &asym_cap_list, link) { 1431c744dc4aSBeata Michalska if (cpumask_empty(cpu_capacity_span(entry))) { 143277222b0dSQais Yousef list_del_rcu(&entry->link); 143377222b0dSQais Yousef call_rcu(&entry->rcu, free_asym_cap_entry); 1434c744dc4aSBeata Michalska } 1435c744dc4aSBeata Michalska } 1436c744dc4aSBeata Michalska 1437c744dc4aSBeata Michalska /* 1438c744dc4aSBeata Michalska * Only one capacity value has been detected i.e. this system is symmetric. 1439c744dc4aSBeata Michalska * No need to keep this data around. 1440c744dc4aSBeata Michalska */ 1441c744dc4aSBeata Michalska if (list_is_singular(&asym_cap_list)) { 1442c744dc4aSBeata Michalska entry = list_first_entry(&asym_cap_list, typeof(*entry), link); 144377222b0dSQais Yousef list_del_rcu(&entry->link); 144477222b0dSQais Yousef call_rcu(&entry->rcu, free_asym_cap_entry); 1445c744dc4aSBeata Michalska } 1446c744dc4aSBeata Michalska } 1447c744dc4aSBeata Michalska 1448c744dc4aSBeata Michalska /* 1449f2cb1360SIngo Molnar * Initializers for schedule domains 1450f2cb1360SIngo Molnar * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 1451f2cb1360SIngo Molnar */ 1452f2cb1360SIngo Molnar 1453f2cb1360SIngo Molnar static int default_relax_domain_level = -1; 1454f2cb1360SIngo Molnar int sched_domain_level_max; 1455f2cb1360SIngo Molnar 1456f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str) 1457f2cb1360SIngo Molnar { 1458f2cb1360SIngo Molnar if (kstrtoint(str, 0, &default_relax_domain_level)) 1459f2cb1360SIngo Molnar pr_warn("Unable to set relax_domain_level\n"); 1460f2cb1360SIngo Molnar 1461f2cb1360SIngo Molnar return 1; 1462f2cb1360SIngo Molnar } 1463f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level); 1464f2cb1360SIngo Molnar 1465f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd, 1466f2cb1360SIngo Molnar struct sched_domain_attr *attr) 1467f2cb1360SIngo Molnar { 1468f2cb1360SIngo Molnar int request; 1469f2cb1360SIngo Molnar 1470f2cb1360SIngo Molnar if (!attr || attr->relax_domain_level < 0) { 1471f2cb1360SIngo Molnar if (default_relax_domain_level < 0) 1472f2cb1360SIngo Molnar return; 1473f2cb1360SIngo Molnar request = default_relax_domain_level; 1474f2cb1360SIngo Molnar } else 1475f2cb1360SIngo Molnar request = attr->relax_domain_level; 14769ae7ab20SValentin Schneider 1477a1fd0b9dSVitalii Bursov if (sd->level >= request) { 1478f2cb1360SIngo Molnar /* Turn off idle balance on this domain: */ 1479f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1480f2cb1360SIngo Molnar } 1481f2cb1360SIngo Molnar } 1482f2cb1360SIngo Molnar 1483f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map); 1484f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map); 1485f2cb1360SIngo Molnar 1486f2cb1360SIngo Molnar 
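/*
 * Worked example for asym_cpu_capacity_scan() and asym_cpu_capacity_classify()
 * above, using hypothetical capacity values (an illustration only, not taken
 * from any particular platform):
 *
 * Assume an 8-CPU system where arch_scale_cpu_capacity() reports
 * CPUs 0-3: 512, CPUs 4-5: 768, CPUs 6-7: 1024.
 *
 * After asym_cpu_capacity_scan(), asym_cap_list is ordered by descending
 * capacity:
 *
 *   { 1024: 6-7 } -> { 768: 4-5 } -> { 512: 0-3 }
 *
 * asym_cpu_capacity_classify() with cpu_map = 0-7 would then return:
 *
 *   sd_span 0-3: count = 1, miss = 2 -> 0 (no asymmetry within the domain)
 *   sd_span 0-5: count = 2, miss = 1 -> SD_ASYM_CPUCAPACITY
 *   sd_span 0-7: count = 3, miss = 0 -> SD_ASYM_CPUCAPACITY |
 *                                       SD_ASYM_CPUCAPACITY_FULL
 */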
static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 1487f2cb1360SIngo Molnar const struct cpumask *cpu_map) 1488f2cb1360SIngo Molnar { 1489f2cb1360SIngo Molnar switch (what) { 1490f2cb1360SIngo Molnar case sa_rootdomain: 1491f2cb1360SIngo Molnar if (!atomic_read(&d->rd->refcount)) 1492f2cb1360SIngo Molnar free_rootdomain(&d->rd->rcu); 1493df561f66SGustavo A. R. Silva fallthrough; 1494f2cb1360SIngo Molnar case sa_sd: 1495f2cb1360SIngo Molnar free_percpu(d->sd); 1496df561f66SGustavo A. R. Silva fallthrough; 1497f2cb1360SIngo Molnar case sa_sd_storage: 1498f2cb1360SIngo Molnar __sdt_free(cpu_map); 1499df561f66SGustavo A. R. Silva fallthrough; 1500f2cb1360SIngo Molnar case sa_none: 1501f2cb1360SIngo Molnar break; 1502f2cb1360SIngo Molnar } 1503f2cb1360SIngo Molnar } 1504f2cb1360SIngo Molnar 1505f2cb1360SIngo Molnar static enum s_alloc 1506f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 1507f2cb1360SIngo Molnar { 1508f2cb1360SIngo Molnar memset(d, 0, sizeof(*d)); 1509f2cb1360SIngo Molnar 1510f2cb1360SIngo Molnar if (__sdt_alloc(cpu_map)) 1511f2cb1360SIngo Molnar return sa_sd_storage; 1512f2cb1360SIngo Molnar d->sd = alloc_percpu(struct sched_domain *); 1513f2cb1360SIngo Molnar if (!d->sd) 1514f2cb1360SIngo Molnar return sa_sd_storage; 1515f2cb1360SIngo Molnar d->rd = alloc_rootdomain(); 1516f2cb1360SIngo Molnar if (!d->rd) 1517f2cb1360SIngo Molnar return sa_sd; 151897fb7a0aSIngo Molnar 1519f2cb1360SIngo Molnar return sa_rootdomain; 1520f2cb1360SIngo Molnar } 1521f2cb1360SIngo Molnar 1522f2cb1360SIngo Molnar /* 1523f2cb1360SIngo Molnar * NULL the sd_data elements we've used to build the sched_domain and 1524f2cb1360SIngo Molnar * sched_group structure so that the subsequent __free_domain_allocs() 1525f2cb1360SIngo Molnar * will not free the data we're using. 1526f2cb1360SIngo Molnar */ 1527f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd) 1528f2cb1360SIngo Molnar { 1529f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 1530f2cb1360SIngo Molnar 1531f2cb1360SIngo Molnar WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 1532f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, cpu) = NULL; 1533f2cb1360SIngo Molnar 1534f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 1535f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, cpu) = NULL; 1536f2cb1360SIngo Molnar 1537f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 1538f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, cpu) = NULL; 1539f2cb1360SIngo Molnar 1540f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 1541f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, cpu) = NULL; 1542f2cb1360SIngo Molnar } 1543f2cb1360SIngo Molnar 1544f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1545f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type; 154697fb7a0aSIngo Molnar 154797fb7a0aSIngo Molnar static int sched_domains_numa_levels; 1548f2cb1360SIngo Molnar static int sched_domains_curr_level; 154997fb7a0aSIngo Molnar 155097fb7a0aSIngo Molnar int sched_max_numa_distance; 155197fb7a0aSIngo Molnar static int *sched_domains_numa_distance; 155297fb7a0aSIngo Molnar static struct cpumask ***sched_domains_numa_masks; 1553f2cb1360SIngo Molnar #endif 1554f2cb1360SIngo Molnar 1555f2cb1360SIngo Molnar /* 1556f2cb1360SIngo Molnar * SD_flags allowed in topology descriptions. 
1557f2cb1360SIngo Molnar * 1558f2cb1360SIngo Molnar * These flags are purely descriptive of the topology and do not prescribe 1559f2cb1360SIngo Molnar * behaviour. Behaviour is artificial and mapped in the below sd_init() 1560d654c8ddSAlex Shi * function. For details, see include/linux/sched/sd_flags.h. 1561f2cb1360SIngo Molnar * 1562d654c8ddSAlex Shi * SD_SHARE_CPUCAPACITY 156354de4427SAlex Shi * SD_SHARE_LLC 1564d654c8ddSAlex Shi * SD_CLUSTER 1565d654c8ddSAlex Shi * SD_NUMA 1566f2cb1360SIngo Molnar * 1567f2cb1360SIngo Molnar * Odd one out, which beside describing the topology has a quirk also 1568f2cb1360SIngo Molnar * prescribes the desired behaviour that goes along with it: 1569f2cb1360SIngo Molnar * 1570f2cb1360SIngo Molnar * SD_ASYM_PACKING - describes SMT quirks 1571f2cb1360SIngo Molnar */ 1572f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS \ 1573f2cb1360SIngo Molnar (SD_SHARE_CPUCAPACITY | \ 1574b95303e0SBarry Song SD_CLUSTER | \ 157554de4427SAlex Shi SD_SHARE_LLC | \ 1576f2cb1360SIngo Molnar SD_NUMA | \ 1577cfe7ddcbSValentin Schneider SD_ASYM_PACKING) 1578f2cb1360SIngo Molnar 1579f2cb1360SIngo Molnar static struct sched_domain * 1580f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl, 1581f2cb1360SIngo Molnar const struct cpumask *cpu_map, 1582c744dc4aSBeata Michalska struct sched_domain *child, int cpu) 1583f2cb1360SIngo Molnar { 1584f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1585f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1586f2cb1360SIngo Molnar int sd_id, sd_weight, sd_flags = 0; 1587c744dc4aSBeata Michalska struct cpumask *sd_span; 1588f2cb1360SIngo Molnar 1589f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1590f2cb1360SIngo Molnar /* 1591f2cb1360SIngo Molnar * Ugly hack to pass state to sd_numa_mask()... 
1592f2cb1360SIngo Molnar */ 1593f2cb1360SIngo Molnar sched_domains_curr_level = tl->numa_level; 1594f2cb1360SIngo Molnar #endif 1595f2cb1360SIngo Molnar 1596f2cb1360SIngo Molnar sd_weight = cpumask_weight(tl->mask(cpu)); 1597f2cb1360SIngo Molnar 1598f2cb1360SIngo Molnar if (tl->sd_flags) 1599f2cb1360SIngo Molnar sd_flags = (*tl->sd_flags)(); 1600f2cb1360SIngo Molnar if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 1601f2cb1360SIngo Molnar "wrong sd_flags in topology description\n")) 16029b1b234bSPeng Liu sd_flags &= TOPOLOGY_SD_FLAGS; 1603f2cb1360SIngo Molnar 1604f2cb1360SIngo Molnar *sd = (struct sched_domain){ 1605f2cb1360SIngo Molnar .min_interval = sd_weight, 1606f2cb1360SIngo Molnar .max_interval = 2*sd_weight, 16076e749913SVincent Guittot .busy_factor = 16, 16082208cdaaSVincent Guittot .imbalance_pct = 117, 1609f2cb1360SIngo Molnar 1610f2cb1360SIngo Molnar .cache_nice_tries = 0, 1611f2cb1360SIngo Molnar 161236c5bdc4SValentin Schneider .flags = 1*SD_BALANCE_NEWIDLE 1613f2cb1360SIngo Molnar | 1*SD_BALANCE_EXEC 1614f2cb1360SIngo Molnar | 1*SD_BALANCE_FORK 1615f2cb1360SIngo Molnar | 0*SD_BALANCE_WAKE 1616f2cb1360SIngo Molnar | 1*SD_WAKE_AFFINE 1617f2cb1360SIngo Molnar | 0*SD_SHARE_CPUCAPACITY 161854de4427SAlex Shi | 0*SD_SHARE_LLC 1619f2cb1360SIngo Molnar | 0*SD_SERIALIZE 16209c63e84dSMorten Rasmussen | 1*SD_PREFER_SIBLING 1621f2cb1360SIngo Molnar | 0*SD_NUMA 1622f2cb1360SIngo Molnar | sd_flags 1623f2cb1360SIngo Molnar , 1624f2cb1360SIngo Molnar 1625f2cb1360SIngo Molnar .last_balance = jiffies, 1626f2cb1360SIngo Molnar .balance_interval = sd_weight, 1627f2cb1360SIngo Molnar .max_newidle_lb_cost = 0, 1628e60b56e4SVincent Guittot .last_decay_max_lb_cost = jiffies, 1629f2cb1360SIngo Molnar .child = child, 1630f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 1631f2cb1360SIngo Molnar .name = tl->name, 1632f2cb1360SIngo Molnar #endif 1633f2cb1360SIngo Molnar }; 1634f2cb1360SIngo Molnar 1635c744dc4aSBeata Michalska sd_span = sched_domain_span(sd); 1636c744dc4aSBeata Michalska cpumask_and(sd_span, cpu_map, tl->mask(cpu)); 1637c744dc4aSBeata Michalska sd_id = cpumask_first(sd_span); 1638c744dc4aSBeata Michalska 1639c744dc4aSBeata Michalska sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); 1640c744dc4aSBeata Michalska 1641c744dc4aSBeata Michalska WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == 1642c744dc4aSBeata Michalska (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY), 1643c744dc4aSBeata Michalska "CPU capacity asymmetry not supported on SMT\n"); 1644f2cb1360SIngo Molnar 1645f2cb1360SIngo Molnar /* 1646f2cb1360SIngo Molnar * Convert topological properties into behaviour. 1647f2cb1360SIngo Molnar */ 1648a526d466SMorten Rasmussen /* Don't attempt to spread across CPUs of different capacities. 
*/ 1649a526d466SMorten Rasmussen if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) 16509c63e84dSMorten Rasmussen sd->child->flags &= ~SD_PREFER_SIBLING; 16519c63e84dSMorten Rasmussen 1652f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_CPUCAPACITY) { 1653f2cb1360SIngo Molnar sd->imbalance_pct = 110; 1654f2cb1360SIngo Molnar 165554de4427SAlex Shi } else if (sd->flags & SD_SHARE_LLC) { 1656f2cb1360SIngo Molnar sd->imbalance_pct = 117; 1657f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 1658f2cb1360SIngo Molnar 1659f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1660f2cb1360SIngo Molnar } else if (sd->flags & SD_NUMA) { 1661f2cb1360SIngo Molnar sd->cache_nice_tries = 2; 1662f2cb1360SIngo Molnar 16639c63e84dSMorten Rasmussen sd->flags &= ~SD_PREFER_SIBLING; 1664f2cb1360SIngo Molnar sd->flags |= SD_SERIALIZE; 1665a55c7454SMatt Fleming if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { 1666f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_EXEC | 1667f2cb1360SIngo Molnar SD_BALANCE_FORK | 1668f2cb1360SIngo Molnar SD_WAKE_AFFINE); 1669f2cb1360SIngo Molnar } 1670f2cb1360SIngo Molnar 1671f2cb1360SIngo Molnar #endif 1672f2cb1360SIngo Molnar } else { 1673f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 1674f2cb1360SIngo Molnar } 1675f2cb1360SIngo Molnar 1676f2cb1360SIngo Molnar /* 1677f2cb1360SIngo Molnar * For all levels sharing cache; connect a sched_domain_shared 1678f2cb1360SIngo Molnar * instance. 1679f2cb1360SIngo Molnar */ 168054de4427SAlex Shi if (sd->flags & SD_SHARE_LLC) { 1681f2cb1360SIngo Molnar sd->shared = *per_cpu_ptr(sdd->sds, sd_id); 1682f2cb1360SIngo Molnar atomic_inc(&sd->shared->ref); 1683f2cb1360SIngo Molnar atomic_set(&sd->shared->nr_busy_cpus, sd_weight); 1684f2cb1360SIngo Molnar } 1685f2cb1360SIngo Molnar 1686f2cb1360SIngo Molnar sd->private = sdd; 1687f2cb1360SIngo Molnar 1688f2cb1360SIngo Molnar return sd; 1689f2cb1360SIngo Molnar } 1690f2cb1360SIngo Molnar 1691f2cb1360SIngo Molnar /* 1692f2cb1360SIngo Molnar * Topology list, bottom-up. 
1693f2cb1360SIngo Molnar */ 1694f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = { 1695f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT 1696f2cb1360SIngo Molnar { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 1697f2cb1360SIngo Molnar #endif 1698778c558fSBarry Song 1699778c558fSBarry Song #ifdef CONFIG_SCHED_CLUSTER 1700778c558fSBarry Song { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) }, 1701778c558fSBarry Song #endif 1702778c558fSBarry Song 1703f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC 1704f2cb1360SIngo Molnar { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 1705f2cb1360SIngo Molnar #endif 1706f577cd57SPeter Zijlstra { cpu_cpu_mask, SD_INIT_NAME(PKG) }, 1707f2cb1360SIngo Molnar { NULL, }, 1708f2cb1360SIngo Molnar }; 1709f2cb1360SIngo Molnar 1710f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology = 1711f2cb1360SIngo Molnar default_topology; 17120fb3978bSHuang Ying static struct sched_domain_topology_level *sched_domain_topology_saved; 1713f2cb1360SIngo Molnar 1714f2cb1360SIngo Molnar #define for_each_sd_topology(tl) \ 1715f2cb1360SIngo Molnar for (tl = sched_domain_topology; tl->mask; tl++) 1716f2cb1360SIngo Molnar 17170cce0fdeSMiaohe Lin void __init set_sched_topology(struct sched_domain_topology_level *tl) 1718f2cb1360SIngo Molnar { 1719f2cb1360SIngo Molnar if (WARN_ON_ONCE(sched_smp_initialized)) 1720f2cb1360SIngo Molnar return; 1721f2cb1360SIngo Molnar 1722f2cb1360SIngo Molnar sched_domain_topology = tl; 17230fb3978bSHuang Ying sched_domain_topology_saved = NULL; 1724f2cb1360SIngo Molnar } 1725f2cb1360SIngo Molnar 1726f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1727f2cb1360SIngo Molnar 1728f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu) 1729f2cb1360SIngo Molnar { 1730f2cb1360SIngo Molnar return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 1731f2cb1360SIngo Molnar } 1732f2cb1360SIngo Molnar 1733f2cb1360SIngo Molnar static void sched_numa_warn(const char *str) 1734f2cb1360SIngo Molnar { 1735f2cb1360SIngo Molnar static int done = false; 1736f2cb1360SIngo Molnar int i,j; 1737f2cb1360SIngo Molnar 1738f2cb1360SIngo Molnar if (done) 1739f2cb1360SIngo Molnar return; 1740f2cb1360SIngo Molnar 1741f2cb1360SIngo Molnar done = true; 1742f2cb1360SIngo Molnar 1743f2cb1360SIngo Molnar printk(KERN_WARNING "ERROR: %s\n\n", str); 1744f2cb1360SIngo Molnar 1745f2cb1360SIngo Molnar for (i = 0; i < nr_node_ids; i++) { 1746f2cb1360SIngo Molnar printk(KERN_WARNING " "); 17470fb3978bSHuang Ying for (j = 0; j < nr_node_ids; j++) { 17480fb3978bSHuang Ying if (!node_state(i, N_CPU) || !node_state(j, N_CPU)) 17490fb3978bSHuang Ying printk(KERN_CONT "(%02d) ", node_distance(i,j)); 17500fb3978bSHuang Ying else 1751f2cb1360SIngo Molnar printk(KERN_CONT " %02d ", node_distance(i,j)); 17520fb3978bSHuang Ying } 1753f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 1754f2cb1360SIngo Molnar } 1755f2cb1360SIngo Molnar printk(KERN_WARNING "\n"); 1756f2cb1360SIngo Molnar } 1757f2cb1360SIngo Molnar 1758f2cb1360SIngo Molnar bool find_numa_distance(int distance) 1759f2cb1360SIngo Molnar { 17600fb3978bSHuang Ying bool found = false; 17610fb3978bSHuang Ying int i, *distances; 1762f2cb1360SIngo Molnar 1763f2cb1360SIngo Molnar if (distance == node_distance(0, 0)) 1764f2cb1360SIngo Molnar return true; 1765f2cb1360SIngo Molnar 17660fb3978bSHuang Ying rcu_read_lock(); 17670fb3978bSHuang Ying distances = rcu_dereference(sched_domains_numa_distance); 17680fb3978bSHuang Ying if (!distances) 
17690fb3978bSHuang Ying goto unlock; 1770f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 17710fb3978bSHuang Ying if (distances[i] == distance) { 17720fb3978bSHuang Ying found = true; 17730fb3978bSHuang Ying break; 17740fb3978bSHuang Ying } 17750fb3978bSHuang Ying } 17760fb3978bSHuang Ying unlock: 17770fb3978bSHuang Ying rcu_read_unlock(); 17780fb3978bSHuang Ying 17790fb3978bSHuang Ying return found; 1780f2cb1360SIngo Molnar } 1781f2cb1360SIngo Molnar 17820fb3978bSHuang Ying #define for_each_cpu_node_but(n, nbut) \ 17830fb3978bSHuang Ying for_each_node_state(n, N_CPU) \ 17840fb3978bSHuang Ying if (n == nbut) \ 17850fb3978bSHuang Ying continue; \ 17860fb3978bSHuang Ying else 1787f2cb1360SIngo Molnar 1788f2cb1360SIngo Molnar /* 1789f2cb1360SIngo Molnar * A system can have three types of NUMA topology: 1790f2cb1360SIngo Molnar * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 1791f2cb1360SIngo Molnar * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 1792f2cb1360SIngo Molnar * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 1793f2cb1360SIngo Molnar * 1794f2cb1360SIngo Molnar * The difference between a glueless mesh topology and a backplane 1795f2cb1360SIngo Molnar * topology lies in whether communication between not directly 1796f2cb1360SIngo Molnar * connected nodes goes through intermediary nodes (where programs 1797f2cb1360SIngo Molnar * could run), or through backplane controllers. This affects 1798f2cb1360SIngo Molnar * placement of programs. 1799f2cb1360SIngo Molnar * 1800f2cb1360SIngo Molnar * The type of topology can be discerned with the following tests: 1801f2cb1360SIngo Molnar * - If the maximum distance between any nodes is 1 hop, the system 1802f2cb1360SIngo Molnar * is directly connected. 1803f2cb1360SIngo Molnar * - If for two nodes A and B, located N > 1 hops away from each other, 1804f2cb1360SIngo Molnar * there is an intermediary node C, which is < N hops away from both 1805f2cb1360SIngo Molnar * nodes A and B, the system is a glueless mesh. 1806f2cb1360SIngo Molnar */ 18070fb3978bSHuang Ying static void init_numa_topology_type(int offline_node) 1808f2cb1360SIngo Molnar { 1809f2cb1360SIngo Molnar int a, b, c, n; 1810f2cb1360SIngo Molnar 1811f2cb1360SIngo Molnar n = sched_max_numa_distance; 1812f2cb1360SIngo Molnar 1813e5e96fafSSrikar Dronamraju if (sched_domains_numa_levels <= 2) { 1814f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_DIRECT; 1815f2cb1360SIngo Molnar return; 1816f2cb1360SIngo Molnar } 1817f2cb1360SIngo Molnar 18180fb3978bSHuang Ying for_each_cpu_node_but(a, offline_node) { 18190fb3978bSHuang Ying for_each_cpu_node_but(b, offline_node) { 1820f2cb1360SIngo Molnar /* Find two nodes furthest removed from each other. */ 1821f2cb1360SIngo Molnar if (node_distance(a, b) < n) 1822f2cb1360SIngo Molnar continue; 1823f2cb1360SIngo Molnar 1824f2cb1360SIngo Molnar /* Is there an intermediary node between a and b? 
*/ 18250fb3978bSHuang Ying for_each_cpu_node_but(c, offline_node) { 1826f2cb1360SIngo Molnar if (node_distance(a, c) < n && 1827f2cb1360SIngo Molnar node_distance(b, c) < n) { 1828f2cb1360SIngo Molnar sched_numa_topology_type = 1829f2cb1360SIngo Molnar NUMA_GLUELESS_MESH; 1830f2cb1360SIngo Molnar return; 1831f2cb1360SIngo Molnar } 1832f2cb1360SIngo Molnar } 1833f2cb1360SIngo Molnar 1834f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_BACKPLANE; 1835f2cb1360SIngo Molnar return; 1836f2cb1360SIngo Molnar } 1837f2cb1360SIngo Molnar } 18380fb3978bSHuang Ying 18390fb3978bSHuang Ying pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n"); 18400fb3978bSHuang Ying sched_numa_topology_type = NUMA_DIRECT; 1841f2cb1360SIngo Molnar } 1842f2cb1360SIngo Molnar 1843620a6dc4SValentin Schneider 1844620a6dc4SValentin Schneider #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS) 1845620a6dc4SValentin Schneider 18460fb3978bSHuang Ying void sched_init_numa(int offline_node) 1847f2cb1360SIngo Molnar { 1848f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1849620a6dc4SValentin Schneider unsigned long *distance_map; 1850620a6dc4SValentin Schneider int nr_levels = 0; 1851620a6dc4SValentin Schneider int i, j; 18520fb3978bSHuang Ying int *distances; 18530fb3978bSHuang Ying struct cpumask ***masks; 1854051f3ca0SSuravee Suthikulpanit 1855f2cb1360SIngo Molnar /* 1856402de7fcSIngo Molnar * O(nr_nodes^2) de-duplicating selection sort -- in order to find the 1857f2cb1360SIngo Molnar * unique distances in the node_distance() table. 1858f2cb1360SIngo Molnar */ 1859620a6dc4SValentin Schneider distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL); 1860620a6dc4SValentin Schneider if (!distance_map) 1861620a6dc4SValentin Schneider return; 1862620a6dc4SValentin Schneider 1863620a6dc4SValentin Schneider bitmap_zero(distance_map, NR_DISTANCE_VALUES); 18640fb3978bSHuang Ying for_each_cpu_node_but(i, offline_node) { 18650fb3978bSHuang Ying for_each_cpu_node_but(j, offline_node) { 1866620a6dc4SValentin Schneider int distance = node_distance(i, j); 1867f2cb1360SIngo Molnar 1868620a6dc4SValentin Schneider if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) { 1869620a6dc4SValentin Schneider sched_numa_warn("Invalid distance value range"); 18700fb3978bSHuang Ying bitmap_free(distance_map); 1871620a6dc4SValentin Schneider return; 1872620a6dc4SValentin Schneider } 1873f2cb1360SIngo Molnar 1874620a6dc4SValentin Schneider bitmap_set(distance_map, distance, 1); 1875620a6dc4SValentin Schneider } 1876620a6dc4SValentin Schneider } 1877f2cb1360SIngo Molnar /* 1878620a6dc4SValentin Schneider * We can now figure out how many unique distance values there are and 1879620a6dc4SValentin Schneider * allocate memory accordingly. 
1880f2cb1360SIngo Molnar */ 1881620a6dc4SValentin Schneider nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES); 1882f2cb1360SIngo Molnar 18830fb3978bSHuang Ying distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL); 18840fb3978bSHuang Ying if (!distances) { 1885620a6dc4SValentin Schneider bitmap_free(distance_map); 1886620a6dc4SValentin Schneider return; 1887f2cb1360SIngo Molnar } 1888620a6dc4SValentin Schneider 1889620a6dc4SValentin Schneider for (i = 0, j = 0; i < nr_levels; i++, j++) { 1890620a6dc4SValentin Schneider j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j); 18910fb3978bSHuang Ying distances[i] = j; 1892f2cb1360SIngo Molnar } 18930fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_distance, distances); 1894f2cb1360SIngo Molnar 1895620a6dc4SValentin Schneider bitmap_free(distance_map); 1896620a6dc4SValentin Schneider 1897f2cb1360SIngo Molnar /* 1898620a6dc4SValentin Schneider * 'nr_levels' contains the number of unique distances 1899f2cb1360SIngo Molnar * 1900f2cb1360SIngo Molnar * The sched_domains_numa_distance[] array includes the actual distance 1901f2cb1360SIngo Molnar * numbers. 1902f2cb1360SIngo Molnar */ 1903f2cb1360SIngo Molnar 1904f2cb1360SIngo Molnar /* 1905f2cb1360SIngo Molnar * Here, we should temporarily reset sched_domains_numa_levels to 0. 1906f2cb1360SIngo Molnar * If it fails to allocate memory for the array sched_domains_numa_masks[][], 1907620a6dc4SValentin Schneider * the array will contain fewer than 'nr_levels' members. This could be 1908f2cb1360SIngo Molnar * dangerous when we use it to iterate over sched_domains_numa_masks[][] 1909f2cb1360SIngo Molnar * in other functions. 1910f2cb1360SIngo Molnar * 1911620a6dc4SValentin Schneider * We reset it to 'nr_levels' at the end of this function. 1912f2cb1360SIngo Molnar */ 1913f2cb1360SIngo Molnar sched_domains_numa_levels = 0; 1914f2cb1360SIngo Molnar 19150fb3978bSHuang Ying masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL); 19160fb3978bSHuang Ying if (!masks) 1917f2cb1360SIngo Molnar return; 1918f2cb1360SIngo Molnar 1919f2cb1360SIngo Molnar /* 1920f2cb1360SIngo Molnar * Now for each level, construct a mask per node which contains all 1921f2cb1360SIngo Molnar * CPUs of nodes that are that many hops away from us.
1922f2cb1360SIngo Molnar */ 1923620a6dc4SValentin Schneider for (i = 0; i < nr_levels; i++) { 19240fb3978bSHuang Ying masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 19250fb3978bSHuang Ying if (!masks[i]) 1926f2cb1360SIngo Molnar return; 1927f2cb1360SIngo Molnar 19280fb3978bSHuang Ying for_each_cpu_node_but(j, offline_node) { 1929f2cb1360SIngo Molnar struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1930620a6dc4SValentin Schneider int k; 1931620a6dc4SValentin Schneider 1932f2cb1360SIngo Molnar if (!mask) 1933f2cb1360SIngo Molnar return; 1934f2cb1360SIngo Molnar 19350fb3978bSHuang Ying masks[i][j] = mask; 1936f2cb1360SIngo Molnar 19370fb3978bSHuang Ying for_each_cpu_node_but(k, offline_node) { 1938620a6dc4SValentin Schneider if (sched_debug() && (node_distance(j, k) != node_distance(k, j))) 1939620a6dc4SValentin Schneider sched_numa_warn("Node-distance not symmetric"); 1940620a6dc4SValentin Schneider 1941f2cb1360SIngo Molnar if (node_distance(j, k) > sched_domains_numa_distance[i]) 1942f2cb1360SIngo Molnar continue; 1943f2cb1360SIngo Molnar 1944f2cb1360SIngo Molnar cpumask_or(mask, mask, cpumask_of_node(k)); 1945f2cb1360SIngo Molnar } 1946f2cb1360SIngo Molnar } 1947f2cb1360SIngo Molnar } 19480fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_masks, masks); 1949f2cb1360SIngo Molnar 1950f2cb1360SIngo Molnar /* Compute default topology size */ 1951f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++); 1952f2cb1360SIngo Molnar 195371e5f664SDietmar Eggemann tl = kzalloc((i + nr_levels + 1) * 1954f2cb1360SIngo Molnar sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1955f2cb1360SIngo Molnar if (!tl) 1956f2cb1360SIngo Molnar return; 1957f2cb1360SIngo Molnar 1958f2cb1360SIngo Molnar /* 1959f2cb1360SIngo Molnar * Copy the default topology bits.. 1960f2cb1360SIngo Molnar */ 1961f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++) 1962f2cb1360SIngo Molnar tl[i] = sched_domain_topology[i]; 1963f2cb1360SIngo Molnar 1964f2cb1360SIngo Molnar /* 1965051f3ca0SSuravee Suthikulpanit * Add the NUMA identity distance, aka single NODE. 1966051f3ca0SSuravee Suthikulpanit */ 1967051f3ca0SSuravee Suthikulpanit tl[i++] = (struct sched_domain_topology_level){ 1968051f3ca0SSuravee Suthikulpanit .mask = sd_numa_mask, 1969051f3ca0SSuravee Suthikulpanit .numa_level = 0, 1970051f3ca0SSuravee Suthikulpanit SD_INIT_NAME(NODE) 1971051f3ca0SSuravee Suthikulpanit }; 1972051f3ca0SSuravee Suthikulpanit 1973051f3ca0SSuravee Suthikulpanit /* 1974f2cb1360SIngo Molnar * .. and append 'j' levels of NUMA goodness. 
1975f2cb1360SIngo Molnar */ 1976620a6dc4SValentin Schneider for (j = 1; j < nr_levels; i++, j++) { 1977f2cb1360SIngo Molnar tl[i] = (struct sched_domain_topology_level){ 1978f2cb1360SIngo Molnar .mask = sd_numa_mask, 1979f2cb1360SIngo Molnar .sd_flags = cpu_numa_flags, 1980f2cb1360SIngo Molnar .flags = SDTL_OVERLAP, 1981f2cb1360SIngo Molnar .numa_level = j, 1982f2cb1360SIngo Molnar SD_INIT_NAME(NUMA) 1983f2cb1360SIngo Molnar }; 1984f2cb1360SIngo Molnar } 1985f2cb1360SIngo Molnar 19860fb3978bSHuang Ying sched_domain_topology_saved = sched_domain_topology; 1987f2cb1360SIngo Molnar sched_domain_topology = tl; 1988f2cb1360SIngo Molnar 1989620a6dc4SValentin Schneider sched_domains_numa_levels = nr_levels; 19900fb3978bSHuang Ying WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); 1991f2cb1360SIngo Molnar 19920fb3978bSHuang Ying init_numa_topology_type(offline_node); 19930083242cSValentin Schneider } 19940083242cSValentin Schneider 19950fb3978bSHuang Ying 19960fb3978bSHuang Ying static void sched_reset_numa(void) 19970083242cSValentin Schneider { 19980fb3978bSHuang Ying int nr_levels, *distances; 19990fb3978bSHuang Ying struct cpumask ***masks; 20000fb3978bSHuang Ying 20010fb3978bSHuang Ying nr_levels = sched_domains_numa_levels; 20020fb3978bSHuang Ying sched_domains_numa_levels = 0; 20030fb3978bSHuang Ying sched_max_numa_distance = 0; 20040fb3978bSHuang Ying sched_numa_topology_type = NUMA_DIRECT; 20050fb3978bSHuang Ying distances = sched_domains_numa_distance; 20060fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_distance, NULL); 20070fb3978bSHuang Ying masks = sched_domains_numa_masks; 20080fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_masks, NULL); 20090fb3978bSHuang Ying if (distances || masks) { 20100083242cSValentin Schneider int i, j; 20110083242cSValentin Schneider 20120fb3978bSHuang Ying synchronize_rcu(); 20130fb3978bSHuang Ying kfree(distances); 20140fb3978bSHuang Ying for (i = 0; i < nr_levels && masks; i++) { 20150fb3978bSHuang Ying if (!masks[i]) 20160fb3978bSHuang Ying continue; 20170fb3978bSHuang Ying for_each_node(j) 20180fb3978bSHuang Ying kfree(masks[i][j]); 20190fb3978bSHuang Ying kfree(masks[i]); 20200fb3978bSHuang Ying } 20210fb3978bSHuang Ying kfree(masks); 20220fb3978bSHuang Ying } 20230fb3978bSHuang Ying if (sched_domain_topology_saved) { 20240fb3978bSHuang Ying kfree(sched_domain_topology); 20250fb3978bSHuang Ying sched_domain_topology = sched_domain_topology_saved; 20260fb3978bSHuang Ying sched_domain_topology_saved = NULL; 20270fb3978bSHuang Ying } 20280fb3978bSHuang Ying } 20290fb3978bSHuang Ying 20300083242cSValentin Schneider /* 20310fb3978bSHuang Ying * Call with hotplug lock held 20320083242cSValentin Schneider */ 20330fb3978bSHuang Ying void sched_update_numa(int cpu, bool online) 20340fb3978bSHuang Ying { 20350fb3978bSHuang Ying int node; 20360fb3978bSHuang Ying 20370fb3978bSHuang Ying node = cpu_to_node(cpu); 20380fb3978bSHuang Ying /* 20390fb3978bSHuang Ying * Scheduler NUMA topology is updated when the first CPU of a 20400fb3978bSHuang Ying * node is onlined or the last CPU of a node is offlined. 20410fb3978bSHuang Ying */ 20420fb3978bSHuang Ying if (cpumask_weight(cpumask_of_node(node)) != 1) 20430083242cSValentin Schneider return; 20440083242cSValentin Schneider 20450fb3978bSHuang Ying sched_reset_numa(); 20460fb3978bSHuang Ying sched_init_numa(online ? 
NUMA_NO_NODE : node); 2047f2cb1360SIngo Molnar } 2048f2cb1360SIngo Molnar 2049f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu) 2050f2cb1360SIngo Molnar { 2051f2cb1360SIngo Molnar int node = cpu_to_node(cpu); 2052f2cb1360SIngo Molnar int i, j; 2053f2cb1360SIngo Molnar 2054f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 2055f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 20560fb3978bSHuang Ying if (!node_state(j, N_CPU)) 20570083242cSValentin Schneider continue; 20580083242cSValentin Schneider 20590083242cSValentin Schneider /* Set ourselves in the remote node's masks */ 2060f2cb1360SIngo Molnar if (node_distance(j, node) <= sched_domains_numa_distance[i]) 2061f2cb1360SIngo Molnar cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 2062f2cb1360SIngo Molnar } 2063f2cb1360SIngo Molnar } 2064f2cb1360SIngo Molnar } 2065f2cb1360SIngo Molnar 2066f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu) 2067f2cb1360SIngo Molnar { 2068f2cb1360SIngo Molnar int i, j; 2069f2cb1360SIngo Molnar 2070f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 20710fb3978bSHuang Ying for (j = 0; j < nr_node_ids; j++) { 20720fb3978bSHuang Ying if (sched_domains_numa_masks[i][j]) 2073f2cb1360SIngo Molnar cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 2074f2cb1360SIngo Molnar } 2075f2cb1360SIngo Molnar } 20760fb3978bSHuang Ying } 2077f2cb1360SIngo Molnar 2078e0e8d491SWanpeng Li /* 2079e0e8d491SWanpeng Li * sched_numa_find_closest() - given the NUMA topology, find the cpu 2080e0e8d491SWanpeng Li * closest to @cpu from @cpumask. 2081e0e8d491SWanpeng Li * cpumask: cpumask to find a cpu from 2082e0e8d491SWanpeng Li * cpu: cpu to be close to 2083e0e8d491SWanpeng Li * 2084e0e8d491SWanpeng Li * returns: cpu, or nr_cpu_ids when nothing found. 
2085e0e8d491SWanpeng Li */ 2086e0e8d491SWanpeng Li int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 2087e0e8d491SWanpeng Li { 20880fb3978bSHuang Ying int i, j = cpu_to_node(cpu), found = nr_cpu_ids; 20890fb3978bSHuang Ying struct cpumask ***masks; 2090e0e8d491SWanpeng Li 20910fb3978bSHuang Ying rcu_read_lock(); 20920fb3978bSHuang Ying masks = rcu_dereference(sched_domains_numa_masks); 20930fb3978bSHuang Ying if (!masks) 20940fb3978bSHuang Ying goto unlock; 2095e0e8d491SWanpeng Li for (i = 0; i < sched_domains_numa_levels; i++) { 20960fb3978bSHuang Ying if (!masks[i][j]) 20970fb3978bSHuang Ying break; 20980fb3978bSHuang Ying cpu = cpumask_any_and(cpus, masks[i][j]); 20990fb3978bSHuang Ying if (cpu < nr_cpu_ids) { 21000fb3978bSHuang Ying found = cpu; 21010fb3978bSHuang Ying break; 2102e0e8d491SWanpeng Li } 21030fb3978bSHuang Ying } 21040fb3978bSHuang Ying unlock: 21050fb3978bSHuang Ying rcu_read_unlock(); 21060fb3978bSHuang Ying 21070fb3978bSHuang Ying return found; 2108e0e8d491SWanpeng Li } 2109e0e8d491SWanpeng Li 2110cd7f5535SYury Norov struct __cmp_key { 2111cd7f5535SYury Norov const struct cpumask *cpus; 2112cd7f5535SYury Norov struct cpumask ***masks; 2113cd7f5535SYury Norov int node; 2114cd7f5535SYury Norov int cpu; 2115cd7f5535SYury Norov int w; 2116cd7f5535SYury Norov }; 2117cd7f5535SYury Norov 2118cd7f5535SYury Norov static int hop_cmp(const void *a, const void *b) 2119cd7f5535SYury Norov { 212001bb11adSYury Norov struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b; 2121cd7f5535SYury Norov struct __cmp_key *k = (struct __cmp_key *)a; 2122cd7f5535SYury Norov 2123cd7f5535SYury Norov if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu) 2124cd7f5535SYury Norov return 1; 2125cd7f5535SYury Norov 212601bb11adSYury Norov if (b == k->masks) { 212701bb11adSYury Norov k->w = 0; 212801bb11adSYury Norov return 0; 212901bb11adSYury Norov } 213001bb11adSYury Norov 213101bb11adSYury Norov prev_hop = *((struct cpumask ***)b - 1); 213201bb11adSYury Norov k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]); 2133cd7f5535SYury Norov if (k->w <= k->cpu) 2134cd7f5535SYury Norov return 0; 2135cd7f5535SYury Norov 2136cd7f5535SYury Norov return -1; 2137cd7f5535SYury Norov } 2138cd7f5535SYury Norov 21396d08ad21SYury Norov /** 21406d08ad21SYury Norov * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth closest CPU 21416d08ad21SYury Norov * from @cpus to @cpu, taking into account distance 21426d08ad21SYury Norov * from a given @node. 21436d08ad21SYury Norov * @cpus: cpumask to find a cpu from 21446d08ad21SYury Norov * @cpu: CPU to start searching 21456d08ad21SYury Norov * @node: NUMA node to order CPUs by distance 2146cd7f5535SYury Norov * 21476d08ad21SYury Norov * Return: cpu, or nr_cpu_ids when nothing found. 
2148cd7f5535SYury Norov */ 2149cd7f5535SYury Norov int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) 2150cd7f5535SYury Norov { 2151617f2c38SYury Norov struct __cmp_key k = { .cpus = cpus, .cpu = cpu }; 2152cd7f5535SYury Norov struct cpumask ***hop_masks; 2153cd7f5535SYury Norov int hop, ret = nr_cpu_ids; 2154cd7f5535SYury Norov 21559ecea9aeSYury Norov if (node == NUMA_NO_NODE) 21569ecea9aeSYury Norov return cpumask_nth_and(cpu, cpus, cpu_online_mask); 21579ecea9aeSYury Norov 2158cd7f5535SYury Norov rcu_read_lock(); 2159cd7f5535SYury Norov 2160617f2c38SYury Norov /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ 2161617f2c38SYury Norov node = numa_nearest_node(node, N_CPU); 2162617f2c38SYury Norov k.node = node; 2163617f2c38SYury Norov 2164cd7f5535SYury Norov k.masks = rcu_dereference(sched_domains_numa_masks); 2165cd7f5535SYury Norov if (!k.masks) 2166cd7f5535SYury Norov goto unlock; 2167cd7f5535SYury Norov 2168cd7f5535SYury Norov hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp); 2169cd7f5535SYury Norov hop = hop_masks - k.masks; 2170cd7f5535SYury Norov 2171cd7f5535SYury Norov ret = hop ? 2172cd7f5535SYury Norov cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) : 2173cd7f5535SYury Norov cpumask_nth_and(cpu, cpus, k.masks[0][node]); 2174cd7f5535SYury Norov unlock: 2175cd7f5535SYury Norov rcu_read_unlock(); 2176cd7f5535SYury Norov return ret; 2177cd7f5535SYury Norov } 2178cd7f5535SYury Norov EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu); 21799feae658SValentin Schneider 21809feae658SValentin Schneider /** 21819feae658SValentin Schneider * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from 21829feae658SValentin Schneider * @node 21839feae658SValentin Schneider * @node: The node to count hops from. 21849feae658SValentin Schneider * @hops: Include CPUs up to that many hops away. 0 means local node. 21859feae658SValentin Schneider * 21869feae658SValentin Schneider * Return: On success, a pointer to a cpumask of CPUs at most @hops away from 21879feae658SValentin Schneider * @node, an error value otherwise. 21889feae658SValentin Schneider * 21899feae658SValentin Schneider * Requires rcu_lock to be held. Returned cpumask is only valid within that 21909feae658SValentin Schneider * read-side section, copy it if required beyond that. 21919feae658SValentin Schneider * 21929feae658SValentin Schneider * Note that not all hops are equal in distance; see sched_init_numa() for how 21939feae658SValentin Schneider * distances and masks are handled. 21949feae658SValentin Schneider * Also note that this is a reflection of sched_domains_numa_masks, which may change 21959feae658SValentin Schneider * during the lifetime of the system (offline nodes are taken out of the masks). 
21969feae658SValentin Schneider */ 21979feae658SValentin Schneider const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops) 21989feae658SValentin Schneider { 21999feae658SValentin Schneider struct cpumask ***masks; 22009feae658SValentin Schneider 22019feae658SValentin Schneider if (node >= nr_node_ids || hops >= sched_domains_numa_levels) 22029feae658SValentin Schneider return ERR_PTR(-EINVAL); 22039feae658SValentin Schneider 22049feae658SValentin Schneider masks = rcu_dereference(sched_domains_numa_masks); 22059feae658SValentin Schneider if (!masks) 22069feae658SValentin Schneider return ERR_PTR(-EBUSY); 22079feae658SValentin Schneider 22089feae658SValentin Schneider return masks[hops][node]; 22099feae658SValentin Schneider } 22109feae658SValentin Schneider EXPORT_SYMBOL_GPL(sched_numa_hop_mask); 22119feae658SValentin Schneider 2212f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */ 2213f2cb1360SIngo Molnar 2214f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map) 2215f2cb1360SIngo Molnar { 2216f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2217f2cb1360SIngo Molnar int j; 2218f2cb1360SIngo Molnar 2219f2cb1360SIngo Molnar for_each_sd_topology(tl) { 2220f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 2221f2cb1360SIngo Molnar 2222f2cb1360SIngo Molnar sdd->sd = alloc_percpu(struct sched_domain *); 2223f2cb1360SIngo Molnar if (!sdd->sd) 2224f2cb1360SIngo Molnar return -ENOMEM; 2225f2cb1360SIngo Molnar 2226f2cb1360SIngo Molnar sdd->sds = alloc_percpu(struct sched_domain_shared *); 2227f2cb1360SIngo Molnar if (!sdd->sds) 2228f2cb1360SIngo Molnar return -ENOMEM; 2229f2cb1360SIngo Molnar 2230f2cb1360SIngo Molnar sdd->sg = alloc_percpu(struct sched_group *); 2231f2cb1360SIngo Molnar if (!sdd->sg) 2232f2cb1360SIngo Molnar return -ENOMEM; 2233f2cb1360SIngo Molnar 2234f2cb1360SIngo Molnar sdd->sgc = alloc_percpu(struct sched_group_capacity *); 2235f2cb1360SIngo Molnar if (!sdd->sgc) 2236f2cb1360SIngo Molnar return -ENOMEM; 2237f2cb1360SIngo Molnar 2238f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 2239f2cb1360SIngo Molnar struct sched_domain *sd; 2240f2cb1360SIngo Molnar struct sched_domain_shared *sds; 2241f2cb1360SIngo Molnar struct sched_group *sg; 2242f2cb1360SIngo Molnar struct sched_group_capacity *sgc; 2243f2cb1360SIngo Molnar 2244f2cb1360SIngo Molnar sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 2245f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2246f2cb1360SIngo Molnar if (!sd) 2247f2cb1360SIngo Molnar return -ENOMEM; 2248f2cb1360SIngo Molnar 2249f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, j) = sd; 2250f2cb1360SIngo Molnar 2251f2cb1360SIngo Molnar sds = kzalloc_node(sizeof(struct sched_domain_shared), 2252f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2253f2cb1360SIngo Molnar if (!sds) 2254f2cb1360SIngo Molnar return -ENOMEM; 2255f2cb1360SIngo Molnar 2256f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, j) = sds; 2257f2cb1360SIngo Molnar 2258f2cb1360SIngo Molnar sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 2259f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2260f2cb1360SIngo Molnar if (!sg) 2261f2cb1360SIngo Molnar return -ENOMEM; 2262f2cb1360SIngo Molnar 2263f2cb1360SIngo Molnar sg->next = sg; 2264f2cb1360SIngo Molnar 2265f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, j) = sg; 2266f2cb1360SIngo Molnar 2267f2cb1360SIngo Molnar sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 2268f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2269f2cb1360SIngo Molnar if (!sgc) 
2270f2cb1360SIngo Molnar return -ENOMEM; 2271f2cb1360SIngo Molnar 2272005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 2273005f874dSPeter Zijlstra sgc->id = j; 2274005f874dSPeter Zijlstra #endif 2275005f874dSPeter Zijlstra 2276f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, j) = sgc; 2277f2cb1360SIngo Molnar } 2278f2cb1360SIngo Molnar } 2279f2cb1360SIngo Molnar 2280f2cb1360SIngo Molnar return 0; 2281f2cb1360SIngo Molnar } 2282f2cb1360SIngo Molnar 2283f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map) 2284f2cb1360SIngo Molnar { 2285f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2286f2cb1360SIngo Molnar int j; 2287f2cb1360SIngo Molnar 2288f2cb1360SIngo Molnar for_each_sd_topology(tl) { 2289f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 2290f2cb1360SIngo Molnar 2291f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 2292f2cb1360SIngo Molnar struct sched_domain *sd; 2293f2cb1360SIngo Molnar 2294f2cb1360SIngo Molnar if (sdd->sd) { 2295f2cb1360SIngo Molnar sd = *per_cpu_ptr(sdd->sd, j); 2296f2cb1360SIngo Molnar if (sd && (sd->flags & SD_OVERLAP)) 2297f2cb1360SIngo Molnar free_sched_groups(sd->groups, 0); 2298f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sd, j)); 2299f2cb1360SIngo Molnar } 2300f2cb1360SIngo Molnar 2301f2cb1360SIngo Molnar if (sdd->sds) 2302f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sds, j)); 2303f2cb1360SIngo Molnar if (sdd->sg) 2304f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sg, j)); 2305f2cb1360SIngo Molnar if (sdd->sgc) 2306f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sgc, j)); 2307f2cb1360SIngo Molnar } 2308f2cb1360SIngo Molnar free_percpu(sdd->sd); 2309f2cb1360SIngo Molnar sdd->sd = NULL; 2310f2cb1360SIngo Molnar free_percpu(sdd->sds); 2311f2cb1360SIngo Molnar sdd->sds = NULL; 2312f2cb1360SIngo Molnar free_percpu(sdd->sg); 2313f2cb1360SIngo Molnar sdd->sg = NULL; 2314f2cb1360SIngo Molnar free_percpu(sdd->sgc); 2315f2cb1360SIngo Molnar sdd->sgc = NULL; 2316f2cb1360SIngo Molnar } 2317f2cb1360SIngo Molnar } 2318f2cb1360SIngo Molnar 2319181a80d1SViresh Kumar static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 2320f2cb1360SIngo Molnar const struct cpumask *cpu_map, struct sched_domain_attr *attr, 2321c744dc4aSBeata Michalska struct sched_domain *child, int cpu) 2322f2cb1360SIngo Molnar { 2323c744dc4aSBeata Michalska struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); 2324f2cb1360SIngo Molnar 2325f2cb1360SIngo Molnar if (child) { 2326f2cb1360SIngo Molnar sd->level = child->level + 1; 2327f2cb1360SIngo Molnar sched_domain_level_max = max(sched_domain_level_max, sd->level); 2328f2cb1360SIngo Molnar child->parent = sd; 2329f2cb1360SIngo Molnar 2330f2cb1360SIngo Molnar if (!cpumask_subset(sched_domain_span(child), 2331f2cb1360SIngo Molnar sched_domain_span(sd))) { 2332f2cb1360SIngo Molnar pr_err("BUG: arch topology borken\n"); 2333f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 2334f2cb1360SIngo Molnar pr_err(" the %s domain not a subset of the %s domain\n", 2335f2cb1360SIngo Molnar child->name, sd->name); 2336f2cb1360SIngo Molnar #endif 233797fb7a0aSIngo Molnar /* Fixup, ensure @sd has at least @child CPUs. 
*/ 2338f2cb1360SIngo Molnar cpumask_or(sched_domain_span(sd), 2339f2cb1360SIngo Molnar sched_domain_span(sd), 2340f2cb1360SIngo Molnar sched_domain_span(child)); 2341f2cb1360SIngo Molnar } 2342f2cb1360SIngo Molnar 2343f2cb1360SIngo Molnar } 2344f2cb1360SIngo Molnar set_domain_attribute(sd, attr); 2345f2cb1360SIngo Molnar 2346f2cb1360SIngo Molnar return sd; 2347f2cb1360SIngo Molnar } 2348f2cb1360SIngo Molnar 2349f2cb1360SIngo Molnar /* 2350ccf74128SValentin Schneider * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for 2351ccf74128SValentin Schneider * any two given CPUs at this (non-NUMA) topology level. 2352ccf74128SValentin Schneider */ 2353ccf74128SValentin Schneider static bool topology_span_sane(struct sched_domain_topology_level *tl, 2354ccf74128SValentin Schneider const struct cpumask *cpu_map, int cpu) 2355ccf74128SValentin Schneider { 235605037e5fSKyle Meyer int i = cpu + 1; 2357ccf74128SValentin Schneider 2358ccf74128SValentin Schneider /* NUMA levels are allowed to overlap */ 2359ccf74128SValentin Schneider if (tl->flags & SDTL_OVERLAP) 2360ccf74128SValentin Schneider return true; 2361ccf74128SValentin Schneider 2362ccf74128SValentin Schneider /* 2363ccf74128SValentin Schneider * Non-NUMA levels cannot partially overlap - they must be either 2364ccf74128SValentin Schneider * completely equal or completely disjoint. Otherwise we can end up 2365ccf74128SValentin Schneider * breaking the sched_group lists - i.e. a later get_group() pass 2366ccf74128SValentin Schneider * breaks the linking done for an earlier span. 2367ccf74128SValentin Schneider */ 236805037e5fSKyle Meyer for_each_cpu_from(i, cpu_map) { 2369ccf74128SValentin Schneider /* 2370ccf74128SValentin Schneider * We should 'and' all those masks with 'cpu_map' to exactly 2371ccf74128SValentin Schneider * match the topology we're about to build, but that can only 2372ccf74128SValentin Schneider * remove CPUs, which only lessens our ability to detect 2373ccf74128SValentin Schneider * overlaps 2374ccf74128SValentin Schneider */ 2375ccf74128SValentin Schneider if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && 2376ccf74128SValentin Schneider cpumask_intersects(tl->mask(cpu), tl->mask(i))) 2377ccf74128SValentin Schneider return false; 2378ccf74128SValentin Schneider } 2379ccf74128SValentin Schneider 2380ccf74128SValentin Schneider return true; 2381ccf74128SValentin Schneider } 2382ccf74128SValentin Schneider 2383ccf74128SValentin Schneider /* 2384f2cb1360SIngo Molnar * Build sched domains for a given set of CPUs and attach the sched domains 2385f2cb1360SIngo Molnar * to the individual CPUs 2386f2cb1360SIngo Molnar */ 2387f2cb1360SIngo Molnar static int 2388f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 2389f2cb1360SIngo Molnar { 2390cd1cb335SValentin Schneider enum s_alloc alloc_state = sa_none; 2391f2cb1360SIngo Molnar struct sched_domain *sd; 2392f2cb1360SIngo Molnar struct s_data d; 2393f2cb1360SIngo Molnar struct rq *rq = NULL; 2394f2cb1360SIngo Molnar int i, ret = -ENOMEM; 2395df054e84SMorten Rasmussen bool has_asym = false; 23968881e163SBarry Song bool has_cluster = false; 2397f2cb1360SIngo Molnar 2398cd1cb335SValentin Schneider if (WARN_ON(cpumask_empty(cpu_map))) 2399cd1cb335SValentin Schneider goto error; 2400cd1cb335SValentin Schneider 2401f2cb1360SIngo Molnar alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 2402f2cb1360SIngo Molnar if (alloc_state != sa_rootdomain) 2403f2cb1360SIngo Molnar goto error; 2404f2cb1360SIngo Molnar 
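	/*
	 * Illustrative sketch (hypothetical topology, not taken from any
	 * particular machine): on a 2-node system with 2 cores per node and
	 * SMT-2, the loop below would typically leave CPU0 with a chain of
	 * domains along the lines of
	 *
	 *	SMT  span { 0,1 }	lowest level, stored in *per_cpu_ptr(d.sd, 0)
	 *	MC   span { 0-3 }	parent of SMT
	 *	NUMA span { 0-7 }	spans cpu_map, so the level walk breaks
	 *
	 * Levels whose span adds nothing over their child are only collapsed
	 * later, when the domains are attached via cpu_attach_domain().
	 */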
2405f2cb1360SIngo Molnar /* Set up domains for CPUs specified by the cpu_map: */ 2406f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 2407f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2408f2cb1360SIngo Molnar 2409f2cb1360SIngo Molnar sd = NULL; 2410f2cb1360SIngo Molnar for_each_sd_topology(tl) { 241105484e09SMorten Rasmussen 2412ccf74128SValentin Schneider if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) 2413ccf74128SValentin Schneider goto error; 2414ccf74128SValentin Schneider 2415c744dc4aSBeata Michalska sd = build_sched_domain(tl, cpu_map, attr, sd, i); 2416c744dc4aSBeata Michalska 2417c744dc4aSBeata Michalska has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; 241805484e09SMorten Rasmussen 2419f2cb1360SIngo Molnar if (tl == sched_domain_topology) 2420f2cb1360SIngo Molnar *per_cpu_ptr(d.sd, i) = sd; 2421af85596cSPeter Zijlstra if (tl->flags & SDTL_OVERLAP) 2422f2cb1360SIngo Molnar sd->flags |= SD_OVERLAP; 2423f2cb1360SIngo Molnar if (cpumask_equal(cpu_map, sched_domain_span(sd))) 2424f2cb1360SIngo Molnar break; 2425f2cb1360SIngo Molnar } 2426f2cb1360SIngo Molnar } 2427f2cb1360SIngo Molnar 2428f2cb1360SIngo Molnar /* Build the groups for the domains */ 2429f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 2430f2cb1360SIngo Molnar for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2431f2cb1360SIngo Molnar sd->span_weight = cpumask_weight(sched_domain_span(sd)); 2432f2cb1360SIngo Molnar if (sd->flags & SD_OVERLAP) { 2433f2cb1360SIngo Molnar if (build_overlap_sched_groups(sd, i)) 2434f2cb1360SIngo Molnar goto error; 2435f2cb1360SIngo Molnar } else { 2436f2cb1360SIngo Molnar if (build_sched_groups(sd, i)) 2437f2cb1360SIngo Molnar goto error; 2438f2cb1360SIngo Molnar } 2439f2cb1360SIngo Molnar } 2440f2cb1360SIngo Molnar } 2441f2cb1360SIngo Molnar 2442e496132eSMel Gorman /* 2443e496132eSMel Gorman * Calculate an allowed NUMA imbalance such that LLCs do not get 2444e496132eSMel Gorman * imbalanced. 2445e496132eSMel Gorman */ 2446e496132eSMel Gorman for_each_cpu(i, cpu_map) { 2447e496132eSMel Gorman unsigned int imb = 0; 2448e496132eSMel Gorman unsigned int imb_span = 1; 2449e496132eSMel Gorman 2450e496132eSMel Gorman for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2451e496132eSMel Gorman struct sched_domain *child = sd->child; 2452e496132eSMel Gorman 245354de4427SAlex Shi if (!(sd->flags & SD_SHARE_LLC) && child && 245454de4427SAlex Shi (child->flags & SD_SHARE_LLC)) { 24557f434dffSK Prateek Nayak struct sched_domain __rcu *top_p; 2456e496132eSMel Gorman unsigned int nr_llcs; 2457e496132eSMel Gorman 2458e496132eSMel Gorman /* 2459e496132eSMel Gorman * For a single LLC per node, allow an 2460026b98a9SMel Gorman * imbalance up to 12.5% of the node. This is 2461026b98a9SMel Gorman * an arbitrary cutoff based on two factors -- SMT and 2462026b98a9SMel Gorman * memory channels. For SMT-2, the intent is to 2463026b98a9SMel Gorman * avoid premature sharing of HT resources but 2464026b98a9SMel Gorman * SMT-4 or SMT-8 *may* benefit from a different 2465026b98a9SMel Gorman * cutoff. For memory channels, this is a very 2466026b98a9SMel Gorman * rough estimate of how many channels may be 2467026b98a9SMel Gorman * active and is based on recent CPUs with 2468026b98a9SMel Gorman * many cores. 2469e496132eSMel Gorman * 2470e496132eSMel Gorman * For multiple LLCs, allow an imbalance 2471e496132eSMel Gorman * until multiple tasks would share an LLC 2472e496132eSMel Gorman * on one node while LLCs on another node 2473026b98a9SMel Gorman * remain idle.
This assumes that there are 2474026b98a9SMel Gorman * enough logical CPUs per LLC to avoid SMT 2475026b98a9SMel Gorman * factors and that there is a correlation 2476026b98a9SMel Gorman * between LLCs and memory channels. 2477e496132eSMel Gorman */ 2478e496132eSMel Gorman nr_llcs = sd->span_weight / child->span_weight; 2479e496132eSMel Gorman if (nr_llcs == 1) 2480026b98a9SMel Gorman imb = sd->span_weight >> 3; 2481e496132eSMel Gorman else 2482e496132eSMel Gorman imb = nr_llcs; 2483026b98a9SMel Gorman imb = max(1U, imb); 2484e496132eSMel Gorman sd->imb_numa_nr = imb; 2485e496132eSMel Gorman 2486e496132eSMel Gorman /* Set span based on the first NUMA domain. */ 24877f434dffSK Prateek Nayak top_p = sd->parent; 2488e496132eSMel Gorman while (top_p && !(top_p->flags & SD_NUMA)) { 24897f434dffSK Prateek Nayak top_p = top_p->parent; 2490e496132eSMel Gorman } 2491e496132eSMel Gorman imb_span = top_p ? top_p->span_weight : sd->span_weight; 2492e496132eSMel Gorman } else { 2493e496132eSMel Gorman int factor = max(1U, (sd->span_weight / imb_span)); 2494e496132eSMel Gorman 2495e496132eSMel Gorman sd->imb_numa_nr = imb * factor; 2496e496132eSMel Gorman } 2497e496132eSMel Gorman } 2498e496132eSMel Gorman } 2499e496132eSMel Gorman 2500f2cb1360SIngo Molnar /* Calculate CPU capacity for physical packages and nodes */ 2501f2cb1360SIngo Molnar for (i = nr_cpumask_bits-1; i >= 0; i--) { 2502f2cb1360SIngo Molnar if (!cpumask_test_cpu(i, cpu_map)) 2503f2cb1360SIngo Molnar continue; 2504f2cb1360SIngo Molnar 2505f2cb1360SIngo Molnar for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2506f2cb1360SIngo Molnar claim_allocations(i, sd); 2507f2cb1360SIngo Molnar init_sched_groups_capacity(i, sd); 2508f2cb1360SIngo Molnar } 2509f2cb1360SIngo Molnar } 2510f2cb1360SIngo Molnar 2511f2cb1360SIngo Molnar /* Attach the domains */ 2512f2cb1360SIngo Molnar rcu_read_lock(); 2513f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 2514f2cb1360SIngo Molnar rq = cpu_rq(i); 2515f2cb1360SIngo Molnar sd = *per_cpu_ptr(d.sd, i); 2516f2cb1360SIngo Molnar 2517f2cb1360SIngo Molnar cpu_attach_domain(sd, d.rd, i); 25188881e163SBarry Song 25198881e163SBarry Song if (lowest_flag_domain(i, SD_CLUSTER)) 25208881e163SBarry Song has_cluster = true; 2521f2cb1360SIngo Molnar } 2522f2cb1360SIngo Molnar rcu_read_unlock(); 2523f2cb1360SIngo Molnar 2524df054e84SMorten Rasmussen if (has_asym) 2525e284df70SValentin Schneider static_branch_inc_cpuslocked(&sched_asym_cpucapacity); 2526df054e84SMorten Rasmussen 25278881e163SBarry Song if (has_cluster) 25288881e163SBarry Song static_branch_inc_cpuslocked(&sched_cluster_active); 25298881e163SBarry Song 2530fa427e8eSQais Yousef if (rq && sched_debug_verbose) 2531fa427e8eSQais Yousef pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map)); 2532f2cb1360SIngo Molnar 2533f2cb1360SIngo Molnar ret = 0; 2534f2cb1360SIngo Molnar error: 2535f2cb1360SIngo Molnar __free_domain_allocs(&d, alloc_state, cpu_map); 253697fb7a0aSIngo Molnar 2537f2cb1360SIngo Molnar return ret; 2538f2cb1360SIngo Molnar } 2539f2cb1360SIngo Molnar 2540f2cb1360SIngo Molnar /* Current sched domains: */ 2541f2cb1360SIngo Molnar static cpumask_var_t *doms_cur; 2542f2cb1360SIngo Molnar 2543f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */ 2544f2cb1360SIngo Molnar static int ndoms_cur; 2545f2cb1360SIngo Molnar 25463b03706fSIngo Molnar /* Attributes of custom domains in 'doms_cur' */ 2547f2cb1360SIngo Molnar static struct sched_domain_attr *dattr_cur; 2548f2cb1360SIngo Molnar 2549f2cb1360SIngo Molnar /* 2550f2cb1360SIngo Molnar * 
Special case: If a kmalloc() of a doms_cur partition (array of 2551f2cb1360SIngo Molnar * cpumask) fails, then fall back to a single sched domain, 2552f2cb1360SIngo Molnar * as determined by the single cpumask fallback_doms. 2553f2cb1360SIngo Molnar */ 25548d5dc512SPeter Zijlstra static cpumask_var_t fallback_doms; 2555f2cb1360SIngo Molnar 2556f2cb1360SIngo Molnar /* 2557f2cb1360SIngo Molnar * arch_update_cpu_topology lets virtualized architectures update the 2558f2cb1360SIngo Molnar * CPU core maps. It is supposed to return 1 if the topology changed 2559f2cb1360SIngo Molnar * or 0 if it stayed the same. 2560f2cb1360SIngo Molnar */ 2561f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void) 2562f2cb1360SIngo Molnar { 2563f2cb1360SIngo Molnar return 0; 2564f2cb1360SIngo Molnar } 2565f2cb1360SIngo Molnar 2566f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 2567f2cb1360SIngo Molnar { 2568f2cb1360SIngo Molnar int i; 2569f2cb1360SIngo Molnar cpumask_var_t *doms; 2570f2cb1360SIngo Molnar 25716da2ec56SKees Cook doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL); 2572f2cb1360SIngo Molnar if (!doms) 2573f2cb1360SIngo Molnar return NULL; 2574f2cb1360SIngo Molnar for (i = 0; i < ndoms; i++) { 2575f2cb1360SIngo Molnar if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 2576f2cb1360SIngo Molnar free_sched_domains(doms, i); 2577f2cb1360SIngo Molnar return NULL; 2578f2cb1360SIngo Molnar } 2579f2cb1360SIngo Molnar } 2580f2cb1360SIngo Molnar return doms; 2581f2cb1360SIngo Molnar } 2582f2cb1360SIngo Molnar 2583f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 2584f2cb1360SIngo Molnar { 2585f2cb1360SIngo Molnar unsigned int i; 2586f2cb1360SIngo Molnar for (i = 0; i < ndoms; i++) 2587f2cb1360SIngo Molnar free_cpumask_var(doms[i]); 2588f2cb1360SIngo Molnar kfree(doms); 2589f2cb1360SIngo Molnar } 2590f2cb1360SIngo Molnar 2591f2cb1360SIngo Molnar /* 2592cb0c0414SJuri Lelli * Set up scheduler domains and groups. For now this just excludes isolated 2593cb0c0414SJuri Lelli * CPUs, but could be used to exclude other special cases in the future.
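 *
 * An illustrative sketch of the expected boot-time call sequence (an
 * assumption about how the SMP init code uses this, not a guarantee made
 * by this file):
 *
 *	mutex_lock(&sched_domains_mutex);
 *	sched_init_domains(cpu_active_mask);
 *	mutex_unlock(&sched_domains_mutex);
 *
 * Later changes to the partitioning are expected to go through
 * partition_sched_domains*() rather than through this function.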
2594f2cb1360SIngo Molnar */ 2595ef90cf22SBing Huang int __init sched_init_domains(const struct cpumask *cpu_map) 2596f2cb1360SIngo Molnar { 2597f2cb1360SIngo Molnar int err; 2598f2cb1360SIngo Molnar 25998d5dc512SPeter Zijlstra zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL); 26001676330eSPeter Zijlstra zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL); 26018d5dc512SPeter Zijlstra zalloc_cpumask_var(&fallback_doms, GFP_KERNEL); 26028d5dc512SPeter Zijlstra 2603f2cb1360SIngo Molnar arch_update_cpu_topology(); 2604c744dc4aSBeata Michalska asym_cpu_capacity_scan(); 2605f2cb1360SIngo Molnar ndoms_cur = 1; 2606f2cb1360SIngo Molnar doms_cur = alloc_sched_domains(ndoms_cur); 2607f2cb1360SIngo Molnar if (!doms_cur) 2608f2cb1360SIngo Molnar doms_cur = &fallback_doms; 260904d4e665SFrederic Weisbecker cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); 2610f2cb1360SIngo Molnar err = build_sched_domains(doms_cur[0], NULL); 2611f2cb1360SIngo Molnar 2612f2cb1360SIngo Molnar return err; 2613f2cb1360SIngo Molnar } 2614f2cb1360SIngo Molnar 2615f2cb1360SIngo Molnar /* 2616f2cb1360SIngo Molnar * Detach sched domains from a group of CPUs specified in cpu_map 2617f2cb1360SIngo Molnar * These CPUs will now be attached to the NULL domain 2618f2cb1360SIngo Molnar */ 2619f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map) 2620f2cb1360SIngo Molnar { 2621e284df70SValentin Schneider unsigned int cpu = cpumask_any(cpu_map); 2622f2cb1360SIngo Molnar int i; 2623f2cb1360SIngo Molnar 2624e284df70SValentin Schneider if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) 2625e284df70SValentin Schneider static_branch_dec_cpuslocked(&sched_asym_cpucapacity); 2626e284df70SValentin Schneider 26278881e163SBarry Song if (static_branch_unlikely(&sched_cluster_active)) 26288881e163SBarry Song static_branch_dec_cpuslocked(&sched_cluster_active); 26298881e163SBarry Song 2630f2cb1360SIngo Molnar rcu_read_lock(); 2631f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) 2632f2cb1360SIngo Molnar cpu_attach_domain(NULL, &def_root_domain, i); 2633f2cb1360SIngo Molnar rcu_read_unlock(); 2634f2cb1360SIngo Molnar } 2635f2cb1360SIngo Molnar 2636f2cb1360SIngo Molnar /* handle null as "default" */ 2637f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 2638f2cb1360SIngo Molnar struct sched_domain_attr *new, int idx_new) 2639f2cb1360SIngo Molnar { 2640f2cb1360SIngo Molnar struct sched_domain_attr tmp; 2641f2cb1360SIngo Molnar 2642f2cb1360SIngo Molnar /* Fast path: */ 2643f2cb1360SIngo Molnar if (!new && !cur) 2644f2cb1360SIngo Molnar return 1; 2645f2cb1360SIngo Molnar 2646f2cb1360SIngo Molnar tmp = SD_ATTR_INIT; 264797fb7a0aSIngo Molnar 2648f2cb1360SIngo Molnar return !memcmp(cur ? (cur + idx_cur) : &tmp, 2649f2cb1360SIngo Molnar new ? (new + idx_new) : &tmp, 2650f2cb1360SIngo Molnar sizeof(struct sched_domain_attr)); 2651f2cb1360SIngo Molnar } 2652f2cb1360SIngo Molnar 2653f2cb1360SIngo Molnar /* 2654f2cb1360SIngo Molnar * Partition sched domains as specified by the 'ndoms_new' 2655f2cb1360SIngo Molnar * cpumasks in the array doms_new[] of cpumasks. This compares 2656f2cb1360SIngo Molnar * doms_new[] to the current sched domain partitioning, doms_cur[]. 2657f2cb1360SIngo Molnar * It destroys each deleted domain and builds each new domain. 2658f2cb1360SIngo Molnar * 2659f2cb1360SIngo Molnar * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 2660f2cb1360SIngo Molnar * The masks don't intersect (don't overlap.) 
We should set up one 2661f2cb1360SIngo Molnar * sched domain for each mask. CPUs not in any of the cpumasks will 2662f2cb1360SIngo Molnar * not be load balanced. If the same cpumask appears both in the 2663f2cb1360SIngo Molnar * current 'doms_cur' domains and in the new 'doms_new', we can leave 2664f2cb1360SIngo Molnar * it as it is. 2665f2cb1360SIngo Molnar * 2666f2cb1360SIngo Molnar * The passed in 'doms_new' should be allocated using 2667f2cb1360SIngo Molnar * alloc_sched_domains. This routine takes ownership of it and will 2668f2cb1360SIngo Molnar * free_sched_domains it when done with it. If the caller failed the 2669f2cb1360SIngo Molnar * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 2670f2cb1360SIngo Molnar * and partition_sched_domains() will fall back to the single partition 2671f2cb1360SIngo Molnar * 'fallback_doms'; this also forces the domains to be rebuilt. 2672f2cb1360SIngo Molnar * 2673f2cb1360SIngo Molnar * If doms_new == NULL it will be replaced with a single domain spanning cpu_active_mask, masked by the HK_TYPE_DOMAIN housekeeping CPUs. 2674f2cb1360SIngo Molnar * ndoms_new == 0 is a special case for destroying existing domains, 2675f2cb1360SIngo Molnar * and it will not create the default domain. 2676f2cb1360SIngo Molnar * 2677c22645f4SMathieu Poirier * Call with hotplug lock and sched_domains_mutex held. 2678f2cb1360SIngo Molnar */ 2679c22645f4SMathieu Poirier void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], 2680f2cb1360SIngo Molnar struct sched_domain_attr *dattr_new) 2681f2cb1360SIngo Molnar { 26821f74de87SQuentin Perret bool __maybe_unused has_eas = false; 2683f2cb1360SIngo Molnar int i, j, n; 2684f2cb1360SIngo Molnar int new_topology; 2685f2cb1360SIngo Molnar 2686c22645f4SMathieu Poirier lockdep_assert_held(&sched_domains_mutex); 2687f2cb1360SIngo Molnar 2688f2cb1360SIngo Molnar /* Let the architecture update CPU core mappings: */ 2689f2cb1360SIngo Molnar new_topology = arch_update_cpu_topology(); 2690c744dc4aSBeata Michalska /* Trigger rebuilding CPU capacity asymmetry data */ 2691c744dc4aSBeata Michalska if (new_topology) 2692c744dc4aSBeata Michalska asym_cpu_capacity_scan(); 2693f2cb1360SIngo Molnar 269409e0dd8eSPeter Zijlstra if (!doms_new) { 269509e0dd8eSPeter Zijlstra WARN_ON_ONCE(dattr_new); 269609e0dd8eSPeter Zijlstra n = 0; 269709e0dd8eSPeter Zijlstra doms_new = alloc_sched_domains(1); 269809e0dd8eSPeter Zijlstra if (doms_new) { 269909e0dd8eSPeter Zijlstra n = 1; 2700edb93821SFrederic Weisbecker cpumask_and(doms_new[0], cpu_active_mask, 270104d4e665SFrederic Weisbecker housekeeping_cpumask(HK_TYPE_DOMAIN)); 270209e0dd8eSPeter Zijlstra } 270309e0dd8eSPeter Zijlstra } else { 270409e0dd8eSPeter Zijlstra n = ndoms_new; 270509e0dd8eSPeter Zijlstra } 2706f2cb1360SIngo Molnar 2707f2cb1360SIngo Molnar /* Destroy deleted domains: */ 2708f2cb1360SIngo Molnar for (i = 0; i < ndoms_cur; i++) { 2709f2cb1360SIngo Molnar for (j = 0; j < n && !new_topology; j++) { 27106aa140faSQuentin Perret if (cpumask_equal(doms_cur[i], doms_new[j]) && 2711f9a25f77SMathieu Poirier dattrs_equal(dattr_cur, i, dattr_new, j)) { 2712f9a25f77SMathieu Poirier struct root_domain *rd; 2713f9a25f77SMathieu Poirier 2714f9a25f77SMathieu Poirier /* 2715f9a25f77SMathieu Poirier * This domain won't be destroyed and as such 2716f9a25f77SMathieu Poirier * its dl_bw->total_bw needs to be cleared. It 2717f9a25f77SMathieu Poirier * will be recomputed in function 2718f9a25f77SMathieu Poirier * update_tasks_root_domain().
2719f9a25f77SMathieu Poirier */ 2720f9a25f77SMathieu Poirier rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; 2721f9a25f77SMathieu Poirier dl_clear_root_domain(rd); 2722f2cb1360SIngo Molnar goto match1; 2723f2cb1360SIngo Molnar } 2724f9a25f77SMathieu Poirier } 2725f2cb1360SIngo Molnar /* No match - a current sched domain not in new doms_new[] */ 2726f2cb1360SIngo Molnar detach_destroy_domains(doms_cur[i]); 2727f2cb1360SIngo Molnar match1: 2728f2cb1360SIngo Molnar ; 2729f2cb1360SIngo Molnar } 2730f2cb1360SIngo Molnar 2731f2cb1360SIngo Molnar n = ndoms_cur; 273209e0dd8eSPeter Zijlstra if (!doms_new) { 2733f2cb1360SIngo Molnar n = 0; 2734f2cb1360SIngo Molnar doms_new = &fallback_doms; 2735edb93821SFrederic Weisbecker cpumask_and(doms_new[0], cpu_active_mask, 273604d4e665SFrederic Weisbecker housekeeping_cpumask(HK_TYPE_DOMAIN)); 2737f2cb1360SIngo Molnar } 2738f2cb1360SIngo Molnar 2739f2cb1360SIngo Molnar /* Build new domains: */ 2740f2cb1360SIngo Molnar for (i = 0; i < ndoms_new; i++) { 2741f2cb1360SIngo Molnar for (j = 0; j < n && !new_topology; j++) { 27426aa140faSQuentin Perret if (cpumask_equal(doms_new[i], doms_cur[j]) && 27436aa140faSQuentin Perret dattrs_equal(dattr_new, i, dattr_cur, j)) 2744f2cb1360SIngo Molnar goto match2; 2745f2cb1360SIngo Molnar } 2746f2cb1360SIngo Molnar /* No match - add a new doms_new */ 2747f2cb1360SIngo Molnar build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 2748f2cb1360SIngo Molnar match2: 2749f2cb1360SIngo Molnar ; 2750f2cb1360SIngo Molnar } 2751f2cb1360SIngo Molnar 2752531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2753402de7fcSIngo Molnar /* Build perf domains: */ 27546aa140faSQuentin Perret for (i = 0; i < ndoms_new; i++) { 2755531b5c9fSQuentin Perret for (j = 0; j < n && !sched_energy_update; j++) { 27566aa140faSQuentin Perret if (cpumask_equal(doms_new[i], doms_cur[j]) && 27571f74de87SQuentin Perret cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { 27581f74de87SQuentin Perret has_eas = true; 27596aa140faSQuentin Perret goto match3; 27606aa140faSQuentin Perret } 27611f74de87SQuentin Perret } 2762402de7fcSIngo Molnar /* No match - add perf domains for a new rd */ 27631f74de87SQuentin Perret has_eas |= build_perf_domains(doms_new[i]); 27646aa140faSQuentin Perret match3: 27656aa140faSQuentin Perret ; 27666aa140faSQuentin Perret } 27671f74de87SQuentin Perret sched_energy_set(has_eas); 27686aa140faSQuentin Perret #endif 27696aa140faSQuentin Perret 2770f2cb1360SIngo Molnar /* Remember the new sched domains: */ 2771f2cb1360SIngo Molnar if (doms_cur != &fallback_doms) 2772f2cb1360SIngo Molnar free_sched_domains(doms_cur, ndoms_cur); 2773f2cb1360SIngo Molnar 2774f2cb1360SIngo Molnar kfree(dattr_cur); 2775f2cb1360SIngo Molnar doms_cur = doms_new; 2776f2cb1360SIngo Molnar dattr_cur = dattr_new; 2777f2cb1360SIngo Molnar ndoms_cur = ndoms_new; 2778f2cb1360SIngo Molnar 27793b87f136SPeter Zijlstra update_sched_domain_debugfs(); 2780c22645f4SMathieu Poirier } 2781f2cb1360SIngo Molnar 2782c22645f4SMathieu Poirier /* 2783c22645f4SMathieu Poirier * Call with hotplug lock held 2784c22645f4SMathieu Poirier */ 2785c22645f4SMathieu Poirier void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 2786c22645f4SMathieu Poirier struct sched_domain_attr *dattr_new) 2787c22645f4SMathieu Poirier { 2788c22645f4SMathieu Poirier mutex_lock(&sched_domains_mutex); 2789c22645f4SMathieu Poirier partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 2790f2cb1360SIngo Molnar mutex_unlock(&sched_domains_mutex); 
2791f2cb1360SIngo Molnar } 2792
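/*
 * Illustrative caller sketch (hypothetical: the helper name and the fixed
 * two-partition layout are made up, loosely in the spirit of what cpuset
 * does when it regenerates its partitions). The caller is assumed to hold
 * the hotplug lock, and @a and @b are assumed not to intersect:
 *
 *	static void example_repartition(const struct cpumask *a,
 *					const struct cpumask *b)
 *	{
 *		cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *		if (!doms) {
 *			// Allocation failed: ask for the single
 *			// fallback_doms partition and force a rebuild.
 *			partition_sched_domains(1, NULL, NULL);
 *			return;
 *		}
 *
 *		cpumask_copy(doms[0], a);
 *		cpumask_copy(doms[1], b);
 *
 *		// partition_sched_domains() takes ownership of @doms and
 *		// will free_sched_domains() it once it is replaced.
 *		partition_sched_domains(2, doms, NULL);
 *	}
 */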