// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */
#include <linux/sched.h>
#include <linux/mutex.h>

#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
cpumask_var_t sched_domains_tmpmask;
cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = true;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
				group->sgc->id,
				cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
				cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

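/*
 * A domain that cannot usefully balance anything is degenerate and is
 * collapsed by cpu_attach_domain() below. The obvious case is a span of
 * a single CPU: on a part without SMT, for instance, the SMT level spans
 * exactly one CPU and contributes nothing to scheduling.
 */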
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_ASYM_CPUCAPACITY |
				SD_SHARE_CPUCAPACITY |
				SD_SHARE_PKG_RESOURCES |
				SD_PREFER_SIBLING |
				SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet, then set
		 * old_rd to NULL to skip the freeing later in this
		 * function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, has only one. Iterate,
	 * dropping group/capacity references and freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain); this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
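
/*
 * For reference, cpus_share_cache() in core.c reduces to comparing the
 * per-CPU IDs cached here:
 *
 *	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 */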

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

/* Set up the mask of CPUs configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	int ret;

	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	ret = cpulist_parse(str, cpu_isolated_map);
	if (ret) {
		pr_err("sched: Error, all isolcpus= values must be between 0 and %u\n", nr_cpu_ids);
		return 0;
	}
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);
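
/*
 * Example: booting with "isolcpus=2,4-7" takes CPUs 2 and 4-7 out of the
 * general balancing domains; tasks only run there via explicit affinity
 * (e.g. sched_setaffinity() or cpusets).
 */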

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group; this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask contains all those CPUs that could actually end up at
 * this group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 *
 * As can be seen, things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the number of
 * sched_domains is not the same for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3		0-3		1-3
 *
 * NUMA-0	0		1		2		3
 *
 */

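/*
 * A minimal sketch of how one such per-hop node mask is derived from the
 * distance table, mirroring the loop in sched_init_numa() below. The helper
 * is hypothetical and not used by the scheduler; node @k is included for
 * @node iff it is no further than @distance away.
 */
static __maybe_unused void
numa_span_for_distance(int node, int distance, struct cpumask *mask)
{
	int k;

	cpumask_clear(mask);
	for_each_node(k) {
		if (node_distance(node, k) <= distance)
			cpumask_or(mask, mask, cpumask_of_node(k));
	}
}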

/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, so the group information isn't
 * complete yet. However, since each group represents a (child) domain, we
 * can fully construct the mask from the sched_domain bits (which are
 * already complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}

/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics, having the groups node-local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first_and(sched_group_span(sg), mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no iteration ever gets here, we won't die on a
	 * divide-by-zero trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-cpu and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * These topologies are exclusive by construction: it's impossible for an
 * SMT thread to belong to multiple cores, and for cores to be part of
 * multiple caches. There is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and all 1s; all CPUs in a group will end up at _that_
 * group), and we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */

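/*
 * A minimal sketch (hypothetical helper, not part of this file) of that
 * motion: walk @cpu's domains bottom-up and, at each level, walk the
 * circular group list sideways. for_each_domain() dereferences
 * RCU-protected pointers, so callers would hold rcu_read_lock().
 */
static __maybe_unused void sched_domain_walk_example(int cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	for_each_domain(cpu, sd) {		/* up: SMT -> MC -> DIE ... */
		sg = sd->groups;
		do {				/* sideways, in child-domain chunks */
			/* sched_group_span(sg) covers one child domain */
			sg = sg->next;
		} while (sg != sd->groups);
	}
}
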
static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;
	struct sched_group *sg;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	sg = *per_cpu_ptr(sdd->sg, cpu);
	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

	/* For claim_allocations: */
	atomic_inc(&sg->ref);
	atomic_inc(&sg->sgc->ref);

	if (child) {
		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
	} else {
		cpumask_set_cpu(cpu, sched_group_span(sg));
		cpumask_set_cpu(cpu, group_balance_mask(sg));
	}

	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;

	return sg;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, set each group's ->cpumask correctly, and
 * set ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct sched_group *sg;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = get_group(i, sdd);

		cpumask_or(covered, covered, sched_group_span(sg));

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;
	sd->groups = first;

	return 0;
}

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically the cpu_capacity of all the groups in a sched domain will be the
 * same unless there are asymmetries in the topology. If there are
 * asymmetries, a group having more cpu_capacity will pick up more load than
 * a group having less; on a big.LITTLE part, for example, a group of big
 * cores has more capacity than an equally sized group of little cores.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_span(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_span(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Initializers for sched domains.
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains().
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* Turn on idle balance on this domain: */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		/* Fall through */
	case sa_sd:
		free_percpu(d->sd);
		/* Fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map);
		/* Fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}

#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
enum numa_topology_type sched_numa_topology_type;
static int *sched_domains_numa_distance;
int sched_max_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *   SD_SHARE_POWERDOMAIN   - describes shared power domain
 *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
 *
 * The odd one out, which besides describing the topology also
 * prescribes the desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY |		\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA |			\
	 SD_ASYM_PACKING |		\
	 SD_ASYM_CPUCAPACITY |		\
	 SD_SHARE_POWERDOMAIN)

static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,
		.busy_idx		= 0,
		.idle_idx		= 0,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.smt_gain		= 0,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */

	if (sd->flags & SD_ASYM_CPUCAPACITY) {
		struct sched_domain *t = sd;

		for_each_lower_domain(t)
			t->flags |= SD_BALANCE_WAKE;
	}

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->flags |= SD_PREFER_SIBLING;
		sd->imbalance_pct = 110;
		sd->smt_gain = 1178; /* ~15% */

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;
		sd->busy_idx = 3;
		sd->idle_idx = 2;

		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->flags |= SD_PREFER_SIBLING;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;
		sd->idle_idx = 1;
	}

	/*
	 * For all levels sharing cache, connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}
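
/*
 * Example: architectures install their own tables via set_sched_topology()
 * before SMP bringup; see e.g. arch/x86/kernel/smpboot.c, which switches to
 * an alternative table on NUMA-in-package systems.
 */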

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i,j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i,j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 1) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other. */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}

void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
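	/*
	 * For the 4-node ring example above (distances 10, 20 and 30), this
	 * scan finds the unique non-identity distances 20 and 30, i.e. two
	 * NUMA levels on top of the per-node domains.
	 */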
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption, it would be nice to know
				 * about cases where node A is connected to B but B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	if (!level)
		return;

	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If we fail to allocate memory for the sched_domains_numa_masks[][]
	 * array, it will contain fewer than 'level' members. This could be
	 * dangerous when other functions use it to iterate over
	 * sched_domains_numa_masks[][].
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are at most that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
	sched_max_numa_distance = sched_domains_numa_distance[level - 1];

	init_numa_topology_type();
}

void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

#ifdef CONFIG_SCHED_DEBUG
			sgc->id = j;
#endif

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}

static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child cpus. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs.
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (rq && sched_debug_enabled) {
		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}

/* Current sched domains: */
static cpumask_var_t			*doms_cur;

/* Number of sched domains in 'doms_cur': */
static int				ndoms_cur;

/* Attributes of custom domains in 'doms_cur': */
static struct sched_domain_attr		*dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t			fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated CPUs, but could be used to
 * exclude other special cases in the future.
 */
int sched_init_domains(const struct cpumask *cpu_map)
{
	int err;

	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap). We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains(). This routine takes ownership of it and will
 * free_sched_domains() it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with the hotplug lock held.
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	if (!doms_new) {
		WARN_ON_ONCE(dattr_new);
		n = 0;
		doms_new = alloc_sched_domains(1);
		if (doms_new) {
			n = 1;
			cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		}
	} else {
		n = ndoms_new;
	}

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (!doms_new) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}
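
/*
 * Illustrative call sequence (hypothetical masks): a cpuset-style caller
 * repartitions the machine into two independent balance domains like so:
 *
 *	doms = alloc_sched_domains(2);
 *	cpumask_copy(doms[0], mask_a);
 *	cpumask_copy(doms[1], mask_b);
 *	partition_sched_domains(2, doms, NULL);
 *
 * partition_sched_domains() takes ownership of @doms and frees it when it
 * is done with it.
 */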