Lines matching "tl" in kernel/sched/topology.c (identifier cross-reference; each hit is shown with its source line number, grouped by containing function)

In sd_init() (tl: function argument):
    1632  sd_init(struct sched_domain_topology_level *tl,
    1636  struct sd_data *sdd = &tl->data;
    1641  sd_weight = cpumask_weight(tl->mask(tl, cpu));
    1643  if (tl->sd_flags)
    1644  sd_flags = (*tl->sd_flags)();
    1681  .name = tl->name,
    1685  cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
    1714  if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
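
Read together, the sd_init() matches show how one topology level parameterizes a new sched_domain: tl->mask() both sizes the domain and clips its span to cpu_map, the optional tl->sd_flags() callback supplies behavior flags, and tl->name labels the result. A minimal sketch of that flow, reconstructed around the matched lines (abridged, not the verbatim kernel function; most field initialization is elided):

    static struct sched_domain *
    sd_init(struct sched_domain_topology_level *tl,
            const struct cpumask *cpu_map,
            struct sched_domain *child, int cpu)
    {
            struct sd_data *sdd = &tl->data;
            struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
            struct cpumask *sd_span;
            int sd_weight, sd_flags = 0;

            /* Weight = number of CPUs this level spans at @cpu. */
            sd_weight = cpumask_weight(tl->mask(tl, cpu));

            /* Optional per-level flags callback (e.g. cpu_numa_flags). */
            if (tl->sd_flags)
                    sd_flags = (*tl->sd_flags)();

            /* ... initialize *sd, including .name = tl->name ... */

            /* The domain span is this level's mask clipped to the active map. */
            sd_span = sched_domain_span(sd);
            cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));

            /* ... NUMA handling, incl. the node_reclaim_distance cutoff ... */
            return sd;
    }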

Default mask helper definitions (tl: function argument in each):
    1746  const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
    1758  const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)
    1770  const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu)
    1776  const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
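
These four definitions are the default per-level mask callbacks (SMT, cluster, multi-core, package). Each takes the tl pointer only to satisfy the callback signature and defers to a generic cpumask accessor; a sketch of the pattern, assuming the usual accessor for the SMT case:

    static const struct cpumask *
    tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
    {
            /* SMT siblings of @cpu; tl itself is not consulted. */
            return cpu_smt_mask(cpu);
    }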

for_each_sd_topology() macro definition (tl: macro argument):
    1804  #define for_each_sd_topology(tl) \
    1805          for (tl = sched_domain_topology; tl->mask; tl++)
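
The iterator walks the global sched_domain_topology table until it reaches the sentinel entry whose ->mask is NULL, which is why every topology table must be NULL-terminated. Typical usage, as in __sdt_alloc() and __sdt_free() below:

    struct sched_domain_topology_level *tl;

    for_each_sd_topology(tl) {
            struct sd_data *sdd = &tl->data;
            /* ... per-level setup or teardown ... */
    }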

In set_sched_topology() (tl: function argument):
    1807  void __init set_sched_topology(struct sched_domain_topology_level *tl)
    1812  sched_domain_topology = tl;
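
Architecture code calls set_sched_topology() during early boot to replace the default table with its own. A hedged example of such an override (the table name is illustrative; SDTL_INIT and the mask/flags helpers are the ones matched above, and the empty terminator supplies the NULL ->mask sentinel):

    static struct sched_domain_topology_level my_topology[] = {
            SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
            SDTL_INIT(tl_pkg_mask, NULL, PKG),
            { NULL, },      /* sentinel: ends for_each_sd_topology() */
    };

    set_sched_topology(my_topology);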

In sd_numa_mask() (tl: function argument):
    1822  static const struct cpumask *sd_numa_mask(struct sched_domain_topology_level *tl, int cpu)
    1824  return sched_domains_numa_masks[tl->numa_level][cpu_to_node(cpu)];
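
sd_numa_mask() is the mask callback that sched_init_numa() installs for the NODE and NUMA entries below: it maps a CPU to the precomputed mask of CPUs reachable from its node within the distance class recorded in tl->numa_level.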

In sched_init_numa() (tl: local variable):
    2012  struct sched_domain_topology_level *tl;
    2099  tl = kzalloc((i + nr_levels + 1) *
    2101  if (!tl)
    2108  tl[i] = sched_domain_topology[i];
    2113  tl[i++] = SDTL_INIT(sd_numa_mask, NULL, NODE);
    2119  tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
    2120  tl[i].numa_level = j;
    2124  sched_domain_topology = tl;
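
The sched_init_numa() matches trace the rebuild of the topology table: allocate room for the i default levels plus one NODE entry, the remaining NUMA entries, and a sentinel; copy the defaults; then append one NUMA level per distance class with an increasing numa_level. An abridged sketch of that assembly (error paths and the surrounding distance computation elided):

    tl = kzalloc((i + nr_levels + 1) * sizeof(*tl), GFP_KERNEL);
    if (!tl)
            return;

    /* Copy the default (non-NUMA) topology levels. */
    for (i = 0; sched_domain_topology[i].mask; i++)
            tl[i] = sched_domain_topology[i];

    /* The identity distance: a single NODE level. */
    tl[i++] = SDTL_INIT(sd_numa_mask, NULL, NODE);

    /* One NUMA level per remaining distance class. */
    for (j = 1; j < nr_levels; i++, j++) {
            tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
            tl[i].numa_level = j;
    }

    /* kzalloc left the final entry zeroed: the NULL ->mask sentinel. */
    sched_domain_topology = tl;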

In __sdt_alloc() (tl: local variable):
    2360  struct sched_domain_topology_level *tl;
    2363  for_each_sd_topology(tl) {
    2364  struct sd_data *sdd = &tl->data;

In __sdt_free() (tl: local variable):
    2427  struct sched_domain_topology_level *tl;
    2430  for_each_sd_topology(tl) {
    2431  struct sd_data *sdd = &tl->data;
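
Both allocation and teardown walk every level with for_each_sd_topology() and operate on the sd_data embedded in each level, which holds that level's per-CPU backing objects. A sketch of the allocation side (abridged; only the per-CPU sched_domain pointers are shown, and the real function also allocates shared state, groups, and capacity):

    static int __sdt_alloc(const struct cpumask *cpu_map)
    {
            struct sched_domain_topology_level *tl;

            for_each_sd_topology(tl) {
                    struct sd_data *sdd = &tl->data;

                    /* One sched_domain pointer per possible CPU. */
                    sdd->sd = alloc_percpu(struct sched_domain *);
                    if (!sdd->sd)
                            return -ENOMEM;

                    /* ... sds, sg, sgc allocated along the same lines ... */
            }
            return 0;
    }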

In build_sched_domain() (tl: function argument):
    2461  static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
    2465  struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
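
build_sched_domain() is a thin wrapper around sd_init() that also links the new domain to the one built for the previous, lower level, growing the hierarchy bottom-up. A hedged sketch of that linkage (the real function additionally applies attr and sanity-checks the spans):

    static struct sched_domain *
    build_sched_domain(struct sched_domain_topology_level *tl,
                       const struct cpumask *cpu_map,
                       struct sched_domain_attr *attr,
                       struct sched_domain *child, int cpu)
    {
            struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

            if (child) {
                    /* The new domain sits one level above its child. */
                    sd->level = child->level + 1;
                    child->parent = sd;
            }
            return sd;
    }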

In topology_span_sane() (tl: local variable):
    2495  struct sched_domain_topology_level *tl;
    2503  for_each_sd_topology(tl) {
    2506  if (tl->sd_flags)
    2507  tl_common_flags = (*tl->sd_flags)();
    2523  const struct cpumask *tl_cpu_mask = tl->mask(tl, cpu);
    2531  if (!cpumask_equal(tl->mask(tl, id), tl_cpu_mask))
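
topology_span_sane() enforces one invariant per non-NUMA level (NUMA levels, detected via the SD_NUMA flag returned by tl->sd_flags(), are allowed to overlap): the masks of any two CPUs at a level must be either identical or disjoint, since partially overlapping spans would corrupt the sched_group lists. An abridged sketch of the check around the matched lines (covered and id_seen are scratch cpumasks; the kernel reuses preallocated temporaries for them):

    for_each_sd_topology(tl) {
            int tl_common_flags = 0;

            if (tl->sd_flags)
                    tl_common_flags = (*tl->sd_flags)();

            /* NUMA levels are allowed to overlap; skip them. */
            if (tl_common_flags & SD_NUMA)
                    continue;

            cpumask_clear(covered);
            cpumask_clear(id_seen);

            for_each_cpu(cpu, cpu_map) {
                    const struct cpumask *tl_cpu_mask = tl->mask(tl, cpu);
                    /* The lowest CPU in a mask serves as its unique id. */
                    int id = cpumask_first(tl_cpu_mask);

                    if (cpumask_test_cpu(id, id_seen)) {
                            /* Seen this id before: spans must be identical. */
                            if (!cpumask_equal(tl->mask(tl, id), tl_cpu_mask))
                                    return false;
                    } else {
                            /* New id: its span must not touch earlier spans. */
                            if (cpumask_intersects(tl_cpu_mask, covered))
                                    return false;
                            cpumask_or(covered, covered, tl_cpu_mask);
                            cpumask_set_cpu(id, id_seen);
                    }
            }
    }
    return true;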

In build_sched_domains() (tl: local variable):
    2570  struct sched_domain_topology_level *tl;
    2573  for_each_sd_topology(tl) {
    2575  sd = build_sched_domain(tl, cpu_map, attr, sd, i);
    2579  if (tl == sched_domain_topology)
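
Finally, the build_sched_domains() matches show the per-CPU construction loop: for every CPU in cpu_map the levels are walked bottom-up, each new domain taking the previous one as its child, and the domain built from the first table entry (tl == sched_domain_topology) is recorded as that CPU's base domain. Roughly (abridged; d here stands for the function's local allocation state):

    for_each_cpu(i, cpu_map) {
            struct sched_domain_topology_level *tl;
            struct sched_domain *sd = NULL;

            for_each_sd_topology(tl) {
                    sd = build_sched_domain(tl, cpu_map, attr, sd, i);

                    /* The first level becomes this CPU's bottom domain. */
                    if (tl == sched_domain_topology)
                            *per_cpu_ptr(d.sd, i) = sd;

                    /* Stop once the span already covers the whole map. */
                    if (cpumask_equal(cpu_map, sched_domain_span(sd)))
                            break;
            }
    }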