/linux/block/
kyber-iosched.c
  212  unsigned int sched_domain, unsigned int type)  in flush_latency_buckets()
  214  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in flush_latency_buckets()
  215  atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];  in flush_latency_buckets()
  227  unsigned int sched_domain, unsigned int type,  in calculate_percentile()
  230  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in calculate_percentile()
  243  if (!kqd->latency_timeout[sched_domain])  in calculate_percentile()
  244  kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);  in calculate_percentile()
  246  time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {  in calculate_percentile()
  249  kqd->latency_timeout[sched_domain] = 0;  in calculate_percentile()
  257  memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));  in calculate_percentile()
  [all …]
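The kyber hits trace a common kernel pattern: request completions bump per-CPU atomic latency buckets, and flush_latency_buckets() drains them into a global histogram that calculate_percentile() then reads and resets. A minimal userspace sketch of that flush step, using C11 atomics; NR_BUCKETS and the type names are illustrative, not kyber's:

```c
#include <stdatomic.h>

#define NR_BUCKETS 8	/* illustrative; kyber sizes this per latency domain/type */

/* Per-CPU histogram, bumped locklessly on the completion path. */
struct cpu_latency {
	atomic_uint buckets[NR_BUCKETS];
};

/*
 * Drain one CPU's buckets into the global histogram. atomic_exchange()
 * reads and zeroes each slot in one step, so counts bumped concurrently
 * by completions are neither lost nor double-counted; this mirrors the
 * shape of flush_latency_buckets() above.
 */
static void flush_buckets(unsigned int *global, struct cpu_latency *cpu)
{
	for (unsigned int b = 0; b < NR_BUCKETS; b++)
		global[b] += atomic_exchange(&cpu->buckets[b], 0);
}
```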
/linux/kernel/sched/
topology.c
  35   static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,  in sched_domain_debug_one()
  131  static void sched_domain_debug(struct sched_domain *sd, int cpu)  in sched_domain_debug()
  171  static int sd_degenerate(struct sched_domain *sd)  in sd_degenerate()
  189  sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)  in sd_parent_degenerate()
  635  static void destroy_sched_domain(struct sched_domain *sd)  in destroy_sched_domain()
  651  struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);  in destroy_sched_domains_rcu()
  654  struct sched_domain *parent = sd->parent;  in destroy_sched_domains_rcu()
  660  static void destroy_sched_domains(struct sched_domain *sd)  in destroy_sched_domains()
  675  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
  680  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
  [all …]
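The destroy_* hits above (lines 635, 651, 654, 660) show how a CPU's whole domain chain is torn down: the free is deferred with call_rcu(), and one callback walks sd->parent to release every level, so lockless readers traversing the chain never touch freed memory. An abridged reconstruction of that pattern, with group teardown elided:

```c
static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/* Free the entire parent chain from a single grace period. */
	while (sd) {
		struct sched_domain *parent = sd->parent;

		destroy_sched_domain(sd);	/* frees groups, then sd itself */
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}
```

Deferring the whole chain behind one callback means readers only ever need rcu_read_lock() to walk sd->parent, never a per-level reference count.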
fair.c
  2507  struct sched_domain *sd;  in task_numa_migrate()
  7425  wake_affine_weight(struct sched_domain *sd, struct task_struct *p,  in wake_affine_weight()
  7467  static int wake_affine(struct sched_domain *sd, struct task_struct *p,  in wake_affine()
  7488  sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
  7550  static inline int sched_balance_find_dst_cpu(struct sched_domain *sd, struct task_struct *p,  in sched_balance_find_dst_cpu()
  7567  struct sched_domain *tmp;  in sched_balance_find_dst_cpu()
  7701  static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)  in select_idle_smt()
  7737  static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)  in select_idle_smt()
  7749  static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int …  in select_idle_cpu()
  7819  select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)  in select_idle_capacity()
  [all …]
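The fair.c hits cluster around wakeup placement: wake_affine() chooses between the previous and waking CPU, and the select_idle_*() family scans a domain's span for an idle CPU. A simplified sketch of that scan loop, built from the real helpers sched_domain_span(), for_each_cpu_wrap(), and available_idle_cpu(); the kernel's select_idle_cpu() additionally handles SMT cores and bounds the scan cost, and the helper name here is hypothetical:

```c
/*
 * Walk the domain's span starting at 'target' (for cache locality),
 * skip CPUs the task is not allowed on, and take the first idle one.
 */
static int pick_idle_in_span(struct task_struct *p, struct sched_domain *sd,
			     int target)
{
	int cpu;

	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
			continue;
		if (available_idle_cpu(cpu))
			return cpu;
	}

	return -1;	/* caller falls back to 'target' */
}
```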
sched.h
  1196  struct sched_domain __rcu *sd;
  1983  static inline struct sched_domain *highest_flag_domain(int cpu, int flag)  in highest_flag_domain()
  1985  struct sched_domain *sd, *hsd = NULL;  in highest_flag_domain()
  2004  static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)  in lowest_flag_domain()
  2006  struct sched_domain *sd;  in lowest_flag_domain()
  2016  DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
  2021  DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
  2022  DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
  2023  DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
  2590  extern void update_group_capacity(struct sched_domain *sd, int cpu);
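Lines 1983 and 2004 declare the flag lookups used throughout the scheduler: walk a CPU's domain chain and return the highest or lowest level carrying a given SD_* flag. A simplified version of highest_flag_domain(), relying on the convention that such flags are set bottom-up, so the walk can stop at the first level that lacks the flag:

```c
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	/* for_each_domain() climbs from the base domain via sd->parent. */
	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;		/* flag is set bottom-up; nothing above */
		hsd = sd;
	}

	return hsd;
}
```

The per-CPU pointers at lines 2016-2023 cache the results of exactly these walks (LLC, NUMA, asymmetric packing, asymmetric capacity) so hot paths need not repeat them.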
stats.c
  118  struct sched_domain *sd;  in show_schedstat()

debug.c
  571  static void register_sd(struct sched_domain *sd, struct dentry *parent)  in register_sd()
  620  struct sched_domain *sd;  in update_sched_domain_debugfs()

ext.c
  3251  struct sched_domain *sd;  in llc_numa_mismatch()
  3279  struct sched_domain *sd;  in update_selcpu_topology()
  3385  struct sched_domain *sd;  in scx_select_cpu_dfl()

core.c
  1168  struct sched_domain *sd;  in get_nohz_timer_target()
  3664  struct sched_domain *sd;  in ttwu_stat()
  6377  static bool steal_cookie_task(int cpu, struct sched_domain *sd)  in steal_cookie_task()
  6397  struct sched_domain *sd;  in sched_core_balance()

rt.c
  1799  struct sched_domain *sd;  in find_lowest_rq()

deadline.c
  2528  struct sched_domain *sd;  in find_later_rq()
/linux/include/linux/sched/
topology.h
  87   struct sched_domain {
  89   struct sched_domain __rcu *parent; /* top domain must be null terminated */
  90   struct sched_domain __rcu *child; /* bottom domain must be null terminated */
  163  static inline struct cpumask *sched_domain_span(struct sched_domain *sd)  in sched_domain_span()
  189  struct sched_domain *__percpu *sd;
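A condensed view of the definition behind these hits, with most fields elided: each level links to its parent and child through RCU-managed pointers (NULL-terminated at both ends, as the comments at lines 89 and 90 say), and the CPUs a level covers sit in a flexible array that sched_domain_span() maps to a cpumask:

```c
struct sched_domain {
	/* Levels form a per-CPU chain; readers hold rcu_read_lock(). */
	struct sched_domain __rcu *parent;	/* NULL at the top level  */
	struct sched_domain __rcu *child;	/* NULL at the base level */
	int flags;				/* SD_* behaviour flags   */
	/* ... balancing intervals, stats, per-level tunables elided ... */
	unsigned long span[];			/* cpumask of covered CPUs */
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
```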
/linux/tools/testing/selftests/bpf/progs/
test_access_variable_array.c
  12   struct sched_domain *sd)  in BPF_PROG()
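This selftest exercises BTF access to the variable-length span[] array at the tail of struct sched_domain from a BPF program. A hedged sketch of what such a program can look like; the attach point and program name are assumptions for illustration, not copied from the selftest:

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>

unsigned long span0 = 0;

/* Attach point is an assumption; any hook passing a sched_domain works. */
SEC("fentry/load_balance")
int BPF_PROG(read_sd_span, int this_cpu, struct rq *this_rq,
	     struct sched_domain *sd)
{
	span0 = sd->span[0];	/* flexible-array member read via BTF */
	return 0;
}

char _license[] SEC("license") = "GPL";
```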
/linux/Documentation/scheduler/
sched-domains.rst
  5    Each CPU has a "base" scheduling domain (struct sched_domain). The domain
  45   the parent sched_domain (if it exists), and the parent of the parent and so
  70   of a sched_domain.
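A short sketch of walking the hierarchy the document describes: start from a CPU's base domain and climb sd->parent until NULL, under rcu_read_lock() since the chain is RCU-managed; the scheduler's for_each_domain() macro wraps exactly this loop. cpu_rq() is the scheduler-internal runqueue accessor, and the helper name here is hypothetical:

```c
static void dump_domains_of(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock();
	for (sd = rcu_dereference(cpu_rq(cpu)->sd); sd; sd = sd->parent)
		pr_info("CPU%d domain spans %*pbl\n",
			cpu, cpumask_pr_args(sched_domain_span(sd)));
	rcu_read_unlock();
}
```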
sched-capacity.rst
  288  - The SD_ASYM_CPUCAPACITY_FULL flag will be set at the lowest sched_domain
  290  - The SD_ASYM_CPUCAPACITY flag will be set for any sched_domain that spans
  319  sched_asym_cpucapacity static key will be enabled. However, the sched_domain
  328  the sched_domain hierarchy (if relevant, i.e. the codepath targets a specific
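A hedged sketch of how code typically consumes these flags, assuming only the sched_asym_cpucapacity static key and the sd_asym_cpucapacity per-CPU pointer declared in sched.h (see the hits above); the helper name is hypothetical:

```c
/*
 * Returns true if some sched_domain spanning @cpu covers CPUs of every
 * capacity in the system, per the SD_ASYM_CPUCAPACITY_FULL placement
 * described above.
 */
static bool cpu_sees_full_capacity_range(int cpu)
{
	struct sched_domain *sd;
	bool ret;

	/* Symmetric systems: one capacity, trivially the full range. */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
	ret = sd != NULL;	/* lowest level seeing the full range */
	rcu_read_unlock();

	return ret;
}
```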
sched-energy.rst
  335  flag to be set in the sched_domain hierarchy.

/linux/Documentation/translations/zh_CN/scheduler/
sched-domains.rst
  18   Each CPU has a "base" scheduling domain (struct sched_domain). The scheduling-domain hierarchy is built up from the base domains; it can …
sched-energy.rst
  275  See Documentation/scheduler/sched-capacity.rst to understand how, in the sched_domain hierarchy, …