/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
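
/*
 * Illustration only (assumed entry, not compiled): given a line such as
 *
 *	SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 *
 * in <linux/sched/sd_flags.h>, the two expansions above generate first an
 * index and then the matching bit:
 *
 *	enum { __SD_BALANCE_NEWIDLE, ..., __SD_FLAG_CNT };
 *	enum { SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE, ... };
 */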

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_CLUSTER | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
	int		nr_idle_scan;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;		/* the balancing groups of the domain */
	unsigned long min_interval;		/* Minimum balance interval ms */
	unsigned long max_interval;		/* Maximum balance interval ms */
	unsigned int busy_factor;		/* less balancing by factor if busy */
	unsigned int imbalance_pct;		/* No balance until over watermark */
	unsigned int cache_nice_tries;		/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;		/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* sched_balance_rq() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
	char *name;
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		flags;
	int		numa_level;
	struct sd_data	data;
	char		*name;
};

extern void __init set_sched_topology(struct sched_domain_topology_level *tl);

# define SD_INIT_NAME(type)	.name = #type
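
/*
 * Usage sketch (assumed symbols, not compiled here): an architecture can
 * install its own topology table with set_sched_topology(). The shape below
 * mirrors the default table in kernel/sched/topology.c; the mask helpers
 * (cpu_smt_mask(), cpu_coregroup_mask(), cpu_cpu_mask()) come from the arch
 * topology code, and the flag helpers are the ones defined above.
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */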

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_resources(int this_cpu, int that_cpu)
{
	return true;
}

#endif /* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *	        max_perf(cpu)
 *	----------------------------- * SCHED_CAPACITY_SCALE
 *	max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_hw_pressure
static __always_inline
unsigned long arch_scale_hw_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_hw_pressure
static __always_inline
void arch_update_hw_pressure(const struct cpumask *cpus,
			     unsigned long capped_frequency)
{ }
#endif

#ifndef arch_scale_freq_ref
static __always_inline
unsigned int arch_scale_freq_ref(int cpu)
{
	return 0;
}
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */