Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance
/linux/kernel/sched/
fair.c

     9902  struct sched_group *busiest; /* Busiest group in this sd */    [member]
     9923  .busiest = NULL,                                      in init_sd_lb_stats()
    10254  struct sg_lb_stats *busiest,                          in sibling_imbalance() [argument]
    10260  if (!env->idle || !busiest->sum_nr_running)           in sibling_imbalance()
    10263  ncores_busiest = sds->busiest->cores;                 in sibling_imbalance()
    10267  imbalance = busiest->sum_nr_running;                  in sibling_imbalance()
    10273  imbalance = ncores_local * busiest->sum_nr_running;   in sibling_imbalance()
    10281  busiest->sum_nr_running > 1)                          in sibling_imbalance()
    10408  struct sg_lb_stats *busiest = &sds->busiest_stat;     in update_sd_pick_busiest() [local]
    10426  if (sgs->group_type > busiest->group_type)            in update_sd_pick_busiest()
    [all …]
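The sibling_imbalance() hits above scale the imbalance by the core counts of the local and busiest groups so that nr_running per core evens out between them. A minimal standalone sketch of that idea follows; the function name, plain integer parameters, and the rounding are simplifications for illustration, not the kernel's actual data structures or exact arithmetic.

    /*
     * Sketch: how many tasks to pull so that nr_running per core roughly
     * matches between the busiest and local groups.  Simplified stand-in
     * for sibling_imbalance(); not the kernel's exact computation.
     */
    static long sibling_imbalance_sketch(long nr_busiest, long cores_busiest,
                                         long nr_local,   long cores_local)
    {
        long imbalance;

        if (nr_busiest == 0)
            return 0;                       /* nothing to pull */

        if (cores_busiest == cores_local)   /* same size: compare raw counts */
            return nr_busiest > nr_local ? nr_busiest - nr_local : 0;

        /*
         * Moving x tasks equalizes the ratios when
         * (nr_busiest - x)/cores_busiest == (nr_local + x)/cores_local,
         * i.e. x = (cores_local*nr_busiest - cores_busiest*nr_local)
         *          / (cores_local + cores_busiest).
         */
        imbalance = cores_local * nr_busiest - cores_busiest * nr_local;
        if (imbalance <= 0)
            return 0;

        /* Integer divide, rounded to nearest. */
        return (2 * imbalance + cores_local + cores_busiest) /
               (2 * (cores_local + cores_busiest));
    }

For example, with 6 tasks on a 4-core busiest group and 1 task on a 2-core local group, the sketch suggests pulling one task, after which the per-core counts are roughly even.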
sched.h

    2979  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)   in _double_lock_balance() [argument]
    2981  __acquires(busiest->lock)                                 in _double_lock_balance()
    2985  double_rq_lock(this_rq, busiest);                         in _double_lock_balance()
    2998  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)   in _double_lock_balance() [argument]
    3000  __acquires(busiest->lock)                                 in _double_lock_balance()
    3003  if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||         in _double_lock_balance()
    3004  likely(raw_spin_rq_trylock(busiest))) {                   in _double_lock_balance()
    3005  double_rq_clock_clear_update(this_rq, busiest);           in _double_lock_balance()
    3009  if (rq_order_less(this_rq, busiest)) {                    in _double_lock_balance()
    3010  raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);   in _double_lock_balance()
    [all …]
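Both sched.h hits implement the same job: take busiest->lock while already holding this_rq->lock without risking an AB/BA deadlock, by trying an opportunistic trylock first and otherwise re-acquiring in a fixed global order. A hedged, self-contained sketch of that pattern using POSIX mutexes; the rq_sketch type and its cpu ordering key are illustrative stand-ins, not kernel structures.

    #include <pthread.h>

    struct rq_sketch {
        pthread_mutex_t lock;
        int cpu;        /* global ordering key, like the kernel's rq_order_less() uses */
    };

    /*
     * Caller already holds this_rq->lock.  Returns 1 if that lock had to be
     * dropped and re-taken (so cached state may be stale), 0 otherwise.
     */
    static int double_lock_balance_sketch(struct rq_sketch *this_rq,
                                          struct rq_sketch *busiest)
    {
        if (pthread_mutex_trylock(&busiest->lock) == 0)
            return 0;               /* fast path: second lock was free */

        if (this_rq->cpu < busiest->cpu) {
            /* We hold the lower-ordered lock, so blocking here cannot deadlock. */
            pthread_mutex_lock(&busiest->lock);
            return 0;
        }

        /* Wrong order: back off, then take both locks lowest-first. */
        pthread_mutex_unlock(&this_rq->lock);
        pthread_mutex_lock(&busiest->lock);
        pthread_mutex_lock(&this_rq->lock);
        return 1;
    }

The return value matters because a caller that had to drop this_rq->lock must assume any state it read under that lock may have changed.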
/linux/Documentation/scheduler/
sched-domains.rst

    48  Initially, sched_balance_rq() finds the busiest group in the current sched domain.
    49  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
    51  CPU's runqueue and the newly found busiest one and starts moving tasks from it
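The documentation lines describe the overall flow: pick the busiest group in the domain, then the busiest runqueue within that group, lock both runqueues, and pull tasks. A rough sketch of the selection part, with simplified types and nr_running standing in for the kernel's load statistics:

    /*
     * Sketch of the selection step described above.  All types and the
     * "load" metric are simplified placeholders, not the kernel's real
     * sched_group/sg_lb_stats machinery.
     */
    struct rq_lite    { int nr_running; };
    struct group_lite { struct rq_lite *rqs; int nr_rqs; };
    struct sd_lite    { struct group_lite *groups; int nr_groups; };

    static struct rq_lite *find_busiest_rq_lite(struct sd_lite *sd)
    {
        struct group_lite *busiest_grp = NULL;
        struct rq_lite *busiest = NULL;
        int max_grp_load = 0;

        /* Step 1: busiest group = largest total nr_running in the domain. */
        for (int g = 0; g < sd->nr_groups; g++) {
            int load = 0;
            for (int r = 0; r < sd->groups[g].nr_rqs; r++)
                load += sd->groups[g].rqs[r].nr_running;
            if (load > max_grp_load) {
                max_grp_load = load;
                busiest_grp = &sd->groups[g];
            }
        }
        if (!busiest_grp)
            return NULL;            /* domain is idle, nothing to balance */

        /* Step 2: busiest runqueue inside that group. */
        for (int r = 0; r < busiest_grp->nr_rqs; r++)
            if (!busiest || busiest_grp->rqs[r].nr_running > busiest->nr_running)
                busiest = &busiest_grp->rqs[r];

        return busiest;
    }

In the kernel, the locking step that follows this selection is what the _double_lock_balance() variants shown above in sched.h provide.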