Lines matching full:env (uses of the env argument/local in the scheduler's NUMA- and load-balancing code)

2113 static void update_numa_stats(struct task_numa_env *env,  in update_numa_stats()  argument
2134 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
2147 ns->node_type = numa_classify(env->imbalance_pct, ns); in update_numa_stats()
2153 static void task_numa_assign(struct task_numa_env *env, in task_numa_assign() argument
2156 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2159 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
2161 int start = env->dst_cpu; in task_numa_assign()
2164 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) { in task_numa_assign()
2165 if (cpu == env->best_cpu || !idle_cpu(cpu) || in task_numa_assign()
2166 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
2170 env->dst_cpu = cpu; in task_numa_assign()
2171 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2185 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { in task_numa_assign()
2186 rq = cpu_rq(env->best_cpu); in task_numa_assign()
2190 if (env->best_task) in task_numa_assign()
2191 put_task_struct(env->best_task); in task_numa_assign()
2195 env->best_task = p; in task_numa_assign()
2196 env->best_imp = imp; in task_numa_assign()
2197 env->best_cpu = env->dst_cpu; in task_numa_assign()
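
A hedged reconstruction of struct task_numa_env, inferred only from the env-> accesses in the matches above and below; field types, ordering and any members not touched here are guesses, not the kernel's definition:

    struct task_struct;                 /* opaque here: the kernel's task */

    struct numa_stats {                 /* per-node summary used above */
        long load;
        long compute_capacity;
        unsigned int nr_running;
        int node_type;                  /* e.g. node_has_spare */
        int idle_cpu;
    };

    struct task_numa_env {
        struct task_struct *p;          /* task being placed (env->p) */
        int src_cpu, src_nid;           /* current placement */
        int dst_cpu, dst_nid;           /* candidate destination */
        int dist;                       /* node_distance(src_nid, dst_nid) */
        int imbalance_pct;              /* threshold from the NUMA sched_domain */
        int imb_numa_nr;                /* tolerated NUMA task imbalance */
        struct numa_stats src_stats, dst_stats;
        int best_cpu;                   /* best destination so far, -1 if none */
        long best_imp;                  /* improvement score of that choice */
        struct task_struct *best_task;  /* task to swap with, or NULL */
    };
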
2201 struct task_numa_env *env) in load_too_imbalanced() argument
2214 src_capacity = env->src_stats.compute_capacity; in load_too_imbalanced()
2215 dst_capacity = env->dst_stats.compute_capacity; in load_too_imbalanced()
2219 orig_src_load = env->src_stats.load; in load_too_imbalanced()
2220 orig_dst_load = env->dst_stats.load; in load_too_imbalanced()
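
The compute_capacity and load reads above feed a before/after comparison. A minimal user-space model of that idea, assuming the check is "reject the move if the capacity-weighted load gap between the nodes would grow"; the exact kernel formula may differ:

    #include <stdbool.h>
    #include <stdlib.h>     /* labs() */

    static bool load_too_imbalanced_model(long src_load, long dst_load,
                                          long src_capacity, long dst_capacity,
                                          long orig_src_load, long orig_dst_load)
    {
        /* Compare src_load/src_capacity vs dst_load/dst_capacity without dividing. */
        long imb     = labs(dst_load * src_capacity - src_load * dst_capacity);
        long old_imb = labs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);

        return imb > old_imb;           /* worse than before => too imbalanced */
    }
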
2241 static bool task_numa_compare(struct task_numa_env *env, in task_numa_compare() argument
2244 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
2245 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2249 int dist = env->dist; in task_numa_compare()
2265 * end try selecting ourselves (current == env->p) as a swap candidate. in task_numa_compare()
2267 if (cur == env->p) { in task_numa_compare()
2273 if (maymove && moveimp >= env->best_imp) in task_numa_compare()
2280 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) in task_numa_compare()
2287 if (env->best_task && in task_numa_compare()
2288 env->best_task->numa_preferred_nid == env->src_nid && in task_numa_compare()
2289 cur->numa_preferred_nid != env->src_nid) { in task_numa_compare()
2311 if (env->dst_stats.node_type == node_has_spare) in task_numa_compare()
2314 imp = taskimp + task_weight(cur, env->src_nid, dist) - in task_numa_compare()
2315 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
2328 imp += group_weight(cur, env->src_nid, dist) - in task_numa_compare()
2329 group_weight(cur, env->dst_nid, dist); in task_numa_compare()
2331 imp += task_weight(cur, env->src_nid, dist) - in task_numa_compare()
2332 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
2336 if (cur->numa_preferred_nid == env->dst_nid) in task_numa_compare()
2345 if (cur->numa_preferred_nid == env->src_nid) in task_numa_compare()
2348 if (maymove && moveimp > imp && moveimp > env->best_imp) { in task_numa_compare()
2358 if (env->best_task && cur->numa_preferred_nid == env->src_nid && in task_numa_compare()
2359 env->best_task->numa_preferred_nid != env->src_nid) { in task_numa_compare()
2369 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) in task_numa_compare()
2375 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
2379 dst_load = env->dst_stats.load + load; in task_numa_compare()
2380 src_load = env->src_stats.load - load; in task_numa_compare()
2382 if (load_too_imbalanced(src_load, dst_load, env)) in task_numa_compare()
2388 int cpu = env->dst_stats.idle_cpu; in task_numa_compare()
2392 cpu = env->dst_cpu; in task_numa_compare()
2398 if (!idle_cpu(cpu) && env->best_cpu >= 0 && in task_numa_compare()
2399 idle_cpu(env->best_cpu)) { in task_numa_compare()
2400 cpu = env->best_cpu; in task_numa_compare()
2403 env->dst_cpu = cpu; in task_numa_compare()
2406 task_numa_assign(env, cur, imp); in task_numa_compare()
2413 if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) in task_numa_compare()
2420 if (!maymove && env->best_task && in task_numa_compare()
2421 env->best_task->numa_preferred_nid == env->src_nid) { in task_numa_compare()
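
The improvement arithmetic visible in task_numa_compare() above, condensed into a stand-alone sketch. task_weight()/group_weight() values are passed in as plain numbers here, and the SMALLIMP value of 30 is an assumption:

    /* Benefit of swapping env->p with cur, per the imp computation above. */
    long swap_improvement(long taskimp,
                          long cur_weight_on_src, long cur_weight_on_dst,
                          long best_imp)
    {
        const long SMALLIMP = 30;       /* assumed threshold */

        /* env->p gains taskimp; cur gains (weight on src) - (weight on dst). */
        long imp = taskimp + cur_weight_on_src - cur_weight_on_dst;

        /* Mirror of the SMALLIMP filter: tiny or barely-better gains are skipped. */
        if (imp < SMALLIMP || imp <= best_imp + SMALLIMP / 2)
            return -1;                  /* not worth a swap */

        return imp;
    }
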
2430 static void task_numa_find_cpu(struct task_numa_env *env, in task_numa_find_cpu() argument
2440 if (env->dst_stats.node_type == node_has_spare) { in task_numa_find_cpu()
2450 src_running = env->src_stats.nr_running - 1; in task_numa_find_cpu()
2451 dst_running = env->dst_stats.nr_running + 1; in task_numa_find_cpu()
2454 env->imb_numa_nr); in task_numa_find_cpu()
2459 if (env->dst_stats.idle_cpu >= 0) { in task_numa_find_cpu()
2460 env->dst_cpu = env->dst_stats.idle_cpu; in task_numa_find_cpu()
2461 task_numa_assign(env, NULL, 0); in task_numa_find_cpu()
2468 * If the improvement from just moving env->p direction is better in task_numa_find_cpu()
2471 load = task_h_load(env->p); in task_numa_find_cpu()
2472 dst_load = env->dst_stats.load + load; in task_numa_find_cpu()
2473 src_load = env->src_stats.load - load; in task_numa_find_cpu()
2474 maymove = !load_too_imbalanced(src_load, dst_load, env); in task_numa_find_cpu()
2477 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { in task_numa_find_cpu()
2479 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
2482 env->dst_cpu = cpu; in task_numa_find_cpu()
2483 if (task_numa_compare(env, taskimp, groupimp, maymove)) in task_numa_find_cpu()
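
The spare-capacity shortcut in task_numa_find_cpu(): when the destination node has room, a small task-count skew is tolerated and an idle destination CPU is taken directly instead of searching for a swap partner. A stand-in for adjust_numa_imbalance() showing only the tolerance idea (the kernel helper also weighs how busy the destination already is):

    #include <stdbool.h>

    static bool numa_imbalance_ok(int dst_running, int src_running, int imb_numa_nr)
    {
        int imbalance = dst_running - src_running;

        return imbalance <= imb_numa_nr;    /* small skews toward dst are allowed */
    }
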
2490 struct task_numa_env env = { in task_numa_migrate() local
2518 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
2520 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
2521 env.imb_numa_nr = sd->imb_numa_nr; in task_numa_migrate()
2536 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2537 dist = env.dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
2538 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2539 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2540 update_numa_stats(&env, &env.src_stats, env.src_nid, false); in task_numa_migrate()
2541 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2542 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2543 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); in task_numa_migrate()
2546 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
2556 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { in task_numa_migrate()
2558 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
2561 dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
2563 dist != env.dist) { in task_numa_migrate()
2564 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2565 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2574 env.dist = dist; in task_numa_migrate()
2575 env.dst_nid = nid; in task_numa_migrate()
2576 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); in task_numa_migrate()
2577 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
2590 if (env.best_cpu == -1) in task_numa_migrate()
2591 nid = env.src_nid; in task_numa_migrate()
2593 nid = cpu_to_node(env.best_cpu); in task_numa_migrate()
2600 if (env.best_cpu == -1) { in task_numa_migrate()
2601 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2605 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2606 if (env.best_task == NULL) { in task_numa_migrate()
2607 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2610 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2614 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2618 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2619 put_task_struct(env.best_task); in task_numa_migrate()
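
The tail of task_numa_migrate() above chooses between two mechanisms. A condensed model of just that decision (migrate_task_to() and migrate_swap() are the kernel helpers invoked on the corresponding paths; the stub below only classifies the outcome):

    #include <stddef.h>

    enum numa_move { NUMA_STUCK, NUMA_MOVE, NUMA_SWAP };

    static enum numa_move pick_numa_action(int best_cpu, const void *best_task)
    {
        if (best_cpu == -1)
            return NUMA_STUCK;          /* no better placement: trace and give up */
        if (best_task == NULL)
            return NUMA_MOVE;           /* idle destination: migrate_task_to() */
        return NUMA_SWAP;               /* busy destination: migrate_swap() with best_task */
    }
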
9247 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
9251 lockdep_assert_rq_held(env->src_rq); in task_hot()
9260 if (env->sd->flags & SD_SHARE_CPUCAPACITY) in task_hot()
9266 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && in task_hot()
9277 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
9283 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
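
The delta computed at the end of task_hot() is compared against the migration-cost window; a task that ran very recently on the source runqueue is treated as cache-hot. A minimal model, with the default window of 500000 ns stated as an assumption:

    #include <stdbool.h>

    static bool task_hot_by_recency(unsigned long long rq_clock_task_ns,
                                    unsigned long long exec_start_ns,
                                    unsigned long long migration_cost_ns /* ~500000 */)
    {
        unsigned long long delta = rq_clock_task_ns - exec_start_ns;

        return delta < migration_cost_ns;   /* still hot on src_rq */
    }
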
9294 static long migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
9303 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
9306 src_nid = cpu_to_node(env->src_cpu); in migrate_degrades_locality()
9307 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
9314 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
9325 if (env->idle == CPU_IDLE) in migrate_degrades_locality()
9342 struct lb_env *env) in migrate_degrades_locality() argument
9376 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
9380 lockdep_assert_rq_held(env->src_rq); in can_migrate_task()
9393 if ((p->se.sched_delayed) && (env->migration_type != migrate_load)) in can_migrate_task()
9396 if (lb_throttled_hierarchy(p, env->dst_cpu)) in can_migrate_task()
9405 if (!env->sd->nr_balance_failed && in can_migrate_task()
9406 task_is_ineligible_on_dst_cpu(p, env->dst_cpu)) in can_migrate_task()
9416 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
9421 env->flags |= LBF_SOME_PINNED; in can_migrate_task()
9433 if (env->idle == CPU_NEWLY_IDLE || in can_migrate_task()
9434 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB)) in can_migrate_task()
9437 /* Prevent to re-select dst_cpu via env's CPUs: */ in can_migrate_task()
9438 cpu = cpumask_first_and_and(env->dst_grpmask, env->cpus, p->cpus_ptr); in can_migrate_task()
9441 env->flags |= LBF_DST_PINNED; in can_migrate_task()
9442 env->new_dst_cpu = cpu; in can_migrate_task()
9449 env->flags &= ~LBF_ALL_PINNED; in can_migrate_task()
9451 if (task_on_cpu(env->src_rq, p) || in can_migrate_task()
9452 task_current_donor(env->src_rq, p)) { in can_migrate_task()
9464 if (env->flags & LBF_ACTIVE_LB) in can_migrate_task()
9467 degrades = migrate_degrades_locality(p, env); in can_migrate_task()
9469 hot = task_hot(p, env); in can_migrate_task()
9473 if (!hot || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
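
The final gate in can_migrate_task() lets a cache-hot task move anyway once the domain has failed to balance more than cache_nice_tries times. A condensed illustration of just that check, not the full set of filters (affinity, throttling, currently-running, etc.) seen above:

    #include <stdbool.h>

    static bool hotness_allows_migration(bool degrades_locality, bool hot,
                                         unsigned int nr_balance_failed,
                                         unsigned int cache_nice_tries)
    {
        if (degrades_locality)
            hot = true;                 /* NUMA placement says moving would hurt */

        return !hot || nr_balance_failed > cache_nice_tries;
    }
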
9484 * detach_task() -- detach the task for the migration specified in env
9486 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
9488 lockdep_assert_rq_held(env->src_rq); in detach_task()
9492 schedstat_inc(env->sd->lb_hot_gained[env->idle]); in detach_task()
9496 WARN_ON(task_current(env->src_rq, p)); in detach_task()
9497 WARN_ON(task_current_donor(env->src_rq, p)); in detach_task()
9499 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
9500 set_task_cpu(p, env->dst_cpu); in detach_task()
9504 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
9509 static struct task_struct *detach_one_task(struct lb_env *env) in detach_one_task() argument
9513 lockdep_assert_rq_held(env->src_rq); in detach_one_task()
9516 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
9517 if (!can_migrate_task(p, env)) in detach_one_task()
9520 detach_task(p, env); in detach_one_task()
9524 * lb_gained[env->idle] is updated (other is detach_tasks) in detach_one_task()
9528 schedstat_inc(env->sd->lb_gained[env->idle]); in detach_one_task()
9540 static int detach_tasks(struct lb_env *env) in detach_tasks() argument
9542 struct list_head *tasks = &env->src_rq->cfs_tasks; in detach_tasks()
9547 lockdep_assert_rq_held(env->src_rq); in detach_tasks()
9553 if (env->src_rq->nr_running <= 1) { in detach_tasks()
9554 env->flags &= ~LBF_ALL_PINNED; in detach_tasks()
9558 if (env->imbalance <= 0) in detach_tasks()
9566 if (env->idle && env->src_rq->nr_running <= 1) in detach_tasks()
9569 env->loop++; in detach_tasks()
9571 if (env->loop > env->loop_max) in detach_tasks()
9575 if (env->loop > env->loop_break) { in detach_tasks()
9576 env->loop_break += SCHED_NR_MIGRATE_BREAK; in detach_tasks()
9577 env->flags |= LBF_NEED_BREAK; in detach_tasks()
9583 if (!can_migrate_task(p, env)) in detach_tasks()
9586 switch (env->migration_type) { in detach_tasks()
9591 * value. Make sure that env->imbalance decreases in detach_tasks()
9598 load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
9607 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) in detach_tasks()
9610 env->imbalance -= load; in detach_tasks()
9616 if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance) in detach_tasks()
9619 env->imbalance -= util; in detach_tasks()
9623 env->imbalance--; in detach_tasks()
9628 if (task_fits_cpu(p, env->src_cpu)) in detach_tasks()
9631 env->imbalance = 0; in detach_tasks()
9635 detach_task(p, env); in detach_tasks()
9636 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
9646 if (env->idle == CPU_NEWLY_IDLE) in detach_tasks()
9654 if (env->imbalance <= 0) in detach_tasks()
9670 schedstat_add(env->sd->lb_gained[env->idle], detached); in detach_tasks()
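
The loop/loop_break/loop_max triplet above throttles detach_tasks(): stop outright past loop_max, and ask for a lock-release breather (LBF_NEED_BREAK) every SCHED_NR_MIGRATE_BREAK iterations. A small model; the batch size of 32 is an assumption:

    #define SCHED_NR_MIGRATE_BREAK 32   /* assumed batch size */

    enum loop_action { LOOP_CONTINUE, LOOP_BREATHER, LOOP_STOP };

    static enum loop_action detach_loop_step(unsigned int *loop,
                                             unsigned int *loop_break,
                                             unsigned int loop_max)
    {
        (*loop)++;
        if (*loop > loop_max)
            return LOOP_STOP;           /* moved enough candidates for this pass */
        if (*loop > *loop_break) {
            *loop_break += SCHED_NR_MIGRATE_BREAK;
            return LOOP_BREATHER;       /* caller sets LBF_NEED_BREAK and bails out */
        }
        return LOOP_CONTINUE;
    }
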
9705 static void attach_tasks(struct lb_env *env) in attach_tasks() argument
9707 struct list_head *tasks = &env->tasks; in attach_tasks()
9711 rq_lock(env->dst_rq, &rf); in attach_tasks()
9712 update_rq_clock(env->dst_rq); in attach_tasks()
9718 attach_task(env->dst_rq, p); in attach_tasks()
9721 rq_unlock(env->dst_rq, &rf); in attach_tasks()
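
A hedged reconstruction of struct lb_env from the env-> accesses in task_hot() through attach_tasks() above; as with task_numa_env, types, ordering and anything not dereferenced here are guesses rather than the kernel's definition:

    struct rq;                          /* opaque kernel types for this sketch */
    struct sched_domain;
    struct cpumask;
    struct list_head { struct list_head *next, *prev; };

    struct lb_env {
        struct sched_domain *sd;        /* domain being balanced */

        struct rq *src_rq;              /* busiest runqueue */
        int src_cpu;

        int dst_cpu;                    /* CPU doing the pull */
        struct rq *dst_rq;
        struct cpumask *dst_grpmask;    /* CPUs of the local group */
        int new_dst_cpu;                /* fallback when dst is pinned out */

        int idle;                       /* CPU_IDLE / CPU_NEWLY_IDLE / ... */
        long imbalance;                 /* how much still needs to move */
        struct cpumask *cpus;           /* CPUs still eligible this pass */

        unsigned int flags;             /* LBF_ALL_PINNED, LBF_SOME_PINNED, ... */

        unsigned int loop;              /* detach_tasks() iteration control */
        unsigned int loop_break;
        unsigned int loop_max;

        int migration_type;             /* migrate_load/util/task/misfit */
        int fbq_type;
        struct list_head tasks;         /* tasks detached, later re-attached */
    };
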
10240 * @env: The load balancing environment
10244 * @env::dst_cpu can do asym_packing if it has higher priority than the
10247 * Return: true if @env::dst_cpu can do with asym_packing load balance. False
10251 sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group) in sched_group_asym() argument
10261 return sched_asym(env->sd, env->dst_cpu, READ_ONCE(group->asym_prefer_cpu)); in sched_group_asym()
10275 static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs, in smt_balance() argument
10278 if (!env->idle) in smt_balance()
10294 static inline long sibling_imbalance(struct lb_env *env, in sibling_imbalance() argument
10302 if (!env->idle || !busiest->sum_nr_running) in sibling_imbalance()
10344 * @env: The load balancing environment.
10351 static inline void update_sg_lb_stats(struct lb_env *env, in update_sg_lb_stats() argument
10358 int i, nr_running, local_group, sd_flags = env->sd->flags; in update_sg_lb_stats()
10359 bool balancing_at_rd = !env->sd->parent; in update_sg_lb_stats()
10365 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in update_sg_lb_stats()
10409 } else if (env->idle && sched_reduced_capacity(rq, env->sd)) { in update_sg_lb_stats()
10421 if (!local_group && env->idle && sgs->sum_h_nr_running && in update_sg_lb_stats()
10422 sched_group_asym(env, sgs, group)) in update_sg_lb_stats()
10426 if (!local_group && smt_balance(env, sgs, group)) in update_sg_lb_stats()
10429 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
10439 * @env: The load balancing environment.
10450 static bool update_sd_pick_busiest(struct lb_env *env, in update_sd_pick_busiest() argument
10467 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && in update_sd_pick_busiest()
10469 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) || in update_sd_pick_busiest()
10581 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && in update_sd_pick_busiest()
10583 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) in update_sd_pick_busiest()
10951 static void update_idle_cpu_scan(struct lb_env *env, in update_idle_cpu_scan() argument
10965 if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE) in update_idle_cpu_scan()
10968 llc_weight = per_cpu(sd_llc_size, env->dst_cpu); in update_idle_cpu_scan()
10969 if (env->sd->span_weight != llc_weight) in update_idle_cpu_scan()
10972 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu)); in update_idle_cpu_scan()
11009 pct = env->sd->imbalance_pct; in update_idle_cpu_scan()
11024 * @env: The load balancing environment.
11028 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
11030 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
11040 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
11045 if (env->idle != CPU_NEWLY_IDLE || in update_sd_lb_stats()
11047 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
11050 update_sg_lb_stats(env, sds, sg, sgs, &sg_overloaded, &sg_overutilized); in update_sd_lb_stats()
11052 if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
11063 } while (sg != env->sd->groups); in update_sd_lb_stats()
11074 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
11075 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
11077 if (!env->sd->parent) { in update_sd_lb_stats()
11079 set_rd_overloaded(env->dst_rq->rd, sg_overloaded); in update_sd_lb_stats()
11082 set_rd_overutilized(env->dst_rq->rd, sg_overutilized); in update_sd_lb_stats()
11084 set_rd_overutilized(env->dst_rq->rd, sg_overutilized); in update_sd_lb_stats()
11087 update_idle_cpu_scan(env, sum_util); in update_sd_lb_stats()
11093 * @env: load balance environment
11096 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
11104 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { in calculate_imbalance()
11106 env->migration_type = migrate_misfit; in calculate_imbalance()
11107 env->imbalance = 1; in calculate_imbalance()
11113 env->migration_type = migrate_load; in calculate_imbalance()
11114 env->imbalance = busiest->group_misfit_task_load; in calculate_imbalance()
11124 env->migration_type = migrate_task; in calculate_imbalance()
11125 env->imbalance = busiest->sum_h_nr_running; in calculate_imbalance()
11131 env->migration_type = migrate_task; in calculate_imbalance()
11132 env->imbalance = 1; in calculate_imbalance()
11143 env->migration_type = migrate_task; in calculate_imbalance()
11144 env->imbalance = 1; in calculate_imbalance()
11154 !(env->sd->flags & SD_SHARE_LLC)) { in calculate_imbalance()
11163 env->migration_type = migrate_util; in calculate_imbalance()
11164 env->imbalance = max(local->group_capacity, local->group_util) - in calculate_imbalance()
11174 if (env->idle && env->imbalance == 0) { in calculate_imbalance()
11175 env->migration_type = migrate_task; in calculate_imbalance()
11176 env->imbalance = 1; in calculate_imbalance()
11187 env->migration_type = migrate_task; in calculate_imbalance()
11188 env->imbalance = sibling_imbalance(env, sds, busiest, local); in calculate_imbalance()
11195 env->migration_type = migrate_task; in calculate_imbalance()
11196 env->imbalance = max_t(long, 0, in calculate_imbalance()
11202 if (env->sd->flags & SD_NUMA) { in calculate_imbalance()
11203 env->imbalance = adjust_numa_imbalance(env->imbalance, in calculate_imbalance()
11205 env->sd->imb_numa_nr); in calculate_imbalance()
11210 env->imbalance >>= 1; in calculate_imbalance()
11233 env->imbalance = 0; in calculate_imbalance()
11245 env->imbalance = 0; in calculate_imbalance()
11259 env->migration_type = migrate_load; in calculate_imbalance()
11260 env->imbalance = min( in calculate_imbalance()
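
calculate_imbalance() above pairs a migration_type with an imbalance amount depending on the state of the busiest and local groups. A coarse summary as code; the real ordering of checks, the thresholds and the SD_NUMA/SD_SHARE_LLC special cases are simplified away, so treat this as a map of the cases, not the algorithm:

    enum migration_type { migrate_load, migrate_util, migrate_task, migrate_misfit };

    struct lb_decision { enum migration_type type; long imbalance; };

    /* busiest_misfit_load, spare_gap and nr_task_gap are illustrative inputs. */
    static struct lb_decision calculate_imbalance_model(int busiest_is_misfit,
                                                        int local_has_spare,
                                                        int busiest_overloaded,
                                                        long busiest_misfit_load,
                                                        long spare_gap,
                                                        long nr_task_gap)
    {
        struct lb_decision d;

        if (busiest_is_misfit) {                /* one task no longer fits its CPU */
            d.type = migrate_load;
            d.imbalance = busiest_misfit_load;
        } else if (local_has_spare && busiest_overloaded) {
            d.type = migrate_util;              /* fill local spare capacity */
            d.imbalance = spare_gap;
        } else if (local_has_spare) {
            d.type = migrate_task;              /* even out task counts */
            d.imbalance = nr_task_gap;
        } else {
            d.type = migrate_load;              /* everyone busy: move load */
            d.imbalance = 0;                    /* from avg_load deltas in the kernel */
        }
        return d;
    }
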
11291 * @env: The load balancing environment.
11298 static struct sched_group *sched_balance_find_src_group(struct lb_env *env) in sched_balance_find_src_group() argument
11309 update_sd_lb_stats(env, &sds); in sched_balance_find_src_group()
11321 if (!is_rd_overutilized(env->dst_rq->rd) && in sched_balance_find_src_group()
11322 rcu_dereference(env->dst_rq->rd->pd)) in sched_balance_find_src_group()
11373 env->sd->imbalance_pct * local->avg_load) in sched_balance_find_src_group()
11382 sibling_imbalance(env, &sds, busiest, local) > 1) in sched_balance_find_src_group()
11386 if (!env->idle) { in sched_balance_find_src_group()
11425 calculate_imbalance(env, &sds); in sched_balance_find_src_group()
11426 return env->imbalance ? sds.busiest : NULL; in sched_balance_find_src_group()
11429 env->imbalance = 0; in sched_balance_find_src_group()
11436 static struct rq *sched_balance_find_src_rq(struct lb_env *env, in sched_balance_find_src_rq() argument
11444 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in sched_balance_find_src_rq()
11471 if (rt > env->fbq_type) in sched_balance_find_src_rq()
11486 if (env->sd->flags & SD_ASYM_CPUCAPACITY && in sched_balance_find_src_rq()
11487 !capacity_greater(capacity_of(env->dst_cpu), capacity) && in sched_balance_find_src_rq()
11498 if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1) in sched_balance_find_src_rq()
11501 switch (env->migration_type) { in sched_balance_find_src_rq()
11509 if (nr_running == 1 && load > env->imbalance && in sched_balance_find_src_rq()
11510 !check_cpu_capacity(rq, env->sd)) in sched_balance_find_src_rq()
11582 asym_active_balance(struct lb_env *env) in asym_active_balance() argument
11590 * If @env::src_cpu is an SMT core with busy siblings, let in asym_active_balance()
11591 * the lower priority @env::dst_cpu help it. Do not follow in asym_active_balance()
11594 return env->idle && sched_use_asym_prio(env->sd, env->dst_cpu) && in asym_active_balance()
11595 (sched_asym_prefer(env->dst_cpu, env->src_cpu) || in asym_active_balance()
11596 !sched_use_asym_prio(env->sd, env->src_cpu)); in asym_active_balance()
11600 imbalanced_active_balance(struct lb_env *env) in imbalanced_active_balance() argument
11602 struct sched_domain *sd = env->sd; in imbalanced_active_balance()
11609 if ((env->migration_type == migrate_task) && in imbalanced_active_balance()
11616 static int need_active_balance(struct lb_env *env) in need_active_balance() argument
11618 struct sched_domain *sd = env->sd; in need_active_balance()
11620 if (asym_active_balance(env)) in need_active_balance()
11623 if (imbalanced_active_balance(env)) in need_active_balance()
11632 if (env->idle && in need_active_balance()
11633 (env->src_rq->cfs.h_nr_runnable == 1)) { in need_active_balance()
11634 if ((check_cpu_capacity(env->src_rq, sd)) && in need_active_balance()
11635 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
11639 if (env->migration_type == migrate_misfit) in need_active_balance()
11647 static int should_we_balance(struct lb_env *env) in should_we_balance() argument
11650 struct sched_group *sg = env->sd->groups; in should_we_balance()
11657 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) in should_we_balance()
11667 if (env->idle == CPU_NEWLY_IDLE) { in should_we_balance()
11668 if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending) in should_we_balance()
11675 for_each_cpu_and(cpu, swb_cpus, env->cpus) { in should_we_balance()
11684 if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) { in should_we_balance()
11702 return cpu == env->dst_cpu; in should_we_balance()
11707 return idle_smt == env->dst_cpu; in should_we_balance()
11710 return group_balance_cpu(sg) == env->dst_cpu; in should_we_balance()
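
should_we_balance() elects a single leader among the group's CPUs so that only one of them actually runs this balance pass: the first eligible idle CPU (with fully idle cores preferred at SMT level), falling back to the group's designated balance CPU. A toy version over plain arrays, ignoring the newly-idle and SMT details above:

    #include <stdbool.h>

    static bool should_we_balance_model(int dst_cpu,
                                        const int *group_cpus, int nr_cpus,
                                        const bool *cpu_is_idle,
                                        int group_balance_cpu)
    {
        for (int i = 0; i < nr_cpus; i++) {
            if (cpu_is_idle[group_cpus[i]])
                return group_cpus[i] == dst_cpu;    /* first idle CPU leads */
        }
        return group_balance_cpu == dst_cpu;        /* otherwise the default leader */
    }
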
11713 static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd, in update_lb_imbalance_stat() argument
11719 switch (env->migration_type) { in update_lb_imbalance_stat()
11721 __schedstat_add(sd->lb_imbalance_load[idle], env->imbalance); in update_lb_imbalance_stat()
11724 __schedstat_add(sd->lb_imbalance_util[idle], env->imbalance); in update_lb_imbalance_stat()
11727 __schedstat_add(sd->lb_imbalance_task[idle], env->imbalance); in update_lb_imbalance_stat()
11730 __schedstat_add(sd->lb_imbalance_misfit[idle], env->imbalance); in update_lb_imbalance_stat()
11749 struct lb_env env = { in sched_balance_rq() local
11758 .tasks = LIST_HEAD_INIT(env.tasks), in sched_balance_rq()
11766 if (!should_we_balance(&env)) { in sched_balance_rq()
11771 group = sched_balance_find_src_group(&env); in sched_balance_rq()
11777 busiest = sched_balance_find_src_rq(&env, group); in sched_balance_rq()
11783 WARN_ON_ONCE(busiest == env.dst_rq); in sched_balance_rq()
11785 update_lb_imbalance_stat(&env, sd, idle); in sched_balance_rq()
11787 env.src_cpu = busiest->cpu; in sched_balance_rq()
11788 env.src_rq = busiest; in sched_balance_rq()
11792 env.flags |= LBF_ALL_PINNED; in sched_balance_rq()
11800 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in sched_balance_rq()
11810 cur_ld_moved = detach_tasks(&env); in sched_balance_rq()
11823 attach_tasks(&env); in sched_balance_rq()
11829 if (env.flags & LBF_NEED_BREAK) { in sched_balance_rq()
11830 env.flags &= ~LBF_NEED_BREAK; in sched_balance_rq()
11853 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { in sched_balance_rq()
11855 /* Prevent to re-select dst_cpu via env's CPUs */ in sched_balance_rq()
11856 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in sched_balance_rq()
11858 env.dst_rq = cpu_rq(env.new_dst_cpu); in sched_balance_rq()
11859 env.dst_cpu = env.new_dst_cpu; in sched_balance_rq()
11860 env.flags &= ~LBF_DST_PINNED; in sched_balance_rq()
11861 env.loop = 0; in sched_balance_rq()
11862 env.loop_break = SCHED_NR_MIGRATE_BREAK; in sched_balance_rq()
11877 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) in sched_balance_rq()
11882 if (unlikely(env.flags & LBF_ALL_PINNED)) { in sched_balance_rq()
11892 if (!cpumask_subset(cpus, env.dst_grpmask)) { in sched_balance_rq()
11893 env.loop = 0; in sched_balance_rq()
11894 env.loop_break = SCHED_NR_MIGRATE_BREAK; in sched_balance_rq()
11913 env.migration_type != migrate_misfit) in sched_balance_rq()
11916 if (need_active_balance(&env)) { in sched_balance_rq()
11932 env.flags &= ~LBF_ALL_PINNED; in sched_balance_rq()
11958 if (likely(!active_balance) || need_active_balance(&env)) { in sched_balance_rq()
11971 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { in sched_balance_rq()
12001 if (env.idle == CPU_NEWLY_IDLE || in sched_balance_rq()
12002 env.migration_type == migrate_misfit) in sched_balance_rq()
12006 if ((env.flags & LBF_ALL_PINNED && in sched_balance_rq()
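
sched_balance_rq() drives its retries off the env.flags bits seen above: LBF_NEED_BREAK re-enters the detach loop after dropping the lock, LBF_DST_PINNED restarts the pass with env.new_dst_cpu as the destination, and LBF_ALL_PINNED means nothing on the source runqueue could move. A minimal model of that control flow; the bit values are assumptions:

    #define LBF_ALL_PINNED  0x01
    #define LBF_NEED_BREAK  0x02
    #define LBF_DST_PINNED  0x04
    #define LBF_SOME_PINNED 0x08

    enum lb_next { LB_REDO_DETACH, LB_RETRY_NEW_DST, LB_GIVE_UP, LB_DONE };

    static enum lb_next next_balance_step(unsigned int flags, long imbalance)
    {
        if (flags & LBF_NEED_BREAK)
            return LB_REDO_DETACH;          /* resume after the breather */
        if ((flags & LBF_DST_PINNED) && imbalance > 0)
            return LB_RETRY_NEW_DST;        /* retry with env.new_dst_cpu */
        if (flags & LBF_ALL_PINNED)
            return LB_GIVE_UP;              /* may escalate to active balance or give up */
        return LB_DONE;
    }
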
12100 struct lb_env env = { in active_load_balance_cpu_stop() local
12113 p = detach_one_task(&env); in active_load_balance_cpu_stop()
12240 * env->dst_cpu, so we can't know our idle in sched_balance_domains()