--- core.c (c2b078e78ace39710356a7bb6b984177d942a699)
+++ core.c (07c54f7a7ff77bb47bae26e566969e9c4b6fb0c6)
 /*
  * kernel/sched/core.c
  *
  * Kernel scheduler and related syscalls
  *
  * Copyright (C) 1991-2002 Linus Torvalds
  *
  * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and

--- 676 unchanged lines hidden ---

 }

 #endif /* CONFIG_NO_HZ_COMMON */

 #ifdef CONFIG_NO_HZ_FULL
 bool sched_can_stop_tick(void)
 {
+        /*
+         * FIFO realtime policy runs the highest priority task. Other runnable
+         * tasks are of a lower priority. The scheduler tick does nothing.
+         */
+        if (current->policy == SCHED_FIFO)
+                return true;
+
+        /*
+         * Round-robin realtime tasks time slice with other tasks at the same
+         * realtime priority. Is this task the only one at this priority?
+         */
+        if (current->policy == SCHED_RR) {
+                struct sched_rt_entity *rt_se = &current->rt;
+
+                return rt_se->run_list.prev == rt_se->run_list.next;
+        }
+
         /*
          * More than one running task need preemption.
          * nr_running update is assumed to be visible
          * after IPI is sent from wakers.
          */
         if (this_rq()->nr_running > 1)
                 return false;

         return true;

--- 4724 unchanged lines hidden ---

         printk(KERN_DEBUG "%*s groups:", level + 1, "");
         do {
                 if (!group) {
                         printk("\n");
                         printk(KERN_ERR "ERROR: group is NULL\n");
                         break;
                 }

-                /*
-                 * Even though we initialize ->capacity to something semi-sane,
-                 * we leave capacity_orig unset. This allows us to detect if
-                 * domain iteration is still funny without causing /0 traps.
-                 */
-                if (!group->sgc->capacity_orig) {
-                        printk(KERN_CONT "\n");
-                        printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
-                        break;
-                }
-
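[annotation, not part of the diff] The removed debug check relied on a sentinel: ->capacity was given a sane default at allocation while ->capacity_orig was deliberately left 0, so a 0 here meant "domain iteration never initialized this group" and could be reported instead of trapping on a later division. The field itself goes away in this diff (see the rq->cpu_capacity_orig hunk further down). Sentinel-pattern sketch with simplified types, not kernel code:

#include <stdio.h>

struct group_sketch {
        unsigned long capacity;         /* defaulted at allocation */
        unsigned long capacity_orig;    /* stays 0 until real init runs */
};

static int debug_check(const struct group_sketch *g)
{
        if (!g->capacity_orig) {
                fprintf(stderr, "ERROR: group capacity not set\n");
                return -1;
        }
        return 0;
}

int main(void)
{
        struct group_sketch g = { .capacity = 1024, .capacity_orig = 0 };

        return debug_check(&g) ? 1 : 0; /* flags the never-initialized group */
}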
                 if (!cpumask_weight(sched_group_cpus(group))) {
                         printk(KERN_CONT "\n");
                         printk(KERN_ERR "ERROR: empty group\n");
                         break;
                 }

                 if (!(sd->flags & SD_OVERLAP) &&
                     cpumask_intersects(groupmask, sched_group_cpus(group))) {

--- 467 unchanged lines hidden ---

                 build_group_mask(sd, sg);

                 /*
                  * Initialize sgc->capacity such that even if we mess up the
                  * domains and no possible iteration will get us here, we won't
                  * die on a /0 trap.
                  */
                 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-                sg->sgc->capacity_orig = sg->sgc->capacity;

                 /*
                  * Make sure the first group of this domain contains the
                  * canonical balance cpu. Otherwise the sched_domain iteration
                  * breaks. See update_sg_lb_stats().
                  */
                 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
                     group_balance_cpu(sg) == cpu)
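[annotation, not part of the diff] SCHED_CAPACITY_SCALE is the scheduler's fixed-point "1.0" (1 << 10 = 1024), so seeding a group with 1024 * nr_cpus guarantees later per-capacity divisions cannot hit /0 even when domain construction went wrong. Illustrative arithmetic only, not kernel code:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE (1UL << 10)        /* fixed-point 1.0 */

int main(void)
{
        unsigned long nr_cpus = 4;
        unsigned long capacity = SCHED_CAPACITY_SCALE * nr_cpus;       /* 4096 */
        unsigned long util = 512;               /* half of one cpu's worth */

        /* safe: capacity was seeded non-zero at group-build time */
        printf("group is %lu%% utilized\n", util * 100 / capacity);    /* 12% */
        return 0;
}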

--- 294 unchanged lines hidden ---

 #endif
         };

         /*
          * Convert topological properties into behaviour.
          */

         if (sd->flags & SD_SHARE_CPUCAPACITY) {
+                sd->flags |= SD_PREFER_SIBLING;
                 sd->imbalance_pct = 110;
                 sd->smt_gain = 1178; /* ~15% */

         } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
                 sd->imbalance_pct = 117;
                 sd->cache_nice_tries = 1;
                 sd->busy_idx = 2;

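[annotation, not part of the diff] The magic numbers here are fixed-point and percentage knobs: smt_gain = 1178 is in SCHED_CAPACITY_SCALE units, and 1178 / 1024 = 1.15, i.e. the "~15%" throughput gain a core gets from SMT; imbalance_pct = 110 / 117 means a 10% / 17% load difference is tolerated before balancing kicks in. Quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
        printf("smt_gain: %+.1f%%\n", (1178.0 / 1024.0 - 1.0) * 100.0); /* +15.0% */
        printf("imbalance threshold: %d%%\n", 110 - 100);               /* 10% */
        return 0;
}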

--- 907 unchanged lines hidden ---

                 struct rq *rq;

                 rq = cpu_rq(i);
                 raw_spin_lock_init(&rq->lock);
                 rq->nr_running = 0;
                 rq->calc_load_active = 0;
                 rq->calc_load_update = jiffies + LOAD_FREQ;
                 init_cfs_rq(&rq->cfs);
-                init_rt_rq(&rq->rt, rq);
-                init_dl_rq(&rq->dl, rq);
+                init_rt_rq(&rq->rt);
+                init_dl_rq(&rq->dl);
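[annotation, not part of the diff] Both initializers lose their struct rq argument. A parameter like this typically becomes redundant because the parent can be recovered from the embedded member with container_of() whenever it is actually needed; that motivation is an assumption here, sketched with simplified types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq_sketch { int rt_nr_running; };
struct rq_sketch { int cpu; struct rt_rq_sketch rt; };

static void init_rt_rq_sketch(struct rt_rq_sketch *rt_rq)
{
        rt_rq->rt_nr_running = 0;       /* no rq argument required */
}

int main(void)
{
        struct rq_sketch rq = { .cpu = 3 };

        init_rt_rq_sketch(&rq.rt);
        /* the parent is still reachable from the member when needed */
        printf("cpu %d\n", container_of(&rq.rt, struct rq_sketch, rt)->cpu);
        return 0;
}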
 #ifdef CONFIG_FAIR_GROUP_SCHED
                 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
                 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
                 /*
                  * How much cpu bandwidth does root_task_group get?
                  *
                  * In case of task-groups formed thr' the cgroup filesystem, it
                  * gets 100% of the cpu resources in the system. This overall
--- 23 unchanged lines hidden ---

                 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
                         rq->cpu_load[j] = 0;

                 rq->last_load_update_tick = jiffies;

 #ifdef CONFIG_SMP
                 rq->sd = NULL;
                 rq->rd = NULL;
-                rq->cpu_capacity = SCHED_CAPACITY_SCALE;
+                rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
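[annotation, not part of the diff] The runqueue now tracks two values: cpu_capacity, which runtime code scales down as RT/IRQ time eats into the cpu, and cpu_capacity_orig, the full-scale baseline. This per-rq field is what replaces the per-group capacity_orig removed earlier in the diff. Sketch of the pattern (hypothetical names and pressure figure):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE (1UL << 10)

struct rq_sketch {
        unsigned long cpu_capacity;      /* what is left for fair tasks */
        unsigned long cpu_capacity_orig; /* the cpu's full capacity */
};

int main(void)
{
        struct rq_sketch rq;

        rq.cpu_capacity = rq.cpu_capacity_orig = SCHED_CAPACITY_SCALE;
        /* suppose RT work consumes ~25%: scale the current value only */
        rq.cpu_capacity = rq.cpu_capacity_orig * 3 / 4;
        printf("capacity %lu of %lu\n", rq.cpu_capacity, rq.cpu_capacity_orig);
        return 0;
}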
                 rq->post_schedule = 0;
                 rq->active_balance = 0;
                 rq->next_balance = jiffies;
                 rq->push_cpu = 0;
                 rq->cpu = i;
                 rq->online = 0;
                 rq->idle_stamp = 0;
                 rq->avg_idle = 2*sysctl_sched_migration_cost;

--- 582 unchanged lines hidden ---

                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
         raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

         return ret;
 }
 #endif /* CONFIG_RT_GROUP_SCHED */

-static int sched_dl_global_constraints(void)
+static int sched_dl_global_validate(void)
 {
         u64 runtime = global_rt_runtime();
         u64 period = global_rt_period();
         u64 new_bw = to_ratio(period, runtime);
         struct dl_bw *dl_b;
         int cpu, ret = 0;
         unsigned long flags;

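[annotation, not part of the diff] new_bw is the global deadline-bandwidth cap as a fixed-point fraction of cpu time. A plausible reading of to_ratio() (a sketch of the idea, not the kernel's implementation): runtime/period scaled into 1/2^20 units.

#include <stdio.h>

typedef unsigned long long u64;

static u64 to_ratio_sketch(u64 period, u64 runtime)
{
        if (runtime == (u64)-1)                 /* RUNTIME_INF: unlimited */
                return 1ULL << 20;
        return (runtime << 20) / period;        /* fraction in 1/2^20 units */
}

int main(void)
{
        /* e.g. 950000us of runtime every 1000000us of period */
        printf("bw = %llu / %d (~95%%)\n",
               to_ratio_sketch(1000000, 950000), 1 << 20);
        return 0;
}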

--- 84 unchanged lines hidden ---


         ret = proc_dointvec(table, write, buffer, lenp, ppos);

         if (!ret && write) {
                 ret = sched_rt_global_validate();
                 if (ret)
                         goto undo;

-                ret = sched_rt_global_constraints();
+                ret = sched_dl_global_validate();
                 if (ret)
                         goto undo;

-                ret = sched_dl_global_constraints();
+                ret = sched_rt_global_constraints();
                 if (ret)
                         goto undo;

                 sched_rt_do_global();
                 sched_dl_do_global();
         }
         if (0) {
 undo:
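[annotation, not part of the diff] Besides the rename from "constraints" to "validate", this hunk reorders the calls so that both validation passes run before either sched_rt_do_global() or sched_dl_do_global() commits the new values; on any failure the undo path restores the old settings with nothing half-applied. Generic validate-then-commit shape (hypothetical helpers):

#include <stdio.h>

static int validate_rt(void) { return 0; }      /* 0 == ok */
static int validate_dl(void) { return 0; }
static void do_global_rt(void) { puts("commit rt"); }
static void do_global_dl(void) { puts("commit dl"); }

static int update_settings(void)
{
        int ret;

        ret = validate_dl();
        if (ret)
                return ret;     /* fail before anything is committed */
        ret = validate_rt();
        if (ret)
                return ret;

        do_global_rt();         /* only now mutate global state */
        do_global_dl();
        return 0;
}

int main(void) { return update_settings(); }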

--- 468 unchanged lines hidden ---