sched.h: diff from d66f1b06b5b438cd20ba3664b8eef1f9c79e84bf (old) to 9edeaea1bc452372718837ed2ba775811baf1ba1 (new). Unchanged context is unprefixed; lines added by the new revision are prefixed with `+`.
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>

--- 1061 unchanged lines hidden ---

	struct cpuidle_state *idle_state;
#endif

#ifdef CONFIG_SMP
	unsigned int nr_pinned;
#endif
	unsigned int push_busy;
	struct cpu_stop_work push_work;
+
+#ifdef CONFIG_SCHED_CORE
+	/* per rq */
+	struct rq *core;
+	unsigned int core_enabled;
+#endif
};
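The two added fields are the heart of the scheme: rq->core points at the runqueue that acts as the lock owner for the whole SMT core, and core_enabled gates the indirection per runqueue. The hunk itself only declares them; the sketch below is a hypothetical helper (sched_core_assign_core_rq() is not in this diff) showing one plausible way the pointer gets wired up, with every sibling pointing at the runqueue of the first CPU in the core's SMT mask.

/* Illustration only: hypothetical helper, not part of this diff. */
static void sched_core_assign_core_rq(int cpu)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	struct rq *core_rq = cpu_rq(cpumask_first(smt_mask));
	int t;

	/* Every SMT sibling shares one leader rq, hence one __lock. */
	for_each_cpu(t, smt_mask)
		cpu_rq(t)->core = core_rq;
}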

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;

--- 22 unchanged lines hidden ---

{
#ifdef CONFIG_SMP
	return p->migration_disabled;
#else
	return false;
#endif
}

+#ifdef CONFIG_SCHED_CORE
+
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
+}
+
static inline bool sched_core_disabled(void)
{
+	return !static_branch_unlikely(&__sched_core_enabled);
+}
+
+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+	if (sched_core_enabled(rq))
+		return &rq->core->__lock;
+
+	return &rq->__lock;
+}
+
+#else /* !CONFIG_SCHED_CORE */
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+	return false;
+}
+
+static inline bool sched_core_disabled(void)
+{
	return true;
}

static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

+#endif /* CONFIG_SCHED_CORE */
+
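Taken together, the gate is two-level: a global static branch that is patched out whenever core scheduling is unused, and the per-rq core_enabled flag that is only dereferenced behind it. Once both are on, rq_lockp() sends every sibling of a core to the leader's __lock. The hypothetical assertion below (assert_core_shares_lock() is not in this diff) states the resulting invariant.

/* Illustration only: hypothetical helper, not part of this diff. */
static inline void assert_core_shares_lock(int cpu, int sibling)
{
	/* Locking either sibling's rq serializes the whole core. */
	WARN_ON_ONCE(rq_lockp(cpu_rq(cpu)) != rq_lockp(cpu_rq(sibling)));
}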
static inline void lockdep_assert_rq_held(struct rq *rq)
{
	lockdep_assert_held(rq_lockp(rq));
}

extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);
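These externs replace what could otherwise be inline lock operations: with core scheduling, the answer of rq_lockp() can change while a CPU is spinning (the feature may be flipped concurrently), so the acquire path has to re-check after taking the lock. The definitions live in kernel/sched/core.c and are not part of this hunk; below is a minimal sketch consistent with the declarations, not necessarily the exact upstream body.

/* Sketch of an out-of-line definition; not part of this diff. */
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Feature off: rq->__lock cannot change, take it directly. */
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		return;
	}

	for (;;) {
		lock = rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		/* Retry if the rq was re-pointed while we were spinning. */
		if (likely(lock == rq_lockp(rq)))
			return;
		raw_spin_unlock(lock);
	}
}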

--- 1102 unchanged lines hidden ---

}
#endif


#ifdef CONFIG_SMP

static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
{
+#ifdef CONFIG_SCHED_CORE
+	/*
+	 * In order to not have {0,2},{1,3} turn into an AB-BA,
+	 * order by core-id first and cpu-id second.
+	 *
+	 * Notably:
+	 *
+	 *	double_rq_lock(0,3); will take core-0, core-1 lock
+	 *	double_rq_lock(1,2); will take core-1, core-0 lock
+	 *
+	 * when only cpu-id is considered.
+	 */
+	if (rq1->core->cpu < rq2->core->cpu)
+		return true;
+	if (rq1->core->cpu > rq2->core->cpu)
+		return false;
+
+	/*
+	 * __sched_core_flip() relies on SMT having cpu-id lock order.
+	 */
+#endif
	return rq1->cpu < rq2->cpu;
}

extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
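double_rq_lock() is the consumer of rq_order_less(): it sorts both runqueues into the global lock order, takes the first, and can skip the second acquisition entirely when both map to the same core-wide lock. Its body is also out of line in kernel/sched/core.c and is not shown in this hunk; a plausible sketch follows, assuming raw_spin_rq_lock() is the subclass-0 wrapper around raw_spin_rq_lock_nested().

/* Sketch of an out-of-line definition; not part of this diff. */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	/* Sort into the order defined by rq_order_less(). */
	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (rq_lockp(rq1) == rq_lockp(rq2))
		return;	/* one core-wide lock covers both */

	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
}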

#ifdef CONFIG_PREEMPTION

/*

--- 535 unchanged lines hidden ---