// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/cpudeadline.c
 *
 * Global CPU deadline management
 *
 * Author: Juri Lelli <j.lelli@sssup.it>
 */
#include "sched.h"

static inline int parent(int i)
{
        return (i - 1) >> 1;
}

static inline int left_child(int i)
{
        return (i << 1) + 1;
}

static inline int right_child(int i)
{
        return (i << 1) + 2;
}

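/*
 * cp->elements[] is used as a 0-based binary max-heap ordered by deadline,
 * and doubles as a per-CPU lookup table: elements[cpu].idx stores the heap
 * position currently occupied by that CPU, while elements[idx].cpu/.dl hold
 * the heap node itself. With 0-based indexing, a node at index i has its
 * parent at (i - 1) / 2 and its children at 2i + 1 and 2i + 2; for example
 * (illustrative only), index 2 has parent 0 and children 5 and 6.
 */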
static void cpudl_heapify_down(struct cpudl *cp, int idx)
{
        int l, r, largest;

        int orig_cpu = cp->elements[idx].cpu;
        u64 orig_dl = cp->elements[idx].dl;

        if (left_child(idx) >= cp->size)
                return;

        /* adapted from lib/prio_heap.c */
        while (1) {
                u64 largest_dl;

                l = left_child(idx);
                r = right_child(idx);
                largest = idx;
                largest_dl = orig_dl;

                if ((l < cp->size) && dl_time_before(orig_dl,
                                                cp->elements[l].dl)) {
                        largest = l;
                        largest_dl = cp->elements[l].dl;
                }
                if ((r < cp->size) && dl_time_before(largest_dl,
                                                cp->elements[r].dl))
                        largest = r;

                if (largest == idx)
                        break;

                /* pull largest child onto idx */
                cp->elements[idx].cpu = cp->elements[largest].cpu;
                cp->elements[idx].dl = cp->elements[largest].dl;
                cp->elements[cp->elements[idx].cpu].idx = idx;
                idx = largest;
        }
        /* actual push down of saved original values orig_* */
        cp->elements[idx].cpu = orig_cpu;
        cp->elements[idx].dl = orig_dl;
        cp->elements[cp->elements[idx].cpu].idx = idx;
}

static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
        int p;

        int orig_cpu = cp->elements[idx].cpu;
        u64 orig_dl = cp->elements[idx].dl;

        if (idx == 0)
                return;

        do {
                p = parent(idx);
                if (dl_time_before(orig_dl, cp->elements[p].dl))
                        break;
                /* pull parent onto idx */
                cp->elements[idx].cpu = cp->elements[p].cpu;
                cp->elements[idx].dl = cp->elements[p].dl;
                cp->elements[cp->elements[idx].cpu].idx = idx;
                idx = p;
        } while (idx != 0);
        /* actual push up of saved original values orig_* */
        cp->elements[idx].cpu = orig_cpu;
        cp->elements[idx].dl = orig_dl;
        cp->elements[cp->elements[idx].cpu].idx = idx;
}

/*
 * Restore the max-heap property after the deadline at @idx has changed,
 * sifting the element up or down as needed.
 */
static void cpudl_heapify(struct cpudl *cp, int idx)
{
        if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
                                cp->elements[idx].dl))
                cpudl_heapify_up(cp, idx);
        else
                cpudl_heapify_down(cp, idx);
}

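/*
 * cpudl_maximum - return the CPU at the root of the max-heap, i.e. the one
 * whose earliest queued deadline is the latest among all CPUs in the heap.
 */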
static inline int cpudl_maximum(struct cpudl *cp)
{
        return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - 1 if a suitable CPU was found (and @later_mask filled in),
 *                0 otherwise
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
               struct cpumask *later_mask)
{
        const struct sched_dl_entity *dl_se = &p->dl;

        if (later_mask &&
            cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
                unsigned long cap, max_cap = 0;
                int cpu, max_cpu = -1;

                if (!sched_asym_cpucap_active())
                        return 1;

                /* Ensure the capacity of the CPUs fits the task. */
                for_each_cpu(cpu, later_mask) {
                        if (!dl_task_fits_capacity(p, cpu)) {
                                cpumask_clear_cpu(cpu, later_mask);

                                /*
                                 * Track the biggest-capacity CPU seen so far
                                 * (preferring the task's current CPU on ties)
                                 * as a fallback in case no CPU fits.
                                 */
                                cap = arch_scale_cpu_capacity(cpu);

                                if (cap > max_cap ||
                                    (cpu == task_cpu(p) && cap == max_cap)) {
                                        max_cap = cap;
                                        max_cpu = cpu;
                                }
                        }
                }

                if (cpumask_empty(later_mask))
                        cpumask_set_cpu(max_cpu, later_mask);

                return 1;
        } else {
                /*
                 * No free CPU intersects the task's affinity: fall back to
                 * the heap maximum, i.e. the CPU whose earliest deadline is
                 * the latest in the system.
                 */
                int best_cpu = cpudl_maximum(cp);

                WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

                if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
                    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
                        if (later_mask)
                                cpumask_set_cpu(best_cpu, later_mask);

                        return 1;
                }
        }
        return 0;
}
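/*
 * A minimal usage sketch for cpudl_find() (illustrative, not the exact
 * deadline.c code): the push path in kernel/sched/deadline.c calls it from
 * find_later_rq() with a per-CPU scratch mask, roughly:
 *
 *	struct cpumask *later_mask =
 *		this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 *
 *	if (cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
 *		best_cpu = cpumask_any(later_mask);
 *
 * The real caller applies further topology-aware selection on top of
 * @later_mask before picking a CPU.
 */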

/*
 * cpudl_clear - remove a CPU from the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 * @online: the online state of the deadline runqueue
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_clear(struct cpudl *cp, int cpu, bool online)
{
        int old_idx, new_cpu;
        unsigned long flags;

        WARN_ON(!cpu_present(cpu));

        raw_spin_lock_irqsave(&cp->lock, flags);

        old_idx = cp->elements[cpu].idx;
        if (old_idx == IDX_INVALID) {
                /*
                 * Nothing to remove if old_idx was invalid.
                 * This could happen if rq_online_dl or rq_offline_dl is
                 * called for a CPU without -dl tasks running.
                 */
        } else {
                /*
                 * Move the last heap element into the freed slot, shrink
                 * the heap and restore the heap property from there.
                 */
                new_cpu = cp->elements[cp->size - 1].cpu;
                cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
                cp->elements[old_idx].cpu = new_cpu;
                cp->size--;
                cp->elements[new_cpu].idx = old_idx;
                cp->elements[cpu].idx = IDX_INVALID;
                cpudl_heapify(cp, old_idx);
        }
        if (likely(online))
                __cpumask_set_cpu(cpu, cp->free_cpus);
        else
                __cpumask_clear_cpu(cpu, cp->free_cpus);

        raw_spin_unlock_irqrestore(&cp->lock, flags);
}
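/*
 * A hypothetical caller sketch (the call site and its arguments are
 * assumptions, not taken from this file): when a runqueue loses its last
 * -dl task, or when the CPU goes offline, the deadline class would drop it
 * from the heap, e.g.:
 *
 *	cpudl_clear(&rq->rd->cpudl, rq->cpu, rq->online);
 *
 * with @online deciding whether the CPU stays advertised in free_cpus.
 */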

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 * @dl: the new earliest deadline for this CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
        int old_idx;
        unsigned long flags;

        WARN_ON(!cpu_present(cpu));

        raw_spin_lock_irqsave(&cp->lock, flags);

        old_idx = cp->elements[cpu].idx;
        if (old_idx == IDX_INVALID) {
                /* New entry: append it as a leaf and sift it up. */
                int new_idx = cp->size++;

                cp->elements[new_idx].dl = dl;
                cp->elements[new_idx].cpu = cpu;
                cp->elements[cpu].idx = new_idx;
                cpudl_heapify_up(cp, new_idx);
                __cpumask_clear_cpu(cpu, cp->free_cpus);
        } else {
                /* Existing entry: update its deadline and re-heapify. */
                cp->elements[old_idx].dl = dl;
                cpudl_heapify(cp, old_idx);
        }

        raw_spin_unlock_irqrestore(&cp->lock, flags);
}
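/*
 * A minimal usage sketch for cpudl_set() (illustrative; the exact call site
 * lives in kernel/sched/deadline.c): whenever the earliest deadline queued
 * on a runqueue changes, the deadline class republishes it, roughly:
 *
 *	cpudl_set(&rq->rd->cpudl, rq->cpu, dl_rq->earliest_dl.curr);
 *
 * so that cpudl_find() keeps seeing a consistent per-CPU deadline snapshot.
 */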

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
        int i;

        raw_spin_lock_init(&cp->lock);
        cp->size = 0;

        cp->elements = kcalloc(nr_cpu_ids,
                               sizeof(struct cpudl_item),
                               GFP_KERNEL);
        if (!cp->elements)
                return -ENOMEM;

        if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
                kfree(cp->elements);
                return -ENOMEM;
        }

        for_each_possible_cpu(i)
                cp->elements[i].idx = IDX_INVALID;

        return 0;
}
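/*
 * A minimal usage sketch (illustrative; the real call sites are in the
 * root-domain setup code): each root domain owns one cpudl heap, so
 * initialization and teardown pair up roughly as:
 *
 *	if (cpudl_init(&rd->cpudl))
 *		goto free_rto_mask;
 *	...
 *	cpudl_cleanup(&rd->cpudl);
 *
 * where the error-path label is an assumption borrowed from typical
 * root-domain init code, not something defined in this file.
 */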

/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
        free_cpumask_var(cp->free_cpus);
        kfree(cp->elements);
}