// SPDX-License-Identifier: GPL-2.0
/*
 *    Virtual cpu timer based timer functions.
 *
 *    Copyright IBM Corp. 2004, 2012
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <asm/alternative.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

#include "entry.h"

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
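/*
 * virt_timer_elapsed accumulates the task CPU time consumed since the
 * current list base; virt_timer_current caches the expiry value of the
 * soonest timer on virt_timer_list, relative to that same base.
 */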
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

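/*
 * Per-CPU state for SMT utilization scaling: the cycle counters of the
 * last MT-diagnostic sample and the mult/div factors derived from them.
 */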
DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);

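/*
 * Program the CPU timer with a new expiry value and account the time
 * consumed since the last update to the lowcore system timer.
 */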
static inline void set_vtimer(u64 expires)
{
	struct lowcore *lc = get_lowcore();
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=Q" (timer) : "Q" (expires));
	lc->system_timer += lc->last_update_timer - timer;
	lc->last_update_timer = expires;
}

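/*
 * Advance the elapsed-time base by the consumed CPU time; returns
 * nonzero if the soonest virtual timer has become due.
 */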
static inline int virt_timer_forward(u64 elapsed)
{
	lockdep_assert_irqs_disabled();
	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

static void update_mt_scaling(void)
{
	u64 cycles_new[8], *cycles_old;
	u64 delta, fac, mult, div;
	int i;

	stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
	cycles_old = this_cpu_ptr(mt_cycles);
	fac = 1;
	mult = div = 0;
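	/*
	 * Compute mult/div = (sum of delta_i / (i + 1)) / (sum of delta_i)
	 * in integer arithmetic only: both terms are scaled by
	 * (smp_cpu_mtid + 1)!, which fac accumulates as the loop runs.
	 * Thread i's cycles are weighted by 1 / (i + 1), i.e. roughly by
	 * the additional core capacity the (i + 1)th thread contributes.
	 */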
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;
	if (div > 0) {
		/* Update scaling factor */
		__this_cpu_write(mt_scaling_mult, mult);
		__this_cpu_write(mt_scaling_div, div);
		memcpy(cycles_old, cycles_new,
		       sizeof(u64) * (smp_cpu_mtid + 1));
	}
	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}

static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
{
	u64 delta;

	delta = new - *tsk_vtime;
	*tsk_vtime = new;
	return delta;
}

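/*
 * Convert raw CPU timer ticks to full-core equivalents. For example, on
 * a core with two active threads where each thread ran 1000 cycles in
 * the last sample, mult/div = (1000 + 1000/2) / 2000 = 3/4, so raw
 * ticks are scaled down to 75%.
 */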
static inline u64 scale_vtime(u64 vtime)
{
	u64 mult = __this_cpu_read(mt_scaling_mult);
	u64 div = __this_cpu_read(mt_scaling_div);

	if (smp_cpu_mtid)
		return vtime * mult / div;
	return vtime;
}

static void account_system_index_scaled(struct task_struct *p, u64 cputime,
					enum cpu_usage_stat index)
{
	p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
	account_system_index_time(p, cputime_to_nsecs(cputime), index);
}

/*
 * Update process times based on virtual CPU times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk)
{
	u64 timer, clock, user, guest, system, hardirq, softirq;
	struct lowcore *lc = get_lowcore();

	timer = lc->last_update_timer;
	clock = lc->last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	stckf	%1"	/* Store current tod clock value */
		: "=Q" (lc->last_update_timer),
		  "=Q" (lc->last_update_clock)
		: : "cc");
	clock = lc->last_update_clock - clock;
	timer -= lc->last_update_timer;

	if (hardirq_count())
		lc->hardirq_timer += timer;
	else
		lc->system_timer += timer;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid && time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	/* Calculate cputime delta */
	user = update_tsk_timer(&tsk->thread.user_timer, lc->user_timer);
	guest = update_tsk_timer(&tsk->thread.guest_timer, lc->guest_timer);
	system = update_tsk_timer(&tsk->thread.system_timer, lc->system_timer);
	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer, lc->hardirq_timer);
	softirq = update_tsk_timer(&tsk->thread.softirq_timer, lc->softirq_timer);
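	/*
	 * Wall-clock time not covered by any CPU timer category was spent
	 * outside this virtual CPU, e.g. in the hypervisor: account it as
	 * steal time.
	 */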
	lc->steal_timer += clock - user - guest - system - hardirq - softirq;

	/* Push account value */
	if (user) {
		account_user_time(tsk, cputime_to_nsecs(user));
		tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
	}

	if (guest) {
		account_guest_time(tsk, cputime_to_nsecs(guest));
		tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
	}

	if (system)
		account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
	if (hardirq)
		account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
	if (softirq)
		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);

	return virt_timer_forward(user + guest + system + hardirq + softirq);
}

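/*
 * On a context switch the accumulated lowcore timers are saved into the
 * outgoing task and the values of the incoming task are loaded, so each
 * task's deltas stay attributable to it.
 */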
void vtime_task_switch(struct task_struct *prev)
{
	struct lowcore *lc = get_lowcore();

	do_account_vtime(prev);
	prev->thread.user_timer = lc->user_timer;
	prev->thread.guest_timer = lc->guest_timer;
	prev->thread.system_timer = lc->system_timer;
	prev->thread.hardirq_timer = lc->hardirq_timer;
	prev->thread.softirq_timer = lc->softirq_timer;
	lc->user_timer = current->thread.user_timer;
	lc->guest_timer = current->thread.guest_timer;
	lc->system_timer = current->thread.system_timer;
	lc->hardirq_timer = current->thread.hardirq_timer;
	lc->softirq_timer = current->thread.softirq_timer;
}

/*
 * On s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the stolen time accounting.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct lowcore *lc = get_lowcore();
	u64 steal, avg_steal;

	if (do_account_vtime(tsk))
		virt_timer_expire();

	steal = lc->steal_timer;
	avg_steal = lc->avg_steal_timer;
	if ((s64) steal > 0) {
		lc->steal_timer = 0;
		account_steal_time(cputime_to_nsecs(steal));
		avg_steal += steal;
	}
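	/* Keep a simple exponentially decaying average of recent steal time. */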
	lc->avg_steal_timer = avg_steal / 2;
}

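/*
 * The CPU timer counts down, so the previous snapshot minus the new
 * value yields the CPU time consumed since the last update.
 */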
static u64 vtime_delta(void)
{
	struct lowcore *lc = get_lowcore();
	u64 timer = lc->last_update_timer;

	lc->last_update_timer = get_cpu_timer();
	return timer - lc->last_update_timer;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct lowcore *lc = get_lowcore();
	u64 delta = vtime_delta();

	if (tsk->flags & PF_VCPU)
		lc->guest_timer += delta;
	else
		lc->system_timer += delta;
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_softirq(struct task_struct *tsk)
{
	get_lowcore()->softirq_timer += vtime_delta();
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	get_lowcore()->hardirq_timer += vtime_delta();
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
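	/*
	 * The surviving timers were rebased above; cache the new soonest
	 * expiry and subtract the consumed time from the elapsed base.
	 */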
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);
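
/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * caller with a callback my_timer_fn() would arm a one-shot timer
 * roughly along these lines, with expires given in CPU timer units:
 *
 *	static void my_timer_fn(unsigned long data);
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = my_timer_fn;
 *	my_timer.data = 0;
 *	my_timer.expires = expiry_value;
 *	add_virt_timer(&my_timer);
 */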

/*
 * add_virt_timer_periodic - add a periodic virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
	/* Setup initial MT scaling values */
	if (smp_cpu_mtid) {
		__this_cpu_write(mt_scaling_jiffies, jiffies);
		__this_cpu_write(mt_scaling_mult, 1);
		__this_cpu_write(mt_scaling_div, 1);
		stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
	}
}