xref: /linux/arch/s390/include/asm/vtime.h (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _S390_VTIME_H
3 #define _S390_VTIME_H
4 
5 #include <asm/lowcore.h>
6 #include <asm/cpu_mf.h>
7 #include <asm/idle.h>
8 
9 DECLARE_PER_CPU(u64, mt_cycles[8]);
10 
11 static inline void update_timer_sys(void)
12 {
13 	struct lowcore *lc = get_lowcore();
14 
15 	lc->system_timer += lc->last_update_timer - lc->exit_timer;
16 	lc->user_timer += lc->exit_timer - lc->sys_enter_timer;
17 	lc->last_update_timer = lc->sys_enter_timer;
18 }
19 
20 static inline void update_timer_mcck(void)
21 {
22 	struct lowcore *lc = get_lowcore();
23 
24 	lc->system_timer += lc->last_update_timer - lc->exit_timer;
25 	lc->user_timer += lc->exit_timer - lc->mcck_enter_timer;
26 	lc->last_update_timer = lc->mcck_enter_timer;
27 }
28 
/*
 * Account the time spent in idle when the CPU leaves the idle state:
 * update the per-cpu multithreading cycle counters, credit the idle
 * wall time to steal_timer, and charge the CPU time that elapsed
 * across the idle period as system time.
 */
static inline void update_timer_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	struct lowcore *lc = get_lowcore();
	u64 cycles_new[8];
	int i, mtid;

	/*
	 * With SMT enabled (mtid != 0), read the current MT-diagnostic
	 * cycle counters and accumulate the delta since idle entry into
	 * the per-cpu mt_cycles array. The buffer size 8 matches the
	 * mt_cycles[8] per-cpu declaration above.
	 */
	mtid = smp_cpu_mtid;
	if (mtid) {
		stcctm(MT_DIAG, mtid, cycles_new);
		for (i = 0; i < mtid; i++)
			__this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
	}
	/*
	 * This is a bit subtle: Forward last_update_clock so it excludes idle
	 * time. For correct steal time calculation in do_account_vtime() add
	 * passed wall time before idle_enter to steal_timer:
	 * During the passed wall time before idle_enter CPU time may have
	 * been accounted to system, hardirq, softirq, etc. lowcore fields.
	 * The accounted CPU times will be subtracted again from steal_timer
	 * when accumulated steal time is calculated in do_account_vtime().
	 */
	/* NOTE: steal_timer must be updated before last_update_clock is
	 * overwritten below — the old value is read on the first line. */
	lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
	lc->last_update_clock = lc->int_clock;
	/* CPU time consumed between idle entry and now goes to system time;
	 * restart the next accounting interval from the system-entry timer. */
	lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
	lc->last_update_timer = lc->sys_enter_timer;
}
56 
57 #endif /* _S390_VTIME_H */
58