xref: /linux/arch/s390/include/asm/timex.h (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  *  include/asm-s390/timex.h
3  *
4  *  S390 version
5  *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6  *
7  *  Derived from "include/asm-i386/timex.h"
8  *    Copyright (C) 1992, Linus Torvalds
9  */
10 
11 #ifndef _ASM_S390_TIMEX_H
12 #define _ASM_S390_TIMEX_H
13 
14 #include <asm/lowcore.h>
15 
16 /* The value of the TOD clock for 1.1.1970. */
17 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
18 
19 /* Inline functions for clock register access. */
/*
 * set_clock - set the TOD (time-of-day) clock from @time.
 *
 * Executes the privileged SCK instruction with @time as the new
 * clock value and returns the resulting condition code (0 on
 * success, non-zero if the clock could not be set — exact cc
 * meanings per the z/Architecture Principles of Operation).
 */
static inline int set_clock(__u64 time)
{
	int cc;

	asm volatile(
		"   sck   %1\n"		/* set clock from the memory operand */
		"   ipm   %0\n"		/* insert program mask: cc into bits 28-29 */
		"   srl   %0,28\n"	/* shift cc down into the low two bits */
		: "=d" (cc) : "Q" (time) : "cc");
	return cc;
}
31 
/*
 * store_clock - store the current TOD clock value into *@time.
 *
 * Executes STCK and returns its condition code: 0 means the clock
 * is in the set state and the stored value is usable; non-zero
 * indicates not-set/error states (see Principles of Operation).
 */
static inline int store_clock(__u64 *time)
{
	int cc;

	asm volatile(
		"   stck  %1\n"		/* store clock into *time */
		"   ipm   %0\n"		/* insert program mask: cc into bits 28-29 */
		"   srl   %0,28\n"	/* shift cc down into the low two bits */
		: "=d" (cc), "=Q" (*time) : : "cc");
	return cc;
}
43 
/*
 * set_clock_comparator - load the clock comparator with @time.
 *
 * A clock-comparator interrupt is raised when the TOD clock passes
 * this value. SCKC does not change the condition code, so no "cc"
 * clobber is needed here.
 */
static inline void set_clock_comparator(__u64 time)
{
	asm volatile("sckc %0" : : "Q" (time));
}
48 
/*
 * store_clock_comparator - store the current clock-comparator
 * value into *@time via STCKC (condition code is not affected).
 */
static inline void store_clock_comparator(__u64 *time)
{
	asm volatile("stckc %0" : "=Q" (*time));
}
53 
54 void clock_comparator_work(void);
55 
56 static inline unsigned long long local_tick_disable(void)
57 {
58 	unsigned long long old;
59 
60 	old = S390_lowcore.clock_comparator;
61 	S390_lowcore.clock_comparator = -1ULL;
62 	set_clock_comparator(S390_lowcore.clock_comparator);
63 	return old;
64 }
65 
66 static inline void local_tick_enable(unsigned long long comp)
67 {
68 	S390_lowcore.clock_comparator = comp;
69 	set_clock_comparator(S390_lowcore.clock_comparator);
70 }
71 
72 #define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
73 
74 typedef unsigned long long cycles_t;
75 
/*
 * get_clock - return the current 64-bit TOD clock value (STCK).
 *
 * The condition code STCK produces is ignored here; callers that
 * need it use store_clock() instead.
 */
static inline unsigned long long get_clock (void)
{
	unsigned long long clk;

	asm volatile("stck %0" : "=Q" (clk) : : "cc");
	return clk;
}
83 
/*
 * get_clock_ext - store the 16-byte extended TOD clock (STCKE)
 * at @clk. The caller must provide a 16-byte buffer.
 *
 * NOTE(review): the "=Q" (*clk) constraint formally describes only
 * the first byte while STCKE writes 16 bytes — presumably relied
 * upon not to matter with the kernel's compiler flags; confirm.
 */
static inline void get_clock_ext(char *clk)
{
	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}
88 
/*
 * get_clock_fast - read the TOD clock, preferring STCKF.
 *
 * If facility bit 25 (store-clock-fast) is installed, use the
 * STCKF instruction — emitted via .insn with opcode 0xb27c since
 * older assemblers may not know the mnemonic. Otherwise fall back
 * to the ordinary STCK path in get_clock().
 */
static inline unsigned long long get_clock_fast(void)
{
	unsigned long long clk;

	if (test_facility(25))
		asm volatile(".insn	s,0xb27c0000,%0" : "=Q" (clk) : : "cc");	/* STCKF */
	else
		clk = get_clock();
	return clk;
}
99 
/*
 * get_clock_xt - return the 64-bit TOD value taken from the
 * extended (STCKE) clock format.
 *
 * Bytes 1..8 of the 16-byte STCKE result are read as one big-endian
 * 64-bit value, skipping the leading byte of the extended format.
 * NOTE(review): the cast makes an unaligned, type-punned load at an
 * odd offset — fine on s390 with -fno-strict-aliasing, but not
 * portable C; confirm before reusing this pattern elsewhere.
 */
static inline unsigned long long get_clock_xt(void)
{
	unsigned char clk[16];
	get_clock_ext(clk);
	return *((unsigned long long *)&clk[1]);
}
106 
107 static inline cycles_t get_cycles(void)
108 {
109 	return (cycles_t) get_clock() >> 2;
110 }
111 
112 int get_sync_clock(unsigned long long *clock);
113 void init_cpu_timer(void);
114 unsigned long long monotonic_clock(void);
115 
116 void tod_to_timeval(__u64, struct timespec *);
117 
118 static inline
119 void stck_to_timespec(unsigned long long stck, struct timespec *ts)
120 {
121 	tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
122 }
123 
124 extern u64 sched_clock_base_cc;
125 
126 /**
127  * get_clock_monotonic - returns current time in clock rate units
128  *
129  * The caller must ensure that preemption is disabled.
130  * The clock and sched_clock_base get changed via stop_machine.
131  * Therefore preemption must be disabled when calling this
132  * function, otherwise the returned value is not guaranteed to
133  * be monotonic.
134  */
135 static inline unsigned long long get_clock_monotonic(void)
136 {
137 	return get_clock_xt() - sched_clock_base_cc;
138 }
139 
140 #endif
141