xref: /linux/arch/arm64/include/asm/vdso/compat_gettimeofday.h (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_COMPAT_GETTIMEOFDAY_H
6 #define __ASM_VDSO_COMPAT_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLER__
9 
10 #include <vdso/clocksource.h>
11 #include <vdso/time32.h>
12 
13 #include <asm/barrier.h>
14 #include <asm/unistd_compat_32.h>
15 #include <asm/errno.h>
16 
17 #include <asm/vdso/compat_barrier.h>
18 
19 #define VDSO_HAS_CLOCK_GETRES		1
20 
21 #define BUILD_VDSO32			1
22 
/*
 * Syscall fallback for gettimeofday() when the vDSO fast path cannot be
 * used.  Issues the AArch32 compat syscall directly via "swi #0" using
 * the AArch32 EABI convention: syscall number in r7, arguments in r0/r1,
 * return value in r0.
 *
 * The register variables pin each value to its required register; note
 * that "ret" is also bound to r0 so it picks up the syscall return value
 * (0 on success, negative errno on failure).
 */
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat32_gettimeofday;

	/* "memory" clobber: the kernel writes *tv and *tz behind the
	 * compiler's back. */
	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}
40 
/*
 * Syscall fallback for clock_gettime() with a 64-bit time result
 * (clock_gettime64 in the AArch32 compat ABI).  Traps into the kernel
 * with "swi #0"; AArch32 EABI convention: syscall number in r7,
 * arguments in r0/r1, return value in r0.
 *
 * "ret" deliberately aliases r0 to capture the syscall return value
 * (0 on success, negative errno on failure).
 */
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat32_clock_gettime64;

	/* "memory" clobber: the kernel writes *ts behind the compiler's
	 * back. */
	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
57 
/*
 * Syscall fallback for the legacy 32-bit time_t clock_gettime()
 * (struct old_timespec32 result).  Same trap mechanism and register
 * convention as clock_gettime_fallback() above, but invokes the
 * non-time64 compat syscall number.
 */
static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");	/* aliases r0: syscall return value */
	register long nr asm("r7") = __NR_compat32_clock_gettime;

	/* "memory" clobber: the kernel writes *ts behind the compiler's
	 * back. */
	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
74 
/*
 * Syscall fallback for clock_getres() with a 64-bit time result
 * (clock_getres_time64 in the AArch32 compat ABI).  Required because
 * VDSO_HAS_CLOCK_GETRES is set above.  Same trap mechanism and register
 * convention as the fallbacks above.
 */
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");	/* aliases r0: syscall return value */
	register long nr asm("r7") = __NR_compat32_clock_getres_time64;

	/* "memory" clobber: the kernel writes *ts behind the compiler's
	 * back. */
	asm volatile(
	"       swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
91 
/*
 * Syscall fallback for the legacy 32-bit time_t clock_getres()
 * (struct old_timespec32 result).  Same trap mechanism and register
 * convention as the fallbacks above, using the non-time64 compat
 * syscall number.
 */
static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");	/* aliases r0: syscall return value */
	register long nr asm("r7") = __NR_compat32_clock_getres;

	/* "memory" clobber: the kernel writes *ts behind the compiler's
	 * back. */
	asm volatile(
	"       swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
108 
/*
 * Read the raw hardware counter for the vDSO time code, from AArch32
 * compat userspace.  Returns the 64-bit counter value, or 0 if the
 * clocksource mode is not the architected timer (see below).
 *
 * The two isb() barriers bracketing the counter read are load-bearing:
 * do not reorder or remove them.
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_time_data *vd)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
		return 0;

	/*
	 * This isb() is required to prevent that the counter value
	 * is speculated.
	 */
	isb();
	/*
	 * AArch32 MRRC encoding (p15, opc1=1, CRm=c14) reading the 64-bit
	 * virtual counter CNTVCT into a register pair (%Q0 = low word,
	 * %R0 = high word).
	 */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent that the seq lock is
	 * speculated.
	 */
	isb();

	return res;
}
136 
/*
 * Return a pointer to the vDSO time data page, hiding the address
 * computation from the compiler (see the comment below for why).
 */
static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
	const struct vdso_time_data *ret;

	/*
	 * This simply puts &vdso_u_time_data into ret. The reason why we don't
	 * use `ret = &vdso_u_time_data` is that the compiler tends to optimise
	 * this in a very suboptimal way: instead of keeping &vdso_u_time_data
	 * in a register, it goes through a relocation almost every time
	 * vdso_u_time_data must be accessed (even in subfunctions). This is
	 * both time and space consuming: each relocation uses a word in the
	 * code section, and it has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_u_time_data() is called, and then keep the
	 * result in a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(&vdso_u_time_data));

	return ret;
}
159 #define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
160 
161 static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
162 {
163 	return vc->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
164 }
165 #define vdso_clocksource_ok	vdso_clocksource_ok
166 
167 #endif /* !__ASSEMBLER__ */
168 
169 #endif /* __ASM_VDSO_COMPAT_GETTIMEOFDAY_H */
170