/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/vdso/timebase.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <uapi/linux/time.h>

#define VDSO_HAS_CLOCK_GETRES 1

#define VDSO_HAS_TIME 1

/*
 * powerpc specific delta calculation.
 *
 * This variant removes the masking of the subtraction because the
 * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
 * which would result in a pointless operation. The compiler cannot
 * optimize it away as the mask comes from the vdso data and is not compile
 * time constant.
 */
#define VDSO_DELTA_NOMASK 1

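/*
 * Issue a two-argument system call: the syscall number goes in r0, the
 * arguments in r3/r4, and the result comes back in r3.  The kernel
 * signals failure by setting the summary-overflow bit in CR0, so
 * "bns+" skips the fixup on success and "neg" turns the positive error
 * code into the usual -errno return value.
 */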
static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
					const unsigned long _r4)
{
	register long r0 asm("r0") = _r0;
	register unsigned long r3 asm("r3") = _r3;
	register unsigned long r4 asm("r4") = _r4;
	register int ret asm ("r3");

	asm volatile(
		"	sc\n"
		"	bns+	1f\n"
		"	neg	%0, %0\n"
		"1:\n"
		: "=r" (ret), "+r" (r4), "+r" (r0)
		: "r" (r3)
		: "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");

	return ret;
}

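/*
 * Fallback syscalls used by the generic vDSO code whenever the result
 * cannot be computed in userspace (e.g. an unsupported clock id).
 */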
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz)
{
	return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
}

#ifdef __powerpc64__

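/*
 * On 64-bit, struct __kernel_timespec matches the native syscall ABI,
 * so the regular clock_gettime/clock_getres syscalls are used directly.
 */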
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}

#else

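/*
 * 32-bit builds: tell the generic vDSO code this is a 32-bit build and
 * use the time64 syscalls for the 64-bit timespec fallbacks; the legacy
 * syscalls below serve the old 32-bit timespec variants.
 */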
#define BUILD_VDSO32 1

static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}
#endif

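/*
 * The timebase is the only vDSO-capable clocksource on powerpc, so
 * clock_mode is ignored and the timebase register is read directly.
 */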
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	return get_tb();
}

const struct vdso_data *__arch_get_vdso_data(void);

#ifdef CONFIG_TIME_NS
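/*
 * The time namespace vdso data page is mapped directly after the
 * regular vdso data page, hence the one-page offset.
 */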
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
	return (void *)vd + (1U << CONFIG_PAGE_SHIFT);
}
#endif

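/* The timebase is always usable, so no clocksource validity check is needed. */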
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return true;
}
#define vdso_clocksource_ok vdso_clocksource_ok

#ifndef __powerpc64__
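/*
 * 32-bit only: right-shift a 64-bit nanosecond value, optimised for the
 * common case where the shifted result fits in 32 bits.
 */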
static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
{
	u32 hi = ns >> 32;
	u32 lo = ns;

	lo >>= shift;
	lo |= hi << (32 - shift);
	hi >>= shift;

	if (likely(hi == 0))
		return lo;

	return ((u64)hi << 32) | lo;
}
#define vdso_shift_ns vdso_shift_ns
#endif

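/*
 * Prototypes for the C implementations backing the vDSO functions; the
 * user-visible __kernel_* entry points call into these.
 */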
#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
			    const struct vdso_data *vd);
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
			       const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
			    const struct vdso_data *vd);
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
			    const struct vdso_data *vd);
__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
				    const struct vdso_data *vd);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */