/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/unistd.h>
#include <uapi/linux/time.h>

#include <asm/vdso/compat_barrier.h>

#define __VDSO_USE_SYSCALL		ULLONG_MAX

#define VDSO_HAS_CLOCK_GETRES		1

#define VDSO_HAS_32BIT_FALLBACK	1

/*
 * Syscall fallbacks: each helper issues the corresponding compat syscall
 * (via "swi #0") when the vDSO cannot service the request directly.
 */
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	/* The checks below are required for ABI consistency with arm */
	if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
		return -EINVAL;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres;

	/* The checks below are required for ABI consistency with arm */
	if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
		return -EINVAL;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * clock_mode == 0 means the vDSO clock source is usable; for any
	 * other value, fall back on the syscall.
	 */
	if (clock_mode)
		return __VDSO_USE_SYSCALL;

	/*
	 * This isb() is required to prevent the counter value from being
	 * read speculatively.
	 */
	isb();
	/* Read CNTVCT through its AArch32 (MRRC) encoding. */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent the seq lock from being
	 * read speculatively.
	 */
	isb();

	return res;
}

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */