/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>

#define gtod (&VVAR(vsyscall_gtod_data))

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifndef BUILD_VDSO32

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm ("syscall" : "=a" (ret), "=m" (*ts) :
	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	     "memory", "rcx", "r11");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
	     "memory", "rcx", "r11");
	return ret;
}


#else

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*ts)
		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[tv], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*tv), "=m" (*tz)
		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}

#endif
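
/*
 * The fallbacks above take the slow syscall path whenever the selected
 * clocksource cannot be read from userspace (VCLOCK_NONE).  On 64-bit
 * the syscall instruction itself clobbers %rcx and %r11, hence the
 * extra clobbers.  On 32-bit the vDSO is built as position-independent
 * code, where %ebx is normally reserved as the GOT pointer and cannot
 * simply be handed to the compiler as a constraint or clobber, so the
 * asm saves it in %edx around the __kernel_vsyscall call and restores
 * it afterwards.
 */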

#ifdef CONFIG_PARAVIRT_CLOCK
static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}

static notrace u64 vread_pvclock(int *mode)
{
	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
	u64 ret;
	u64 last;
	u32 version;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that that
	 * version is increased whenever underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */

	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
			*mode = VCLOCK_NONE;
			return 0;
		}

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	/* refer to vread_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
static notrace u64 vread_hvclock(int *mode)
{
	const struct ms_hyperv_tsc_page *tsc_pg =
		(const struct ms_hyperv_tsc_page *)&hvclock_page;
	u64 current_tick = hv_read_tsc_page(tsc_pg);

	if (current_tick != U64_MAX)
		return current_tick;

	*mode = VCLOCK_NONE;
	return 0;
}
#endif

notrace static u64 vread_tsc(void)
{
	u64 ret = (u64)rdtsc_ordered();
	u64 last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
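
/*
 * Convert the clocksource delta below into "shifted" nanoseconds: the
 * caller adds the result to a shifted-ns base value (e.g.
 * wall_time_snsec) and then divides by 2^shift with a single right
 * shift.  Illustrative numbers only (not taken from real hardware):
 * for a 2 GHz TSC with shift = 24, mult would be roughly 8388608, so
 * each cycle contributes mult / 2^shift = 0.5 ns:
 *
 *	ns = (wall_time_snsec + delta_cycles * mult) >> shift;
 */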
notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
	else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
		cycles = vread_hvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}

/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->wall_time_coarse_sec;
		ts->tv_nsec = gtod->wall_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->monotonic_time_coarse_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = READ_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
time_t time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));
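
/*
 * Userspace normally reaches these entry points through libc, which
 * locates the vDSO image via the AT_SYSINFO_EHDR auxiliary vector
 * entry and resolves __vdso_clock_gettime & co. from its dynamic
 * symbol table.  Rough sketch; lookup_vdso_symbol() is a hypothetical
 * helper along the lines of the kernel's parse_vdso.c example:
 *
 *	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *	int (*vgettime)(clockid_t, struct timespec *) =
 *		lookup_vdso_symbol(vdso, "__vdso_clock_gettime");
 *	struct timespec ts;
 *	vgettime(CLOCK_MONOTONIC, &ts);
 *
 * The weak aliases above additionally export the plain clock_gettime,
 * gettimeofday and time names from the vDSO itself.
 */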