/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be Windows. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the Unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
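/* An illustrative (not built) sanity check of EPOCH_BIAS: it is the number
 * of 100-nanosecond units between 1601-01-01 and 1970-01-01.  The 369
 * intervening years contain 89 leap days, which gives the figure used
 * above.  This sketch is editor-added illustration, not part of the
 * original code. */
#if 0
static int
epoch_bias_sketch(void)
{
	ev_uint64_t days = 369 * 365 + 89;		/* 134774 days */
	ev_uint64_t seconds = days * 86400;		/* 11644473600 seconds */
	ev_uint64_t units = seconds * UNITS_PER_SEC;	/* 100-ns units */
	return units == EPOCH_BIAS;			/* 116444736000000000 */
}
#endif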
#endif

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		long msec = evutil_tv_to_msec_(tv);
		Sleep((DWORD)msec);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	select(0, NULL, NULL, NULL, tv);
#endif
}

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'.  It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}
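/* A small illustration of the ratchet (editor-added, not built): if the
 * clock hands us a value 300 usec earlier than what we last reported, we
 * fold those 300 usec into adjust_monotonic_clock and report the previous
 * value again, so callers never see time run backwards.  The sketch assumes
 * adjust_monotonic_clock starts at zero. */
#if 0
static void
ratchet_sketch(struct evutil_monotonic_timer *base)
{
	struct timeval tv;
	tv.tv_sec = 100;
	tv.tv_usec = 200;			/* clock stepped back... */
	base->last_time.tv_sec = 100;
	base->last_time.tv_usec = 500;		/* ...relative to last_time */
	adjust_monotonic_time(base, &tv);
	/* tv is now 100.000500, and every later reading gets +300 usec. */
}
#endif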

#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know).  This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the right ratio to
   convert its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
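/* Editor-added illustration (not built): mach_timebase_info() typically
 * reports a ratio of 1/1 on Intel Macs (ticks are already nanoseconds) and
 * something like 125/3 on Apple-silicon Macs (each tick is about 41.7 ns);
 * those figures are assumptions for the sketch, not guarantees.  After the
 * denom *= 1000 above, the same ratio converts ticks straight to
 * microseconds. */
#if 0
static ev_uint64_t
mach_ticks_to_usec_sketch(ev_uint64_t abstime)
{
	/* Example timebase of 125/3, already scaled to give microseconds. */
	struct mach_timebase_info mi = { 125, 3 * 1000 };
	return (abstime * mi.numer) / mi.denom;
}
#endif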
#endif

#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the most
   high-resolution time available.  It's a pity it's not so monotonic in
   practice; it's also got some fun bugs, especially: with older Windowses,
   under virtualization, with funny hardware, on multiprocessor systems,
   and so on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives a number of 1-msec
   ticks since startup.  The accuracy here might be as bad as 10-20 msec, I
   hear.  There's an undocumented function (NtSetTimerResolution) that
   allegedly increases the accuracy.  Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

     "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise timing,
   then we mostly[2] assume that GetTickCount is monotonic, and correct
   QueryPerformanceCounter to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us.  He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation.  We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so. */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}
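/* Editor-added worked example for the 32-bit fallback above: GetTickCount()
 * wraps every 2^32 msec (about 49.7 days).  If last_tick_count was
 * 0xFFFFFE00 and the next call returns 0x00000100, the new value is smaller
 * than the old one, so we add 2^32 msec to adjust_tick_count and the sum we
 * return keeps increasing across the wrap. */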

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6.  If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result.  Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
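/* Editor-added illustration of the sanity check in evutil_gettime_monotonic_()
 * above: if GetTickCount*() says 5000 msec have elapsed but the performance
 * counter works out to 6,200,000 usec, the two disagree by more than one
 * second, so we trust the tick count instead (5,000,000 usec) and rebase
 * first_counter so that later readings stay consistent with it. */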
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int precise)
{
	memset(base, 0, sizeof(*base));
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	if (evutil_gettimeofday(tp, NULL) < 0)
		return -1;
	adjust_monotonic_time(base, tp);
	return 0;
}
#endif
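/* Editor-added usage sketch (not built): assuming the public wrappers that
 * libevent 2.1 and later declare in event2/util.h -- evutil_monotonic_timer_new(),
 * evutil_configure_monotonic_time(), evutil_gettime_monotonic() and
 * evutil_monotonic_timer_free() -- an application can time an interval with
 * the machinery in this file like so. */
#if 0
#include <stdio.h>
#include <event2/util.h>

static void
monotonic_timer_example(void)
{
	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
	struct timeval start, now, elapsed;

	if (!timer)
		return;
	/* Ask for the most precise clock available; pass 0 for the default. */
	if (evutil_configure_monotonic_time(timer, EV_MONOT_PRECISE) == 0 &&
	    evutil_gettime_monotonic(timer, &start) == 0) {
		/* ... do some work ... */
		evutil_gettime_monotonic(timer, &now);
		evutil_timersub(&now, &start, &elapsed);
		printf("elapsed: %ld.%06ld s\n",
		    (long)elapsed.tv_sec, (long)elapsed.tv_usec);
	}
	evutil_monotonic_timer_free(timer);
}
#endif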