/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"
#include "mm-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be Windows. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
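	/* (Worked derivation, added for reference: there are 11644473600
	 * seconds between 1601-01-01 and 1970-01-01, so EPOCH_BIAS is that
	 * count expressed in 100-ns units:
	 * 11644473600 * 10000000 = 116444736000000000.) */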
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
#endif

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}
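
/* Worked example (illustrative, not part of the original source): a timeval
 * of {1, 500} maps to (1 * 1000) + ((500 + 999) / 1000) = 1001 msec; the
 * "+ 999" rounds any fraction of a millisecond up rather than down, so we
 * never report a duration shorter than the one requested. */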

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		long msec = evutil_tv_to_msec_(tv);
		if (msec < 0) /* Overflow; sleep as long as Sleep() allows
			       * rather than passing (DWORD)-1 == INFINITE. */
			msec = LONG_MAX;
		Sleep((DWORD)msec);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	select(0, NULL, NULL, NULL, tv);
#endif
}
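
/* Usage sketch (illustrative, not part of the original source): to pause
 * for 1.5 seconds on any supported platform:
 *
 *	struct timeval tv = { 1, 500000 };
 *	evutil_usleep_(&tv);
 */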

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'. It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}
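
/* Worked example (illustrative, not from the original source): if the last
 * reported time was t=100s and the source then hands us t=97s (a 3s step
 * backwards), the deficit of 3s is folded into adjust_monotonic_clock and we
 * report 100s again; every later reading is then shifted forward by 3s, so
 * the reported clock never runs backwards. */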

/*
   Allocate a new struct evutil_monotonic_timer
 */
struct evutil_monotonic_timer *
evutil_monotonic_timer_new(void)
{
	struct evutil_monotonic_timer *p = NULL;

	p = mm_malloc(sizeof(*p));
	if (!p) goto done;

	memset(p, 0, sizeof(*p));

 done:
	return p;
}

/*
   Free a struct evutil_monotonic_timer
 */
void
evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
{
	if (timer) {
		mm_free(timer);
	}
}

/*
   Set up a struct evutil_monotonic_timer for initial use
 */
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
    int flags)
{
	return evutil_configure_monotonic_time_(timer, flags);
}

/*
   Query the current monotonic time
 */
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
    struct timeval *tp)
{
	return evutil_gettime_monotonic_(timer, tp);
}
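
/* Usage sketch for the public API above (illustrative, not part of the
 * original source; do_some_work() is a hypothetical workload):
 *
 *	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
 *	struct timeval start, end, elapsed;
 *	if (timer && evutil_configure_monotonic_time(timer, 0) == 0) {
 *		evutil_gettime_monotonic(timer, &start);
 *		do_some_work();
 *		evutil_gettime_monotonic(timer, &end);
 *		evutil_timersub(&end, &start, &elapsed);
 *	}
 *	evutil_monotonic_timer_free(timer);
 */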

#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec	ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know). This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif
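
/* Side note (illustrative sketch, not part of the original source): on Linux
 * you can check what granularity CLOCK_MONOTONIC_COARSE actually delivers
 * with the standard clock_getres() call, which typically reports the kernel
 * tick (on the order of the 1-4 msec mentioned above):
 *
 *	struct timespec res;
 *	if (clock_getres(CLOCK_MONOTONIC_COARSE, &res) == 0)
 *		printf("coarse resolution: %ld nsec\n", (long)res.tv_nsec);
 */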

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the right conversion
   factors to turn its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}
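
/* Worked example (illustrative; the 125/3 timebase is an assumption typical
 * of Apple-silicon Macs with a 24 MHz counter, while Intel Macs report 1/1):
 * with numer=125 and denom=3, the adjustment above makes denom=3000, so
 * 24,000,000 raw ticks convert to 24,000,000 * 125 / 3000 = 1,000,000 usec,
 * i.e. exactly one second. */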

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
#endif

#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the most
   high-resolution time.  It's a pity it's not so monotonic in practice;
   it's also got some fun bugs, especially: with older Windowses, under
   virtualizations, with funny hardware, on multiprocessor systems, and so
   on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives a number of 1-msec
   ticks since startup.  The accuracy here might be as bad as 10-20 msec, I
   hear.  There's an undocumented function (NtSetTimerResolution) that
   allegedly increases the accuracy. Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

   "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise timing,
   then we mostly[2] assume that GetTickCount is monotonic, and correct
   QueryPerformanceCounter to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us.  He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so.  */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
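			/* (For example: with the 10 MHz QPC frequency common
			 * on recent Windows, usec_per_count would be 0.1.) */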
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}


int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	memset(base, 0, sizeof(*base));
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	if (evutil_gettimeofday(tp, NULL) < 0)
		return -1;
	adjust_monotonic_time(base, tp);
	return 0;
}
#endif
539