/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

/** evutil_usleep_() */
#if defined(_WIN32)
#elif defined(EVENT__HAVE_NANOSLEEP)
#elif defined(EVENT__HAVE_USLEEP)
#include <unistd.h>
#endif

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"
#include "mm-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be Windows. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
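
/* Worked example of the conversion above (illustrative values only): the
 * FILETIME value 116444736010000000 is EPOCH_BIAS plus 10,000,000 units of
 * 100 ns, i.e. exactly one second after the Unix epoch, so it maps to
 * tv_sec = 1, tv_usec = 0.  A value of 116444736010000005 still yields
 * tv_usec = 0, since the five extra 100-ns units are less than one whole
 * microsecond (UNITS_PER_USEC == 10). */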
#endif

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}
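
/* Note that the microsecond part is rounded up, never down.  For example
 * (illustrative values): a timeval of {0, 1} becomes 1 msec and {2, 1500}
 * becomes 2002 msec, so a caller never waits for less than the requested
 * interval. */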

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		long msec = evutil_tv_to_msec_(tv);
		Sleep((DWORD)msec);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	select(0, NULL, NULL, NULL, tv);
#endif
}
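
/* Illustrative usage sketch (not part of the library API surface): to pause
 * for 1.5 milliseconds a caller inside libevent can do
 *
 *	struct timeval tv;
 *	tv.tv_sec = 0;
 *	tv.tv_usec = 1500;
 *	evutil_usleep_(&tv);
 *
 * On Windows this rounds up to a 2-msec Sleep(); elsewhere it maps to
 * nanosleep(), usleep(), or select() as selected above. */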

int
evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm)
{
	static const char *DAYS[] =
		{ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
	static const char *MONTHS[] =
		{ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

	time_t t = time(NULL);

#ifndef _WIN32
	struct tm sys;
#endif

	/* If `tm` is NULL, use the system's current time. */
	if (tm == NULL) {
#ifdef _WIN32
		/** TODO: detect _gmtime64()/_gmtime64_s() */
		tm = gmtime(&t);
#else
		gmtime_r(&t, &sys);
		tm = &sys;
#endif
	}

	return evutil_snprintf(
		date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT",
		DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon],
		1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec);
}
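
/* Illustrative usage sketch, e.g. for an HTTP Date header.  The formatted
 * date is always 29 characters plus the terminating NUL, so the 30-byte
 * buffer below (an illustrative choice) is just large enough:
 *
 *	char buf[30];
 *	evutil_date_rfc1123(buf, sizeof(buf), NULL);
 *	// buf now holds something like "Sun, 06 Nov 1994 08:49:37 GMT"
 */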

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'. It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}
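
/* A worked example of the ratchet (the numbers are made up): if last_time is
 * 10.0 s and the underlying clock suddenly reports 9.4 s, the 0.6 s deficit
 * is added to adjust_monotonic_clock and 10.0 s is returned instead, so the
 * caller never observes time moving backwards; every later reading is then
 * shifted forward by that same 0.6 s. */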

/*
   Allocate a new struct evutil_monotonic_timer
 */
struct evutil_monotonic_timer *
evutil_monotonic_timer_new(void)
{
	struct evutil_monotonic_timer *p = NULL;

	p = mm_malloc(sizeof(*p));
	if (!p) goto done;

	memset(p, 0, sizeof(*p));

 done:
	return p;
}

/*
   Free a struct evutil_monotonic_timer
 */
void
evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
{
	if (timer) {
		mm_free(timer);
	}
}

/*
   Set up a struct evutil_monotonic_timer for initial use
 */
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
    int flags)
{
	return evutil_configure_monotonic_time_(timer, flags);
}

/*
   Query the current monotonic time
 */
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
    struct timeval *tp)
{
	return evutil_gettime_monotonic_(timer, tp);
}
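
/* Illustrative usage sketch of the public wrappers above (not part of the
 * library itself; real callers should check the return values): measure how
 * long some work takes, immune to wall-clock adjustments.
 *
 *	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
 *	struct timeval start, end, diff;
 *	evutil_configure_monotonic_time(timer, EV_MONOT_PRECISE);
 *	evutil_gettime_monotonic(timer, &start);
 *	... do the work being measured ...
 *	evutil_gettime_monotonic(timer, &end);
 *	evutil_timersub(&end, &start, &diff);
 *	evutil_monotonic_timer_free(timer);
 */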

#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know). This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the scaling factors
   that convert its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
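
/* A worked conversion example (the timebase ratio is illustrative, not a
 * claim about any particular hardware): if mach_timebase_info() reports
 * numer = 125, denom = 3 -- each tick is 125/3 ns -- then configure above
 * stores denom = 3000, so an abstime of 24,000,000 ticks becomes
 * 24,000,000 * 125 / 3000 = 1,000,000 usec, reported as tv_sec = 1,
 * tv_usec = 0. */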
#endif

#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the most
   high-resolution time available.  It's a pity it's not so monotonic in
   practice; it's also got some fun bugs, especially: with older Windowses,
   under virtualization, with funny hardware, on multiprocessor systems, and
   so on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives a number of 1-msec
   ticks since startup.  The accuracy here might be as bad as 10-20 msec, I
   hear.  There's an undocumented function (NtSetTimerResolution) that
   allegedly increases the accuracy. Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

   "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise timing,
   then we mostly[2] assume that GetTickCount is monotonic, and correct
   QueryPerformanceCounter to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us.  He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so.  */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}


int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
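
/* A worked example of the resynchronization above (the values are made up):
 * if ticks_elapsed is 3000 msec but the performance counter claims 5,000,000
 * usec have passed, the 2-second disagreement exceeds the 1-second limit, so
 * counter_usec_elapsed is snapped back to 3,000,000 usec and first_counter
 * is rebased so that later calls measure from the corrected point. */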
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int precise)
{
	memset(base, 0, sizeof(*base));
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	if (evutil_gettimeofday(tp, NULL) < 0)
		return -1;
	adjust_monotonic_time(base, tp);
	return 0;
}
#endif