/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

/** evutil_usleep_() */
#if defined(_WIN32)
#elif defined(EVENT__HAVE_NANOSLEEP)
#elif defined(EVENT__HAVE_USLEEP)
#include <unistd.h>
#endif

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"
#include "mm-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be Windows. */

typedef void (WINAPI *GetSystemTimePreciseAsFileTime_fn_t) (LPFILETIME);

int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
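	/* Sanity check on EPOCH_BIAS: the Windows FILETIME epoch (1601-01-01)
	 * precedes the Unix epoch (1970-01-01) by 134774 days, i.e.
	 * 134774 * 86400 = 11644473600 seconds, and
	 * 11644473600 * UNITS_PER_SEC = 116444736000000000 100-ns units. */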
	static GetSystemTimePreciseAsFileTime_fn_t GetSystemTimePreciseAsFileTime_fn = NULL;
	static int check_precise = 1;
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	if (EVUTIL_UNLIKELY(check_precise)) {
		HMODULE h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
		if (h != NULL)
			GetSystemTimePreciseAsFileTime_fn =
				(GetSystemTimePreciseAsFileTime_fn_t)
					GetProcAddress(h, "GetSystemTimePreciseAsFileTime");
		check_precise = 0;
	}

	if (GetSystemTimePreciseAsFileTime_fn != NULL)
		GetSystemTimePreciseAsFileTime_fn(&ft.ft_ft);
	else
		GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
#endif

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

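	/* Round tv_usec up to the next whole millisecond so that a nonzero
	 * timeout never collapses to 0 msec. */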
	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		__int64 usec;
		LARGE_INTEGER li;
		HANDLE timer;

		usec = tv->tv_sec * 1000000LL + tv->tv_usec;
		if (!usec)
			return;

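		/* SetWaitableTimer() takes its due time in 100-ns units;
		 * a negative value means "relative to now". */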
		li.QuadPart = -10LL * usec;
		timer = CreateWaitableTimer(NULL, TRUE, NULL);
		if (!timer)
			return;

		SetWaitableTimer(timer, &li, 0, NULL, NULL, 0);
		WaitForSingleObject(timer, INFINITE);
		CloseHandle(timer);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	{
		struct timeval tv2 = *tv;
		select(0, NULL, NULL, NULL, &tv2);
	}
#endif
}

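/* Format 'tm' (or, when tm is NULL, the current UTC time) as an RFC 1123
 * date string, e.g. "Sun, 06 Nov 1994 08:49:37 GMT". */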
int
evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm)
{
	static const char *DAYS[] =
		{ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
	static const char *MONTHS[] =
		{ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

	time_t t = time(NULL);

#if defined(EVENT__HAVE__GMTIME64_S) || !defined(_WIN32)
	struct tm sys;
#endif

	/* If `tm` is NULL, use the system's current time. */
	if (tm == NULL) {
#if !defined(_WIN32)
		gmtime_r(&t, &sys);
		tm = &sys;
		/** detect _gmtime64()/_gmtime64_s() */
#elif defined(EVENT__HAVE__GMTIME64_S)
		errno_t err;
		err = _gmtime64_s(&sys, &t);
		if (err) {
			event_errx(1, "Invalid argument to _gmtime64_s");
		} else {
			tm = &sys;
		}
#elif defined(EVENT__HAVE__GMTIME64)
		tm = _gmtime64(&t);
#else
		tm = gmtime(&t);
#endif
	}

	return evutil_snprintf(
		date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT",
		DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon],
		1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec);
}

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'. It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
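/*
   Example of the ratchet: if last_time is 10.000000 s and the source then
   reports 9.700000 s, the 0.3 s deficit is added to adjust_monotonic_clock
   and 10.000000 s is returned; every later reading is shifted forward by
   that same 0.3 s.
 */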
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}

/*
   Allocate a new struct evutil_monotonic_timer
 */
struct evutil_monotonic_timer *
evutil_monotonic_timer_new(void)
{
	struct evutil_monotonic_timer *p = NULL;

	p = mm_malloc(sizeof(*p));
	if (!p) goto done;

	memset(p, 0, sizeof(*p));

 done:
	return p;
}

/*
   Free a struct evutil_monotonic_timer
 */
void
evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
{
	if (timer) {
		mm_free(timer);
	}
}

/*
   Set up a struct evutil_monotonic_timer for initial use
 */
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
    int flags)
{
	return evutil_configure_monotonic_time_(timer, flags);
}

/*
   Query the current monotonic time
 */
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
    struct timeval *tp)
{
	return evutil_gettime_monotonic_(timer, tp);
}
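
/*
   A minimal usage sketch for the public wrappers above (not part of
   libevent itself): allocate a timer, configure it once, then query it as
   often as needed.  Flags may be 0, EV_MONOT_PRECISE, or EV_MONOT_FALLBACK.

	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
	struct timeval now;
	if (timer && evutil_configure_monotonic_time(timer, 0) == 0 &&
	    evutil_gettime_monotonic(timer, &now) == 0) {
		;	// 'now' holds seconds/microseconds that never go backwards
	}
	evutil_monotonic_timer_free(timer);
 */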


#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec	ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know). This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the ratio for
   converting its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
#endif

#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the most high-
   resolution time available.  It's a pity it's not so monotonic in
   practice; it's also got some fun bugs, especially: with older Windowses,
   under virtualization, with funny hardware, on multiprocessor systems,
   and so on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives the number of
   1-msec ticks since startup.  The accuracy here might be as bad as 10-20
   msec, I hear.  There's an undocumented function (NtSetTimerResolution)
   that allegedly increases the accuracy. Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

   "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise
   timing, then we mostly[2] assume that GetTickCount is monotonic, and
   correct QueryPerformanceCounter() to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us.  He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
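		/* Keep the documented low 32 bits, and splice bits 50..63 of
		 * the raw 64-bit return value in as bits 32..45.  Those 14
		 * extra bits stretch the 49.7-day DWORD rollover out to the
		 * ~2231 years mentioned in the comment above. */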
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits' worth of milliseconds, which will roll over every
		 * 49 days or so.  */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
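			/* QueryPerformanceFrequency() reports counts per
			 * second, so this is microseconds per count. */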
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}


int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	memset(base, 0, sizeof(*base));
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	if (evutil_gettimeofday(tp, NULL) < 0)
		return -1;
	adjust_monotonic_time(base, tp);
	return 0;
}
#endif
622