/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "util-internal.h"

/* The old tests here need assertions to work. */
#undef NDEBUG

#include "event2/event-config.h"

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef EVENT__HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

#ifdef EVENT__HAVE_PTHREADS
#include <pthread.h>
#elif defined(_WIN32)
#include <process.h>
#endif
#include <assert.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <time.h>

#include "sys/queue.h"

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "evthread-internal.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "regress.h"
#include "tinytest_macros.h"
#include "time-internal.h"
#include "regress_thread.h"

struct cond_wait {
	void *lock;
	void *cond;
};

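/* Timer callbacks used to wake threads blocked on a cond_wait:
 * wake_all_timeout broadcasts to every waiter, wake_one_timeout signals
 * exactly one. */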
static void
wake_all_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_BROADCAST(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}

static void
wake_one_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_SIGNAL(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}

#define NUM_THREADS	100
#define NUM_ITERATIONS  100
void *count_lock;
static int count;

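/* Worker for the "basic" test: each thread repeatedly schedules a 3 ms
 * timer on the shared base and blocks on its own condition variable until
 * wake_all_timeout fires.  Every wakeup bumps the global count; a thread
 * that sees the final count asks the loop to exit. */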
static THREAD_FN
basic_thread(void *arg)
{
	struct cond_wait cw;
	struct event_base *base = arg;
	struct event ev;
	int i = 0;

	EVTHREAD_ALLOC_LOCK(cw.lock, 0);
	EVTHREAD_ALLOC_COND(cw.cond);
	assert(cw.lock);
	assert(cw.cond);

	evtimer_assign(&ev, base, wake_all_timeout, &cw);
	for (i = 0; i < NUM_ITERATIONS; i++) {
		struct timeval tv;
		evutil_timerclear(&tv);
		tv.tv_sec = 0;
		tv.tv_usec = 3000;

		EVLOCK_LOCK(cw.lock, 0);
		/* We need to make sure that the event cannot fire before we
		 * get to wait on the condition variable. */
		assert(evtimer_add(&ev, &tv) == 0);

		assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
		EVLOCK_UNLOCK(cw.lock, 0);

		EVLOCK_LOCK(count_lock, 0);
		++count;
		EVLOCK_UNLOCK(count_lock, 0);
	}

	/* exit the loop only if all threads fired all timeouts */
	EVLOCK_LOCK(count_lock, 0);
	if (count >= NUM_THREADS * NUM_ITERATIONS)
		event_base_loopexit(base, NULL);
	EVLOCK_UNLOCK(count_lock, 0);

	EVTHREAD_FREE_LOCK(cw.lock, 0);
	EVTHREAD_FREE_COND(cw.cond);

	THREAD_RETURN();
}

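/* State and callbacks for the "forking" variant of the basic test:
 * got_sigchld counts SIGCHLD deliveries in the parent, and
 * notification_fd_used counts read events on the base's internal
 * notification fd (which the parent expects to stay untouched). */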
static int notification_fd_used = 0;
#ifndef _WIN32
static int got_sigchld = 0;
static void
sigchld_cb(evutil_socket_t fd, short event, void *arg)
{
	struct timeval tv;
	struct event_base *base = arg;

	got_sigchld++;
	tv.tv_usec = 100000;
	tv.tv_sec = 0;
	event_base_loopexit(base, &tv);
}

static void
notify_fd_cb(evutil_socket_t fd, short event, void *arg)
{
	++notification_fd_used;
}
#endif

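/* The "basic" test proper: make the base notifiable, spin up NUM_THREADS
 * copies of basic_thread, and run the loop until every thread has fired
 * every timeout.  With the "forking" setup string it instead forks, lets
 * the child rerun the workload after event_reinit(), and checks that the
 * parent sees SIGCHLD without its notification fd being used. */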
static void
thread_basic(void *arg)
{
	THREAD_T threads[NUM_THREADS];
	struct event ev;
	struct timeval tv;
	int i;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event *notification_event = NULL;
	struct event *sigchld_event = NULL;

	EVTHREAD_ALLOC_LOCK(count_lock, 0);
	tt_assert(count_lock);

	tt_assert(base);
	if (evthread_make_base_notifiable(base) < 0) {
		tt_abort_msg("Couldn't make base notifiable!");
	}

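	/* Forking variant: watch for SIGCHLD and for any activity on the
	 * base's internal notification fd, then fork.  The child
	 * reinitializes the base and runs the normal threaded workload;
	 * the parent just dispatches until the child exits. */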
#ifndef _WIN32
	if (data->setup_data && !strcmp(data->setup_data, "forking")) {
		pid_t pid;
		int status;
		sigchld_event = evsignal_new(base, SIGCHLD, sigchld_cb, base);
		/* This piggybacks on the th_notify_fd weirdly, and looks
		 * inside libevent internals.  Not a good idea in non-testing
		 * code! */
		notification_event = event_new(base,
		    base->th_notify_fd[0], EV_READ|EV_PERSIST, notify_fd_cb,
		    NULL);
		event_add(sigchld_event, NULL);
		event_add(notification_event, NULL);

		if ((pid = fork()) == 0) {
			event_del(notification_event);
			if (event_reinit(base) < 0) {
				TT_FAIL(("reinit"));
				exit(1);
			}
			event_assign(notification_event, base,
			    base->th_notify_fd[0], EV_READ|EV_PERSIST,
			    notify_fd_cb, NULL);
			event_add(notification_event, NULL);
			goto child;
		}

		event_base_dispatch(base);

		if (waitpid(pid, &status, 0) == -1)
			tt_abort_perror("waitpid");
		TT_BLATHER(("Waitpid okay\n"));

		tt_assert(got_sigchld);
		tt_int_op(notification_fd_used, ==, 0);

		goto end;
	}

child:
#endif
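	/* Both the plain run and the forked child end up here: launch the
	 * worker threads, add a long-lived dummy timer so the loop never runs
	 * dry, and dispatch until basic_thread calls event_base_loopexit. */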
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_START(threads[i], basic_thread, base);

	evtimer_assign(&ev, base, NULL, NULL);
	evutil_timerclear(&tv);
	tv.tv_sec = 1000;
	event_add(&ev, &tv);

	event_base_dispatch(base);

	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	event_del(&ev);

	tt_int_op(count, ==, NUM_THREADS * NUM_ITERATIONS);

	EVTHREAD_FREE_LOCK(count_lock, 0);

	TT_BLATHER(("notifications==%d", notification_fd_used));
end:
	if (notification_event)
		event_free(notification_event);
	if (sigchld_event)
		event_free(sigchld_event);
}

#undef NUM_THREADS
#define NUM_THREADS 10

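/* Per-thread record for the conditions_simple test: if delay is nonzero the
 * thread waits with that timeout; alerted_at records when it woke, and
 * timed_out is set if the wait timed out rather than being signaled. */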
struct alerted_record {
	struct cond_wait *cond;
	struct timeval delay;
	struct timeval alerted_at;
	int timed_out;
};

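/* Thread body: block on the shared condition variable (with or without a
 * timeout, depending on the record), then note the wakeup time. */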
static THREAD_FN
wait_for_condition(void *arg)
{
	struct alerted_record *rec = arg;
	int r;

	EVLOCK_LOCK(rec->cond->lock, 0);
	if (rec->delay.tv_sec || rec->delay.tv_usec) {
		r = EVTHREAD_COND_WAIT_TIMED(rec->cond->cond, rec->cond->lock,
		    &rec->delay);
	} else {
		r = EVTHREAD_COND_WAIT(rec->cond->cond, rec->cond->lock);
	}
	EVLOCK_UNLOCK(rec->cond->lock, 0);

	evutil_gettimeofday(&rec->alerted_at, NULL);
	if (r == 1)
		rec->timed_out = 1;

	THREAD_RETURN();
}

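/* The conditions_simple test: NUM_THREADS waiters share one condition
 * variable.  A 30 ms timer signals exactly one of them, a 500 ms timer
 * broadcasts to the rest, and threads 5 and 6 wait with a 150 ms timeout so
 * at least one of them should time out.  Afterwards each wakeup is
 * classified by its timing and the totals are checked. */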
static void
thread_conditions_simple(void *arg)
{
	struct timeval tv_signal, tv_timeout, tv_broadcast;
	struct alerted_record alerted[NUM_THREADS];
	THREAD_T threads[NUM_THREADS];
	struct cond_wait cond;
	int i;
	struct timeval launched_at;
	struct event wake_one;
	struct event wake_all;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int n_timed_out=0, n_signal=0, n_broadcast=0;

	tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
	tv_signal.tv_usec = 30*1000;
	tv_timeout.tv_usec = 150*1000;
	tv_broadcast.tv_usec = 500*1000;

	EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_ALLOC_COND(cond.cond);
	tt_assert(cond.lock);
	tt_assert(cond.cond);
	for (i = 0; i < NUM_THREADS; ++i) {
		memset(&alerted[i], 0, sizeof(struct alerted_record));
		alerted[i].cond = &cond;
	}

	/* Threads 5 and 6 will be allowed to time out */
	memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
	memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));

	evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
	evtimer_assign(&wake_all, base, wake_all_timeout, &cond);

	evutil_gettimeofday(&launched_at, NULL);

	/* Launch the threads... */
	for (i = 0; i < NUM_THREADS; ++i) {
		THREAD_START(threads[i], wait_for_condition, &alerted[i]);
	}

	/* Start the timers... */
	tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
	tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);

	/* And run for a bit... */
	event_base_dispatch(base);

	/* And wait till the threads are done. */
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	/* Now, let's see what happened. At least one of 5 or 6 should
	 * have timed out. */
	n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
	tt_int_op(n_timed_out, >=, 1);
	tt_int_op(n_timed_out, <=, 2);

	for (i = 0; i < NUM_THREADS; ++i) {
		const struct timeval *target_delay;
		struct timeval target_time, actual_delay;
		if (alerted[i].timed_out) {
			TT_BLATHER(("%d looks like a timeout\n", i));
			target_delay = &tv_timeout;
			tt_assert(i == 5 || i == 6);
		} else if (evutil_timerisset(&alerted[i].alerted_at)) {
			long diff1, diff2;
			evutil_timersub(&alerted[i].alerted_at,
			    &launched_at, &actual_delay);
			diff1 = timeval_msec_diff(&actual_delay,
			    &tv_signal);
			diff2 = timeval_msec_diff(&actual_delay,
			    &tv_broadcast);
			if (labs(diff1) < labs(diff2)) {
				TT_BLATHER(("%d looks like a signal\n", i));
				target_delay = &tv_signal;
				++n_signal;
			} else {
				TT_BLATHER(("%d looks like a broadcast\n", i));
				target_delay = &tv_broadcast;
				++n_broadcast;
			}
		} else {
			TT_FAIL(("Thread %d never got woken", i));
			continue;
		}
		evutil_timeradd(target_delay, &launched_at, &target_time);
		test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
		    0, 200);
	}
	tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
	tt_int_op(n_signal, ==, 1);

end:
	EVTHREAD_FREE_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_FREE_COND(cond.cond);
}

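/* Machinery for the deferred_cb_skew test: QUEUE_THREAD_COUNT loader threads
 * each push CB_COUNT deferred callbacks at the base, where every callback
 * burns about a millisecond.  The test then checks how badly that load skews
 * a one-second timer. */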
#define CB_COUNT 128
#define QUEUE_THREAD_COUNT 8

static void
SLEEP_MS(int ms)
{
	struct timeval tv;
	tv.tv_sec = ms/1000;
	tv.tv_usec = (ms%1000)*1000;
	evutil_usleep_(&tv);
}

struct deferred_test_data {
	struct event_callback cbs[CB_COUNT];
	struct event_base *queue;
};

static struct timeval timer_start = {0,0};
static struct timeval timer_end = {0,0};
static unsigned callback_count = 0;
static THREAD_T load_threads[QUEUE_THREAD_COUNT];
static struct deferred_test_data deferred_data[QUEUE_THREAD_COUNT];

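/* Each deferred callback sleeps ~1 ms and bumps callback_count; each loader
 * thread schedules CB_COUNT of them on the shared base, pausing ~1 ms
 * between scheduling calls. */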
static void
deferred_callback(struct event_callback *cb, void *arg)
{
	SLEEP_MS(1);
	callback_count += 1;
}

static THREAD_FN
load_deferred_queue(void *arg)
{
	struct deferred_test_data *data = arg;
	size_t i;

	for (i = 0; i < CB_COUNT; ++i) {
		event_deferred_cb_init_(&data->cbs[i], 0, deferred_callback,
		    NULL);
		event_deferred_cb_schedule_(data->queue, &data->cbs[i]);
		SLEEP_MS(1);
	}

	THREAD_RETURN();
}

static void
timer_callback(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(&timer_end, NULL);
}

static void
start_threads_callback(evutil_socket_t fd, short what, void *arg)
{
	int i;

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i) {
		THREAD_START(load_threads[i], load_deferred_queue,
				&deferred_data[i]);
	}
}

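/* The deferred_cb_skew test itself: cap the dispatch interval at 16 ms, set
 * a one-second timer alongside the loader threads, and require that the
 * timer fires within 0.4 s of its target despite the deferred-callback
 * load. */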
static void
thread_deferred_cb_skew(void *arg)
{
	struct timeval tv_timer = {1, 0};
	struct event_base *base = NULL;
	struct event_config *cfg = NULL;
	struct timeval elapsed;
	int elapsed_usec;
	int i;

	cfg = event_config_new();
	tt_assert(cfg);
	event_config_set_max_dispatch_interval(cfg, NULL, 16, 0);

	base = event_base_new_with_config(cfg);
	tt_assert(base);

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		deferred_data[i].queue = base;

	evutil_gettimeofday(&timer_start, NULL);
	event_base_once(base, -1, EV_TIMEOUT, timer_callback, NULL,
			&tv_timer);
	event_base_once(base, -1, EV_TIMEOUT, start_threads_callback,
			NULL, NULL);
	event_base_dispatch(base);

	evutil_timersub(&timer_end, &timer_start, &elapsed);
	TT_BLATHER(("callback count, %u", callback_count));
	elapsed_usec =
	    (unsigned)(elapsed.tv_sec*1000000 + elapsed.tv_usec);
	TT_BLATHER(("elapsed time, %u usec", elapsed_usec));

	/* XXX be more intelligent here.  just make sure skew is
	 * within .4 seconds for now. */
	tt_assert(elapsed_usec >= 600000 && elapsed_usec <= 1400000);

end:
	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		THREAD_JOIN(load_threads[i]);
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

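/* Shared state for the no_events test: five events whose callback records
 * the time it ran; the last one breaks out of the loop on exit_base. */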
static struct event time_events[5];
static struct timeval times[5];
static struct event_base *exit_base = NULL;

static void
note_time_cb(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(arg, NULL);
	if (arg == &times[4]) {
		event_base_loopbreak(exit_base);
	}
}

static THREAD_FN
register_events_subthread(void *arg)
{
	struct timeval tv = {0,0};
	SLEEP_MS(100);
	event_active(&time_events[0], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	event_active(&time_events[1], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	tv.tv_usec = 100*1000;
	event_add(&time_events[2], &tv);
	tv.tv_usec = 150*1000;
	event_add(&time_events[3], &tv);
	SLEEP_MS(200);
	event_active(&time_events[4], EV_TIMEOUT, 1);

	THREAD_RETURN();
}

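/* The no_events test: the main thread runs an otherwise-empty loop with
 * EVLOOP_NO_EXIT_ON_EMPTY while a subthread activates events 0 and 1 at
 * roughly 100 and 200 ms, schedules events 2 and 3 as timers landing near
 * 400 and 450 ms, and finally activates event 4 at about 500 ms to break
 * the loop.  The recorded times must match those targets. */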
static void
thread_no_events(void *arg)
{
	THREAD_T thread;
	struct basic_test_data *data = arg;
	struct timeval starttime, endtime;
	int i;
	exit_base = data->base;

	memset(times, 0, sizeof(times));
	for (i = 0; i < 5; ++i) {
		event_assign(&time_events[i], data->base,
		    -1, 0, note_time_cb, &times[i]);
	}

	evutil_gettimeofday(&starttime, NULL);
	THREAD_START(thread, register_events_subthread, data->base);
	event_base_loop(data->base, EVLOOP_NO_EXIT_ON_EMPTY);
	evutil_gettimeofday(&endtime, NULL);
	tt_assert(event_base_got_break(data->base));
	THREAD_JOIN(thread);
	for (i = 0; i < 5; ++i) {
		struct timeval diff;
		double sec;
		evutil_timersub(&times[i], &starttime, &diff);
		sec = diff.tv_sec + diff.tv_usec/1.0e6;
		TT_BLATHER(("event %d at %.4f seconds", i, sec));
	}
	test_timeval_diff_eq(&starttime, &times[0], 100);
	test_timeval_diff_eq(&starttime, &times[1], 200);
	test_timeval_diff_eq(&starttime, &times[2], 400);
	test_timeval_diff_eq(&starttime, &times[3], 450);
	test_timeval_diff_eq(&starttime, &times[4], 500);
	test_timeval_diff_eq(&starttime, &endtime,  500);

end:
	;
}

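/* The test table.  The TEST() macro builds an entry with the common
 * TT_FORK|TT_NEED_THREADS|TT_NEED_BASE flags plus any extras; the
 * deferred_cb_skew entry builds its own base, and "forking" reuses
 * thread_basic with a setup string. */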
#define TEST(name, f)							\
	{ #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE|(f),	\
	  &basic_setup, NULL }

struct testcase_t thread_testcases[] = {
	{ "basic", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, NULL },
#ifndef _WIN32
	{ "forking", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, (char*)"forking" },
#endif
	TEST(conditions_simple, TT_RETRIABLE),
	{ "deferred_cb_skew", thread_deferred_cb_skew,
	  TT_FORK|TT_NEED_THREADS|TT_OFF_BY_DEFAULT,
	  &basic_setup, NULL },
#ifndef _WIN32
	/****** XXX TODO FIXME windows seems to be having some timing trouble,
	 * looking into it now. / ellzey
	 ******/
	TEST(no_events, TT_RETRIABLE),
#endif
	END_OF_TESTCASES
};