/*
 * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "util-internal.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#endif

#include "event2/event-config.h"

#include <sys/types.h>
#include <sys/stat.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifndef _WIN32
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <netdb.h>
#endif
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <ctype.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event2/tag.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/util.h"
#include "event-internal.h"
#include "evthread-internal.h"
#include "log-internal.h"
#include "time-internal.h"

#include "regress.h"

#ifndef _WIN32
#include "regress.gen.h"
#endif

evutil_socket_t pair[2];
int test_ok;
int called;
struct event_base *global_base;

static char wbuf[4096];
static char rbuf[4096];
static int woff;
static int roff;
static int usepersist;
static struct timeval tset;
static struct timeval tcalled;


#define TEST1 "this is a test"

#ifndef SHUT_WR
#define SHUT_WR 1
#endif

#ifdef _WIN32
#define write(fd,buf,len) send((fd),(buf),(int)(len),0)
#define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
#endif

struct basic_cb_args
{
	struct event_base *eb;
	struct event *ev;
	unsigned int callcount;
};

static void
simple_read_cb(evutil_socket_t fd, short event, void *arg)
{
	char buf[256];
	int len;

	len = read(fd, buf, sizeof(buf));

	if (len) {
		if (!called) {
			if (event_add(arg, NULL) == -1)
				exit(1);
		}
	} else if (called == 1)
122 test_ok = 1; 123 124 called++; 125 } 126 127 static void 128 basic_read_cb(evutil_socket_t fd, short event, void *data) 129 { 130 char buf[256]; 131 int len; 132 struct basic_cb_args *arg = data; 133 134 len = read(fd, buf, sizeof(buf)); 135 136 if (len < 0) { 137 tt_fail_perror("read (callback)"); 138 } else { 139 switch (arg->callcount++) { 140 case 0: /* first call: expect to read data; cycle */ 141 if (len > 0) 142 return; 143 144 tt_fail_msg("EOF before data read"); 145 break; 146 147 case 1: /* second call: expect EOF; stop */ 148 if (len > 0) 149 tt_fail_msg("not all data read on first cycle"); 150 break; 151 152 default: /* third call: should not happen */ 153 tt_fail_msg("too many cycles"); 154 } 155 } 156 157 event_del(arg->ev); 158 event_base_loopexit(arg->eb, NULL); 159 } 160 161 static void 162 dummy_read_cb(evutil_socket_t fd, short event, void *arg) 163 { 164 } 165 166 static void 167 simple_write_cb(evutil_socket_t fd, short event, void *arg) 168 { 169 int len; 170 171 len = write(fd, TEST1, strlen(TEST1) + 1); 172 if (len == -1) 173 test_ok = 0; 174 else 175 test_ok = 1; 176 } 177 178 static void 179 multiple_write_cb(evutil_socket_t fd, short event, void *arg) 180 { 181 struct event *ev = arg; 182 int len; 183 184 len = 128; 185 if (woff + len >= (int)sizeof(wbuf)) 186 len = sizeof(wbuf) - woff; 187 188 len = write(fd, wbuf + woff, len); 189 if (len == -1) { 190 fprintf(stderr, "%s: write\n", __func__); 191 if (usepersist) 192 event_del(ev); 193 return; 194 } 195 196 woff += len; 197 198 if (woff >= (int)sizeof(wbuf)) { 199 shutdown(fd, SHUT_WR); 200 if (usepersist) 201 event_del(ev); 202 return; 203 } 204 205 if (!usepersist) { 206 if (event_add(ev, NULL) == -1) 207 exit(1); 208 } 209 } 210 211 static void 212 multiple_read_cb(evutil_socket_t fd, short event, void *arg) 213 { 214 struct event *ev = arg; 215 int len; 216 217 len = read(fd, rbuf + roff, sizeof(rbuf) - roff); 218 if (len == -1) 219 fprintf(stderr, "%s: read\n", __func__); 220 if (len <= 0) { 221 if (usepersist) 222 event_del(ev); 223 return; 224 } 225 226 roff += len; 227 if (!usepersist) { 228 if (event_add(ev, NULL) == -1) 229 exit(1); 230 } 231 } 232 233 static void 234 timeout_cb(evutil_socket_t fd, short event, void *arg) 235 { 236 evutil_gettimeofday(&tcalled, NULL); 237 } 238 239 struct both { 240 struct event ev; 241 int nread; 242 }; 243 244 static void 245 combined_read_cb(evutil_socket_t fd, short event, void *arg) 246 { 247 struct both *both = arg; 248 char buf[128]; 249 int len; 250 251 len = read(fd, buf, sizeof(buf)); 252 if (len == -1) 253 fprintf(stderr, "%s: read\n", __func__); 254 if (len <= 0) 255 return; 256 257 both->nread += len; 258 if (event_add(&both->ev, NULL) == -1) 259 exit(1); 260 } 261 262 static void 263 combined_write_cb(evutil_socket_t fd, short event, void *arg) 264 { 265 struct both *both = arg; 266 char buf[128]; 267 int len; 268 269 len = sizeof(buf); 270 if (len > both->nread) 271 len = both->nread; 272 273 memset(buf, 'q', len); 274 275 len = write(fd, buf, len); 276 if (len == -1) 277 fprintf(stderr, "%s: write\n", __func__); 278 if (len <= 0) { 279 shutdown(fd, SHUT_WR); 280 return; 281 } 282 283 both->nread -= len; 284 if (event_add(&both->ev, NULL) == -1) 285 exit(1); 286 } 287 288 /* These macros used to replicate the work of the legacy test wrapper code */ 289 #define setup_test(x) do { \ 290 if (!in_legacy_test_wrapper) { \ 291 TT_FAIL(("Legacy test %s not wrapped properly", x)); \ 292 return; \ 293 } \ 294 } while (0) 295 #define cleanup_test() 
setup_test("cleanup") 296 297 static void 298 test_simpleread(void) 299 { 300 struct event ev; 301 302 /* Very simple read test */ 303 setup_test("Simple read: "); 304 305 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 306 tt_fail_perror("write"); 307 } 308 309 shutdown(pair[0], SHUT_WR); 310 311 event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev); 312 if (event_add(&ev, NULL) == -1) 313 exit(1); 314 event_dispatch(); 315 316 cleanup_test(); 317 } 318 319 static void 320 test_simplewrite(void) 321 { 322 struct event ev; 323 324 /* Very simple write test */ 325 setup_test("Simple write: "); 326 327 event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev); 328 if (event_add(&ev, NULL) == -1) 329 exit(1); 330 event_dispatch(); 331 332 cleanup_test(); 333 } 334 335 static void 336 simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg) 337 { 338 if (++called == 2) 339 test_ok = 1; 340 } 341 342 static void 343 test_simpleread_multiple(void) 344 { 345 struct event one, two; 346 347 /* Very simple read test */ 348 setup_test("Simple read to multiple evens: "); 349 350 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 351 tt_fail_perror("write"); 352 } 353 354 shutdown(pair[0], SHUT_WR); 355 356 event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL); 357 if (event_add(&one, NULL) == -1) 358 exit(1); 359 event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL); 360 if (event_add(&two, NULL) == -1) 361 exit(1); 362 event_dispatch(); 363 364 cleanup_test(); 365 } 366 367 static int have_closed = 0; 368 static int premature_event = 0; 369 static void 370 simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr) 371 { 372 evutil_socket_t **fds = ptr; 373 TT_BLATHER(("Closing")); 374 evutil_closesocket(*fds[0]); 375 evutil_closesocket(*fds[1]); 376 *fds[0] = -1; 377 *fds[1] = -1; 378 have_closed = 1; 379 } 380 381 static void 382 record_event_cb(evutil_socket_t s, short what, void *ptr) 383 { 384 short *whatp = ptr; 385 if (!have_closed) 386 premature_event = 1; 387 *whatp = what; 388 TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s)); 389 } 390 391 static void 392 test_simpleclose(void *ptr) 393 { 394 /* Test that a close of FD is detected as a read and as a write. */ 395 struct event_base *base = event_base_new(); 396 evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1}; 397 evutil_socket_t *to_close[2]; 398 struct event *rev=NULL, *wev=NULL, *closeev=NULL; 399 struct timeval tv; 400 short got_read_on_close = 0, got_write_on_close = 0; 401 char buf[1024]; 402 memset(buf, 99, sizeof(buf)); 403 #ifdef _WIN32 404 #define LOCAL_SOCKETPAIR_AF AF_INET 405 #else 406 #define LOCAL_SOCKETPAIR_AF AF_UNIX 407 #endif 408 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0) 409 TT_DIE(("socketpair: %s", strerror(errno))); 410 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0) 411 TT_DIE(("socketpair: %s", strerror(errno))); 412 if (evutil_make_socket_nonblocking(pair1[1]) < 0) 413 TT_DIE(("make_socket_nonblocking")); 414 if (evutil_make_socket_nonblocking(pair2[1]) < 0) 415 TT_DIE(("make_socket_nonblocking")); 416 417 /** Stuff pair2[1] full of data, until write fails */ 418 while (1) { 419 int r = write(pair2[1], buf, sizeof(buf)); 420 if (r<0) { 421 int err = evutil_socket_geterror(pair2[1]); 422 if (! 
EVUTIL_ERR_RW_RETRIABLE(err)) 423 TT_DIE(("write failed strangely: %s", 424 evutil_socket_error_to_string(err))); 425 break; 426 } 427 } 428 to_close[0] = &pair1[0]; 429 to_close[1] = &pair2[0]; 430 431 closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb, 432 to_close); 433 rev = event_new(base, pair1[1], EV_READ, record_event_cb, 434 &got_read_on_close); 435 TT_BLATHER(("Waiting for read on %d", (int)pair1[1])); 436 wev = event_new(base, pair2[1], EV_WRITE, record_event_cb, 437 &got_write_on_close); 438 TT_BLATHER(("Waiting for write on %d", (int)pair2[1])); 439 tv.tv_sec = 0; 440 tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make 441 * sure we get a read event. */ 442 event_add(closeev, &tv); 443 event_add(rev, NULL); 444 event_add(wev, NULL); 445 /* Don't let the test go on too long. */ 446 tv.tv_sec = 0; 447 tv.tv_usec = 200*1000; 448 event_base_loopexit(base, &tv); 449 event_base_loop(base, 0); 450 451 tt_int_op(got_read_on_close, ==, EV_READ); 452 tt_int_op(got_write_on_close, ==, EV_WRITE); 453 tt_int_op(premature_event, ==, 0); 454 455 end: 456 if (pair1[0] >= 0) 457 evutil_closesocket(pair1[0]); 458 if (pair1[1] >= 0) 459 evutil_closesocket(pair1[1]); 460 if (pair2[0] >= 0) 461 evutil_closesocket(pair2[0]); 462 if (pair2[1] >= 0) 463 evutil_closesocket(pair2[1]); 464 if (rev) 465 event_free(rev); 466 if (wev) 467 event_free(wev); 468 if (closeev) 469 event_free(closeev); 470 if (base) 471 event_base_free(base); 472 } 473 474 475 static void 476 test_multiple(void) 477 { 478 struct event ev, ev2; 479 int i; 480 481 /* Multiple read and write test */ 482 setup_test("Multiple read/write: "); 483 memset(rbuf, 0, sizeof(rbuf)); 484 for (i = 0; i < (int)sizeof(wbuf); i++) 485 wbuf[i] = i; 486 487 roff = woff = 0; 488 usepersist = 0; 489 490 event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev); 491 if (event_add(&ev, NULL) == -1) 492 exit(1); 493 event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2); 494 if (event_add(&ev2, NULL) == -1) 495 exit(1); 496 event_dispatch(); 497 498 if (roff == woff) 499 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0; 500 501 cleanup_test(); 502 } 503 504 static void 505 test_persistent(void) 506 { 507 struct event ev, ev2; 508 int i; 509 510 /* Multiple read and write test with persist */ 511 setup_test("Persist read/write: "); 512 memset(rbuf, 0, sizeof(rbuf)); 513 for (i = 0; i < (int)sizeof(wbuf); i++) 514 wbuf[i] = i; 515 516 roff = woff = 0; 517 usepersist = 1; 518 519 event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev); 520 if (event_add(&ev, NULL) == -1) 521 exit(1); 522 event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2); 523 if (event_add(&ev2, NULL) == -1) 524 exit(1); 525 event_dispatch(); 526 527 if (roff == woff) 528 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0; 529 530 cleanup_test(); 531 } 532 533 static void 534 test_combined(void) 535 { 536 struct both r1, r2, w1, w2; 537 538 setup_test("Combined read/write: "); 539 memset(&r1, 0, sizeof(r1)); 540 memset(&r2, 0, sizeof(r2)); 541 memset(&w1, 0, sizeof(w1)); 542 memset(&w2, 0, sizeof(w2)); 543 544 w1.nread = 4096; 545 w2.nread = 8192; 546 547 event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1); 548 event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1); 549 event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2); 550 event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2); 551 tt_assert(event_add(&r1.ev, NULL) != -1); 552 tt_assert(!event_add(&w1.ev, NULL)); 553 tt_assert(!event_add(&r2.ev, 
NULL)); 554 tt_assert(!event_add(&w2.ev, NULL)); 555 event_dispatch(); 556 557 if (r1.nread == 8192 && r2.nread == 4096) 558 test_ok = 1; 559 560 end: 561 cleanup_test(); 562 } 563 564 static void 565 test_simpletimeout(void) 566 { 567 struct timeval tv; 568 struct event ev; 569 570 setup_test("Simple timeout: "); 571 572 tv.tv_usec = 200*1000; 573 tv.tv_sec = 0; 574 evutil_timerclear(&tcalled); 575 evtimer_set(&ev, timeout_cb, NULL); 576 evtimer_add(&ev, &tv); 577 578 evutil_gettimeofday(&tset, NULL); 579 event_dispatch(); 580 test_timeval_diff_eq(&tset, &tcalled, 200); 581 582 test_ok = 1; 583 end: 584 cleanup_test(); 585 } 586 587 static void 588 periodic_timeout_cb(evutil_socket_t fd, short event, void *arg) 589 { 590 int *count = arg; 591 592 (*count)++; 593 if (*count == 6) { 594 /* call loopexit only once - on slow machines(?), it is 595 * apparently possible for this to get called twice. */ 596 test_ok = 1; 597 event_base_loopexit(global_base, NULL); 598 } 599 } 600 601 static void 602 test_persistent_timeout(void) 603 { 604 struct timeval tv; 605 struct event ev; 606 int count = 0; 607 608 evutil_timerclear(&tv); 609 tv.tv_usec = 10000; 610 611 event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST, 612 periodic_timeout_cb, &count); 613 event_add(&ev, &tv); 614 615 event_dispatch(); 616 617 event_del(&ev); 618 } 619 620 static void 621 test_persistent_timeout_jump(void *ptr) 622 { 623 struct basic_test_data *data = ptr; 624 struct event ev; 625 int count = 0; 626 struct timeval msec100 = { 0, 100 * 1000 }; 627 struct timeval msec50 = { 0, 50 * 1000 }; 628 struct timeval msec300 = { 0, 300 * 1000 }; 629 630 event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count); 631 event_add(&ev, &msec100); 632 /* Wait for a bit */ 633 evutil_usleep_(&msec300); 634 event_base_loopexit(data->base, &msec50); 635 event_base_dispatch(data->base); 636 tt_int_op(count, ==, 1); 637 638 end: 639 event_del(&ev); 640 } 641 642 struct persist_active_timeout_called { 643 int n; 644 short events[16]; 645 struct timeval tvs[16]; 646 }; 647 648 static void 649 activate_cb(evutil_socket_t fd, short event, void *arg) 650 { 651 struct event *ev = arg; 652 event_active(ev, EV_READ, 1); 653 } 654 655 static void 656 persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg) 657 { 658 struct persist_active_timeout_called *c = arg; 659 if (c->n < 15) { 660 c->events[c->n] = event; 661 evutil_gettimeofday(&c->tvs[c->n], NULL); 662 ++c->n; 663 } 664 } 665 666 static void 667 test_persistent_active_timeout(void *ptr) 668 { 669 struct timeval tv, tv2, tv_exit, start; 670 struct event ev; 671 struct persist_active_timeout_called res; 672 673 struct basic_test_data *data = ptr; 674 struct event_base *base = data->base; 675 676 memset(&res, 0, sizeof(res)); 677 678 tv.tv_sec = 0; 679 tv.tv_usec = 200 * 1000; 680 event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST, 681 persist_active_timeout_cb, &res); 682 event_add(&ev, &tv); 683 684 tv2.tv_sec = 0; 685 tv2.tv_usec = 100 * 1000; 686 event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2); 687 688 tv_exit.tv_sec = 0; 689 tv_exit.tv_usec = 600 * 1000; 690 event_base_loopexit(base, &tv_exit); 691 692 event_base_assert_ok_(base); 693 evutil_gettimeofday(&start, NULL); 694 695 event_base_dispatch(base); 696 event_base_assert_ok_(base); 697 698 tt_int_op(res.n, ==, 3); 699 tt_int_op(res.events[0], ==, EV_READ); 700 tt_int_op(res.events[1], ==, EV_TIMEOUT); 701 tt_int_op(res.events[2], ==, EV_TIMEOUT); 702 test_timeval_diff_eq(&start, 
&res.tvs[0], 100); 703 test_timeval_diff_eq(&start, &res.tvs[1], 300); 704 test_timeval_diff_eq(&start, &res.tvs[2], 500); 705 end: 706 event_del(&ev); 707 } 708 709 struct common_timeout_info { 710 struct event ev; 711 struct timeval called_at; 712 int which; 713 int count; 714 }; 715 716 static void 717 common_timeout_cb(evutil_socket_t fd, short event, void *arg) 718 { 719 struct common_timeout_info *ti = arg; 720 ++ti->count; 721 evutil_gettimeofday(&ti->called_at, NULL); 722 if (ti->count >= 4) 723 event_del(&ti->ev); 724 } 725 726 static void 727 test_common_timeout(void *ptr) 728 { 729 struct basic_test_data *data = ptr; 730 731 struct event_base *base = data->base; 732 int i; 733 struct common_timeout_info info[100]; 734 735 struct timeval start; 736 struct timeval tmp_100_ms = { 0, 100*1000 }; 737 struct timeval tmp_200_ms = { 0, 200*1000 }; 738 struct timeval tmp_5_sec = { 5, 0 }; 739 struct timeval tmp_5M_usec = { 0, 5*1000*1000 }; 740 741 const struct timeval *ms_100, *ms_200, *sec_5; 742 743 ms_100 = event_base_init_common_timeout(base, &tmp_100_ms); 744 ms_200 = event_base_init_common_timeout(base, &tmp_200_ms); 745 sec_5 = event_base_init_common_timeout(base, &tmp_5_sec); 746 tt_assert(ms_100); 747 tt_assert(ms_200); 748 tt_assert(sec_5); 749 tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms), 750 ==, ms_200); 751 tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200); 752 tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5); 753 tt_int_op(ms_100->tv_sec, ==, 0); 754 tt_int_op(ms_200->tv_sec, ==, 0); 755 tt_int_op(sec_5->tv_sec, ==, 5); 756 tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000); 757 tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000); 758 tt_int_op(sec_5->tv_usec, ==, 0|0x50200000); 759 760 memset(info, 0, sizeof(info)); 761 762 for (i=0; i<100; ++i) { 763 info[i].which = i; 764 event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST, 765 common_timeout_cb, &info[i]); 766 if (i % 2) { 767 if ((i%20)==1) { 768 /* Glass-box test: Make sure we survive the 769 * transition to non-common timeouts. It's 770 * a little tricky. */ 771 event_add(&info[i].ev, ms_200); 772 event_add(&info[i].ev, &tmp_100_ms); 773 } else if ((i%20)==3) { 774 /* Check heap-to-common too. */ 775 event_add(&info[i].ev, &tmp_200_ms); 776 event_add(&info[i].ev, ms_100); 777 } else if ((i%20)==5) { 778 /* Also check common-to-common. */ 779 event_add(&info[i].ev, ms_200); 780 event_add(&info[i].ev, ms_100); 781 } else { 782 event_add(&info[i].ev, ms_100); 783 } 784 } else { 785 event_add(&info[i].ev, ms_200); 786 } 787 } 788 789 event_base_assert_ok_(base); 790 evutil_gettimeofday(&start, NULL); 791 event_base_dispatch(base); 792 793 event_base_assert_ok_(base); 794 795 for (i=0; i<10; ++i) { 796 tt_int_op(info[i].count, ==, 4); 797 if (i % 2) { 798 test_timeval_diff_eq(&start, &info[i].called_at, 400); 799 } else { 800 test_timeval_diff_eq(&start, &info[i].called_at, 800); 801 } 802 } 803 804 /* Make sure we can free the base with some events in. 
*/ 805 for (i=0; i<100; ++i) { 806 if (i % 2) { 807 event_add(&info[i].ev, ms_100); 808 } else { 809 event_add(&info[i].ev, ms_200); 810 } 811 } 812 813 end: 814 event_base_free(data->base); /* need to do this here before info is 815 * out-of-scope */ 816 data->base = NULL; 817 } 818 819 #ifndef _WIN32 820 static void signal_cb(evutil_socket_t fd, short event, void *arg); 821 822 #define current_base event_global_current_base_ 823 extern struct event_base *current_base; 824 825 static void 826 child_signal_cb(evutil_socket_t fd, short event, void *arg) 827 { 828 struct timeval tv; 829 int *pint = arg; 830 831 *pint = 1; 832 833 tv.tv_usec = 500000; 834 tv.tv_sec = 0; 835 event_loopexit(&tv); 836 } 837 838 static void 839 test_fork(void) 840 { 841 int status, got_sigchld = 0; 842 struct event ev, sig_ev; 843 pid_t pid; 844 845 setup_test("After fork: "); 846 847 tt_assert(current_base); 848 evthread_make_base_notifiable(current_base); 849 850 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 851 tt_fail_perror("write"); 852 } 853 854 event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev); 855 if (event_add(&ev, NULL) == -1) 856 exit(1); 857 858 evsignal_set(&sig_ev, SIGCHLD, child_signal_cb, &got_sigchld); 859 evsignal_add(&sig_ev, NULL); 860 861 event_base_assert_ok_(current_base); 862 TT_BLATHER(("Before fork")); 863 if ((pid = regress_fork()) == 0) { 864 /* in the child */ 865 TT_BLATHER(("In child, before reinit")); 866 event_base_assert_ok_(current_base); 867 if (event_reinit(current_base) == -1) { 868 fprintf(stdout, "FAILED (reinit)\n"); 869 exit(1); 870 } 871 TT_BLATHER(("After reinit")); 872 event_base_assert_ok_(current_base); 873 TT_BLATHER(("After assert-ok")); 874 875 evsignal_del(&sig_ev); 876 877 called = 0; 878 879 event_dispatch(); 880 881 event_base_free(current_base); 882 883 /* we do not send an EOF; simple_read_cb requires an EOF 884 * to set test_ok. we just verify that the callback was 885 * called. */ 886 exit(test_ok != 0 || called != 2 ? 
-2 : 76); 887 } 888 889 /* wait for the child to read the data */ 890 { 891 const struct timeval tv = { 0, 100000 }; 892 evutil_usleep_(&tv); 893 } 894 895 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 896 tt_fail_perror("write"); 897 } 898 899 TT_BLATHER(("Before waitpid")); 900 if (waitpid(pid, &status, 0) == -1) { 901 fprintf(stdout, "FAILED (fork)\n"); 902 exit(1); 903 } 904 TT_BLATHER(("After waitpid")); 905 906 if (WEXITSTATUS(status) != 76) { 907 fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status)); 908 exit(1); 909 } 910 911 /* test that the current event loop still works */ 912 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 913 fprintf(stderr, "%s: write\n", __func__); 914 } 915 916 shutdown(pair[0], SHUT_WR); 917 918 event_dispatch(); 919 920 if (!got_sigchld) { 921 fprintf(stdout, "FAILED (sigchld)\n"); 922 exit(1); 923 } 924 925 evsignal_del(&sig_ev); 926 927 end: 928 cleanup_test(); 929 } 930 931 static void 932 signal_cb_sa(int sig) 933 { 934 test_ok = 2; 935 } 936 937 static void 938 signal_cb(evutil_socket_t fd, short event, void *arg) 939 { 940 struct event *ev = arg; 941 942 evsignal_del(ev); 943 test_ok = 1; 944 } 945 946 static void 947 test_simplesignal_impl(int find_reorder) 948 { 949 struct event ev; 950 struct itimerval itv; 951 952 evsignal_set(&ev, SIGALRM, signal_cb, &ev); 953 evsignal_add(&ev, NULL); 954 /* find bugs in which operations are re-ordered */ 955 if (find_reorder) { 956 evsignal_del(&ev); 957 evsignal_add(&ev, NULL); 958 } 959 960 memset(&itv, 0, sizeof(itv)); 961 itv.it_value.tv_sec = 0; 962 itv.it_value.tv_usec = 100000; 963 if (setitimer(ITIMER_REAL, &itv, NULL) == -1) 964 goto skip_simplesignal; 965 966 event_dispatch(); 967 skip_simplesignal: 968 if (evsignal_del(&ev) == -1) 969 test_ok = 0; 970 971 cleanup_test(); 972 } 973 974 static void 975 test_simplestsignal(void) 976 { 977 setup_test("Simplest one signal: "); 978 test_simplesignal_impl(0); 979 } 980 981 static void 982 test_simplesignal(void) 983 { 984 setup_test("Simple signal: "); 985 test_simplesignal_impl(1); 986 } 987 988 static void 989 test_multiplesignal(void) 990 { 991 struct event ev_one, ev_two; 992 struct itimerval itv; 993 994 setup_test("Multiple signal: "); 995 996 evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one); 997 evsignal_add(&ev_one, NULL); 998 999 evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two); 1000 evsignal_add(&ev_two, NULL); 1001 1002 memset(&itv, 0, sizeof(itv)); 1003 itv.it_value.tv_sec = 0; 1004 itv.it_value.tv_usec = 100000; 1005 if (setitimer(ITIMER_REAL, &itv, NULL) == -1) 1006 goto skip_simplesignal; 1007 1008 event_dispatch(); 1009 1010 skip_simplesignal: 1011 if (evsignal_del(&ev_one) == -1) 1012 test_ok = 0; 1013 if (evsignal_del(&ev_two) == -1) 1014 test_ok = 0; 1015 1016 cleanup_test(); 1017 } 1018 1019 static void 1020 test_immediatesignal(void) 1021 { 1022 struct event ev; 1023 1024 test_ok = 0; 1025 evsignal_set(&ev, SIGUSR1, signal_cb, &ev); 1026 evsignal_add(&ev, NULL); 1027 raise(SIGUSR1); 1028 event_loop(EVLOOP_NONBLOCK); 1029 evsignal_del(&ev); 1030 cleanup_test(); 1031 } 1032 1033 static void 1034 test_signal_dealloc(void) 1035 { 1036 /* make sure that evsignal_event is event_del'ed and pipe closed */ 1037 struct event ev; 1038 struct event_base *base = event_init(); 1039 evsignal_set(&ev, SIGUSR1, signal_cb, &ev); 1040 evsignal_add(&ev, NULL); 1041 evsignal_del(&ev); 1042 event_base_free(base); 1043 /* If we got here without asserting, we're fine. 
*/
	test_ok = 1;
	cleanup_test();
}

static void
test_signal_pipeloss(void)
{
	/* make sure that the base1 pipe is closed correctly. */
	struct event_base *base1, *base2;
	int pipe1;
	test_ok = 0;
	base1 = event_init();
	pipe1 = base1->sig.ev_signal_pair[0];
	base2 = event_init();
	event_base_free(base2);
	event_base_free(base1);
	if (close(pipe1) != -1 || errno!=EBADF) {
		/* fd must be closed, so second close gives -1, EBADF */
		printf("signal pipe not closed. ");
		test_ok = 0;
	} else {
		test_ok = 1;
	}
	cleanup_test();
}

/*
 * make two bases to catch signals, use both of them.  this only works
 * for event mechanisms that use our signal pipe trick.  kqueue handles
 * signals internally, and all interested kqueues get all the signals.
 */
static void
test_signal_switchbase(void)
{
	struct event ev1, ev2;
	struct event_base *base1, *base2;
	int is_kqueue;
	test_ok = 0;
	base1 = event_init();
	base2 = event_init();
	is_kqueue = !strcmp(event_get_method(),"kqueue");
	evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
	evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
	if (event_base_set(base1, &ev1) ||
	    event_base_set(base2, &ev2) ||
	    event_add(&ev1, NULL) ||
	    event_add(&ev2, NULL)) {
		fprintf(stderr, "%s: cannot set base, add\n", __func__);
		exit(1);
	}

	tt_ptr_op(event_get_base(&ev1), ==, base1);
	tt_ptr_op(event_get_base(&ev2), ==, base2);

	test_ok = 0;
	/* can handle signal before loop is called */
	raise(SIGUSR1);
	event_base_loop(base2, EVLOOP_NONBLOCK);
	if (is_kqueue) {
		if (!test_ok)
			goto end;
		test_ok = 0;
	}
	event_base_loop(base1, EVLOOP_NONBLOCK);
	if (test_ok && !is_kqueue) {
		test_ok = 0;

		/* set base1 to handle signals */
		event_base_loop(base1, EVLOOP_NONBLOCK);
		raise(SIGUSR1);
		event_base_loop(base1, EVLOOP_NONBLOCK);
		event_base_loop(base2, EVLOOP_NONBLOCK);
	}
end:
	event_base_free(base1);
	event_base_free(base2);
	cleanup_test();
}

/*
 * assert that a signal event removed from the event queue really is
 * removed - with no possibility of its parent handler being fired.
 */
static void
test_signal_assert(void)
{
	struct event ev;
	struct event_base *base = event_init();
	test_ok = 0;
	/* use SIGCONT so we don't kill ourselves when we signal to nowhere */
	evsignal_set(&ev, SIGCONT, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	/*
	 * if evsignal_del() fails to reset the handler, its current handler
	 * will still point to evsig_handler().
	 */
	evsignal_del(&ev);

	raise(SIGCONT);
#if 0
	/* only way to verify we were in evsig_handler() */
	/* XXXX Now there's no longer a good way. */
	if (base->sig.evsig_caught)
		test_ok = 0;
	else
		test_ok = 1;
#else
	test_ok = 1;
#endif

	event_base_free(base);
	cleanup_test();
	return;
}

/*
 * assert that we restore our previous signal handler properly.
1161 */ 1162 static void 1163 test_signal_restore(void) 1164 { 1165 struct event ev; 1166 struct event_base *base = event_init(); 1167 #ifdef EVENT__HAVE_SIGACTION 1168 struct sigaction sa; 1169 #endif 1170 1171 test_ok = 0; 1172 #ifdef EVENT__HAVE_SIGACTION 1173 sa.sa_handler = signal_cb_sa; 1174 sa.sa_flags = 0x0; 1175 sigemptyset(&sa.sa_mask); 1176 if (sigaction(SIGUSR1, &sa, NULL) == -1) 1177 goto out; 1178 #else 1179 if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR) 1180 goto out; 1181 #endif 1182 evsignal_set(&ev, SIGUSR1, signal_cb, &ev); 1183 evsignal_add(&ev, NULL); 1184 evsignal_del(&ev); 1185 1186 raise(SIGUSR1); 1187 /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */ 1188 if (test_ok != 2) 1189 test_ok = 0; 1190 out: 1191 event_base_free(base); 1192 cleanup_test(); 1193 return; 1194 } 1195 1196 static void 1197 signal_cb_swp(int sig, short event, void *arg) 1198 { 1199 called++; 1200 if (called < 5) 1201 raise(sig); 1202 else 1203 event_loopexit(NULL); 1204 } 1205 static void 1206 timeout_cb_swp(evutil_socket_t fd, short event, void *arg) 1207 { 1208 if (called == -1) { 1209 struct timeval tv = {5, 0}; 1210 1211 called = 0; 1212 evtimer_add((struct event *)arg, &tv); 1213 raise(SIGUSR1); 1214 return; 1215 } 1216 test_ok = 0; 1217 event_loopexit(NULL); 1218 } 1219 1220 static void 1221 test_signal_while_processing(void) 1222 { 1223 struct event_base *base = event_init(); 1224 struct event ev, ev_timer; 1225 struct timeval tv = {0, 0}; 1226 1227 setup_test("Receiving a signal while processing other signal: "); 1228 1229 called = -1; 1230 test_ok = 1; 1231 signal_set(&ev, SIGUSR1, signal_cb_swp, NULL); 1232 signal_add(&ev, NULL); 1233 evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer); 1234 evtimer_add(&ev_timer, &tv); 1235 event_dispatch(); 1236 1237 event_base_free(base); 1238 cleanup_test(); 1239 return; 1240 } 1241 #endif 1242 1243 static void 1244 test_free_active_base(void *ptr) 1245 { 1246 struct basic_test_data *data = ptr; 1247 struct event_base *base1; 1248 struct event ev1; 1249 1250 base1 = event_init(); 1251 if (base1) { 1252 event_assign(&ev1, base1, data->pair[1], EV_READ, 1253 dummy_read_cb, NULL); 1254 event_add(&ev1, NULL); 1255 event_base_free(base1); /* should not crash */ 1256 } else { 1257 tt_fail_msg("failed to create event_base for test"); 1258 } 1259 1260 base1 = event_init(); 1261 tt_assert(base1); 1262 event_assign(&ev1, base1, 0, 0, dummy_read_cb, NULL); 1263 event_active(&ev1, EV_READ, 1); 1264 event_base_free(base1); 1265 end: 1266 ; 1267 } 1268 1269 static void 1270 test_manipulate_active_events(void *ptr) 1271 { 1272 struct basic_test_data *data = ptr; 1273 struct event_base *base = data->base; 1274 struct event ev1; 1275 1276 event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL); 1277 1278 /* Make sure an active event is pending. */ 1279 event_active(&ev1, EV_READ, 1); 1280 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL), 1281 ==, EV_READ); 1282 1283 /* Make sure that activating an event twice works. 
*/ 1284 event_active(&ev1, EV_WRITE, 1); 1285 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL), 1286 ==, EV_READ|EV_WRITE); 1287 1288 end: 1289 event_del(&ev1); 1290 } 1291 1292 static void 1293 event_selfarg_cb(evutil_socket_t fd, short event, void *arg) 1294 { 1295 struct event *ev = arg; 1296 struct event_base *base = event_get_base(ev); 1297 event_base_assert_ok_(base); 1298 event_base_loopexit(base, NULL); 1299 tt_want(ev == event_base_get_running_event(base)); 1300 } 1301 1302 static void 1303 test_event_new_selfarg(void *ptr) 1304 { 1305 struct basic_test_data *data = ptr; 1306 struct event_base *base = data->base; 1307 struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb, 1308 event_self_cbarg()); 1309 1310 event_active(ev, EV_READ, 1); 1311 event_base_dispatch(base); 1312 1313 event_free(ev); 1314 } 1315 1316 static void 1317 test_event_assign_selfarg(void *ptr) 1318 { 1319 struct basic_test_data *data = ptr; 1320 struct event_base *base = data->base; 1321 struct event ev; 1322 1323 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, 1324 event_self_cbarg()); 1325 event_active(&ev, EV_READ, 1); 1326 event_base_dispatch(base); 1327 } 1328 1329 static void 1330 test_event_base_get_num_events(void *ptr) 1331 { 1332 struct basic_test_data *data = ptr; 1333 struct event_base *base = data->base; 1334 struct event ev; 1335 int event_count_active; 1336 int event_count_virtual; 1337 int event_count_added; 1338 int event_count_active_virtual; 1339 int event_count_active_added; 1340 int event_count_virtual_added; 1341 int event_count_active_added_virtual; 1342 1343 struct timeval qsec = {0, 100000}; 1344 1345 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, 1346 event_self_cbarg()); 1347 1348 event_add(&ev, &qsec); 1349 event_count_active = event_base_get_num_events(base, 1350 EVENT_BASE_COUNT_ACTIVE); 1351 event_count_virtual = event_base_get_num_events(base, 1352 EVENT_BASE_COUNT_VIRTUAL); 1353 event_count_added = event_base_get_num_events(base, 1354 EVENT_BASE_COUNT_ADDED); 1355 event_count_active_virtual = event_base_get_num_events(base, 1356 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1357 event_count_active_added = event_base_get_num_events(base, 1358 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1359 event_count_virtual_added = event_base_get_num_events(base, 1360 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1361 event_count_active_added_virtual = event_base_get_num_events(base, 1362 EVENT_BASE_COUNT_ACTIVE| 1363 EVENT_BASE_COUNT_ADDED| 1364 EVENT_BASE_COUNT_VIRTUAL); 1365 tt_int_op(event_count_active, ==, 0); 1366 tt_int_op(event_count_virtual, ==, 0); 1367 /* libevent itself adds a timeout event, so the event_count is 2 here */ 1368 tt_int_op(event_count_added, ==, 2); 1369 tt_int_op(event_count_active_virtual, ==, 0); 1370 tt_int_op(event_count_active_added, ==, 2); 1371 tt_int_op(event_count_virtual_added, ==, 2); 1372 tt_int_op(event_count_active_added_virtual, ==, 2); 1373 1374 event_active(&ev, EV_READ, 1); 1375 event_count_active = event_base_get_num_events(base, 1376 EVENT_BASE_COUNT_ACTIVE); 1377 event_count_virtual = event_base_get_num_events(base, 1378 EVENT_BASE_COUNT_VIRTUAL); 1379 event_count_added = event_base_get_num_events(base, 1380 EVENT_BASE_COUNT_ADDED); 1381 event_count_active_virtual = event_base_get_num_events(base, 1382 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1383 event_count_active_added = event_base_get_num_events(base, 1384 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1385 
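	/* Note: judging by the expected values asserted below, a combined-flag
	 * query to event_base_get_num_events() behaves as the sum of the
	 * per-category counts, so an event that is both added and active is
	 * counted once in each category; that is why the combined totals can
	 * exceed the number of distinct events. */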
event_count_virtual_added = event_base_get_num_events(base, 1386 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1387 event_count_active_added_virtual = event_base_get_num_events(base, 1388 EVENT_BASE_COUNT_ACTIVE| 1389 EVENT_BASE_COUNT_ADDED| 1390 EVENT_BASE_COUNT_VIRTUAL); 1391 tt_int_op(event_count_active, ==, 1); 1392 tt_int_op(event_count_virtual, ==, 0); 1393 tt_int_op(event_count_added, ==, 3); 1394 tt_int_op(event_count_active_virtual, ==, 1); 1395 tt_int_op(event_count_active_added, ==, 4); 1396 tt_int_op(event_count_virtual_added, ==, 3); 1397 tt_int_op(event_count_active_added_virtual, ==, 4); 1398 1399 event_base_loop(base, 0); 1400 event_count_active = event_base_get_num_events(base, 1401 EVENT_BASE_COUNT_ACTIVE); 1402 event_count_virtual = event_base_get_num_events(base, 1403 EVENT_BASE_COUNT_VIRTUAL); 1404 event_count_added = event_base_get_num_events(base, 1405 EVENT_BASE_COUNT_ADDED); 1406 event_count_active_virtual = event_base_get_num_events(base, 1407 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1408 event_count_active_added = event_base_get_num_events(base, 1409 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1410 event_count_virtual_added = event_base_get_num_events(base, 1411 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1412 event_count_active_added_virtual = event_base_get_num_events(base, 1413 EVENT_BASE_COUNT_ACTIVE| 1414 EVENT_BASE_COUNT_ADDED| 1415 EVENT_BASE_COUNT_VIRTUAL); 1416 tt_int_op(event_count_active, ==, 0); 1417 tt_int_op(event_count_virtual, ==, 0); 1418 tt_int_op(event_count_added, ==, 0); 1419 tt_int_op(event_count_active_virtual, ==, 0); 1420 tt_int_op(event_count_active_added, ==, 0); 1421 tt_int_op(event_count_virtual_added, ==, 0); 1422 tt_int_op(event_count_active_added_virtual, ==, 0); 1423 1424 event_base_add_virtual_(base); 1425 event_count_active = event_base_get_num_events(base, 1426 EVENT_BASE_COUNT_ACTIVE); 1427 event_count_virtual = event_base_get_num_events(base, 1428 EVENT_BASE_COUNT_VIRTUAL); 1429 event_count_added = event_base_get_num_events(base, 1430 EVENT_BASE_COUNT_ADDED); 1431 event_count_active_virtual = event_base_get_num_events(base, 1432 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1433 event_count_active_added = event_base_get_num_events(base, 1434 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1435 event_count_virtual_added = event_base_get_num_events(base, 1436 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1437 event_count_active_added_virtual = event_base_get_num_events(base, 1438 EVENT_BASE_COUNT_ACTIVE| 1439 EVENT_BASE_COUNT_ADDED| 1440 EVENT_BASE_COUNT_VIRTUAL); 1441 tt_int_op(event_count_active, ==, 0); 1442 tt_int_op(event_count_virtual, ==, 1); 1443 tt_int_op(event_count_added, ==, 0); 1444 tt_int_op(event_count_active_virtual, ==, 1); 1445 tt_int_op(event_count_active_added, ==, 0); 1446 tt_int_op(event_count_virtual_added, ==, 1); 1447 tt_int_op(event_count_active_added_virtual, ==, 1); 1448 1449 end: 1450 ; 1451 } 1452 1453 static void 1454 test_event_base_get_max_events(void *ptr) 1455 { 1456 struct basic_test_data *data = ptr; 1457 struct event_base *base = data->base; 1458 struct event ev; 1459 struct event ev2; 1460 int event_count_active; 1461 int event_count_virtual; 1462 int event_count_added; 1463 int event_count_active_virtual; 1464 int event_count_active_added; 1465 int event_count_virtual_added; 1466 int event_count_active_added_virtual; 1467 1468 struct timeval qsec = {0, 100000}; 1469 1470 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, 1471 event_self_cbarg()); 
1472 event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb, 1473 event_self_cbarg()); 1474 1475 event_add(&ev, &qsec); 1476 event_add(&ev2, &qsec); 1477 event_del(&ev2); 1478 1479 event_count_active = event_base_get_max_events(base, 1480 EVENT_BASE_COUNT_ACTIVE, 0); 1481 event_count_virtual = event_base_get_max_events(base, 1482 EVENT_BASE_COUNT_VIRTUAL, 0); 1483 event_count_added = event_base_get_max_events(base, 1484 EVENT_BASE_COUNT_ADDED, 0); 1485 event_count_active_virtual = event_base_get_max_events(base, 1486 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1487 event_count_active_added = event_base_get_max_events(base, 1488 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1489 event_count_virtual_added = event_base_get_max_events(base, 1490 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1491 event_count_active_added_virtual = event_base_get_max_events(base, 1492 EVENT_BASE_COUNT_ACTIVE | 1493 EVENT_BASE_COUNT_ADDED | 1494 EVENT_BASE_COUNT_VIRTUAL, 0); 1495 1496 tt_int_op(event_count_active, ==, 0); 1497 tt_int_op(event_count_virtual, ==, 0); 1498 /* libevent itself adds a timeout event, so the event_count is 4 here */ 1499 tt_int_op(event_count_added, ==, 4); 1500 tt_int_op(event_count_active_virtual, ==, 0); 1501 tt_int_op(event_count_active_added, ==, 4); 1502 tt_int_op(event_count_virtual_added, ==, 4); 1503 tt_int_op(event_count_active_added_virtual, ==, 4); 1504 1505 event_active(&ev, EV_READ, 1); 1506 event_count_active = event_base_get_max_events(base, 1507 EVENT_BASE_COUNT_ACTIVE, 0); 1508 event_count_virtual = event_base_get_max_events(base, 1509 EVENT_BASE_COUNT_VIRTUAL, 0); 1510 event_count_added = event_base_get_max_events(base, 1511 EVENT_BASE_COUNT_ADDED, 0); 1512 event_count_active_virtual = event_base_get_max_events(base, 1513 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1514 event_count_active_added = event_base_get_max_events(base, 1515 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1516 event_count_virtual_added = event_base_get_max_events(base, 1517 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1518 event_count_active_added_virtual = event_base_get_max_events(base, 1519 EVENT_BASE_COUNT_ACTIVE | 1520 EVENT_BASE_COUNT_ADDED | 1521 EVENT_BASE_COUNT_VIRTUAL, 0); 1522 1523 tt_int_op(event_count_active, ==, 1); 1524 tt_int_op(event_count_virtual, ==, 0); 1525 tt_int_op(event_count_added, ==, 4); 1526 tt_int_op(event_count_active_virtual, ==, 1); 1527 tt_int_op(event_count_active_added, ==, 5); 1528 tt_int_op(event_count_virtual_added, ==, 4); 1529 tt_int_op(event_count_active_added_virtual, ==, 5); 1530 1531 event_base_loop(base, 0); 1532 event_count_active = event_base_get_max_events(base, 1533 EVENT_BASE_COUNT_ACTIVE, 1); 1534 event_count_virtual = event_base_get_max_events(base, 1535 EVENT_BASE_COUNT_VIRTUAL, 1); 1536 event_count_added = event_base_get_max_events(base, 1537 EVENT_BASE_COUNT_ADDED, 1); 1538 event_count_active_virtual = event_base_get_max_events(base, 1539 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1540 event_count_active_added = event_base_get_max_events(base, 1541 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1542 event_count_virtual_added = event_base_get_max_events(base, 1543 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1544 event_count_active_added_virtual = event_base_get_max_events(base, 1545 EVENT_BASE_COUNT_ACTIVE | 1546 EVENT_BASE_COUNT_ADDED | 1547 EVENT_BASE_COUNT_VIRTUAL, 1); 1548 1549 tt_int_op(event_count_active, ==, 1); 1550 tt_int_op(event_count_virtual, 
==, 0); 1551 tt_int_op(event_count_added, ==, 4); 1552 tt_int_op(event_count_active_virtual, ==, 0); 1553 tt_int_op(event_count_active_added, ==, 0); 1554 tt_int_op(event_count_virtual_added, ==, 0); 1555 tt_int_op(event_count_active_added_virtual, ==, 0); 1556 1557 event_count_active = event_base_get_max_events(base, 1558 EVENT_BASE_COUNT_ACTIVE, 0); 1559 event_count_virtual = event_base_get_max_events(base, 1560 EVENT_BASE_COUNT_VIRTUAL, 0); 1561 event_count_added = event_base_get_max_events(base, 1562 EVENT_BASE_COUNT_ADDED, 0); 1563 tt_int_op(event_count_active, ==, 0); 1564 tt_int_op(event_count_virtual, ==, 0); 1565 tt_int_op(event_count_added, ==, 0); 1566 1567 event_base_add_virtual_(base); 1568 event_count_active = event_base_get_max_events(base, 1569 EVENT_BASE_COUNT_ACTIVE, 0); 1570 event_count_virtual = event_base_get_max_events(base, 1571 EVENT_BASE_COUNT_VIRTUAL, 0); 1572 event_count_added = event_base_get_max_events(base, 1573 EVENT_BASE_COUNT_ADDED, 0); 1574 event_count_active_virtual = event_base_get_max_events(base, 1575 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1576 event_count_active_added = event_base_get_max_events(base, 1577 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1578 event_count_virtual_added = event_base_get_max_events(base, 1579 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1580 event_count_active_added_virtual = event_base_get_max_events(base, 1581 EVENT_BASE_COUNT_ACTIVE | 1582 EVENT_BASE_COUNT_ADDED | 1583 EVENT_BASE_COUNT_VIRTUAL, 0); 1584 1585 tt_int_op(event_count_active, ==, 0); 1586 tt_int_op(event_count_virtual, ==, 1); 1587 tt_int_op(event_count_added, ==, 0); 1588 tt_int_op(event_count_active_virtual, ==, 1); 1589 tt_int_op(event_count_active_added, ==, 0); 1590 tt_int_op(event_count_virtual_added, ==, 1); 1591 tt_int_op(event_count_active_added_virtual, ==, 1); 1592 1593 end: 1594 ; 1595 } 1596 1597 static void 1598 test_bad_assign(void *ptr) 1599 { 1600 struct event ev; 1601 int r; 1602 /* READ|SIGNAL is not allowed */ 1603 r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL); 1604 tt_int_op(r,==,-1); 1605 1606 end: 1607 ; 1608 } 1609 1610 static int reentrant_cb_run = 0; 1611 1612 static void 1613 bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr) 1614 { 1615 struct event_base *base = ptr; 1616 int r; 1617 reentrant_cb_run = 1; 1618 /* This reentrant call to event_base_loop should be detected and 1619 * should fail */ 1620 r = event_base_loop(base, 0); 1621 tt_int_op(r, ==, -1); 1622 end: 1623 ; 1624 } 1625 1626 static void 1627 test_bad_reentrant(void *ptr) 1628 { 1629 struct basic_test_data *data = ptr; 1630 struct event_base *base = data->base; 1631 struct event ev; 1632 int r; 1633 event_assign(&ev, base, -1, 1634 0, bad_reentrant_run_loop_cb, base); 1635 1636 event_active(&ev, EV_WRITE, 1); 1637 r = event_base_loop(base, 0); 1638 tt_int_op(r, ==, 1); 1639 tt_int_op(reentrant_cb_run, ==, 1); 1640 end: 1641 ; 1642 } 1643 1644 static int n_write_a_byte_cb=0; 1645 static int n_read_and_drain_cb=0; 1646 static int n_activate_other_event_cb=0; 1647 static void 1648 write_a_byte_cb(evutil_socket_t fd, short what, void *arg) 1649 { 1650 char buf[] = "x"; 1651 if (write(fd, buf, 1) == 1) 1652 ++n_write_a_byte_cb; 1653 } 1654 static void 1655 read_and_drain_cb(evutil_socket_t fd, short what, void *arg) 1656 { 1657 char buf[128]; 1658 int n; 1659 ++n_read_and_drain_cb; 1660 while ((n = read(fd, buf, sizeof(buf))) > 0) 1661 ; 1662 } 1663 1664 static void 1665 
activate_other_event_cb(evutil_socket_t fd, short what, void *other_) 1666 { 1667 struct event *ev_activate = other_; 1668 ++n_activate_other_event_cb; 1669 event_active_later_(ev_activate, EV_READ); 1670 } 1671 1672 static void 1673 test_active_later(void *ptr) 1674 { 1675 struct basic_test_data *data = ptr; 1676 struct event *ev1 = NULL, *ev2 = NULL; 1677 struct event ev3, ev4; 1678 struct timeval qsec = {0, 100000}; 1679 ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL); 1680 ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL); 1681 event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4); 1682 event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3); 1683 event_add(ev1, NULL); 1684 event_add(ev2, NULL); 1685 event_active_later_(&ev3, EV_READ); 1686 1687 event_base_loopexit(data->base, &qsec); 1688 1689 event_base_loop(data->base, 0); 1690 1691 TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.", 1692 n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb)); 1693 event_del(&ev3); 1694 event_del(&ev4); 1695 1696 tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb); 1697 tt_int_op(n_write_a_byte_cb, >, 100); 1698 tt_int_op(n_read_and_drain_cb, >, 100); 1699 tt_int_op(n_activate_other_event_cb, >, 100); 1700 1701 event_active_later_(&ev4, EV_READ); 1702 event_active(&ev4, EV_READ, 1); /* This should make the event 1703 active immediately. */ 1704 tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0); 1705 tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0); 1706 1707 /* Now leave this one around, so that event_free sees it and removes 1708 * it. */ 1709 event_active_later_(&ev3, EV_READ); 1710 event_base_assert_ok_(data->base); 1711 1712 end: 1713 if (ev1) 1714 event_free(ev1); 1715 if (ev2) 1716 event_free(ev2); 1717 1718 event_base_free(data->base); 1719 data->base = NULL; 1720 } 1721 1722 1723 static void incr_arg_cb(evutil_socket_t fd, short what, void *arg) 1724 { 1725 int *intptr = arg; 1726 (void) fd; (void) what; 1727 ++*intptr; 1728 } 1729 static void remove_timers_cb(evutil_socket_t fd, short what, void *arg) 1730 { 1731 struct event **ep = arg; 1732 (void) fd; (void) what; 1733 event_remove_timer(ep[0]); 1734 event_remove_timer(ep[1]); 1735 } 1736 static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg) 1737 { 1738 evutil_socket_t *sockp = arg; 1739 (void) fd; (void) what; 1740 (void) write(*sockp, "A", 1); 1741 } 1742 struct read_not_timeout_param 1743 { 1744 struct event **ev; 1745 int events; 1746 int count; 1747 }; 1748 static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg) 1749 { 1750 struct read_not_timeout_param *rntp = arg; 1751 char c; 1752 ev_ssize_t n; 1753 (void) fd; (void) what; 1754 n = read(fd, &c, 1); 1755 tt_int_op(n, ==, 1); 1756 rntp->events |= what; 1757 ++rntp->count; 1758 if(2 == rntp->count) event_del(rntp->ev[0]); 1759 end: 1760 ; 1761 } 1762 1763 static void 1764 test_event_remove_timeout(void *ptr) 1765 { 1766 struct basic_test_data *data = ptr; 1767 struct event_base *base = data->base; 1768 struct event *ev[5]; 1769 int ev1_fired=0; 1770 struct timeval ms25 = { 0, 25*1000 }, 1771 ms40 = { 0, 40*1000 }, 1772 ms75 = { 0, 75*1000 }, 1773 ms125 = { 0, 125*1000 }; 1774 struct read_not_timeout_param rntp = { ev, 0, 0 }; 1775 1776 event_base_assert_ok_(base); 1777 1778 ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST, 1779 read_not_timeout_cb, &rntp); 1780 ev[1] = evtimer_new(base, 
incr_arg_cb, &ev1_fired); 1781 ev[2] = evtimer_new(base, remove_timers_cb, ev); 1782 ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]); 1783 ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]); 1784 tt_assert(base); 1785 event_add(ev[2], &ms25); /* remove timers */ 1786 event_add(ev[4], &ms40); /* write to test if timer re-activates */ 1787 event_add(ev[0], &ms75); /* read */ 1788 event_add(ev[1], &ms75); /* timer */ 1789 event_add(ev[3], &ms125); /* timeout. */ 1790 event_base_assert_ok_(base); 1791 1792 event_base_dispatch(base); 1793 1794 tt_int_op(ev1_fired, ==, 0); 1795 tt_int_op(rntp.events, ==, EV_READ); 1796 1797 event_base_assert_ok_(base); 1798 end: 1799 event_free(ev[0]); 1800 event_free(ev[1]); 1801 event_free(ev[2]); 1802 event_free(ev[3]); 1803 event_free(ev[4]); 1804 } 1805 1806 static void 1807 test_event_base_new(void *ptr) 1808 { 1809 struct basic_test_data *data = ptr; 1810 struct event_base *base = 0; 1811 struct event ev1; 1812 struct basic_cb_args args; 1813 1814 int towrite = (int)strlen(TEST1)+1; 1815 int len = write(data->pair[0], TEST1, towrite); 1816 1817 if (len < 0) 1818 tt_abort_perror("initial write"); 1819 else if (len != towrite) 1820 tt_abort_printf(("initial write fell short (%d of %d bytes)", 1821 len, towrite)); 1822 1823 if (shutdown(data->pair[0], SHUT_WR)) 1824 tt_abort_perror("initial write shutdown"); 1825 1826 base = event_base_new(); 1827 if (!base) 1828 tt_abort_msg("failed to create event base"); 1829 1830 args.eb = base; 1831 args.ev = &ev1; 1832 args.callcount = 0; 1833 event_assign(&ev1, base, data->pair[1], 1834 EV_READ|EV_PERSIST, basic_read_cb, &args); 1835 1836 if (event_add(&ev1, NULL)) 1837 tt_abort_perror("initial event_add"); 1838 1839 if (event_base_loop(base, 0)) 1840 tt_abort_msg("unsuccessful exit from event loop"); 1841 1842 end: 1843 if (base) 1844 event_base_free(base); 1845 } 1846 1847 static void 1848 test_loopexit(void) 1849 { 1850 struct timeval tv, tv_start, tv_end; 1851 struct event ev; 1852 1853 setup_test("Loop exit: "); 1854 1855 tv.tv_usec = 0; 1856 tv.tv_sec = 60*60*24; 1857 evtimer_set(&ev, timeout_cb, NULL); 1858 evtimer_add(&ev, &tv); 1859 1860 tv.tv_usec = 300*1000; 1861 tv.tv_sec = 0; 1862 event_loopexit(&tv); 1863 1864 evutil_gettimeofday(&tv_start, NULL); 1865 event_dispatch(); 1866 evutil_gettimeofday(&tv_end, NULL); 1867 1868 evtimer_del(&ev); 1869 1870 tt_assert(event_base_got_exit(global_base)); 1871 tt_assert(!event_base_got_break(global_base)); 1872 1873 test_timeval_diff_eq(&tv_start, &tv_end, 300); 1874 1875 test_ok = 1; 1876 end: 1877 cleanup_test(); 1878 } 1879 1880 static void 1881 test_loopexit_multiple(void) 1882 { 1883 struct timeval tv, tv_start, tv_end; 1884 struct event_base *base; 1885 1886 setup_test("Loop Multiple exit: "); 1887 1888 base = event_base_new(); 1889 1890 tv.tv_usec = 200*1000; 1891 tv.tv_sec = 0; 1892 event_base_loopexit(base, &tv); 1893 1894 tv.tv_usec = 0; 1895 tv.tv_sec = 3; 1896 event_base_loopexit(base, &tv); 1897 1898 evutil_gettimeofday(&tv_start, NULL); 1899 event_base_dispatch(base); 1900 evutil_gettimeofday(&tv_end, NULL); 1901 1902 tt_assert(event_base_got_exit(base)); 1903 tt_assert(!event_base_got_break(base)); 1904 1905 event_base_free(base); 1906 1907 test_timeval_diff_eq(&tv_start, &tv_end, 200); 1908 1909 test_ok = 1; 1910 1911 end: 1912 cleanup_test(); 1913 } 1914 1915 static void 1916 break_cb(evutil_socket_t fd, short events, void *arg) 1917 { 1918 test_ok = 1; 1919 event_loopbreak(); 1920 } 1921 1922 static void 1923 fail_cb(evutil_socket_t 
fd, short events, void *arg) 1924 { 1925 test_ok = 0; 1926 } 1927 1928 static void 1929 test_loopbreak(void) 1930 { 1931 struct event ev1, ev2; 1932 struct timeval tv; 1933 1934 setup_test("Loop break: "); 1935 1936 tv.tv_sec = 0; 1937 tv.tv_usec = 0; 1938 evtimer_set(&ev1, break_cb, NULL); 1939 evtimer_add(&ev1, &tv); 1940 evtimer_set(&ev2, fail_cb, NULL); 1941 evtimer_add(&ev2, &tv); 1942 1943 event_dispatch(); 1944 1945 tt_assert(!event_base_got_exit(global_base)); 1946 tt_assert(event_base_got_break(global_base)); 1947 1948 evtimer_del(&ev1); 1949 evtimer_del(&ev2); 1950 1951 end: 1952 cleanup_test(); 1953 } 1954 1955 static struct event *readd_test_event_last_added = NULL; 1956 static void 1957 re_add_read_cb(evutil_socket_t fd, short event, void *arg) 1958 { 1959 char buf[256]; 1960 struct event *ev_other = arg; 1961 ev_ssize_t n_read; 1962 1963 readd_test_event_last_added = ev_other; 1964 1965 n_read = read(fd, buf, sizeof(buf)); 1966 1967 if (n_read < 0) { 1968 tt_fail_perror("read"); 1969 event_base_loopbreak(event_get_base(ev_other)); 1970 return; 1971 } else { 1972 event_add(ev_other, NULL); 1973 ++test_ok; 1974 } 1975 } 1976 1977 static void 1978 test_nonpersist_readd(void) 1979 { 1980 struct event ev1, ev2; 1981 1982 setup_test("Re-add nonpersistent events: "); 1983 event_set(&ev1, pair[0], EV_READ, re_add_read_cb, &ev2); 1984 event_set(&ev2, pair[1], EV_READ, re_add_read_cb, &ev1); 1985 1986 if (write(pair[0], "Hello", 5) < 0) { 1987 tt_fail_perror("write(pair[0])"); 1988 } 1989 1990 if (write(pair[1], "Hello", 5) < 0) { 1991 tt_fail_perror("write(pair[1])\n"); 1992 } 1993 1994 if (event_add(&ev1, NULL) == -1 || 1995 event_add(&ev2, NULL) == -1) { 1996 test_ok = 0; 1997 } 1998 if (test_ok != 0) 1999 exit(1); 2000 event_loop(EVLOOP_ONCE); 2001 if (test_ok != 2) 2002 exit(1); 2003 /* At this point, we executed both callbacks. Whichever one got 2004 * called first added the second, but the second then immediately got 2005 * deleted before its callback was called. At this point, though, it 2006 * re-added the first. 
2007 */ 2008 if (!readd_test_event_last_added) { 2009 test_ok = 0; 2010 } else if (readd_test_event_last_added == &ev1) { 2011 if (!event_pending(&ev1, EV_READ, NULL) || 2012 event_pending(&ev2, EV_READ, NULL)) 2013 test_ok = 0; 2014 } else { 2015 if (event_pending(&ev1, EV_READ, NULL) || 2016 !event_pending(&ev2, EV_READ, NULL)) 2017 test_ok = 0; 2018 } 2019 2020 event_del(&ev1); 2021 event_del(&ev2); 2022 2023 cleanup_test(); 2024 } 2025 2026 struct test_pri_event { 2027 struct event ev; 2028 int count; 2029 }; 2030 2031 static void 2032 test_priorities_cb(evutil_socket_t fd, short what, void *arg) 2033 { 2034 struct test_pri_event *pri = arg; 2035 struct timeval tv; 2036 2037 if (pri->count == 3) { 2038 event_loopexit(NULL); 2039 return; 2040 } 2041 2042 pri->count++; 2043 2044 evutil_timerclear(&tv); 2045 event_add(&pri->ev, &tv); 2046 } 2047 2048 static void 2049 test_priorities_impl(int npriorities) 2050 { 2051 struct test_pri_event one, two; 2052 struct timeval tv; 2053 2054 TT_BLATHER(("Testing Priorities %d: ", npriorities)); 2055 2056 event_base_priority_init(global_base, npriorities); 2057 2058 memset(&one, 0, sizeof(one)); 2059 memset(&two, 0, sizeof(two)); 2060 2061 timeout_set(&one.ev, test_priorities_cb, &one); 2062 if (event_priority_set(&one.ev, 0) == -1) { 2063 fprintf(stderr, "%s: failed to set priority", __func__); 2064 exit(1); 2065 } 2066 2067 timeout_set(&two.ev, test_priorities_cb, &two); 2068 if (event_priority_set(&two.ev, npriorities - 1) == -1) { 2069 fprintf(stderr, "%s: failed to set priority", __func__); 2070 exit(1); 2071 } 2072 2073 evutil_timerclear(&tv); 2074 2075 if (event_add(&one.ev, &tv) == -1) 2076 exit(1); 2077 if (event_add(&two.ev, &tv) == -1) 2078 exit(1); 2079 2080 event_dispatch(); 2081 2082 event_del(&one.ev); 2083 event_del(&two.ev); 2084 2085 if (npriorities == 1) { 2086 if (one.count == 3 && two.count == 3) 2087 test_ok = 1; 2088 } else if (npriorities == 2) { 2089 /* Two is called once because event_loopexit is priority 1 */ 2090 if (one.count == 3 && two.count == 1) 2091 test_ok = 1; 2092 } else { 2093 if (one.count == 3 && two.count == 0) 2094 test_ok = 1; 2095 } 2096 } 2097 2098 static void 2099 test_priorities(void) 2100 { 2101 test_priorities_impl(1); 2102 if (test_ok) 2103 test_priorities_impl(2); 2104 if (test_ok) 2105 test_priorities_impl(3); 2106 } 2107 2108 /* priority-active-inversion: activate a higher-priority event, and make sure 2109 * it keeps us from running a lower-priority event first. */ 2110 static int n_pai_calls = 0; 2111 static struct event pai_events[3]; 2112 2113 static void 2114 prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg) 2115 { 2116 int *call_order = arg; 2117 *call_order = n_pai_calls++; 2118 if (n_pai_calls == 1) { 2119 /* This should activate later, even though it shares a 2120 priority with us. */ 2121 event_active(&pai_events[1], EV_READ, 1); 2122 /* This should activate next, since its priority is higher, 2123 even though we activated it second. 
*/ 2124 event_active(&pai_events[2], EV_TIMEOUT, 1); 2125 } 2126 } 2127 2128 static void 2129 test_priority_active_inversion(void *data_) 2130 { 2131 struct basic_test_data *data = data_; 2132 struct event_base *base = data->base; 2133 int call_order[3]; 2134 int i; 2135 tt_int_op(event_base_priority_init(base, 8), ==, 0); 2136 2137 n_pai_calls = 0; 2138 memset(call_order, 0, sizeof(call_order)); 2139 2140 for (i=0;i<3;++i) { 2141 event_assign(&pai_events[i], data->base, -1, 0, 2142 prio_active_inversion_cb, &call_order[i]); 2143 } 2144 2145 event_priority_set(&pai_events[0], 4); 2146 event_priority_set(&pai_events[1], 4); 2147 event_priority_set(&pai_events[2], 0); 2148 2149 event_active(&pai_events[0], EV_WRITE, 1); 2150 2151 event_base_dispatch(base); 2152 tt_int_op(n_pai_calls, ==, 3); 2153 tt_int_op(call_order[0], ==, 0); 2154 tt_int_op(call_order[1], ==, 2); 2155 tt_int_op(call_order[2], ==, 1); 2156 end: 2157 ; 2158 } 2159 2160 2161 static void 2162 test_multiple_cb(evutil_socket_t fd, short event, void *arg) 2163 { 2164 if (event & EV_READ) 2165 test_ok |= 1; 2166 else if (event & EV_WRITE) 2167 test_ok |= 2; 2168 } 2169 2170 static void 2171 test_multiple_events_for_same_fd(void) 2172 { 2173 struct event e1, e2; 2174 2175 setup_test("Multiple events for same fd: "); 2176 2177 event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL); 2178 event_add(&e1, NULL); 2179 event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL); 2180 event_add(&e2, NULL); 2181 event_loop(EVLOOP_ONCE); 2182 event_del(&e2); 2183 2184 if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) { 2185 tt_fail_perror("write"); 2186 } 2187 2188 event_loop(EVLOOP_ONCE); 2189 event_del(&e1); 2190 2191 if (test_ok != 3) 2192 test_ok = 0; 2193 2194 cleanup_test(); 2195 } 2196 2197 int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf); 2198 int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf); 2199 int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number); 2200 int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf); 2201 2202 static void 2203 read_once_cb(evutil_socket_t fd, short event, void *arg) 2204 { 2205 char buf[256]; 2206 int len; 2207 2208 len = read(fd, buf, sizeof(buf)); 2209 2210 if (called) { 2211 test_ok = 0; 2212 } else if (len) { 2213 /* Assumes global pair[0] can be used for writing */ 2214 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 2215 tt_fail_perror("write"); 2216 test_ok = 0; 2217 } else { 2218 test_ok = 1; 2219 } 2220 } 2221 2222 called++; 2223 } 2224 2225 static void 2226 test_want_only_once(void) 2227 { 2228 struct event ev; 2229 struct timeval tv; 2230 2231 /* Very simple read test */ 2232 setup_test("Want read only once: "); 2233 2234 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 2235 tt_fail_perror("write"); 2236 } 2237 2238 /* Setup the loop termination */ 2239 evutil_timerclear(&tv); 2240 tv.tv_usec = 300*1000; 2241 event_loopexit(&tv); 2242 2243 event_set(&ev, pair[1], EV_READ, read_once_cb, &ev); 2244 if (event_add(&ev, NULL) == -1) 2245 exit(1); 2246 event_dispatch(); 2247 2248 cleanup_test(); 2249 } 2250 2251 #define TEST_MAX_INT 6 2252 2253 static void 2254 evtag_int_test(void *ptr) 2255 { 2256 struct evbuffer *tmp = evbuffer_new(); 2257 ev_uint32_t integers[TEST_MAX_INT] = { 2258 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000 2259 }; 2260 ev_uint32_t integer; 2261 ev_uint64_t big_int; 2262 int i; 2263 2264 evtag_init(); 2265 2266 for (i = 0; i < TEST_MAX_INT; i++) { 2267 int oldlen, newlen; 2268 oldlen = (int)EVBUFFER_LENGTH(tmp); 
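		/* evtag integers use a variable-length encoding, so the byte
		 * count reported by the TT_BLATHER below varies with the
		 * magnitude of the value. */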
2269 evtag_encode_int(tmp, integers[i]); 2270 newlen = (int)EVBUFFER_LENGTH(tmp); 2271 TT_BLATHER(("encoded 0x%08x with %d bytes", 2272 (unsigned)integers[i], newlen - oldlen)); 2273 big_int = integers[i]; 2274 big_int *= 1000000000; /* 1 billion */ 2275 evtag_encode_int64(tmp, big_int); 2276 } 2277 2278 for (i = 0; i < TEST_MAX_INT; i++) { 2279 tt_int_op(evtag_decode_int(&integer, tmp), !=, -1); 2280 tt_uint_op(integer, ==, integers[i]); 2281 tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1); 2282 tt_assert((big_int / 1000000000) == integers[i]); 2283 } 2284 2285 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0); 2286 end: 2287 evbuffer_free(tmp); 2288 } 2289 2290 static void 2291 evtag_fuzz(void *ptr) 2292 { 2293 u_char buffer[4096]; 2294 struct evbuffer *tmp = evbuffer_new(); 2295 struct timeval tv; 2296 int i, j; 2297 2298 int not_failed = 0; 2299 2300 evtag_init(); 2301 2302 for (j = 0; j < 100; j++) { 2303 for (i = 0; i < (int)sizeof(buffer); i++) 2304 buffer[i] = test_weakrand(); 2305 evbuffer_drain(tmp, -1); 2306 evbuffer_add(tmp, buffer, sizeof(buffer)); 2307 2308 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) 2309 not_failed++; 2310 } 2311 2312 /* The majority of decodes should fail */ 2313 tt_int_op(not_failed, <, 10); 2314 2315 /* Now insert some corruption into the tag length field */ 2316 evbuffer_drain(tmp, -1); 2317 evutil_timerclear(&tv); 2318 tv.tv_sec = 1; 2319 evtag_marshal_timeval(tmp, 0, &tv); 2320 evbuffer_add(tmp, buffer, sizeof(buffer)); 2321 2322 ((char *)EVBUFFER_DATA(tmp))[1] = '\xff'; 2323 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) { 2324 tt_abort_msg("evtag_unmarshal_timeval should have failed"); 2325 } 2326 2327 end: 2328 evbuffer_free(tmp); 2329 } 2330 2331 static void 2332 evtag_tag_encoding(void *ptr) 2333 { 2334 struct evbuffer *tmp = evbuffer_new(); 2335 ev_uint32_t integers[TEST_MAX_INT] = { 2336 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000 2337 }; 2338 ev_uint32_t integer; 2339 int i; 2340 2341 evtag_init(); 2342 2343 for (i = 0; i < TEST_MAX_INT; i++) { 2344 int oldlen, newlen; 2345 oldlen = (int)EVBUFFER_LENGTH(tmp); 2346 evtag_encode_tag(tmp, integers[i]); 2347 newlen = (int)EVBUFFER_LENGTH(tmp); 2348 TT_BLATHER(("encoded 0x%08x with %d bytes", 2349 (unsigned)integers[i], newlen - oldlen)); 2350 } 2351 2352 for (i = 0; i < TEST_MAX_INT; i++) { 2353 tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1); 2354 tt_uint_op(integer, ==, integers[i]); 2355 } 2356 2357 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0); 2358 2359 end: 2360 evbuffer_free(tmp); 2361 } 2362 2363 static void 2364 evtag_test_peek(void *ptr) 2365 { 2366 struct evbuffer *tmp = evbuffer_new(); 2367 ev_uint32_t u32; 2368 2369 evtag_marshal_int(tmp, 30, 0); 2370 evtag_marshal_string(tmp, 40, "Hello world"); 2371 2372 tt_int_op(evtag_peek(tmp, &u32), ==, 1); 2373 tt_int_op(u32, ==, 30); 2374 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0); 2375 tt_int_op(u32, ==, 1+1+1); 2376 tt_int_op(evtag_consume(tmp), ==, 0); 2377 2378 tt_int_op(evtag_peek(tmp, &u32), ==, 1); 2379 tt_int_op(u32, ==, 40); 2380 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0); 2381 tt_int_op(u32, ==, 1+1+11); 2382 tt_int_op(evtag_payload_length(tmp, &u32), ==, 0); 2383 tt_int_op(u32, ==, 11); 2384 2385 end: 2386 evbuffer_free(tmp); 2387 } 2388 2389 2390 static void 2391 test_methods(void *ptr) 2392 { 2393 const char **methods = event_get_supported_methods(); 2394 struct event_config *cfg = NULL; 2395 struct event_base *base = NULL; 2396 const char *backend; 2397 int n_methods = 0; 2398 2399 tt_assert(methods); 2400 2401 backend = 
methods[0]; 2402 while (*methods != NULL) { 2403 TT_BLATHER(("Support method: %s", *methods)); 2404 ++methods; 2405 ++n_methods; 2406 } 2407 2408 cfg = event_config_new(); 2409 assert(cfg != NULL); 2410 2411 tt_int_op(event_config_avoid_method(cfg, backend), ==, 0); 2412 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV); 2413 2414 base = event_base_new_with_config(cfg); 2415 if (n_methods > 1) { 2416 tt_assert(base); 2417 tt_str_op(backend, !=, event_base_get_method(base)); 2418 } else { 2419 tt_assert(base == NULL); 2420 } 2421 2422 end: 2423 if (base) 2424 event_base_free(base); 2425 if (cfg) 2426 event_config_free(cfg); 2427 } 2428 2429 static void 2430 test_version(void *arg) 2431 { 2432 const char *vstr; 2433 ev_uint32_t vint; 2434 int major, minor, patch, n; 2435 2436 vstr = event_get_version(); 2437 vint = event_get_version_number(); 2438 2439 tt_assert(vstr); 2440 tt_assert(vint); 2441 2442 tt_str_op(vstr, ==, LIBEVENT_VERSION); 2443 tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER); 2444 2445 n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch); 2446 tt_assert(3 == n); 2447 tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8))); 2448 end: 2449 ; 2450 } 2451 2452 static void 2453 test_base_features(void *arg) 2454 { 2455 struct event_base *base = NULL; 2456 struct event_config *cfg = NULL; 2457 2458 cfg = event_config_new(); 2459 2460 tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET)); 2461 2462 base = event_base_new_with_config(cfg); 2463 if (base) { 2464 tt_int_op(EV_FEATURE_ET, ==, 2465 event_base_get_features(base) & EV_FEATURE_ET); 2466 } else { 2467 base = event_base_new(); 2468 tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET); 2469 } 2470 2471 end: 2472 if (base) 2473 event_base_free(base); 2474 if (cfg) 2475 event_config_free(cfg); 2476 } 2477 2478 #ifdef EVENT__HAVE_SETENV 2479 #define SETENV_OK 2480 #elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV) 2481 static void setenv(const char *k, const char *v, int o_) 2482 { 2483 char b[256]; 2484 evutil_snprintf(b, sizeof(b), "%s=%s",k,v); 2485 putenv(b); 2486 } 2487 #define SETENV_OK 2488 #endif 2489 2490 #ifdef EVENT__HAVE_UNSETENV 2491 #define UNSETENV_OK 2492 #elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV) 2493 static void unsetenv(const char *k) 2494 { 2495 char b[256]; 2496 evutil_snprintf(b, sizeof(b), "%s=",k); 2497 putenv(b); 2498 } 2499 #define UNSETENV_OK 2500 #endif 2501 2502 #if defined(SETENV_OK) && defined(UNSETENV_OK) 2503 static void 2504 methodname_to_envvar(const char *mname, char *buf, size_t buflen) 2505 { 2506 char *cp; 2507 evutil_snprintf(buf, buflen, "EVENT_NO%s", mname); 2508 for (cp = buf; *cp; ++cp) { 2509 *cp = EVUTIL_TOUPPER_(*cp); 2510 } 2511 } 2512 #endif 2513 2514 static void 2515 test_base_environ(void *arg) 2516 { 2517 struct event_base *base = NULL; 2518 struct event_config *cfg = NULL; 2519 2520 #if defined(SETENV_OK) && defined(UNSETENV_OK) 2521 const char **basenames; 2522 int i, n_methods=0; 2523 char varbuf[128]; 2524 const char *defaultname, *ignoreenvname; 2525 2526 /* See if unsetenv works before we rely on it. 
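	 * (The putenv()-based fallbacks above may not actually clear a
	 * variable on every platform, so probe with a dummy variable first.)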
*/ 2527 setenv("EVENT_NOWAFFLES", "1", 1); 2528 unsetenv("EVENT_NOWAFFLES"); 2529 if (getenv("EVENT_NOWAFFLES") != NULL) { 2530 #ifndef EVENT__HAVE_UNSETENV 2531 TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test")); 2532 #else 2533 TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test")); 2534 #endif 2535 tt_skip(); 2536 } 2537 2538 basenames = event_get_supported_methods(); 2539 for (i = 0; basenames[i]; ++i) { 2540 methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf)); 2541 unsetenv(varbuf); 2542 ++n_methods; 2543 } 2544 2545 base = event_base_new(); 2546 tt_assert(base); 2547 2548 defaultname = event_base_get_method(base); 2549 TT_BLATHER(("default is <%s>", defaultname)); 2550 event_base_free(base); 2551 base = NULL; 2552 2553 /* Can we disable the method with EVENT_NOfoo ? */ 2554 if (!strcmp(defaultname, "epoll (with changelist)")) { 2555 setenv("EVENT_NOEPOLL", "1", 1); 2556 ignoreenvname = "epoll"; 2557 } else { 2558 methodname_to_envvar(defaultname, varbuf, sizeof(varbuf)); 2559 setenv(varbuf, "1", 1); 2560 ignoreenvname = defaultname; 2561 } 2562 2563 /* Use an empty cfg rather than NULL so a failure doesn't exit() */ 2564 cfg = event_config_new(); 2565 base = event_base_new_with_config(cfg); 2566 event_config_free(cfg); 2567 cfg = NULL; 2568 if (n_methods == 1) { 2569 tt_assert(!base); 2570 } else { 2571 tt_assert(base); 2572 tt_str_op(defaultname, !=, event_base_get_method(base)); 2573 event_base_free(base); 2574 base = NULL; 2575 } 2576 2577 /* Can we disable looking at the environment with IGNORE_ENV ? */ 2578 cfg = event_config_new(); 2579 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV); 2580 base = event_base_new_with_config(cfg); 2581 tt_assert(base); 2582 tt_str_op(ignoreenvname, ==, event_base_get_method(base)); 2583 #else 2584 tt_skip(); 2585 #endif 2586 2587 end: 2588 if (base) 2589 event_base_free(base); 2590 if (cfg) 2591 event_config_free(cfg); 2592 } 2593 2594 static void 2595 read_called_once_cb(evutil_socket_t fd, short event, void *arg) 2596 { 2597 tt_int_op(event, ==, EV_READ); 2598 called += 1; 2599 end: 2600 ; 2601 } 2602 2603 static void 2604 timeout_called_once_cb(evutil_socket_t fd, short event, void *arg) 2605 { 2606 tt_int_op(event, ==, EV_TIMEOUT); 2607 called += 100; 2608 end: 2609 ; 2610 } 2611 2612 static void 2613 immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg) 2614 { 2615 tt_int_op(event, ==, EV_TIMEOUT); 2616 called += 1000; 2617 end: 2618 ; 2619 } 2620 2621 static void 2622 test_event_once(void *ptr) 2623 { 2624 struct basic_test_data *data = ptr; 2625 struct timeval tv; 2626 int r; 2627 2628 tv.tv_sec = 0; 2629 tv.tv_usec = 50*1000; 2630 called = 0; 2631 r = event_base_once(data->base, data->pair[0], EV_READ, 2632 read_called_once_cb, NULL, NULL); 2633 tt_int_op(r, ==, 0); 2634 r = event_base_once(data->base, -1, EV_TIMEOUT, 2635 timeout_called_once_cb, NULL, &tv); 2636 tt_int_op(r, ==, 0); 2637 r = event_base_once(data->base, -1, 0, NULL, NULL, NULL); 2638 tt_int_op(r, <, 0); 2639 r = event_base_once(data->base, -1, EV_TIMEOUT, 2640 immediate_called_twice_cb, NULL, NULL); 2641 tt_int_op(r, ==, 0); 2642 tv.tv_sec = 0; 2643 tv.tv_usec = 0; 2644 r = event_base_once(data->base, -1, EV_TIMEOUT, 2645 immediate_called_twice_cb, NULL, &tv); 2646 tt_int_op(r, ==, 0); 2647 2648 if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) { 2649 tt_fail_perror("write"); 2650 } 2651 2652 shutdown(data->pair[1], SHUT_WR); 2653 2654 event_base_dispatch(data->base); 2655 2656 tt_int_op(called, ==, 2101); 2657 end: 2658 ; 
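	/* Expected total: 1 (read) + 100 (timeout) + 2*1000 (the two
	 * immediate EV_TIMEOUT events) == 2101, as checked above. */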
2659 } 2660 2661 static void 2662 test_event_once_never(void *ptr) 2663 { 2664 struct basic_test_data *data = ptr; 2665 struct timeval tv; 2666 2667 /* Have one trigger in 10 seconds (don't worry: it never gets to fire, because we exit the loop first). */ 2668 tv.tv_sec = 10; 2669 tv.tv_usec = 0; 2670 called = 0; 2671 event_base_once(data->base, -1, EV_TIMEOUT, 2672 timeout_called_once_cb, NULL, &tv); 2673 2674 /* But shut down the base in 75 msec. */ 2675 tv.tv_sec = 0; 2676 tv.tv_usec = 75*1000; 2677 event_base_loopexit(data->base, &tv); 2678 2679 event_base_dispatch(data->base); 2680 2681 tt_int_op(called, ==, 0); 2682 end: 2683 ; 2684 } 2685 2686 static void 2687 test_event_pending(void *ptr) 2688 { 2689 struct basic_test_data *data = ptr; 2690 struct event *r=NULL, *w=NULL, *t=NULL; 2691 struct timeval tv, now, tv2; 2692 2693 tv.tv_sec = 0; 2694 tv.tv_usec = 500 * 1000; 2695 r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb, 2696 NULL); 2697 w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb, 2698 NULL); 2699 t = evtimer_new(data->base, timeout_cb, NULL); 2700 2701 tt_assert(r); 2702 tt_assert(w); 2703 tt_assert(t); 2704 2705 evutil_gettimeofday(&now, NULL); 2706 event_add(r, NULL); 2707 event_add(t, &tv); 2708 2709 tt_assert( event_pending(r, EV_READ, NULL)); 2710 tt_assert(!event_pending(w, EV_WRITE, NULL)); 2711 tt_assert(!event_pending(r, EV_WRITE, NULL)); 2712 tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL)); 2713 tt_assert(!event_pending(r, EV_TIMEOUT, NULL)); 2714 tt_assert( event_pending(t, EV_TIMEOUT, NULL)); 2715 tt_assert( event_pending(t, EV_TIMEOUT, &tv2)); 2716 2717 tt_assert(evutil_timercmp(&tv2, &now, >)); 2718 2719 test_timeval_diff_eq(&now, &tv2, 500); 2720 2721 end: 2722 if (r) { 2723 event_del(r); 2724 event_free(r); 2725 } 2726 if (w) { 2727 event_del(w); 2728 event_free(w); 2729 } 2730 if (t) { 2731 event_del(t); 2732 event_free(t); 2733 } 2734 } 2735 2736 #ifndef _WIN32 2737 /* You can't do this test on windows, since dup2 doesn't work on sockets */ 2738 2739 static void 2740 dfd_cb(evutil_socket_t fd, short e, void *data) 2741 { 2742 *(int*)data = (int)e; 2743 } 2744 2745 /* Regression test for our workaround for a fun epoll/linux related bug 2746 * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2) 2747 * will get you an EEXIST */ 2748 static void 2749 test_dup_fd(void *arg) 2750 { 2751 struct basic_test_data *data = arg; 2752 struct event_base *base = data->base; 2753 struct event *ev1=NULL, *ev2=NULL; 2754 int fd, dfd=-1; 2755 int ev1_got, ev2_got; 2756 2757 tt_int_op(write(data->pair[0], "Hello world", 2758 strlen("Hello world")), >, 0); 2759 fd = data->pair[1]; 2760 2761 dfd = dup(fd); 2762 tt_int_op(dfd, >=, 0); 2763 2764 ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got); 2765 ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got); 2766 ev1_got = ev2_got = 0; 2767 event_add(ev1, NULL); 2768 event_add(ev2, NULL); 2769 event_base_loop(base, EVLOOP_ONCE); 2770 tt_int_op(ev1_got, ==, EV_READ); 2771 tt_int_op(ev2_got, ==, EV_READ); 2772 2773 /* Now close and delete dfd then dispatch. We need to do the 2774 * dispatch here so that when we add it later, we think there 2775 * was an intermediate delete. */ 2776 close(dfd); 2777 event_del(ev2); 2778 ev1_got = ev2_got = 0; 2779 event_base_loop(base, EVLOOP_ONCE); 2780 tt_want_int_op(ev1_got, ==, EV_READ); 2781 tt_int_op(ev2_got, ==, 0); 2782 2783 /* Re-duplicate the fd. We need to get the same duplicated 2784 * value that we closed to provoke the epoll quirk.
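	 * (dup2() lets us pick the target descriptor explicitly, which is
	 * how we land on the same number we just closed.)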
Also, we 2785 * need to change the events to write, or else the old lingering 2786 * read event will make the test pass whether the change was 2787 * successful or not. */ 2788 tt_int_op(dup2(fd, dfd), ==, dfd); 2789 event_free(ev2); 2790 ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got); 2791 event_add(ev2, NULL); 2792 ev1_got = ev2_got = 0; 2793 event_base_loop(base, EVLOOP_ONCE); 2794 tt_want_int_op(ev1_got, ==, EV_READ); 2795 tt_int_op(ev2_got, ==, EV_WRITE); 2796 2797 end: 2798 if (ev1) 2799 event_free(ev1); 2800 if (ev2) 2801 event_free(ev2); 2802 if (dfd >= 0) 2803 close(dfd); 2804 } 2805 #endif 2806 2807 #ifdef EVENT__DISABLE_MM_REPLACEMENT 2808 static void 2809 test_mm_functions(void *arg) 2810 { 2811 tinytest_set_test_skipped_(); 2812 } 2813 #else 2814 static int 2815 check_dummy_mem_ok(void *mem_) 2816 { 2817 char *mem = mem_; 2818 mem -= 16; 2819 return !memcmp(mem, "{[<guardedram>]}", 16); 2820 } 2821 2822 static void * 2823 dummy_malloc(size_t len) 2824 { 2825 char *mem = malloc(len+16); 2826 memcpy(mem, "{[<guardedram>]}", 16); 2827 return mem+16; 2828 } 2829 2830 static void * 2831 dummy_realloc(void *mem_, size_t len) 2832 { 2833 char *mem = mem_; 2834 if (!mem) 2835 return dummy_malloc(len); 2836 tt_want(check_dummy_mem_ok(mem_)); 2837 mem -= 16; 2838 mem = realloc(mem, len+16); 2839 return mem+16; 2840 } 2841 2842 static void 2843 dummy_free(void *mem_) 2844 { 2845 char *mem = mem_; 2846 tt_want(check_dummy_mem_ok(mem_)); 2847 mem -= 16; 2848 free(mem); 2849 } 2850 2851 static void 2852 test_mm_functions(void *arg) 2853 { 2854 struct event_base *b = NULL; 2855 struct event_config *cfg = NULL; 2856 event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free); 2857 cfg = event_config_new(); 2858 event_config_avoid_method(cfg, "Nonesuch"); 2859 b = event_base_new_with_config(cfg); 2860 tt_assert(b); 2861 tt_assert(check_dummy_mem_ok(b)); 2862 end: 2863 if (cfg) 2864 event_config_free(cfg); 2865 if (b) 2866 event_base_free(b); 2867 } 2868 #endif 2869 2870 static void 2871 many_event_cb(evutil_socket_t fd, short event, void *arg) 2872 { 2873 int *calledp = arg; 2874 *calledp += 1; 2875 } 2876 2877 static void 2878 test_many_events(void *arg) 2879 { 2880 /* Try 70 events that should all be ready at once. This will 2881 * exercise the "resize" code on most of the backends, and will make 2882 * sure that we can get past the 64-handle limit of some windows 2883 * functions. */ 2884 #define MANY 70 2885 2886 struct basic_test_data *data = arg; 2887 struct event_base *base = data->base; 2888 int one_at_a_time = data->setup_data != NULL; 2889 evutil_socket_t sock[MANY]; 2890 struct event *ev[MANY]; 2891 int called[MANY]; 2892 int i; 2893 int loopflags = EVLOOP_NONBLOCK, evflags=0; 2894 if (one_at_a_time) { 2895 loopflags |= EVLOOP_ONCE; 2896 evflags = EV_PERSIST; 2897 } 2898 2899 memset(sock, 0xff, sizeof(sock)); 2900 memset(ev, 0, sizeof(ev)); 2901 memset(called, 0, sizeof(called)); 2902 2903 for (i = 0; i < MANY; ++i) { 2904 /* We need an event that will hit the backend, and that will 2905 * be ready immediately. "Send a datagram" is an easy 2906 * instance of that. 
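		 * (A freshly created UDP socket is immediately writable, so
		 * EV_WRITE is reported on the first poll.)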
*/ 2907 sock[i] = socket(AF_INET, SOCK_DGRAM, 0); 2908 tt_assert(sock[i] >= 0); 2909 called[i] = 0; 2910 ev[i] = event_new(base, sock[i], EV_WRITE|evflags, 2911 many_event_cb, &called[i]); 2912 event_add(ev[i], NULL); 2913 if (one_at_a_time) 2914 event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); 2915 } 2916 2917 event_base_loop(base, loopflags); 2918 2919 for (i = 0; i < MANY; ++i) { 2920 if (one_at_a_time) 2921 tt_int_op(called[i], ==, MANY - i + 1); 2922 else 2923 tt_int_op(called[i], ==, 1); 2924 } 2925 2926 end: 2927 for (i = 0; i < MANY; ++i) { 2928 if (ev[i]) 2929 event_free(ev[i]); 2930 if (sock[i] >= 0) 2931 evutil_closesocket(sock[i]); 2932 } 2933 #undef MANY 2934 } 2935 2936 static void 2937 test_struct_event_size(void *arg) 2938 { 2939 tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event)); 2940 end: 2941 ; 2942 } 2943 2944 static void 2945 test_get_assignment(void *arg) 2946 { 2947 struct basic_test_data *data = arg; 2948 struct event_base *base = data->base; 2949 struct event *ev1 = NULL; 2950 const char *str = "foo"; 2951 2952 struct event_base *b; 2953 evutil_socket_t s; 2954 short what; 2955 event_callback_fn cb; 2956 void *cb_arg; 2957 2958 ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str); 2959 event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg); 2960 2961 tt_ptr_op(b, ==, base); 2962 tt_int_op(s, ==, data->pair[1]); 2963 tt_int_op(what, ==, EV_READ); 2964 tt_ptr_op(cb, ==, dummy_read_cb); 2965 tt_ptr_op(cb_arg, ==, str); 2966 2967 /* Now make sure this doesn't crash. */ 2968 event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL); 2969 2970 end: 2971 if (ev1) 2972 event_free(ev1); 2973 } 2974 2975 struct foreach_helper { 2976 int count; 2977 const struct event *ev; 2978 }; 2979 2980 static int 2981 foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg) 2982 { 2983 struct foreach_helper *h = event_get_callback_arg(ev); 2984 struct timeval *tv = arg; 2985 if (event_get_callback(ev) != timeout_cb) 2986 return 0; 2987 tt_ptr_op(event_get_base(ev), ==, base); 2988 tt_int_op(tv->tv_sec, ==, 10); 2989 h->ev = ev; 2990 h->count++; 2991 return 0; 2992 end: 2993 return -1; 2994 } 2995 2996 static int 2997 foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg) 2998 { 2999 const struct event **ev_out = arg; 3000 struct foreach_helper *h = event_get_callback_arg(ev); 3001 if (event_get_callback(ev) != timeout_cb) 3002 return 0; 3003 if (h->count == 99) { 3004 *ev_out = ev; 3005 return 101; 3006 } 3007 return 0; 3008 } 3009 3010 static void 3011 test_event_foreach(void *arg) 3012 { 3013 struct basic_test_data *data = arg; 3014 struct event_base *base = data->base; 3015 struct event *ev[5]; 3016 struct foreach_helper visited[5]; 3017 int i; 3018 struct timeval ten_sec = {10,0}; 3019 const struct event *ev_found = NULL; 3020 3021 for (i = 0; i < 5; ++i) { 3022 visited[i].count = 0; 3023 visited[i].ev = NULL; 3024 ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]); 3025 } 3026 3027 tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL)); 3028 tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL)); 3029 3030 event_add(ev[0], &ten_sec); 3031 event_add(ev[1], &ten_sec); 3032 event_active(ev[1], EV_TIMEOUT, 1); 3033 event_active(ev[2], EV_TIMEOUT, 1); 3034 event_add(ev[3], &ten_sec); 3035 /* Don't touch ev[4]. 
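	 * It is neither added nor activated, so the foreach walk below
	 * should never visit it.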
*/ 3036 3037 tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb, 3038 &ten_sec)); 3039 tt_int_op(1, ==, visited[0].count); 3040 tt_int_op(1, ==, visited[1].count); 3041 tt_int_op(1, ==, visited[2].count); 3042 tt_int_op(1, ==, visited[3].count); 3043 tt_ptr_op(ev[0], ==, visited[0].ev); 3044 tt_ptr_op(ev[1], ==, visited[1].ev); 3045 tt_ptr_op(ev[2], ==, visited[2].ev); 3046 tt_ptr_op(ev[3], ==, visited[3].ev); 3047 3048 visited[2].count = 99; 3049 tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb, 3050 &ev_found)); 3051 tt_ptr_op(ev_found, ==, ev[2]); 3052 3053 end: 3054 for (i=0; i<5; ++i) { 3055 event_free(ev[i]); 3056 } 3057 } 3058 3059 static struct event_base *cached_time_base = NULL; 3060 static int cached_time_reset = 0; 3061 static int cached_time_sleep = 0; 3062 static void 3063 cache_time_cb(evutil_socket_t fd, short what, void *arg) 3064 { 3065 struct timeval *tv = arg; 3066 tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv)); 3067 if (cached_time_sleep) { 3068 struct timeval delay = { 0, 30*1000 }; 3069 evutil_usleep_(&delay); 3070 } 3071 if (cached_time_reset) { 3072 event_base_update_cache_time(cached_time_base); 3073 } 3074 end: 3075 ; 3076 } 3077 3078 static void 3079 test_gettimeofday_cached(void *arg) 3080 { 3081 struct basic_test_data *data = arg; 3082 struct event_config *cfg = NULL; 3083 struct event_base *base = NULL; 3084 struct timeval tv1, tv2, tv3, now; 3085 struct event *ev1=NULL, *ev2=NULL, *ev3=NULL; 3086 int cached_time_disable = strstr(data->setup_data, "disable") != NULL; 3087 3088 cfg = event_config_new(); 3089 if (cached_time_disable) { 3090 event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME); 3091 } 3092 cached_time_base = base = event_base_new_with_config(cfg); 3093 tt_assert(base); 3094 3095 /* Try gettimeofday_cached outside of an event loop. */ 3096 evutil_gettimeofday(&now, NULL); 3097 tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1)); 3098 tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2)); 3099 tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10); 3100 tt_int_op(timeval_msec_diff(&tv1, &now), <, 10); 3101 3102 cached_time_reset = strstr(data->setup_data, "reset") != NULL; 3103 cached_time_sleep = strstr(data->setup_data, "sleep") != NULL; 3104 3105 ev1 = event_new(base, -1, 0, cache_time_cb, &tv1); 3106 ev2 = event_new(base, -1, 0, cache_time_cb, &tv2); 3107 ev3 = event_new(base, -1, 0, cache_time_cb, &tv3); 3108 3109 event_active(ev1, EV_TIMEOUT, 1); 3110 event_active(ev2, EV_TIMEOUT, 1); 3111 event_active(ev3, EV_TIMEOUT, 1); 3112 3113 event_base_dispatch(base); 3114 3115 if (cached_time_reset && cached_time_sleep) { 3116 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10); 3117 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10); 3118 } else if (cached_time_disable && cached_time_sleep) { 3119 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10); 3120 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10); 3121 } else if (! 
cached_time_disable) { 3122 tt_assert(evutil_timercmp(&tv1, &tv2, ==)); 3123 tt_assert(evutil_timercmp(&tv2, &tv3, ==)); 3124 } 3125 3126 end: 3127 if (ev1) 3128 event_free(ev1); 3129 if (ev2) 3130 event_free(ev2); 3131 if (ev3) 3132 event_free(ev3); 3133 if (base) 3134 event_base_free(base); 3135 if (cfg) 3136 event_config_free(cfg); 3137 } 3138 3139 static void 3140 tabf_cb(evutil_socket_t fd, short what, void *arg) 3141 { 3142 int *ptr = arg; 3143 *ptr = what; 3144 *ptr += 0x10000; 3145 } 3146 3147 static void 3148 test_active_by_fd(void *arg) 3149 { 3150 struct basic_test_data *data = arg; 3151 struct event_base *base = data->base; 3152 struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL; 3153 int e1,e2,e3,e4; 3154 #ifndef _WIN32 3155 struct event *evsig = NULL; 3156 int es; 3157 #endif 3158 struct timeval tenmin = { 600, 0 }; 3159 3160 /* Ensure no crash on nonexistent FD. */ 3161 event_base_active_by_fd(base, 1000, EV_READ); 3162 3163 /* Ensure no crash on bogus FD. */ 3164 event_base_active_by_fd(base, -1, EV_READ); 3165 3166 /* Ensure no crash on nonexistent/bogus signal. */ 3167 event_base_active_by_signal(base, 1000); 3168 event_base_active_by_signal(base, -1); 3169 3170 event_base_assert_ok_(base); 3171 3172 e1 = e2 = e3 = e4 = 0; 3173 ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1); 3174 ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2); 3175 ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3); 3176 ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4); 3177 tt_assert(ev1); 3178 tt_assert(ev2); 3179 tt_assert(ev3); 3180 tt_assert(ev4); 3181 #ifndef _WIN32 3182 evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es); 3183 tt_assert(evsig); 3184 event_add(evsig, &tenmin); 3185 #endif 3186 3187 event_add(ev1, &tenmin); 3188 event_add(ev2, NULL); 3189 event_add(ev3, NULL); 3190 event_add(ev4, &tenmin); 3191 3192 3193 event_base_assert_ok_(base); 3194 3195 /* Trigger 2, 3, 4 */ 3196 event_base_active_by_fd(base, data->pair[0], EV_WRITE); 3197 event_base_active_by_fd(base, data->pair[1], EV_READ); 3198 #ifndef _WIN32 3199 event_base_active_by_signal(base, SIGHUP); 3200 #endif 3201 3202 event_base_assert_ok_(base); 3203 3204 event_base_loop(base, EVLOOP_ONCE); 3205 3206 tt_int_op(e1, ==, 0); 3207 tt_int_op(e2, ==, EV_WRITE | 0x10000); 3208 tt_int_op(e3, ==, EV_READ | 0x10000); 3209 /* Mask out EV_WRITE here, since it could be genuinely writeable. 
*/ 3210 tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | 0x10000); 3211 #ifndef _WIN32 3212 tt_int_op(es, ==, EV_SIGNAL | 0x10000); 3213 #endif 3214 3215 end: 3216 if (ev1) 3217 event_free(ev1); 3218 if (ev2) 3219 event_free(ev2); 3220 if (ev3) 3221 event_free(ev3); 3222 if (ev4) 3223 event_free(ev4); 3224 #ifndef _WIN32 3225 if (evsig) 3226 event_free(evsig); 3227 #endif 3228 } 3229 3230 struct testcase_t main_testcases[] = { 3231 /* Some converted-over tests */ 3232 { "methods", test_methods, TT_FORK, NULL, NULL }, 3233 { "version", test_version, 0, NULL, NULL }, 3234 BASIC(base_features, TT_FORK|TT_NO_LOGS), 3235 { "base_environ", test_base_environ, TT_FORK, NULL, NULL }, 3236 3237 BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR), 3238 BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR), 3239 3240 BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE), 3241 BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE), 3242 BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE), 3243 BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE), 3244 BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE), 3245 3246 BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS), 3247 BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS), 3248 BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3249 BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3250 3251 /* These are still using the old API */ 3252 LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE), 3253 { "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, 3254 { "persistent_active_timeout", test_persistent_active_timeout, 3255 TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, 3256 LEGACY(priorities, TT_FORK|TT_NEED_BASE), 3257 BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE), 3258 { "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE, 3259 &basic_setup, NULL }, 3260 3261 /* These legacy tests may not all need all of these flags. 
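	 * (TT_ISOLATED appears to bundle TT_FORK with a per-test event base
	 * and socketpair; see its definition in regress.h.)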
*/ 3262 LEGACY(simpleread, TT_ISOLATED), 3263 LEGACY(simpleread_multiple, TT_ISOLATED), 3264 LEGACY(simplewrite, TT_ISOLATED), 3265 { "simpleclose", test_simpleclose, TT_FORK, &basic_setup, 3266 NULL }, 3267 LEGACY(multiple, TT_ISOLATED), 3268 LEGACY(persistent, TT_ISOLATED), 3269 LEGACY(combined, TT_ISOLATED), 3270 LEGACY(simpletimeout, TT_ISOLATED), 3271 LEGACY(loopbreak, TT_ISOLATED), 3272 LEGACY(loopexit, TT_ISOLATED), 3273 LEGACY(loopexit_multiple, TT_ISOLATED), 3274 LEGACY(nonpersist_readd, TT_ISOLATED), 3275 LEGACY(multiple_events_for_same_fd, TT_ISOLATED), 3276 LEGACY(want_only_once, TT_ISOLATED), 3277 { "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL }, 3278 { "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL }, 3279 { "event_pending", test_event_pending, TT_ISOLATED, &basic_setup, 3280 NULL }, 3281 #ifndef _WIN32 3282 { "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL }, 3283 #endif 3284 { "mm_functions", test_mm_functions, TT_FORK, NULL, NULL }, 3285 { "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL }, 3286 { "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 }, 3287 3288 { "struct_event_size", test_struct_event_size, 0, NULL, NULL }, 3289 BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3290 3291 BASIC(event_foreach, TT_FORK|TT_NEED_BASE), 3292 { "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" }, 3293 { "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" }, 3294 { "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" }, 3295 { "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" }, 3296 { "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" }, 3297 3298 BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3299 3300 #ifndef _WIN32 3301 LEGACY(fork, TT_ISOLATED), 3302 #endif 3303 END_OF_TESTCASES 3304 }; 3305 3306 struct testcase_t evtag_testcases[] = { 3307 { "int", evtag_int_test, TT_FORK, NULL, NULL }, 3308 { "fuzz", evtag_fuzz, TT_FORK, NULL, NULL }, 3309 { "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL }, 3310 { "peek", evtag_test_peek, 0, NULL, NULL }, 3311 3312 END_OF_TESTCASES 3313 }; 3314 3315 struct testcase_t signal_testcases[] = { 3316 #ifndef _WIN32 3317 LEGACY(simplestsignal, TT_ISOLATED), 3318 LEGACY(simplesignal, TT_ISOLATED), 3319 LEGACY(multiplesignal, TT_ISOLATED), 3320 LEGACY(immediatesignal, TT_ISOLATED), 3321 LEGACY(signal_dealloc, TT_ISOLATED), 3322 LEGACY(signal_pipeloss, TT_ISOLATED), 3323 LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS), 3324 LEGACY(signal_restore, TT_ISOLATED), 3325 LEGACY(signal_assert, TT_ISOLATED), 3326 LEGACY(signal_while_processing, TT_ISOLATED), 3327 #endif 3328 END_OF_TESTCASES 3329 }; 3330 3331
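
/*
 * Illustrative sketch only, not part of the original suite: roughly how a
 * new forked test with its own event base would be written and registered
 * under this file's existing conventions.  The function name and table entry
 * below are hypothetical, and the whole block is excluded from the build
 * with #if 0.
 */
#if 0
static void
test_example_noop(void *arg)
{
	struct basic_test_data *data = arg;	/* filled in by &basic_setup */
	struct timeval tv = { 0, 10*1000 };

	/* Ask the loop to exit after 10 msec so the test always terminates. */
	event_base_loopexit(data->base, &tv);
	event_base_dispatch(data->base);

	tt_assert(data->base);
end:
	;
}

/* The matching entry would go before END_OF_TESTCASES in main_testcases[]:
 *	{ "example_noop", test_example_noop, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
 */
#endif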