/*
 * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <sys/types.h>

#ifdef _WIN32
#ifndef _WIN32_WINNT
/* Minimum required for InitializeCriticalSectionAndSpinCount */
#define _WIN32_WINNT 0x0403
#endif
#include <winsock2.h>
#include <winerror.h>
#include <ws2tcpip.h>
#include <mswsock.h>
#endif
#include <errno.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "event2/listener.h"
#include "event2/util.h"
#include "event2/event.h"
#include "event2/event_struct.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "log-internal.h"
#include "evthread-internal.h"
#ifdef _WIN32
#include "iocp-internal.h"
#include "defer-internal.h"
#include "event-internal.h"
#endif

struct evconnlistener_ops {
	int (*enable)(struct evconnlistener *);
	int (*disable)(struct evconnlistener *);
	void (*destroy)(struct evconnlistener *);
	void (*shutdown)(struct evconnlistener *);
	evutil_socket_t (*getfd)(struct evconnlistener *);
	struct event_base *(*getbase)(struct evconnlistener *);
};

struct evconnlistener {
	const struct evconnlistener_ops *ops;
	void *lock;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	unsigned flags;
	short refcnt;
	int accept4_flags;
	unsigned enabled : 1;
};

struct evconnlistener_event {
	struct evconnlistener base;
	struct event listener;
};

#ifdef _WIN32
struct evconnlistener_iocp {
	struct evconnlistener base;
	evutil_socket_t fd;
	struct event_base *event_base;
	struct event_iocp_port *port;
	short n_accepting;
	unsigned shutting_down : 1;
	unsigned event_added : 1;
	struct accepting_socket **accepting;
};
#endif

#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)
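
/* Added note: LOCK/UNLOCK are effectively no-ops when the listener has no
 * lock, since the EVLOCK_* macros skip a NULL lock.  A lock is only allocated
 * when LEV_OPT_THREADSAFE is requested (the IOCP listener below forces that
 * flag). */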

struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd); /* XXXX export this? */

static int event_listener_enable(struct evconnlistener *);
static int event_listener_disable(struct evconnlistener *);
static void event_listener_destroy(struct evconnlistener *);
static evutil_socket_t event_listener_getfd(struct evconnlistener *);
static struct event_base *event_listener_getbase(struct evconnlistener *);

#if 0
static void
listener_incref_and_lock(struct evconnlistener *listener)
{
	LOCK(listener);
	++listener->refcnt;
}
#endif

static int
listener_decref_and_unlock(struct evconnlistener *listener)
{
	int refcnt = --listener->refcnt;
	if (refcnt == 0) {
		listener->ops->destroy(listener);
		UNLOCK(listener);
		EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		mm_free(listener);
		return 1;
	} else {
		UNLOCK(listener);
		return 0;
	}
}

static const struct evconnlistener_ops evconnlistener_event_ops = {
	event_listener_enable,
	event_listener_disable,
	event_listener_destroy,
	NULL, /* shutdown */
	event_listener_getfd,
	event_listener_getbase
};

static void listener_read_cb(evutil_socket_t, short, void *);

struct evconnlistener *
evconnlistener_new(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct evconnlistener_event *lev;

#ifdef _WIN32
	if (base && event_base_get_iocp_(base)) {
		const struct win32_extension_fns *ext =
		    event_get_win32_extension_fns_();
		if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
			return evconnlistener_new_async(base, cb, ptr, flags,
			    backlog, fd);
	}
#endif

	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			return NULL;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			return NULL;
	}

	lev = mm_calloc(1, sizeof(struct evconnlistener_event));
	if (!lev)
		return NULL;

	lev->base.ops = &evconnlistener_event_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;

	lev->base.accept4_flags = 0;
	if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;

	if (flags & LEV_OPT_THREADSAFE) {
		EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	}

	event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
	    listener_read_cb, lev);

	if (!(flags & LEV_OPT_DISABLED))
		evconnlistener_enable(&lev->base);

	return &lev->base;
}
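
/*
 * Illustrative usage sketch (not part of this module): a caller would
 * typically create a bound listener roughly as below.  "base" and
 * "accept_cb" are placeholder names; accept_cb is any function matching the
 * evconnlistener_cb signature from event2/listener.h.
 *
 *	struct sockaddr_in sin;
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *	sin.sin_port = htons(9995);
 *	listener = evconnlistener_new_bind(base, accept_cb, NULL,
 *	    LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
 *	    (struct sockaddr *)&sin, sizeof(sin));
 */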

struct evconnlistener *
evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
    void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
    int socklen)
{
	struct evconnlistener *listener;
	evutil_socket_t fd;
	int on = 1;
	int family = sa ? sa->sa_family : AF_UNSPEC;
	int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK;

	if (backlog == 0)
		return NULL;

	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		socktype |= EVUTIL_SOCK_CLOEXEC;

	fd = evutil_socket_(family, socktype, 0);
	if (fd == -1)
		return NULL;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0)
		goto err;

	if (flags & LEV_OPT_REUSEABLE) {
		if (evutil_make_listen_socket_reuseable(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_REUSEABLE_PORT) {
		if (evutil_make_listen_socket_reuseable_port(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_DEFERRED_ACCEPT) {
		if (evutil_make_tcp_listen_socket_deferred(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_BIND_IPV6ONLY) {
		if (evutil_make_listen_socket_ipv6only(fd) < 0)
			goto err;
	}

	if (sa) {
		if (bind(fd, sa, socklen)<0)
			goto err;
	}

	listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
	if (!listener)
		goto err;

	return listener;
err:
	evutil_closesocket(fd);
	return NULL;
}

void
evconnlistener_free(struct evconnlistener *lev)
{
	LOCK(lev);
	lev->cb = NULL;
	lev->errorcb = NULL;
	if (lev->ops->shutdown)
		lev->ops->shutdown(lev);
	listener_decref_and_unlock(lev);
}

static void
event_listener_destroy(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);

	event_del(&lev_e->listener);
	if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
		evutil_closesocket(event_get_fd(&lev_e->listener));
	event_debug_unassign(&lev_e->listener);
}

int
evconnlistener_enable(struct evconnlistener *lev)
{
	int r;
	LOCK(lev);
	lev->enabled = 1;
	if (lev->cb)
		r = lev->ops->enable(lev);
	else
		r = 0;
	UNLOCK(lev);
	return r;
}

int
evconnlistener_disable(struct evconnlistener *lev)
{
	int r;
	LOCK(lev);
	lev->enabled = 0;
	r = lev->ops->disable(lev);
	UNLOCK(lev);
	return r;
}

static int
event_listener_enable(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_add(&lev_e->listener, NULL);
}

static int
event_listener_disable(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_del(&lev_e->listener);
}

evutil_socket_t
evconnlistener_get_fd(struct evconnlistener *lev)
{
	evutil_socket_t fd;
	LOCK(lev);
	fd = lev->ops->getfd(lev);
	UNLOCK(lev);
	return fd;
}

static evutil_socket_t
event_listener_getfd(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_get_fd(&lev_e->listener);
}

struct event_base *
evconnlistener_get_base(struct evconnlistener *lev)
{
	struct event_base *base;
	LOCK(lev);
	base = lev->ops->getbase(lev);
	UNLOCK(lev);
	return base;
}

static struct event_base *
event_listener_getbase(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_get_base(&lev_e->listener);
}
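
/* Added note: installing a callback on an enabled listener that previously
 * had none re-arms listening, since evconnlistener_enable() skips
 * ops->enable() while cb is NULL. */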

void
evconnlistener_set_cb(struct evconnlistener *lev,
    evconnlistener_cb cb, void *arg)
{
	int enable = 0;
	LOCK(lev);
	if (lev->enabled && !lev->cb)
		enable = 1;
	lev->cb = cb;
	lev->user_data = arg;
	if (enable)
		evconnlistener_enable(lev);
	UNLOCK(lev);
}

void
evconnlistener_set_error_cb(struct evconnlistener *lev,
    evconnlistener_errorcb errorcb)
{
	LOCK(lev);
	lev->errorcb = errorcb;
	UNLOCK(lev);
}

static void
listener_read_cb(evutil_socket_t fd, short what, void *p)
{
	struct evconnlistener *lev = p;
	int err;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	LOCK(lev);
	while (1) {
		struct sockaddr_storage ss;
		ev_socklen_t socklen = sizeof(ss);
		evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
		if (new_fd < 0)
			break;
		if (socklen == 0) {
			/* This can happen with some older linux kernels in
			 * response to nmap. */
			evutil_closesocket(new_fd);
			continue;
		}

		if (lev->cb == NULL) {
			evutil_closesocket(new_fd);
			UNLOCK(lev);
			return;
		}
		++lev->refcnt;
		cb = lev->cb;
		user_data = lev->user_data;
		UNLOCK(lev);
		cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
		    user_data);
		LOCK(lev);
		if (lev->refcnt == 1) {
			int freed = listener_decref_and_unlock(lev);
			EVUTIL_ASSERT(freed);
			return;
		}
		--lev->refcnt;
		if (!lev->enabled) {
			/* the callback could have disabled the listener */
			UNLOCK(lev);
			return;
		}
	}
	err = evutil_socket_geterror(fd);
	if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
		UNLOCK(lev);
		return;
	}
	if (lev->errorcb != NULL) {
		++lev->refcnt;
		errorcb = lev->errorcb;
		user_data = lev->user_data;
		UNLOCK(lev);
		errorcb(lev, user_data);
		LOCK(lev);
		listener_decref_and_unlock(lev);
	} else {
		event_sock_warn(fd, "Error from accept() call");
		UNLOCK(lev);
	}
}
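
/*
 * Windows IOCP-based listener implementation.  Rather than waiting for the
 * listening socket to become readable, it keeps a small pool of
 * "accepting_socket" structures, each with an outstanding overlapped
 * AcceptEx() call.  Completions arrive on the event_iocp_port and are handed
 * to the user's callback through a deferred callback on the event_base.
 */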

#ifdef _WIN32
struct accepting_socket {
	CRITICAL_SECTION lock;
	struct event_overlapped overlapped;
	SOCKET s;
	int error;
	struct event_callback deferred;
	struct evconnlistener_iocp *lev;
	ev_uint8_t buflen;
	ev_uint8_t family;
	unsigned free_on_cb:1;
	char addrbuf[1];
};

static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
    ev_ssize_t n, int ok);
static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg);

static void
iocp_listener_event_add(struct evconnlistener_iocp *lev)
{
	if (lev->event_added)
		return;

	lev->event_added = 1;
	event_base_add_virtual_(lev->event_base);
}

static void
iocp_listener_event_del(struct evconnlistener_iocp *lev)
{
	if (!lev->event_added)
		return;

	lev->event_added = 0;
	event_base_del_virtual_(lev->event_base);
}

static struct accepting_socket *
new_accepting_socket(struct evconnlistener_iocp *lev, int family)
{
	struct accepting_socket *res;
	int addrlen;
	int buflen;

	if (family == AF_INET)
		addrlen = sizeof(struct sockaddr_in);
	else if (family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
	else
		return NULL;
	buflen = (addrlen+16)*2;

	res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
	if (!res)
		return NULL;

	event_overlapped_init_(&res->overlapped, accepted_socket_cb);
	res->s = EVUTIL_INVALID_SOCKET;
	res->lev = lev;
	res->buflen = buflen;
	res->family = family;

	event_deferred_cb_init_(&res->deferred,
	    event_base_get_npriorities(lev->event_base) / 2,
	    accepted_socket_invoke_user_cb, res);

	InitializeCriticalSectionAndSpinCount(&res->lock, 1000);

	return res;
}

static void
free_and_unlock_accepting_socket(struct accepting_socket *as)
{
	/* requires lock. */
	if (as->s != EVUTIL_INVALID_SOCKET)
		closesocket(as->s);

	LeaveCriticalSection(&as->lock);
	DeleteCriticalSection(&as->lock);
	mm_free(as);
}

static int
start_accepting(struct accepting_socket *as)
{
	/* requires lock */
	const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
	DWORD pending = 0;
	SOCKET s = socket(as->family, SOCK_STREAM, 0);
	int error = 0;

	if (!as->lev->base.enabled)
		return 0;

	if (s == EVUTIL_INVALID_SOCKET) {
		error = WSAGetLastError();
		goto report_err;
	}

	/* XXXX It turns out we need to do this again later.  Does this call
	 * have any effect? */
	setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
	    (char *)&as->lev->fd, sizeof(&as->lev->fd));

	if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		evutil_make_socket_nonblocking(s);

	if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) {
		closesocket(s);
		return -1;
	}

	as->s = s;

	if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
		as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
	{
		/* Immediate success! */
		accepted_socket_cb(&as->overlapped, 1, 0, 1);
	} else {
		error = WSAGetLastError();
		if (error != ERROR_IO_PENDING) {
			goto report_err;
		}
	}

	return 0;

report_err:
	as->error = error;
	event_deferred_cb_schedule_(
		as->lev->event_base,
		&as->deferred);
	return 0;
}

static void
stop_accepting(struct accepting_socket *as)
{
	/* requires lock. */
	SOCKET s = as->s;
	as->s = EVUTIL_INVALID_SOCKET;
	closesocket(s);
}
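
/* Added note: this runs as a deferred callback after accepted_socket_cb
 * schedules it.  It takes the listener lock and the per-socket critical
 * section, reports either the accepted socket or the stored error to the
 * user, and then re-arms AcceptEx on the same accepting_socket. */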

static void
accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg)
{
	struct accepting_socket *as = arg;

	struct sockaddr *sa_local=NULL, *sa_remote=NULL;
	int socklen_local=0, socklen_remote=0;
	const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
	struct evconnlistener *lev = &as->lev->base;
	evutil_socket_t sock=-1;
	void *data;
	evconnlistener_cb cb=NULL;
	evconnlistener_errorcb errorcb=NULL;
	int error;

	EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);

	LOCK(lev);
	EnterCriticalSection(&as->lock);
	if (as->free_on_cb) {
		free_and_unlock_accepting_socket(as);
		listener_decref_and_unlock(lev);
		return;
	}

	++lev->refcnt;

	error = as->error;
	if (error) {
		as->error = 0;
		errorcb = lev->errorcb;
	} else {
		ext->GetAcceptExSockaddrs(
			as->addrbuf, 0, as->buflen/2, as->buflen/2,
			&sa_local, &socklen_local, &sa_remote,
			&socklen_remote);
		sock = as->s;
		cb = lev->cb;
		as->s = EVUTIL_INVALID_SOCKET;

		/* We need to call this so getsockname, getpeername, and
		 * shutdown work correctly on the accepted socket. */
		/* XXXX handle error? */
		setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
		    (char *)&as->lev->fd, sizeof(&as->lev->fd));
	}
	data = lev->user_data;

	LeaveCriticalSection(&as->lock);
	UNLOCK(lev);

	if (errorcb) {
		WSASetLastError(error);
		errorcb(lev, data);
	} else if (cb) {
		cb(lev, sock, sa_remote, socklen_remote, data);
	}

	LOCK(lev);
	if (listener_decref_and_unlock(lev))
		return;

	EnterCriticalSection(&as->lock);
	start_accepting(as);
	LeaveCriticalSection(&as->lock);
}

static void
accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
{
	struct accepting_socket *as =
	    EVUTIL_UPCAST(o, struct accepting_socket, overlapped);

	LOCK(&as->lev->base);
	EnterCriticalSection(&as->lock);
	if (ok) {
		/* XXXX Don't do this if some EV_MT flag is set. */
		event_deferred_cb_schedule_(
			as->lev->event_base,
			&as->deferred);
		LeaveCriticalSection(&as->lock);
	} else if (as->free_on_cb) {
		struct evconnlistener *lev = &as->lev->base;
		free_and_unlock_accepting_socket(as);
		listener_decref_and_unlock(lev);
		return;
	} else if (as->s == EVUTIL_INVALID_SOCKET) {
		/* This is okay; we were disabled by iocp_listener_disable. */
		LeaveCriticalSection(&as->lock);
	} else {
		/* Some error on accept that we couldn't actually handle. */
		BOOL ok;
		DWORD transfer = 0, flags=0;
		event_sock_warn(as->s, "Unexpected error on AcceptEx");
		ok = WSAGetOverlappedResult(as->s, &o->overlapped,
		    &transfer, FALSE, &flags);
		if (ok) {
			/* well, that was confusing! */
			as->error = 1;
		} else {
			as->error = WSAGetLastError();
		}
		event_deferred_cb_schedule_(
			as->lev->event_base,
			&as->deferred);
		LeaveCriticalSection(&as->lock);
	}
	UNLOCK(&as->lev->base);
}

static int
iocp_listener_enable(struct evconnlistener *lev)
{
	int i;
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	LOCK(lev);
	iocp_listener_event_add(lev_iocp);
	for (i = 0; i < lev_iocp->n_accepting; ++i) {
		struct accepting_socket *as = lev_iocp->accepting[i];
		if (!as)
			continue;
		EnterCriticalSection(&as->lock);
		if (!as->free_on_cb && as->s == EVUTIL_INVALID_SOCKET)
			start_accepting(as);
		LeaveCriticalSection(&as->lock);
	}
	UNLOCK(lev);
	return 0;
}

static int
iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
{
	int i;
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	LOCK(lev);
	iocp_listener_event_del(lev_iocp);
	for (i = 0; i < lev_iocp->n_accepting; ++i) {
		struct accepting_socket *as = lev_iocp->accepting[i];
		if (!as)
			continue;
		EnterCriticalSection(&as->lock);
		if (!as->free_on_cb && as->s != EVUTIL_INVALID_SOCKET) {
			if (shutdown)
				as->free_on_cb = 1;
			stop_accepting(as);
		}
		LeaveCriticalSection(&as->lock);
	}

	if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE)
		evutil_closesocket(lev_iocp->fd);

	UNLOCK(lev);
	return 0;
}

static int
iocp_listener_disable(struct evconnlistener *lev)
{
	return iocp_listener_disable_impl(lev,0);
}
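
/* Added note: serves as both the destroy and the shutdown op in
 * evconnlistener_iocp_ops.  Accepting sockets with a pending AcceptEx are
 * marked free_on_cb and freed when their completion is delivered. */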
static void
iocp_listener_destroy(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	if (! lev_iocp->shutting_down) {
		lev_iocp->shutting_down = 1;
		iocp_listener_disable_impl(lev,1);
	}

}

static evutil_socket_t
iocp_listener_getfd(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
	return lev_iocp->fd;
}
static struct event_base *
iocp_listener_getbase(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
	return lev_iocp->event_base;
}

static const struct evconnlistener_ops evconnlistener_iocp_ops = {
	iocp_listener_enable,
	iocp_listener_disable,
	iocp_listener_destroy,
	iocp_listener_destroy, /* shutdown */
	iocp_listener_getfd,
	iocp_listener_getbase
};

/* XXX define some way to override this. */
#define N_SOCKETS_PER_LISTENER 4

struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);
	struct evconnlistener_iocp *lev;
	int i;

	flags |= LEV_OPT_THREADSAFE;

	if (!base || !event_base_get_iocp_(base))
		goto err;

	/* XXXX duplicate code */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			goto err;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			goto err;
	}
	if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
		event_sock_warn(fd, "getsockname");
		goto err;
	}
	lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
	if (!lev) {
		event_warn("calloc");
		goto err;
	}
	lev->base.ops = &evconnlistener_iocp_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;
	lev->base.enabled = 1;

	lev->port = event_base_get_iocp_(base);
	lev->fd = fd;
	lev->event_base = base;

	if (event_iocp_port_associate_(lev->port, fd, 1) < 0)
		goto err_free_lev;

	EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	lev->n_accepting = N_SOCKETS_PER_LISTENER;
	lev->accepting = mm_calloc(lev->n_accepting,
	    sizeof(struct accepting_socket *));
	if (!lev->accepting) {
		event_warn("calloc");
		goto err_delete_lock;
	}
	for (i = 0; i < lev->n_accepting; ++i) {
		lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
		if (!lev->accepting[i]) {
			event_warnx("Couldn't create accepting socket");
			goto err_free_accepting;
		}
		if (cb && start_accepting(lev->accepting[i]) < 0) {
			event_warnx("Couldn't start accepting on socket");
			EnterCriticalSection(&lev->accepting[i]->lock);
			free_and_unlock_accepting_socket(lev->accepting[i]);
			goto err_free_accepting;
		}
		++lev->base.refcnt;
	}

	iocp_listener_event_add(lev);

	return &lev->base;

err_free_accepting:
	mm_free(lev->accepting);
	/* XXXX free the other elements. */
err_delete_lock:
	EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
err_free_lev:
	mm_free(lev);
err:
	/* Don't close the fd, it is caller's responsibility. */
	return NULL;
}

#endif