/*
 * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <sys/types.h>

#ifdef _WIN32
#ifndef _WIN32_WINNT
/* Minimum required for InitializeCriticalSectionAndSpinCount */
#define _WIN32_WINNT 0x0403
#endif
#include <winsock2.h>
#include <ws2tcpip.h>
#include <mswsock.h>
#endif
#include <errno.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "event2/listener.h"
#include "event2/util.h"
#include "event2/event.h"
#include "event2/event_struct.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "log-internal.h"
#include "evthread-internal.h"
#ifdef _WIN32
#include "iocp-internal.h"
#include "defer-internal.h"
#include "event-internal.h"
#endif

struct evconnlistener_ops {
	int (*enable)(struct evconnlistener *);
	int (*disable)(struct evconnlistener *);
	void (*destroy)(struct evconnlistener *);
	void (*shutdown)(struct evconnlistener *);
	evutil_socket_t (*getfd)(struct evconnlistener *);
	struct event_base *(*getbase)(struct evconnlistener *);
};

struct evconnlistener {
	const struct evconnlistener_ops *ops;
	void *lock;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	unsigned flags;
	short refcnt;
	int accept4_flags;
	unsigned enabled : 1;
};

struct evconnlistener_event {
	struct evconnlistener base;
	struct event listener;
};

#ifdef _WIN32
struct evconnlistener_iocp {
	struct evconnlistener base;
	evutil_socket_t fd;
	struct event_base *event_base;
	struct event_iocp_port *port;
	short n_accepting;
	unsigned shutting_down : 1;
	unsigned event_added : 1;
	struct accepting_socket **accepting;
};
#endif

#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)
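
/*
 * Usage sketch (illustrative only; the callback name, port number, and
 * option flags are arbitrary choices for the example, and on POSIX systems
 * it additionally assumes <netinet/in.h> for struct sockaddr_in):
 *
 *	#include <event2/listener.h>
 *	#include <event2/event.h>
 *	#include <string.h>
 *
 *	static void
 *	example_accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
 *	    struct sockaddr *addr, int socklen, void *arg)
 *	{
 *		// The accepted socket is already non-blocking unless
 *		// LEV_OPT_LEAVE_SOCKETS_BLOCKING was given.  A real program
 *		// would hand it to a bufferevent; here we just close it.
 *		evutil_closesocket(fd);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct event_base *base = event_base_new();
 *		struct evconnlistener *lev;
 *		struct sockaddr_in sin;
 *
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;
 *		sin.sin_port = htons(9995);	// example port
 *
 *		lev = evconnlistener_new_bind(base, example_accept_cb, NULL,
 *		    LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
 *		    (struct sockaddr *)&sin, sizeof(sin));
 *		if (!lev)
 *			return 1;
 *		event_base_dispatch(base);
 *		evconnlistener_free(lev);
 *		event_base_free(base);
 *		return 0;
 *	}
 */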
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd); /* XXXX export this? */

static int event_listener_enable(struct evconnlistener *);
static int event_listener_disable(struct evconnlistener *);
static void event_listener_destroy(struct evconnlistener *);
static evutil_socket_t event_listener_getfd(struct evconnlistener *);
static struct event_base *event_listener_getbase(struct evconnlistener *);

#if 0
static void
listener_incref_and_lock(struct evconnlistener *listener)
{
	LOCK(listener);
	++listener->refcnt;
}
#endif

static int
listener_decref_and_unlock(struct evconnlistener *listener)
{
	int refcnt = --listener->refcnt;
	if (refcnt == 0) {
		listener->ops->destroy(listener);
		UNLOCK(listener);
		EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		mm_free(listener);
		return 1;
	} else {
		UNLOCK(listener);
		return 0;
	}
}

static const struct evconnlistener_ops evconnlistener_event_ops = {
	event_listener_enable,
	event_listener_disable,
	event_listener_destroy,
	NULL, /* shutdown */
	event_listener_getfd,
	event_listener_getbase
};

static void listener_read_cb(evutil_socket_t, short, void *);

struct evconnlistener *
evconnlistener_new(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct evconnlistener_event *lev;

#ifdef _WIN32
	if (base && event_base_get_iocp_(base)) {
		const struct win32_extension_fns *ext =
		    event_get_win32_extension_fns_();
		if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
			return evconnlistener_new_async(base, cb, ptr, flags,
			    backlog, fd);
	}
#endif

	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			return NULL;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			return NULL;
	}

	lev = mm_calloc(1, sizeof(struct evconnlistener_event));
	if (!lev)
		return NULL;

	lev->base.ops = &evconnlistener_event_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;

	lev->base.accept4_flags = 0;
	if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;

	if (flags & LEV_OPT_THREADSAFE) {
		EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	}

	event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
	    listener_read_cb, lev);

	if (!(flags & LEV_OPT_DISABLED))
		evconnlistener_enable(&lev->base);

	return &lev->base;
}

struct evconnlistener *
evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
    void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
    int socklen)
{
	struct evconnlistener *listener;
	evutil_socket_t fd;
	int on = 1;
	int family = sa ? sa->sa_family : AF_UNSPEC;
	int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK;

	if (backlog == 0)
		return NULL;

	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		socktype |= EVUTIL_SOCK_CLOEXEC;

	fd = evutil_socket_(family, socktype, 0);
	if (fd == -1)
		return NULL;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0)
		goto err;

	if (flags & LEV_OPT_REUSEABLE) {
		if (evutil_make_listen_socket_reuseable(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_REUSEABLE_PORT) {
		if (evutil_make_listen_socket_reuseable_port(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_DEFERRED_ACCEPT) {
		if (evutil_make_tcp_listen_socket_deferred(fd) < 0)
			goto err;
	}

	if (sa) {
		if (bind(fd, sa, socklen)<0)
			goto err;
	}

	listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
	if (!listener)
		goto err;

	return listener;
err:
	evutil_closesocket(fd);
	return NULL;
}

void
evconnlistener_free(struct evconnlistener *lev)
{
	LOCK(lev);
	lev->cb = NULL;
	lev->errorcb = NULL;
	if (lev->ops->shutdown)
		lev->ops->shutdown(lev);
	listener_decref_and_unlock(lev);
}

static void
event_listener_destroy(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);

	event_del(&lev_e->listener);
	if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
		evutil_closesocket(event_get_fd(&lev_e->listener));
	event_debug_unassign(&lev_e->listener);
}

int
evconnlistener_enable(struct evconnlistener *lev)
{
	int r;
	LOCK(lev);
	lev->enabled = 1;
	if (lev->cb)
		r = lev->ops->enable(lev);
	else
		r = 0;
	UNLOCK(lev);
	return r;
}

int
evconnlistener_disable(struct evconnlistener *lev)
{
	int r;
	LOCK(lev);
	lev->enabled = 0;
	r = lev->ops->disable(lev);
	UNLOCK(lev);
	return r;
}

static int
event_listener_enable(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_add(&lev_e->listener, NULL);
}

static int
event_listener_disable(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_del(&lev_e->listener);
}

evutil_socket_t
evconnlistener_get_fd(struct evconnlistener *lev)
{
	evutil_socket_t fd;
	LOCK(lev);
	fd = lev->ops->getfd(lev);
	UNLOCK(lev);
	return fd;
}

static evutil_socket_t
event_listener_getfd(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_get_fd(&lev_e->listener);
}

struct event_base *
evconnlistener_get_base(struct evconnlistener *lev)
{
	struct event_base *base;
	LOCK(lev);
	base = lev->ops->getbase(lev);
	UNLOCK(lev);
	return base;
}

static struct event_base *
event_listener_getbase(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_get_base(&lev_e->listener);
}

void
evconnlistener_set_cb(struct evconnlistener *lev,
    evconnlistener_cb cb, void *arg)
{
	int enable = 0;
	LOCK(lev);
	if (lev->enabled && !lev->cb)
		enable = 1;
	lev->cb = cb;
	lev->user_data = arg;
	if (enable)
		evconnlistener_enable(lev);
	UNLOCK(lev);
}

void
evconnlistener_set_error_cb(struct evconnlistener *lev,
    evconnlistener_errorcb errorcb)
{
	LOCK(lev);
	lev->errorcb = errorcb;
	UNLOCK(lev);
}

static void
listener_read_cb(evutil_socket_t fd, short what, void *p)
{
	struct evconnlistener *lev = p;
	int err;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	LOCK(lev);
	while (1) {
		struct sockaddr_storage ss;
		ev_socklen_t socklen = sizeof(ss);
		evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
		if (new_fd < 0)
			break;
		if (socklen == 0) {
			/* This can happen with some older linux kernels in
			 * response to nmap. */
			evutil_closesocket(new_fd);
			continue;
		}

		if (lev->cb == NULL) {
			evutil_closesocket(new_fd);
			UNLOCK(lev);
			return;
		}
		++lev->refcnt;
		cb = lev->cb;
		user_data = lev->user_data;
		UNLOCK(lev);
		cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
		    user_data);
		LOCK(lev);
		if (lev->refcnt == 1) {
			/* The reference we took before running the callback
			 * is the only one left: the callback released the
			 * listener (e.g. via evconnlistener_free()), so
			 * finish the teardown here. */
			int freed = listener_decref_and_unlock(lev);
			EVUTIL_ASSERT(freed);

			evutil_closesocket(new_fd);
			return;
		}
		--lev->refcnt;
	}
	err = evutil_socket_geterror(fd);
	if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
		UNLOCK(lev);
		return;
	}
	if (lev->errorcb != NULL) {
		++lev->refcnt;
		errorcb = lev->errorcb;
		user_data = lev->user_data;
		UNLOCK(lev);
		errorcb(lev, user_data);
		LOCK(lev);
		listener_decref_and_unlock(lev);
	} else {
		event_sock_warn(fd, "Error from accept() call");
		UNLOCK(lev);
	}
}

#ifdef _WIN32
struct accepting_socket {
	CRITICAL_SECTION lock;
	struct event_overlapped overlapped;
	SOCKET s;
	int error;
	struct event_callback deferred;
	struct evconnlistener_iocp *lev;
	ev_uint8_t buflen;
	ev_uint8_t family;
	unsigned free_on_cb:1;
	char addrbuf[1];
};

static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
    ev_ssize_t n, int ok);
static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg);

static void
iocp_listener_event_add(struct evconnlistener_iocp *lev)
{
	if (lev->event_added)
		return;

	lev->event_added = 1;
	event_base_add_virtual_(lev->event_base);
}

static void
iocp_listener_event_del(struct evconnlistener_iocp *lev)
{
	if (!lev->event_added)
		return;

	lev->event_added = 0;
	event_base_del_virtual_(lev->event_base);
}

static struct accepting_socket *
new_accepting_socket(struct evconnlistener_iocp *lev, int family)
{
	struct accepting_socket *res;
	int addrlen;
	int buflen;

	if (family == AF_INET)
		addrlen = sizeof(struct sockaddr_in);
	else if (family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
	else
		return NULL;
	buflen = (addrlen+16)*2;

	res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
	if (!res)
		return NULL;

	event_overlapped_init_(&res->overlapped, accepted_socket_cb);
	res->s = INVALID_SOCKET;
	res->lev = lev;
	res->buflen = buflen;
	res->family = family;

	event_deferred_cb_init_(&res->deferred,
	    event_base_get_npriorities(lev->event_base) / 2,
	    accepted_socket_invoke_user_cb, res);

	InitializeCriticalSectionAndSpinCount(&res->lock, 1000);

	return res;
}

static void
free_and_unlock_accepting_socket(struct accepting_socket *as)
{
	/* requires lock. */
	if (as->s != INVALID_SOCKET)
		closesocket(as->s);

	LeaveCriticalSection(&as->lock);
	DeleteCriticalSection(&as->lock);
	mm_free(as);
}

static int
start_accepting(struct accepting_socket *as)
{
	/* requires lock */
	const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
	DWORD pending = 0;
	SOCKET s = socket(as->family, SOCK_STREAM, 0);
	int error = 0;

	if (!as->lev->base.enabled)
		return 0;

	if (s == INVALID_SOCKET) {
		error = WSAGetLastError();
		goto report_err;
	}

	/* XXXX It turns out we need to do this again later. Does this call
	 * have any effect? */
	setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
	    (char *)&as->lev->fd, sizeof(as->lev->fd));

	if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		evutil_make_socket_nonblocking(s);

	if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) {
		closesocket(s);
		return -1;
	}

	as->s = s;

	if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
		as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
	{
		/* Immediate success! */
		accepted_socket_cb(&as->overlapped, 1, 0, 1);
	} else {
		error = WSAGetLastError();
		if (error != ERROR_IO_PENDING) {
			goto report_err;
		}
	}

	return 0;

report_err:
	as->error = error;
	event_deferred_cb_schedule_(
		as->lev->event_base,
		&as->deferred);
	return 0;
}

static void
stop_accepting(struct accepting_socket *as)
{
	/* requires lock. */
	SOCKET s = as->s;
	as->s = INVALID_SOCKET;
	closesocket(s);
}

static void
accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg)
{
	struct accepting_socket *as = arg;

	struct sockaddr *sa_local=NULL, *sa_remote=NULL;
	int socklen_local=0, socklen_remote=0;
	const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
	struct evconnlistener *lev = &as->lev->base;
	evutil_socket_t sock=-1;
	void *data;
	evconnlistener_cb cb=NULL;
	evconnlistener_errorcb errorcb=NULL;
	int error;

	EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);

	LOCK(lev);
	EnterCriticalSection(&as->lock);
	if (as->free_on_cb) {
		free_and_unlock_accepting_socket(as);
		listener_decref_and_unlock(lev);
		return;
	}

	++lev->refcnt;

	error = as->error;
	if (error) {
		as->error = 0;
		errorcb = lev->errorcb;
	} else {
		ext->GetAcceptExSockaddrs(
			as->addrbuf, 0, as->buflen/2, as->buflen/2,
			&sa_local, &socklen_local, &sa_remote,
			&socklen_remote);
		sock = as->s;
		cb = lev->cb;
		as->s = INVALID_SOCKET;

		/* We need to call this so getsockname, getpeername, and
		 * shutdown work correctly on the accepted socket. */
		/* XXXX handle error? */
		setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
		    (char *)&as->lev->fd, sizeof(as->lev->fd));
	}
	data = lev->user_data;

	LeaveCriticalSection(&as->lock);
	UNLOCK(lev);

	if (errorcb) {
		WSASetLastError(error);
		errorcb(lev, data);
	} else if (cb) {
		cb(lev, sock, sa_remote, socklen_remote, data);
	}

	LOCK(lev);
	if (listener_decref_and_unlock(lev))
		return;

	EnterCriticalSection(&as->lock);
	start_accepting(as);
	LeaveCriticalSection(&as->lock);
}

static void
accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
{
	struct accepting_socket *as =
	    EVUTIL_UPCAST(o, struct accepting_socket, overlapped);

	LOCK(&as->lev->base);
	EnterCriticalSection(&as->lock);
	if (ok) {
		/* XXXX Don't do this if some EV_MT flag is set. */
		event_deferred_cb_schedule_(
			as->lev->event_base,
			&as->deferred);
		LeaveCriticalSection(&as->lock);
	} else if (as->free_on_cb) {
		struct evconnlistener *lev = &as->lev->base;
		free_and_unlock_accepting_socket(as);
		listener_decref_and_unlock(lev);
		return;
	} else if (as->s == INVALID_SOCKET) {
		/* This is okay; we were disabled by iocp_listener_disable. */
		LeaveCriticalSection(&as->lock);
	} else {
		/* Some error on accept that we couldn't actually handle. */
		BOOL ok;
		DWORD transfer = 0, flags=0;
		event_sock_warn(as->s, "Unexpected error on AcceptEx");
		ok = WSAGetOverlappedResult(as->s, &o->overlapped,
		    &transfer, FALSE, &flags);
		if (ok) {
			/* well, that was confusing! */
			as->error = 1;
		} else {
			as->error = WSAGetLastError();
		}
		event_deferred_cb_schedule_(
			as->lev->event_base,
			&as->deferred);
		LeaveCriticalSection(&as->lock);
	}
	UNLOCK(&as->lev->base);
}

static int
iocp_listener_enable(struct evconnlistener *lev)
{
	int i;
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	LOCK(lev);
	iocp_listener_event_add(lev_iocp);
	for (i = 0; i < lev_iocp->n_accepting; ++i) {
		struct accepting_socket *as = lev_iocp->accepting[i];
		if (!as)
			continue;
		EnterCriticalSection(&as->lock);
		if (!as->free_on_cb && as->s == INVALID_SOCKET)
			start_accepting(as);
		LeaveCriticalSection(&as->lock);
	}
	UNLOCK(lev);
	return 0;
}

static int
iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
{
	int i;
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	LOCK(lev);
	iocp_listener_event_del(lev_iocp);
	for (i = 0; i < lev_iocp->n_accepting; ++i) {
		struct accepting_socket *as = lev_iocp->accepting[i];
		if (!as)
			continue;
		EnterCriticalSection(&as->lock);
		if (!as->free_on_cb && as->s != INVALID_SOCKET) {
			if (shutdown)
				as->free_on_cb = 1;
			stop_accepting(as);
		}
		LeaveCriticalSection(&as->lock);
	}

	if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE)
		evutil_closesocket(lev_iocp->fd);

	UNLOCK(lev);
	return 0;
}

static int
iocp_listener_disable(struct evconnlistener *lev)
{
	return iocp_listener_disable_impl(lev,0);
}

static void
iocp_listener_destroy(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	if (!lev_iocp->shutting_down) {
		lev_iocp->shutting_down = 1;
		iocp_listener_disable_impl(lev,1);
	}
}

static evutil_socket_t
iocp_listener_getfd(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
	return lev_iocp->fd;
}

static struct event_base *
iocp_listener_getbase(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
	return lev_iocp->event_base;
}

static const struct evconnlistener_ops evconnlistener_iocp_ops = {
	iocp_listener_enable,
	iocp_listener_disable,
	iocp_listener_destroy,
	iocp_listener_destroy, /* shutdown */
	iocp_listener_getfd,
	iocp_listener_getbase
};

/* XXX define some way to override this. */
#define N_SOCKETS_PER_LISTENER 4
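
/*
 * Note (illustrative sketch, not authoritative documentation): the
 * AcceptEx-based listener below is only selected when evconnlistener_new()
 * finds an IOCP port on the event_base and the AcceptEx and
 * GetAcceptExSockaddrs extension functions are available.  One way to end
 * up on this path is to create the base with the IOCP startup flag,
 * typically together with Windows threading support:
 *
 *	struct event_config *cfg = event_config_new();
 *	evthread_use_windows_threads();
 *	event_config_set_flag(cfg, EVENT_BASE_FLAG_STARTUP_IOCP);
 *	base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 *
 * evconnlistener_new() and evconnlistener_new_bind() on such a base will
 * then dispatch to evconnlistener_new_async().
 */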
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);
	struct evconnlistener_iocp *lev;
	int i;

	flags |= LEV_OPT_THREADSAFE;

	if (!base || !event_base_get_iocp_(base))
		goto err;

	/* XXXX duplicate code */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			goto err;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			goto err;
	}
	if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
		event_sock_warn(fd, "getsockname");
		goto err;
	}
	lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
	if (!lev) {
		event_warn("calloc");
		goto err;
	}
	lev->base.ops = &evconnlistener_iocp_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;
	lev->base.enabled = 1;

	lev->port = event_base_get_iocp_(base);
	lev->fd = fd;
	lev->event_base = base;

	if (event_iocp_port_associate_(lev->port, fd, 1) < 0)
		goto err_free_lev;

	EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	lev->n_accepting = N_SOCKETS_PER_LISTENER;
	lev->accepting = mm_calloc(lev->n_accepting,
	    sizeof(struct accepting_socket *));
	if (!lev->accepting) {
		event_warn("calloc");
		goto err_delete_lock;
	}
	for (i = 0; i < lev->n_accepting; ++i) {
		lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
		if (!lev->accepting[i]) {
			event_warnx("Couldn't create accepting socket");
			goto err_free_accepting;
		}
		if (cb && start_accepting(lev->accepting[i]) < 0) {
			event_warnx("Couldn't start accepting on socket");
			EnterCriticalSection(&lev->accepting[i]->lock);
			free_and_unlock_accepting_socket(lev->accepting[i]);
			goto err_free_accepting;
		}
		++lev->base.refcnt;
	}

	iocp_listener_event_add(lev);

	return &lev->base;

err_free_accepting:
	mm_free(lev->accepting);
	/* XXXX free the other elements. */
err_delete_lock:
	EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
err_free_lev:
	mm_free(lev);
err:
	/* Don't close the fd, it is caller's responsibility. */
	return NULL;
}

#endif