/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef EVENT__HAVE_VASPRINTF
/* If we have vasprintf, we need to define _GNU_SOURCE before we include
 * stdio.h.  This comes from evconfig-private.h.
 */
#endif

#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef EVENT__HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef EVENT__HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef EVENT__HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef EVENT__HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif
#ifdef EVENT__HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* send file support */
#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE 1
#define SENDFILE_IS_LINUX 1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE 1
#define SENDFILE_IS_FREEBSD 1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE 1
#define SENDFILE_IS_MACOSX 1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE 1
#define SENDFILE_IS_SOLARIS 1
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS 0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE 0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
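
/* Illustrative sketch (not part of libevent): how the macros above slice a
 * chain's buffer.  Suppose a chain has buffer_len == 1024, misalign == 100
 * (bytes already drained off the front) and off == 300 (bytes of live data).
 * Then:
 *
 *	CHAIN_SPACE_PTR(ch) == ch->buffer + 100 + 300	(first free byte)
 *	CHAIN_SPACE_LEN(ch) == 1024 - (100 + 300) == 624 free bytes
 *
 * For an EVBUFFER_IMMUTABLE chain, CHAIN_SPACE_LEN() is 0, so no caller
 * ever tries to append into memory it does not own.
 */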

#define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do {			\
	(ptr)->pos = -1;			\
	(ptr)->internal_.chain = NULL;		\
	(ptr)->internal_.pos_in_chain = 0;	\
} while (0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);
static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar);
static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);

static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	to_alloc = MIN_BUFFER_SIZE;
	while (to_alloc < size)
		to_alloc <<= 1;

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);

	chain->refcnt = 1;

	return (chain);
}
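
/* Worked example (illustrative only; the real constants live in
 * evbuffer-internal.h): suppose MIN_BUFFER_SIZE is 512 and
 * EVBUFFER_CHAIN_SIZE, the rounded-up header size, is 64 on this platform.
 * Then evbuffer_chain_new(700) first becomes size = 764, to_alloc doubles
 * 512 -> 1024, and one 1024-byte block holds both the header and a
 * 960-byte payload (buffer_len == 1024 - 64).  Header and payload sharing
 * a single allocation is what EVBUFFER_CHAIN_EXTRA() relies on above.
 */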

static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain; it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed; decrease
		 * refcounts of the source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}

static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Free all trailing chains in 'buf' that are empty and not pinned, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}

/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains with data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

void
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

static inline void
evbuffer_chain_incref(struct evbuffer_chain *chain)
{
	++chain->refcnt;
}

struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	LIST_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

void
evbuffer_incref_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}
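
/* Usage sketch (illustrative only): the minimal life cycle of the public
 * API defined above.  Error handling is elided.
 *
 *	struct evbuffer *b = evbuffer_new();
 *	evbuffer_enable_locking(b, NULL);	// allocate our own lock
 *	evbuffer_defer_callbacks(b, base);	// 'base' is some event_base
 *	...
 *	evbuffer_free(b);
 *
 * Passing NULL to evbuffer_enable_locking() makes the buffer allocate and
 * own a recursive lock; passing an existing lock shares it instead.
 */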

static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	     cbent != LIST_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

void
evbuffer_invoke_callbacks_(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
			evbuffer_incref_and_lock_(buffer);
			if (buffer->parent)
				bufferevent_incref_(buffer->parent);
			/* balance the extra lock taken by
			 * evbuffer_incref_and_lock_() above */
			EVBUFFER_UNLOCK(buffer);
		}
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}

static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
		LIST_REMOVE(cbent, next);
		mm_free(cbent);
	}
}

void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

size_t
evbuffer_add_iovec(struct evbuffer *buf, struct evbuffer_iovec *vec, int n_vec)
{
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return res;
}

int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}

static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}
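
/* Usage sketch (illustrative only): the reserve/commit pattern served by
 * evbuffer_reserve_space() and evbuffer_commit_space() below.  A caller
 * writes directly into the buffer's free space, then commits only the
 * bytes actually produced:
 *
 *	struct evbuffer_iovec v[2];
 *	int n = evbuffer_reserve_space(buf, 4096, v, 2);
 *	if (n > 0) {
 *		size_t produced = generate_data(v[0].iov_base, v[0].iov_len);
 *		v[0].iov_len = produced;	// commit no more than we wrote
 *		evbuffer_commit_space(buf, v, 1);
 *	}
 *
 * 'generate_data' is a hypothetical producer; the iovecs must be committed
 * in the order they were reserved, as the bounds checks in
 * evbuffer_commit_space() assume.
 */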

int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference the evbuffer containing the source chain so it
		 * doesn't get released while the chain is still being
		 * referenced */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference the source chain, which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}

static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}

int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	struct evbuffer_chain *chain;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;
	chain = inbuf->first;

	if (in_total_len == 0)
		goto done;

	if (outbuf->freeze_end || outbuf == inbuf) {
		result = -1;
		goto done;
	}

	for (; chain; chain = chain->next) {
		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
			/* chain type cannot be referenced */
			result = -1;
			goto done;
		}
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
	}
	APPEND_CHAIN_MULTICAST(outbuf, inbuf);

	outbuf->n_add_for_cb += in_total_len;
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
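
/* Usage sketch (illustrative only): the two ways of combining buffers
 * defined above.  evbuffer_add_buffer() *moves* chains, leaving the source
 * empty; evbuffer_add_buffer_reference() shares them read-only and leaves
 * the source intact:
 *
 *	evbuffer_add_buffer(dst, src);		// src drained into dst
 *	evbuffer_add_buffer_reference(dst2, src); // src unchanged; its
 *						   // chains become immutable
 *						   // and refcounted by dst2
 *
 * In the common case neither call copies payload bytes; both are
 * O(number of chains).
 */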

int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}
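
/* Usage sketch (illustrative only): evbuffer_copyout() is a non-destructive
 * peek, while evbuffer_remove() is copyout + drain in one step:
 *
 *	char hdr[4];
 *	if (evbuffer_copyout(buf, hdr, 4) == 4) {	// inspect, keep data
 *		size_t body_len = decode_len(hdr);	// hypothetical decoder
 *		if (evbuffer_get_length(buf) >= 4 + body_len)
 *			evbuffer_remove(buf, hdr, 4);	// now actually consume
 *	}
 *
 * This is the usual way to parse length-prefixed frames without ever
 * consuming a partial record.
 */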

ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* reads data from the src buffer to the dst buffer, avoiding memcpy as
 * much as possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last chain with data from src unless
		 * we remove all chains, in which case we would have done the
		 * if block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
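
/* Usage sketch (illustrative only): evbuffer_pullup() linearizes the first
 * 'size' bytes so they can be handed to code that needs contiguous memory:
 *
 *	unsigned char *p = evbuffer_pullup(buf, 16);
 *	if (p != NULL)
 *		parse_fixed_header(p);	// hypothetical; p spans 16 bytes
 *
 * Pulling up a large prefix defeats the zero-copy chain layout, so callers
 * should request only as many contiguous bytes as they actually parse.
 */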

/*
 * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}

static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand.  */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}

static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	int count = 0;
	struct evbuffer_chain *chain = ptr->internal_.chain;
	size_t i = ptr->internal_.pos_in_chain;

	if (!chain)
		return 0;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (!chain->next) {
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}

static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t off = it->internal_.pos_in_chain;

	if (chain == NULL)
		return -1;

	return (unsigned char)chain->buffer[chain->misalign + off];
}

struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
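
/* Usage sketch (illustrative only): reading lines with an explicit EOL
 * style.  EVBUFFER_EOL_CRLF accepts both "\r\n" and bare "\n", the usual
 * choice for wire protocols:
 *
 *	size_t n;
 *	char *line;
 *	while ((line = evbuffer_readln(buf, &n, EVBUFFER_EOL_CRLF)) != NULL) {
 *		handle_line(line, n);	// hypothetical consumer
 *		mm_free(line);		// readln allocates with mm_malloc
 *	}
 *
 * A NULL return means no complete line is buffered yet, not necessarily an
 * error.
 */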

#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
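
/* Worked example (illustrative only) of the growth policy above: if the
 * last chain's buffer_len is 1024, the new chain is sized at 2048; once a
 * chain exceeds EVBUFFER_CHAIN_MAX_AUTO_SIZE/2 (2048) bytes the doubling
 * stops, so steady appends settle on chunks of roughly 4096 bytes, unless
 * a single add is larger, in which case datlen wins.  The exact allocation
 * is then rounded up by evbuffer_chain_new() as described earlier.
 */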

int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_start) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data. */
			memcpy(chain->buffer,
			    (char*)data + datlen - chain->misalign,
			    (size_t)chain->misalign);
			chain->off += (size_t)chain->misalign;
			buf->total_len += (size_t)chain->misalign;
			buf->n_add_for_cb += (size_t)chain->misalign;
			datlen -= (size_t)chain->misalign;
			chain->misalign = 0;
		}
	}

	/* we need to add another chain */
	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
		goto done;
	buf->first = tmp;
	if (buf->last_with_datap == &buf->first)
		buf->last_with_datap = &tmp->next;

	tmp->next = chain;

	tmp->off = datlen;
	tmp->misalign = tmp->buffer_len - datlen;

	memcpy(tmp->buffer + tmp->misalign, data, datlen);
	buf->total_len += datlen;
	/* count the bytes placed in the new chain; chain->misalign is 0 by
	 * now, so adding it here would under-report the change */
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}

#define MAX_TO_COPY_IN_EXPAND 4096
#define MAX_TO_REALIGN_IN_EXPAND 2048

/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}
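
/* Worked example (illustrative only): with buffer_len == 4096, off == 1000
 * and misalign == 1500, a request for datlen == 3000 realigns: total free
 * space is 4096 - 1000 >= 3000, the chain is less than half full, and only
 * 1000 <= MAX_TO_REALIGN_IN_EXPAND bytes need to move.  A memmove() of the
 * 1000 live bytes is cheaper than allocating and copying into a new chain.
 */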

/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any).
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former last
	 * chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain.  Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty.) */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}

/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return. */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them.  Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain.  But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}

		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}

int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}

/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef EVENT__HAVE_SYS_UIO_H
/* number of iovecs we use for writev; fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
#define NUM_READ_IOVEC 4

#define EVBUFFER_MAX_READ 4096

/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef EVENT__HAVE_SYS_UIO_H
/* Number of iovecs we use for writev; fragmentation determines how much
 * we actually end up writing in a single call. */

#define DEFAULT_WRITE_IOVEC 128

#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
#define NUM_READ_IOVEC 4

#define EVBUFFER_MAX_READ 4096

/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}

static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(_WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
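
#if 0
/* Illustrative sketch (not compiled in): how evbuffer_read() below uses
 * the FIONREAD hint.  The hint is advisory only: it is clamped to
 * EVBUFFER_MAX_READ, and a nonnegative caller-supplied 'howmuch' is
 * clamped to the hint.  The function name is an illustration, not API. */
static int
example_clamp_read_size(evutil_socket_t fd, int howmuch)
{
	int n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;	/* distrust the hint */
	if (howmuch < 0 || howmuch > n)
		howmuch = n;		/* never ask for more than the hint */
	return howmuch;
}
#endif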

/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef EVBUFFER_IOVEC_IS_NATIVE_
		nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef _WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed.  It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef _WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
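
#if 0
/* Example (not compiled in): draining a readable socket into an
 * evbuffer, e.g. from a read callback.  Passing -1 as howmuch lets
 * evbuffer_read() size the read from the FIONREAD hint above.  Error
 * handling is abbreviated; the function name is illustrative. */
static void
example_on_readable(evutil_socket_t fd, struct evbuffer *buf)
{
	int n = evbuffer_read(buf, fd, -1);
	if (n > 0)
		printf("buffered %d bytes (total %zu)\n",
		    n, evbuffer_get_length(buf));
	else if (n == 0)
		puts("peer closed the connection");
	/* n < 0: consult errno / EVUTIL_SOCKET_ERROR() for the cause. */
}
#endif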

#ifdef USE_IOVEC_IMPL
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXX could be problematic when windows supports mmap */
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXX could be problematic when windows supports mmap */
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;

#ifdef _WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif

#ifdef USE_SENDFILE
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_file_segment *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
		chain);
	const int source_fd = info->segment->fd;
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	ev_off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	ev_off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(dest_fd, source_fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR, return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(dest_fd, source_fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif
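
#if 0
/* Example (not compiled in): flushing a buffer to a socket with an
 * upper bound per call.  evbuffer_write_atmost() below takes the
 * sendfile() path automatically when the first chain was added as a
 * sendfile-capable file segment on a USE_SENDFILE platform.  The 16 KiB
 * cap is an assumption chosen for illustration. */
static void
example_flush_some(struct evbuffer *buf, evutil_socket_t fd)
{
	/* Write at most 16 KiB per call so one fd can't starve others. */
	int n = evbuffer_write_atmost(buf, fd, 16384);
	(void)n; /* n < 0 on error; written bytes were already drained. */
}
#endif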

int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(_WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}

int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}

unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		search = NULL;
	} else {
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}

/* Subtract <b>howfar</b> from the position of <b>pos</b> within
 * <b>buf</b>.  Returns 0 on success, -1 on failure.
 *
 * This isn't exposed yet, because of potential inefficiency issues.
 * Maybe it should be. */
static int
evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar)
{
	if (howfar > (size_t)pos->pos)
		return -1;
	if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
		pos->internal_.pos_in_chain -= howfar;
		pos->pos -= howfar;
		return 0;
	} else {
		const size_t newpos = pos->pos - howfar;
		/* Here's the inefficient part: it walks over the
		 * chains until we hit newpos. */
		return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
	}
}

int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;
	int result = 0;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->internal_.chain;
		pos->pos += position;
		position = pos->internal_.pos_in_chain;
		break;
	}

	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->internal_.chain = chain;
		pos->internal_.pos_in_chain = position + left;
	} else if (left == 0) {
		/* The first byte in the (nonexistent) chain after the last chain */
		pos->internal_.chain = NULL;
		pos->internal_.pos_in_chain = 0;
	} else {
		PTR_NOT_FOUND(pos);
		result = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return result;
}
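
#if 0
/* Example (not compiled in): positioning an evbuffer_ptr.  Set it to an
 * absolute offset first, then advance it relatively; EVBUFFER_PTR_ADD
 * avoids re-walking the chains from the start of the buffer.  Returns
 * are 0 on success and -1 if the offset is past the end. */
static void
example_ptr_walk(struct evbuffer *buf)
{
	struct evbuffer_ptr ptr;
	evbuffer_ptr_set(buf, &ptr, 10, EVBUFFER_PTR_SET); /* offset 10 */
	evbuffer_ptr_set(buf, &ptr, 5, EVBUFFER_PTR_ADD);  /* offset 15 */
}
#endif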

/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0, as memcmp does.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->internal_.chain;
	position = pos->internal_.pos_in_chain;
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}

struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}

struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos.internal_.chain;
	} else {
		pos.pos = 0;
		chain = pos.internal_.chain = buffer->first;
		pos.internal_.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->internal_.chain;

	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos.internal_.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos.internal_.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos.internal_.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			++pos.pos;
			++pos.internal_.pos_in_chain;
			if (pos.internal_.pos_in_chain == chain->off) {
				chain = pos.internal_.chain = chain->next;
				pos.internal_.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos.internal_.pos_in_chain;
			chain = pos.internal_.chain = chain->next;
			pos.internal_.pos_in_chain = 0;
		}
	}

not_found:
	PTR_NOT_FOUND(&pos);
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
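
#if 0
/* Example (not compiled in): counting every occurrence of a delimiter
 * with evbuffer_search().  After each hit we advance the pointer one
 * byte so the next search starts past the previous match; a failed
 * advance means we ran off the end of the buffer. */
static int
example_count_crlf(struct evbuffer *buf)
{
	int count = 0;
	struct evbuffer_ptr p = evbuffer_search(buf, "\r\n", 2, NULL);
	while (p.pos >= 0) {
		++count;
		if (evbuffer_ptr_set(buf, &p, 1, EVBUFFER_PTR_ADD) < 0)
			break;
		p = evbuffer_search(buf, "\r\n", 2, &p);
	}
	return count;
}
#endif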

int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	/* Avoid locking in trivial edge cases */
	if (start_at && start_at->internal_.chain == NULL)
		return 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		chain = start_at->internal_.chain;
		len_so_far = chain->off
		    - start_at->internal_.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->internal_.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx < n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len < 0) {
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}

int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
#endif
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks_(buf);
			result = sz;
			goto done;
		}
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	int res = -1;
	va_list ap;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}

int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks_(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}
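
#if 0
/* Example (not compiled in): adding a large, caller-owned block by
 * reference instead of copying it.  The cleanup callback runs once the
 * evbuffer no longer needs the memory; until then the caller must keep
 * it valid and unmodified.  Names here are illustrative only. */
static void
example_cleanup(const void *data, size_t datalen, void *extra)
{
	(void)datalen; (void)extra;
	free((void *)data);
}

static int
example_add_by_reference(struct evbuffer *buf, char *big, size_t len)
{
	/* On success, 'big' is freed via example_cleanup when drained. */
	return evbuffer_add_reference(buf, big, len, example_cleanup, NULL);
}
#endif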

/* TODO(niels): we may want to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
 */
struct evbuffer_file_segment *
evbuffer_file_segment_new(
	int fd, ev_off_t offset, ev_off_t length, unsigned flags)
{
	struct evbuffer_file_segment *seg =
	    mm_calloc(sizeof(struct evbuffer_file_segment), 1);
	if (!seg)
		return NULL;
	seg->refcnt = 1;
	seg->fd = fd;
	seg->flags = flags;
	seg->file_offset = offset;
	seg->cleanup_cb = NULL;
	seg->cleanup_cb_arg = NULL;
#ifdef _WIN32
#ifndef lseek
#define lseek _lseeki64
#endif
#ifndef fstat
#define fstat _fstat
#endif
#ifndef stat
#define stat _stat
#endif
#endif
	if (length == -1) {
		struct stat st;
		if (fstat(fd, &st) < 0)
			goto err;
		length = st.st_size;
	}
	seg->length = length;

#if defined(USE_SENDFILE)
	if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
		seg->can_sendfile = 1;
		goto done;
	}
#endif

	if (evbuffer_file_segment_materialize(seg) < 0)
		goto err;

#if defined(USE_SENDFILE)
done:
#endif
	if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
		EVTHREAD_ALLOC_LOCK(seg->lock, 0);
	}
	return seg;
err:
	mm_free(seg);
	return NULL;
}

#ifdef EVENT__HAVE_MMAP
static long
get_page_size(void)
{
#ifdef SC_PAGE_SIZE
	return sysconf(SC_PAGE_SIZE);
#elif defined(_SC_PAGE_SIZE)
	return sysconf(_SC_PAGE_SIZE);
#else
	return 1;
#endif
}
#endif
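
#if 0
/* Illustrative sketch (not compiled in): the offset rounding performed
 * by evbuffer_file_segment_materialize() below.  mmap() requires a
 * page-aligned file offset, so we map from the containing page boundary
 * and remember how many leading bytes of the mapping to skip. */
static void
example_round_offset(ev_off_t offset)
{
	long page_size = get_page_size();
	ev_off_t leftover = offset % page_size;
	ev_off_t rounded = offset - leftover;
	/* Map 'length + leftover' bytes starting at 'rounded'; the
	 * usable data begins 'leftover' bytes into the mapping. */
	(void)rounded;
}
#endif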

/* Pull a file segment's contents into RAM (or into a file mapping) so
 * that seg->contents becomes usable.  Requires that the segment's lock
 * be held. */
static int
evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
{
	const unsigned flags = seg->flags;
	const int fd = seg->fd;
	const ev_off_t length = seg->length;
	const ev_off_t offset = seg->file_offset;

	if (seg->contents)
		return 0; /* already materialized */

#if defined(EVENT__HAVE_MMAP)
	if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
		off_t offset_rounded = 0, offset_leftover = 0;
		void *mapped;
		if (offset) {
			/* mmap implementations don't generally like us
			 * to have an offset that isn't a multiple of the
			 * page size, so round it down. */
			long page_size = get_page_size();
			if (page_size == -1)
				goto err;
			offset_leftover = offset % page_size;
			offset_rounded = offset - offset_leftover;
		}
		mapped = mmap(NULL, length + offset_leftover,
		    PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE | /* ??? */
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, offset_rounded);
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
		} else {
			seg->mapping = mapped;
			seg->contents = (char*)mapped+offset_leftover;
			seg->mmap_offset = 0;
			seg->is_mapping = 1;
			goto done;
		}
	}
#endif
#ifdef _WIN32
	if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
		intptr_t h = _get_osfhandle(fd);
		HANDLE m;
		ev_uint64_t total_size = length+offset;
		if ((HANDLE)h == INVALID_HANDLE_VALUE)
			goto err;
		m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
		    (total_size >> 32), total_size & 0xfffffffful,
		    NULL);
		/* CreateFileMapping returns NULL, not
		 * INVALID_HANDLE_VALUE, on failure. */
		if (m != NULL) { /* Does h leak? */
			seg->mapping_handle = m;
			seg->mmap_offset = offset;
			seg->is_mapping = 1;
			goto done;
		}
	}
#endif
	{
		ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
		ev_off_t read_so_far = 0;
		char *mem;
		int e;
		ev_ssize_t n = 0;
		if (!(mem = mm_malloc(length)))
			goto err;
		if (start_pos < 0) {
			mm_free(mem);
			goto err;
		}
		if (lseek(fd, offset, SEEK_SET) < 0) {
			mm_free(mem);
			goto err;
		}
		while (read_so_far < length) {
			n = read(fd, mem+read_so_far, length-read_so_far);
			if (n <= 0)
				break;
			read_so_far += n;
		}

		e = errno;
		pos = lseek(fd, start_pos, SEEK_SET);
		if (n < 0 || (n == 0 && length > read_so_far)) {
			mm_free(mem);
			errno = e;
			goto err;
		} else if (pos < 0) {
			mm_free(mem);
			goto err;
		}

		seg->contents = mem;
	}

done:
	return 0;
err:
	return -1;
}

void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
	evbuffer_file_segment_cleanup_cb cb, void* arg)
{
	EVUTIL_ASSERT(seg->refcnt > 0);
	seg->cleanup_cb = cb;
	seg->cleanup_cb_arg = arg;
}

void
evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
{
	int refcnt;
	EVLOCK_LOCK(seg->lock, 0);
	refcnt = --seg->refcnt;
	EVLOCK_UNLOCK(seg->lock, 0);
	if (refcnt > 0)
		return;
	EVUTIL_ASSERT(refcnt == 0);

	if (seg->is_mapping) {
#ifdef _WIN32
		CloseHandle(seg->mapping_handle);
#elif defined (EVENT__HAVE_MMAP)
		off_t offset_leftover;
		offset_leftover = seg->file_offset % get_page_size();
		if (munmap(seg->mapping, seg->length + offset_leftover) == -1)
			event_warn("%s: munmap failed", __func__);
#endif
	} else if (seg->contents) {
		mm_free(seg->contents);
	}

	if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
		close(seg->fd);
	}

	if (seg->cleanup_cb) {
		(*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
		    seg->flags, seg->cleanup_cb_arg);
		seg->cleanup_cb = NULL;
		seg->cleanup_cb_arg = NULL;
	}

	EVTHREAD_FREE_LOCK(seg->lock, 0);
	mm_free(seg);
}
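
#if 0
/* Example (not compiled in): sharing one file segment between two
 * buffers.  Each evbuffer_add_file_segment() call takes its own
 * reference, so the creator can drop its reference right away; with
 * EVBUF_FS_CLOSE_ON_FREE, the fd is closed when the last reference
 * goes away.  Names are illustrative. */
static int
example_share_segment(struct evbuffer *a, struct evbuffer *b, int fd)
{
	struct evbuffer_file_segment *seg;
	int r = -1;
	seg = evbuffer_file_segment_new(fd, 0, -1, EVBUF_FS_CLOSE_ON_FREE);
	if (!seg)
		return -1;
	if (evbuffer_add_file_segment(a, seg, 0, -1) == 0 &&
	    evbuffer_add_file_segment(b, seg, 0, -1) == 0)
		r = 0;
	evbuffer_file_segment_free(seg); /* drop the creator's reference */
	return r;
}
#endif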

int
evbuffer_add_file_segment(struct evbuffer *buf,
    struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_file_segment *extra;
	int can_use_sendfile = 0;

	EVBUFFER_LOCK(buf);
	EVLOCK_LOCK(seg->lock, 0);
	if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) {
		can_use_sendfile = 1;
	} else {
		if (!seg->contents) {
			if (evbuffer_file_segment_materialize(seg) < 0) {
				EVLOCK_UNLOCK(seg->lock, 0);
				EVBUFFER_UNLOCK(buf);
				return -1;
			}
		}
	}
	++seg->refcnt;
	EVLOCK_UNLOCK(seg->lock, 0);

	if (buf->freeze_end)
		goto err;

	if (length < 0) {
		if (offset > seg->length)
			goto err;
		length = seg->length - offset;
	}

	/* Can we actually add this? */
	if (offset+length > seg->length)
		goto err;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));
	if (!chain)
		goto err;
	extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);

	chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;
	if (can_use_sendfile && seg->can_sendfile) {
		chain->flags |= EVBUFFER_SENDFILE;
		chain->misalign = seg->file_offset + offset;
		chain->off = length;
		chain->buffer_len = chain->misalign + length;
	} else if (seg->is_mapping) {
#ifdef _WIN32
		ev_uint64_t total_offset = seg->mmap_offset+offset;
		ev_uint64_t offset_rounded=0, offset_remaining=0;
		LPVOID data;
		if (total_offset) {
			SYSTEM_INFO si;
			memset(&si, 0, sizeof(si)); /* cargo cult */
			GetSystemInfo(&si);
			offset_remaining = total_offset % si.dwAllocationGranularity;
			offset_rounded = total_offset - offset_remaining;
		}
		data = MapViewOfFile(
			seg->mapping_handle,
			FILE_MAP_READ,
			offset_rounded >> 32,
			offset_rounded & 0xfffffffful,
			length + offset_remaining);
		if (data == NULL) {
			mm_free(chain);
			goto err;
		}
		chain->buffer = (unsigned char*) data;
		chain->buffer_len = length+offset_remaining;
		chain->misalign = offset_remaining;
		chain->off = length;
#else
		chain->buffer = (unsigned char*)(seg->contents + offset);
		chain->buffer_len = length;
		chain->off = length;
#endif
	} else {
		chain->buffer = (unsigned char*)(seg->contents + offset);
		chain->buffer_len = length;
		chain->off = length;
	}

	extra->segment = seg;
	buf->n_add_for_cb += length;
	evbuffer_chain_insert(buf, chain);

	evbuffer_invoke_callbacks_(buf);

	EVBUFFER_UNLOCK(buf);

	return 0;
err:
	EVBUFFER_UNLOCK(buf);
	evbuffer_file_segment_free(seg);
	return -1;
}

int
evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)
{
	struct evbuffer_file_segment *seg;
	unsigned flags = EVBUF_FS_CLOSE_ON_FREE;
	int r;

	seg = evbuffer_file_segment_new(fd, offset, length, flags);
	if (!seg)
		return -1;
	r = evbuffer_add_file_segment(buf, seg, 0, length);
	if (r == 0)
		evbuffer_file_segment_free(seg);
	return r;
}
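
#if 0
/* Example (not compiled in): queueing a whole file for output with
 * evbuffer_add_file().  Needs <fcntl.h> for open(); the path parameter
 * is an assumption.  On success the buffer takes over the fd (via
 * EVBUF_FS_CLOSE_ON_FREE above); fd ownership on failure is subtle, so
 * we simply report the error here. */
static int
example_send_file(struct evbuffer *out, const char *path)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* Length -1 means "to end of file"; the fd must stay open. */
	return evbuffer_add_file(out, fd, 0, -1);
}
#endif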

void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	if (!LIST_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		ent->cb.cb_obsolete = cb;
		ent->flags |= EVBUFFER_CB_OBSOLETE;
	}
	EVBUFFER_UNLOCK(buffer);
}

struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *e;
	if (!(e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
		return NULL;
	EVBUFFER_LOCK(buffer);
	e->cb.cb_func = cb;
	e->cbarg = cbarg;
	e->flags = EVBUFFER_CB_ENABLED;
	LIST_INSERT_HEAD(&buffer->callbacks, e, next);
	EVBUFFER_UNLOCK(buffer);
	return e;
}

int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
    struct evbuffer_cb_entry *ent)
{
	EVBUFFER_LOCK(buffer);
	LIST_REMOVE(ent, next);
	EVBUFFER_UNLOCK(buffer);
	mm_free(ent);
	return 0;
}

int
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *cbent;
	int result = -1;
	EVBUFFER_LOCK(buffer);
	LIST_FOREACH(cbent, &buffer->callbacks, next) {
		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
			result = evbuffer_remove_cb_entry(buffer, cbent);
			goto done;
		}
	}
done:
	EVBUFFER_UNLOCK(buffer);
	return result;
}

int
evbuffer_cb_set_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags |= flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_cb_clear_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags &= ~flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_freeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 1;
	else
		buffer->freeze_end = 1;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_unfreeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 0;
	else
		buffer->freeze_end = 0;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

#if 0
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
		    EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif

int
evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs,
    int max_cbs)
{
	int r = 0;
	EVBUFFER_LOCK(buffer);
	if (buffer->deferred_cbs) {
		if (max_cbs < 1) {
			r = -1;
			goto done;
		}
		cbs[0] = &buffer->deferred;
		r = 1;
	}
done:
	EVBUFFER_UNLOCK(buffer);
	return r;
}
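
#if 0
/* Example (not compiled in): observing buffer growth with a modern
 * (non-obsolete) callback registered through evbuffer_add_cb() above.
 * The evbuffer_cb_info argument reports how the buffer changed since
 * the previous invocation.  Names are illustrative. */
static void
example_cb(struct evbuffer *buffer, const struct evbuffer_cb_info *info,
    void *arg)
{
	(void)arg;
	if (info->n_added > info->n_deleted)
		printf("buffer grew by %zu bytes to %zu\n",
		    (size_t)(info->n_added - info->n_deleted),
		    evbuffer_get_length(buffer));
}

static void
example_watch(struct evbuffer *buf)
{
	evbuffer_add_cb(buf, example_cb, NULL);
}
#endif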