1 /* 2 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu> 3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include "event2/event-config.h" 29 #include "evconfig-private.h" 30 31 #ifdef _WIN32 32 #include <winsock2.h> 33 #include <windows.h> 34 #include <io.h> 35 #endif 36 37 #ifdef EVENT__HAVE_VASPRINTF 38 /* If we have vasprintf, we need to define _GNU_SOURCE before we include 39 * stdio.h. This comes from evconfig-private.h. 40 */ 41 #endif 42 43 #include <sys/types.h> 44 45 #ifdef EVENT__HAVE_SYS_TIME_H 46 #include <sys/time.h> 47 #endif 48 49 #ifdef EVENT__HAVE_SYS_SOCKET_H 50 #include <sys/socket.h> 51 #endif 52 53 #ifdef EVENT__HAVE_SYS_UIO_H 54 #include <sys/uio.h> 55 #endif 56 57 #ifdef EVENT__HAVE_SYS_IOCTL_H 58 #include <sys/ioctl.h> 59 #endif 60 61 #ifdef EVENT__HAVE_SYS_MMAN_H 62 #include <sys/mman.h> 63 #endif 64 65 #ifdef EVENT__HAVE_SYS_SENDFILE_H 66 #include <sys/sendfile.h> 67 #endif 68 #ifdef EVENT__HAVE_SYS_STAT_H 69 #include <sys/stat.h> 70 #endif 71 72 73 #include <errno.h> 74 #include <stdio.h> 75 #include <stdlib.h> 76 #include <string.h> 77 #ifdef EVENT__HAVE_STDARG_H 78 #include <stdarg.h> 79 #endif 80 #ifdef EVENT__HAVE_UNISTD_H 81 #include <unistd.h> 82 #endif 83 #include <limits.h> 84 85 #include "event2/event.h" 86 #include "event2/buffer.h" 87 #include "event2/buffer_compat.h" 88 #include "event2/bufferevent.h" 89 #include "event2/bufferevent_compat.h" 90 #include "event2/bufferevent_struct.h" 91 #include "event2/thread.h" 92 #include "log-internal.h" 93 #include "mm-internal.h" 94 #include "util-internal.h" 95 #include "evthread-internal.h" 96 #include "evbuffer-internal.h" 97 #include "bufferevent-internal.h" 98 #include "event-internal.h" 99 100 /* some systems do not have MAP_FAILED */ 101 #ifndef MAP_FAILED 102 #define MAP_FAILED ((void *)-1) 103 #endif 104 105 /* send file support */ 106 #if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__) 107 #define USE_SENDFILE 1 108 #define SENDFILE_IS_LINUX 1 109 #elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__) 110 #define USE_SENDFILE 1 111 #define SENDFILE_IS_FREEBSD 1 112 #elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__) 113 #define USE_SENDFILE 1 114 #define SENDFILE_IS_MACOSX 1 115 #elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) 116 #define USE_SENDFILE 1 117 #define SENDFILE_IS_SOLARIS 1 118 #endif 119 120 /* Mask of user-selectable callback flags. */ 121 #define EVBUFFER_CB_USER_FLAGS 0xffff 122 /* Mask of all internal-use-only flags. */ 123 #define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000 124 125 /* Flag set if the callback is using the cb_obsolete function pointer */ 126 #define EVBUFFER_CB_OBSOLETE 0x00040000 127 128 /* evbuffer_chain support */ 129 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off) 130 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \ 131 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off)) 132 133 #define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0) 134 #define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0) 135 136 /* evbuffer_ptr support */ 137 #define PTR_NOT_FOUND(ptr) do { \ 138 (ptr)->pos = -1; \ 139 (ptr)->internal_.chain = NULL; \ 140 (ptr)->internal_.pos_in_chain = 0; \ 141 } while (0) 142 143 static void evbuffer_chain_align(struct evbuffer_chain *chain); 144 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain, 145 size_t datalen); 146 static void evbuffer_deferred_callback(struct event_callback *cb, void *arg); 147 static int evbuffer_ptr_memcmp(const struct evbuffer *buf, 148 const struct evbuffer_ptr *pos, const char *mem, size_t len); 149 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf, 150 size_t datlen); 151 static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, 152 size_t howfar); 153 static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg); 154 static inline void evbuffer_chain_incref(struct evbuffer_chain *chain); 155 156 static struct evbuffer_chain * 157 evbuffer_chain_new(size_t size) 158 { 159 struct evbuffer_chain *chain; 160 size_t to_alloc; 161 162 if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE) 163 return (NULL); 164 165 size += EVBUFFER_CHAIN_SIZE; 166 167 /* get the next largest memory that can hold the buffer */ 168 if (size < EVBUFFER_CHAIN_MAX / 2) { 169 to_alloc = MIN_BUFFER_SIZE; 170 while (to_alloc < size) { 171 to_alloc <<= 1; 172 } 173 } else { 174 to_alloc = size; 175 } 176 177 /* we get everything in one chunk */ 178 if ((chain = mm_malloc(to_alloc)) == NULL) 179 return (NULL); 180 181 memset(chain, 0, EVBUFFER_CHAIN_SIZE); 182 183 chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE; 184 185 /* this way we can manipulate the buffer to different addresses, 186 * which is required for mmap for example. 187 */ 188 chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain); 189 190 chain->refcnt = 1; 191 192 return (chain); 193 } 194 195 static inline void 196 evbuffer_chain_free(struct evbuffer_chain *chain) 197 { 198 EVUTIL_ASSERT(chain->refcnt > 0); 199 if (--chain->refcnt > 0) { 200 /* chain is still referenced by other chains */ 201 return; 202 } 203 204 if (CHAIN_PINNED(chain)) { 205 /* will get freed once no longer dangling */ 206 chain->refcnt++; 207 chain->flags |= EVBUFFER_DANGLING; 208 return; 209 } 210 211 /* safe to release chain, it's either a referencing 212 * chain or all references to it have been freed */ 213 if (chain->flags & EVBUFFER_REFERENCE) { 214 struct evbuffer_chain_reference *info = 215 EVBUFFER_CHAIN_EXTRA( 216 struct evbuffer_chain_reference, 217 chain); 218 if (info->cleanupfn) 219 (*info->cleanupfn)(chain->buffer, 220 chain->buffer_len, 221 info->extra); 222 } 223 if (chain->flags & EVBUFFER_FILESEGMENT) { 224 struct evbuffer_chain_file_segment *info = 225 EVBUFFER_CHAIN_EXTRA( 226 struct evbuffer_chain_file_segment, 227 chain); 228 if (info->segment) { 229 #ifdef _WIN32 230 if (info->segment->is_mapping) 231 UnmapViewOfFile(chain->buffer); 232 #endif 233 evbuffer_file_segment_free(info->segment); 234 } 235 } 236 if (chain->flags & EVBUFFER_MULTICAST) { 237 struct evbuffer_multicast_parent *info = 238 EVBUFFER_CHAIN_EXTRA( 239 struct evbuffer_multicast_parent, 240 chain); 241 /* referencing chain is being freed, decrease 242 * refcounts of source chain and associated 243 * evbuffer (which get freed once both reach 244 * zero) */ 245 EVUTIL_ASSERT(info->source != NULL); 246 EVUTIL_ASSERT(info->parent != NULL); 247 EVBUFFER_LOCK(info->source); 248 evbuffer_chain_free(info->parent); 249 evbuffer_decref_and_unlock_(info->source); 250 } 251 252 mm_free(chain); 253 } 254 255 static void 256 evbuffer_free_all_chains(struct evbuffer_chain *chain) 257 { 258 struct evbuffer_chain *next; 259 for (; chain; chain = next) { 260 next = chain->next; 261 evbuffer_chain_free(chain); 262 } 263 } 264 265 #ifndef NDEBUG 266 static int 267 evbuffer_chains_all_empty(struct evbuffer_chain *chain) 268 { 269 for (; chain; chain = chain->next) { 270 if (chain->off) 271 return 0; 272 } 273 return 1; 274 } 275 #else 276 /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid 277 "unused variable" warnings. */ 278 static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) { 279 return 1; 280 } 281 #endif 282 283 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior 284 * to replacing them all with a new chain. Return a pointer to the place 285 * where the new chain will go. 286 * 287 * Internal; requires lock. The caller must fix up buf->last and buf->first 288 * as needed; they might have been freed. 289 */ 290 static struct evbuffer_chain ** 291 evbuffer_free_trailing_empty_chains(struct evbuffer *buf) 292 { 293 struct evbuffer_chain **ch = buf->last_with_datap; 294 /* Find the first victim chain. It might be *last_with_datap */ 295 while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch))) 296 ch = &(*ch)->next; 297 if (*ch) { 298 EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch)); 299 evbuffer_free_all_chains(*ch); 300 *ch = NULL; 301 } 302 return ch; 303 } 304 305 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty 306 * chains as necessary. Requires lock. Does not schedule callbacks. 307 */ 308 static void 309 evbuffer_chain_insert(struct evbuffer *buf, 310 struct evbuffer_chain *chain) 311 { 312 ASSERT_EVBUFFER_LOCKED(buf); 313 if (*buf->last_with_datap == NULL) { 314 /* There are no chains data on the buffer at all. */ 315 EVUTIL_ASSERT(buf->last_with_datap == &buf->first); 316 EVUTIL_ASSERT(buf->first == NULL); 317 buf->first = buf->last = chain; 318 } else { 319 struct evbuffer_chain **chp; 320 chp = evbuffer_free_trailing_empty_chains(buf); 321 *chp = chain; 322 if (chain->off) 323 buf->last_with_datap = chp; 324 buf->last = chain; 325 } 326 buf->total_len += chain->off; 327 } 328 329 static inline struct evbuffer_chain * 330 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen) 331 { 332 struct evbuffer_chain *chain; 333 if ((chain = evbuffer_chain_new(datlen)) == NULL) 334 return NULL; 335 evbuffer_chain_insert(buf, chain); 336 return chain; 337 } 338 339 void 340 evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag) 341 { 342 EVUTIL_ASSERT((chain->flags & flag) == 0); 343 chain->flags |= flag; 344 } 345 346 void 347 evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag) 348 { 349 EVUTIL_ASSERT((chain->flags & flag) != 0); 350 chain->flags &= ~flag; 351 if (chain->flags & EVBUFFER_DANGLING) 352 evbuffer_chain_free(chain); 353 } 354 355 static inline void 356 evbuffer_chain_incref(struct evbuffer_chain *chain) 357 { 358 ++chain->refcnt; 359 } 360 361 struct evbuffer * 362 evbuffer_new(void) 363 { 364 struct evbuffer *buffer; 365 366 buffer = mm_calloc(1, sizeof(struct evbuffer)); 367 if (buffer == NULL) 368 return (NULL); 369 370 LIST_INIT(&buffer->callbacks); 371 buffer->refcnt = 1; 372 buffer->last_with_datap = &buffer->first; 373 374 return (buffer); 375 } 376 377 int 378 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags) 379 { 380 EVBUFFER_LOCK(buf); 381 buf->flags |= (ev_uint32_t)flags; 382 EVBUFFER_UNLOCK(buf); 383 return 0; 384 } 385 386 int 387 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags) 388 { 389 EVBUFFER_LOCK(buf); 390 buf->flags &= ~(ev_uint32_t)flags; 391 EVBUFFER_UNLOCK(buf); 392 return 0; 393 } 394 395 void 396 evbuffer_incref_(struct evbuffer *buf) 397 { 398 EVBUFFER_LOCK(buf); 399 ++buf->refcnt; 400 EVBUFFER_UNLOCK(buf); 401 } 402 403 void 404 evbuffer_incref_and_lock_(struct evbuffer *buf) 405 { 406 EVBUFFER_LOCK(buf); 407 ++buf->refcnt; 408 } 409 410 int 411 evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base) 412 { 413 EVBUFFER_LOCK(buffer); 414 buffer->cb_queue = base; 415 buffer->deferred_cbs = 1; 416 event_deferred_cb_init_(&buffer->deferred, 417 event_base_get_npriorities(base) / 2, 418 evbuffer_deferred_callback, buffer); 419 EVBUFFER_UNLOCK(buffer); 420 return 0; 421 } 422 423 int 424 evbuffer_enable_locking(struct evbuffer *buf, void *lock) 425 { 426 #ifdef EVENT__DISABLE_THREAD_SUPPORT 427 return -1; 428 #else 429 if (buf->lock) 430 return -1; 431 432 if (!lock) { 433 EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE); 434 if (!lock) 435 return -1; 436 buf->lock = lock; 437 buf->own_lock = 1; 438 } else { 439 buf->lock = lock; 440 buf->own_lock = 0; 441 } 442 443 return 0; 444 #endif 445 } 446 447 void 448 evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev) 449 { 450 EVBUFFER_LOCK(buf); 451 buf->parent = bev; 452 EVBUFFER_UNLOCK(buf); 453 } 454 455 static void 456 evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred) 457 { 458 struct evbuffer_cb_entry *cbent, *next; 459 struct evbuffer_cb_info info; 460 size_t new_size; 461 ev_uint32_t mask, masked_val; 462 int clear = 1; 463 464 if (running_deferred) { 465 mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; 466 masked_val = EVBUFFER_CB_ENABLED; 467 } else if (buffer->deferred_cbs) { 468 mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; 469 masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; 470 /* Don't zero-out n_add/n_del, since the deferred callbacks 471 will want to see them. */ 472 clear = 0; 473 } else { 474 mask = EVBUFFER_CB_ENABLED; 475 masked_val = EVBUFFER_CB_ENABLED; 476 } 477 478 ASSERT_EVBUFFER_LOCKED(buffer); 479 480 if (LIST_EMPTY(&buffer->callbacks)) { 481 buffer->n_add_for_cb = buffer->n_del_for_cb = 0; 482 return; 483 } 484 if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0) 485 return; 486 487 new_size = buffer->total_len; 488 info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb; 489 info.n_added = buffer->n_add_for_cb; 490 info.n_deleted = buffer->n_del_for_cb; 491 if (clear) { 492 buffer->n_add_for_cb = 0; 493 buffer->n_del_for_cb = 0; 494 } 495 for (cbent = LIST_FIRST(&buffer->callbacks); 496 cbent != LIST_END(&buffer->callbacks); 497 cbent = next) { 498 /* Get the 'next' pointer now in case this callback decides 499 * to remove itself or something. */ 500 next = LIST_NEXT(cbent, next); 501 502 if ((cbent->flags & mask) != masked_val) 503 continue; 504 505 if ((cbent->flags & EVBUFFER_CB_OBSOLETE)) 506 cbent->cb.cb_obsolete(buffer, 507 info.orig_size, new_size, cbent->cbarg); 508 else 509 cbent->cb.cb_func(buffer, &info, cbent->cbarg); 510 } 511 } 512 513 void 514 evbuffer_invoke_callbacks_(struct evbuffer *buffer) 515 { 516 if (LIST_EMPTY(&buffer->callbacks)) { 517 buffer->n_add_for_cb = buffer->n_del_for_cb = 0; 518 return; 519 } 520 521 if (buffer->deferred_cbs) { 522 if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) { 523 evbuffer_incref_and_lock_(buffer); 524 if (buffer->parent) 525 bufferevent_incref_(buffer->parent); 526 EVBUFFER_UNLOCK(buffer); 527 } 528 } 529 530 evbuffer_run_callbacks(buffer, 0); 531 } 532 533 static void 534 evbuffer_deferred_callback(struct event_callback *cb, void *arg) 535 { 536 struct bufferevent *parent = NULL; 537 struct evbuffer *buffer = arg; 538 539 /* XXXX It would be better to run these callbacks without holding the 540 * lock */ 541 EVBUFFER_LOCK(buffer); 542 parent = buffer->parent; 543 evbuffer_run_callbacks(buffer, 1); 544 evbuffer_decref_and_unlock_(buffer); 545 if (parent) 546 bufferevent_decref_(parent); 547 } 548 549 static void 550 evbuffer_remove_all_callbacks(struct evbuffer *buffer) 551 { 552 struct evbuffer_cb_entry *cbent; 553 554 while ((cbent = LIST_FIRST(&buffer->callbacks))) { 555 LIST_REMOVE(cbent, next); 556 mm_free(cbent); 557 } 558 } 559 560 void 561 evbuffer_decref_and_unlock_(struct evbuffer *buffer) 562 { 563 struct evbuffer_chain *chain, *next; 564 ASSERT_EVBUFFER_LOCKED(buffer); 565 566 EVUTIL_ASSERT(buffer->refcnt > 0); 567 568 if (--buffer->refcnt > 0) { 569 EVBUFFER_UNLOCK(buffer); 570 return; 571 } 572 573 for (chain = buffer->first; chain != NULL; chain = next) { 574 next = chain->next; 575 evbuffer_chain_free(chain); 576 } 577 evbuffer_remove_all_callbacks(buffer); 578 if (buffer->deferred_cbs) 579 event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred); 580 581 EVBUFFER_UNLOCK(buffer); 582 if (buffer->own_lock) 583 EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE); 584 mm_free(buffer); 585 } 586 587 void 588 evbuffer_free(struct evbuffer *buffer) 589 { 590 EVBUFFER_LOCK(buffer); 591 evbuffer_decref_and_unlock_(buffer); 592 } 593 594 void 595 evbuffer_lock(struct evbuffer *buf) 596 { 597 EVBUFFER_LOCK(buf); 598 } 599 600 void 601 evbuffer_unlock(struct evbuffer *buf) 602 { 603 EVBUFFER_UNLOCK(buf); 604 } 605 606 size_t 607 evbuffer_get_length(const struct evbuffer *buffer) 608 { 609 size_t result; 610 611 EVBUFFER_LOCK(buffer); 612 613 result = (buffer->total_len); 614 615 EVBUFFER_UNLOCK(buffer); 616 617 return result; 618 } 619 620 size_t 621 evbuffer_get_contiguous_space(const struct evbuffer *buf) 622 { 623 struct evbuffer_chain *chain; 624 size_t result; 625 626 EVBUFFER_LOCK(buf); 627 chain = buf->first; 628 result = (chain != NULL ? chain->off : 0); 629 EVBUFFER_UNLOCK(buf); 630 631 return result; 632 } 633 634 size_t 635 evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) { 636 int n; 637 size_t res; 638 size_t to_alloc; 639 640 EVBUFFER_LOCK(buf); 641 642 res = to_alloc = 0; 643 644 for (n = 0; n < n_vec; n++) { 645 to_alloc += vec[n].iov_len; 646 } 647 648 if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) { 649 goto done; 650 } 651 652 for (n = 0; n < n_vec; n++) { 653 /* XXX each 'add' call here does a bunch of setup that's 654 * obviated by evbuffer_expand_fast_, and some cleanup that we 655 * would like to do only once. Instead we should just extract 656 * the part of the code that's needed. */ 657 658 if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) { 659 goto done; 660 } 661 662 res += vec[n].iov_len; 663 } 664 665 done: 666 EVBUFFER_UNLOCK(buf); 667 return res; 668 } 669 670 int 671 evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size, 672 struct evbuffer_iovec *vec, int n_vecs) 673 { 674 struct evbuffer_chain *chain, **chainp; 675 int n = -1; 676 677 EVBUFFER_LOCK(buf); 678 if (buf->freeze_end) 679 goto done; 680 if (n_vecs < 1) 681 goto done; 682 if (n_vecs == 1) { 683 if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL) 684 goto done; 685 686 vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain); 687 vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain); 688 EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size); 689 n = 1; 690 } else { 691 if (evbuffer_expand_fast_(buf, size, n_vecs)<0) 692 goto done; 693 n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs, 694 &chainp, 0); 695 } 696 697 done: 698 EVBUFFER_UNLOCK(buf); 699 return n; 700 701 } 702 703 static int 704 advance_last_with_data(struct evbuffer *buf) 705 { 706 int n = 0; 707 struct evbuffer_chain **chainp = buf->last_with_datap; 708 709 ASSERT_EVBUFFER_LOCKED(buf); 710 711 if (!*chainp) 712 return 0; 713 714 while ((*chainp)->next) { 715 chainp = &(*chainp)->next; 716 if ((*chainp)->off) 717 buf->last_with_datap = chainp; 718 ++n; 719 } 720 return n; 721 } 722 723 int 724 evbuffer_commit_space(struct evbuffer *buf, 725 struct evbuffer_iovec *vec, int n_vecs) 726 { 727 struct evbuffer_chain *chain, **firstchainp, **chainp; 728 int result = -1; 729 size_t added = 0; 730 int i; 731 732 EVBUFFER_LOCK(buf); 733 734 if (buf->freeze_end) 735 goto done; 736 if (n_vecs == 0) { 737 result = 0; 738 goto done; 739 } else if (n_vecs == 1 && 740 (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) { 741 /* The user only got or used one chain; it might not 742 * be the first one with space in it. */ 743 if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last)) 744 goto done; 745 buf->last->off += vec[0].iov_len; 746 added = vec[0].iov_len; 747 if (added) 748 advance_last_with_data(buf); 749 goto okay; 750 } 751 752 /* Advance 'firstchain' to the first chain with space in it. */ 753 firstchainp = buf->last_with_datap; 754 if (!*firstchainp) 755 goto done; 756 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 757 firstchainp = &(*firstchainp)->next; 758 } 759 760 chain = *firstchainp; 761 /* pass 1: make sure that the pointers and lengths of vecs[] are in 762 * bounds before we try to commit anything. */ 763 for (i=0; i<n_vecs; ++i) { 764 if (!chain) 765 goto done; 766 if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) || 767 (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain)) 768 goto done; 769 chain = chain->next; 770 } 771 /* pass 2: actually adjust all the chains. */ 772 chainp = firstchainp; 773 for (i=0; i<n_vecs; ++i) { 774 (*chainp)->off += vec[i].iov_len; 775 added += vec[i].iov_len; 776 if (vec[i].iov_len) { 777 buf->last_with_datap = chainp; 778 } 779 chainp = &(*chainp)->next; 780 } 781 782 okay: 783 buf->total_len += added; 784 buf->n_add_for_cb += added; 785 result = 0; 786 evbuffer_invoke_callbacks_(buf); 787 788 done: 789 EVBUFFER_UNLOCK(buf); 790 return result; 791 } 792 793 static inline int 794 HAS_PINNED_R(struct evbuffer *buf) 795 { 796 return (buf->last && CHAIN_PINNED_R(buf->last)); 797 } 798 799 static inline void 800 ZERO_CHAIN(struct evbuffer *dst) 801 { 802 ASSERT_EVBUFFER_LOCKED(dst); 803 dst->first = NULL; 804 dst->last = NULL; 805 dst->last_with_datap = &(dst)->first; 806 dst->total_len = 0; 807 } 808 809 /* Prepares the contents of src to be moved to another buffer by removing 810 * read-pinned chains. The first pinned chain is saved in first, and the 811 * last in last. If src has no read-pinned chains, first and last are set 812 * to NULL. */ 813 static int 814 PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first, 815 struct evbuffer_chain **last) 816 { 817 struct evbuffer_chain *chain, **pinned; 818 819 ASSERT_EVBUFFER_LOCKED(src); 820 821 if (!HAS_PINNED_R(src)) { 822 *first = *last = NULL; 823 return 0; 824 } 825 826 pinned = src->last_with_datap; 827 if (!CHAIN_PINNED_R(*pinned)) 828 pinned = &(*pinned)->next; 829 EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned)); 830 chain = *first = *pinned; 831 *last = src->last; 832 833 /* If there's data in the first pinned chain, we need to allocate 834 * a new chain and copy the data over. */ 835 if (chain->off) { 836 struct evbuffer_chain *tmp; 837 838 EVUTIL_ASSERT(pinned == src->last_with_datap); 839 tmp = evbuffer_chain_new(chain->off); 840 if (!tmp) 841 return -1; 842 memcpy(tmp->buffer, chain->buffer + chain->misalign, 843 chain->off); 844 tmp->off = chain->off; 845 *src->last_with_datap = tmp; 846 src->last = tmp; 847 chain->misalign += chain->off; 848 chain->off = 0; 849 } else { 850 src->last = *src->last_with_datap; 851 *pinned = NULL; 852 } 853 854 return 0; 855 } 856 857 static inline void 858 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned, 859 struct evbuffer_chain *last) 860 { 861 ASSERT_EVBUFFER_LOCKED(src); 862 863 if (!pinned) { 864 ZERO_CHAIN(src); 865 return; 866 } 867 868 src->first = pinned; 869 src->last = last; 870 src->last_with_datap = &src->first; 871 src->total_len = 0; 872 } 873 874 static inline void 875 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src) 876 { 877 ASSERT_EVBUFFER_LOCKED(dst); 878 ASSERT_EVBUFFER_LOCKED(src); 879 dst->first = src->first; 880 if (src->last_with_datap == &src->first) 881 dst->last_with_datap = &dst->first; 882 else 883 dst->last_with_datap = src->last_with_datap; 884 dst->last = src->last; 885 dst->total_len = src->total_len; 886 } 887 888 static void 889 APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) 890 { 891 struct evbuffer_chain **chp; 892 893 ASSERT_EVBUFFER_LOCKED(dst); 894 ASSERT_EVBUFFER_LOCKED(src); 895 896 chp = evbuffer_free_trailing_empty_chains(dst); 897 *chp = src->first; 898 899 if (src->last_with_datap == &src->first) 900 dst->last_with_datap = chp; 901 else 902 dst->last_with_datap = src->last_with_datap; 903 dst->last = src->last; 904 dst->total_len += src->total_len; 905 } 906 907 static inline void 908 APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src) 909 { 910 struct evbuffer_chain *tmp; 911 struct evbuffer_chain *chain = src->first; 912 struct evbuffer_multicast_parent *extra; 913 914 ASSERT_EVBUFFER_LOCKED(dst); 915 ASSERT_EVBUFFER_LOCKED(src); 916 917 for (; chain; chain = chain->next) { 918 if (!chain->off || chain->flags & EVBUFFER_DANGLING) { 919 /* skip empty chains */ 920 continue; 921 } 922 923 tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent)); 924 if (!tmp) { 925 event_warn("%s: out of memory", __func__); 926 return; 927 } 928 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp); 929 /* reference evbuffer containing source chain so it 930 * doesn't get released while the chain is still 931 * being referenced to */ 932 evbuffer_incref_(src); 933 extra->source = src; 934 /* reference source chain which now becomes immutable */ 935 evbuffer_chain_incref(chain); 936 extra->parent = chain; 937 chain->flags |= EVBUFFER_IMMUTABLE; 938 tmp->buffer_len = chain->buffer_len; 939 tmp->misalign = chain->misalign; 940 tmp->off = chain->off; 941 tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE; 942 tmp->buffer = chain->buffer; 943 evbuffer_chain_insert(dst, tmp); 944 } 945 } 946 947 static void 948 PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) 949 { 950 ASSERT_EVBUFFER_LOCKED(dst); 951 ASSERT_EVBUFFER_LOCKED(src); 952 src->last->next = dst->first; 953 dst->first = src->first; 954 dst->total_len += src->total_len; 955 if (*dst->last_with_datap == NULL) { 956 if (src->last_with_datap == &(src)->first) 957 dst->last_with_datap = &dst->first; 958 else 959 dst->last_with_datap = src->last_with_datap; 960 } else if (dst->last_with_datap == &dst->first) { 961 dst->last_with_datap = &src->last->next; 962 } 963 } 964 965 int 966 evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) 967 { 968 struct evbuffer_chain *pinned, *last; 969 size_t in_total_len, out_total_len; 970 int result = 0; 971 972 EVBUFFER_LOCK2(inbuf, outbuf); 973 in_total_len = inbuf->total_len; 974 out_total_len = outbuf->total_len; 975 976 if (in_total_len == 0 || outbuf == inbuf) 977 goto done; 978 979 if (outbuf->freeze_end || inbuf->freeze_start) { 980 result = -1; 981 goto done; 982 } 983 984 if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { 985 result = -1; 986 goto done; 987 } 988 989 if (out_total_len == 0) { 990 /* There might be an empty chain at the start of outbuf; free 991 * it. */ 992 evbuffer_free_all_chains(outbuf->first); 993 COPY_CHAIN(outbuf, inbuf); 994 } else { 995 APPEND_CHAIN(outbuf, inbuf); 996 } 997 998 RESTORE_PINNED(inbuf, pinned, last); 999 1000 inbuf->n_del_for_cb += in_total_len; 1001 outbuf->n_add_for_cb += in_total_len; 1002 1003 evbuffer_invoke_callbacks_(inbuf); 1004 evbuffer_invoke_callbacks_(outbuf); 1005 1006 done: 1007 EVBUFFER_UNLOCK2(inbuf, outbuf); 1008 return result; 1009 } 1010 1011 int 1012 evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf) 1013 { 1014 size_t in_total_len, out_total_len; 1015 struct evbuffer_chain *chain; 1016 int result = 0; 1017 1018 EVBUFFER_LOCK2(inbuf, outbuf); 1019 in_total_len = inbuf->total_len; 1020 out_total_len = outbuf->total_len; 1021 chain = inbuf->first; 1022 1023 if (in_total_len == 0) 1024 goto done; 1025 1026 if (outbuf->freeze_end || outbuf == inbuf) { 1027 result = -1; 1028 goto done; 1029 } 1030 1031 for (; chain; chain = chain->next) { 1032 if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) { 1033 /* chain type can not be referenced */ 1034 result = -1; 1035 goto done; 1036 } 1037 } 1038 1039 if (out_total_len == 0) { 1040 /* There might be an empty chain at the start of outbuf; free 1041 * it. */ 1042 evbuffer_free_all_chains(outbuf->first); 1043 } 1044 APPEND_CHAIN_MULTICAST(outbuf, inbuf); 1045 1046 outbuf->n_add_for_cb += in_total_len; 1047 evbuffer_invoke_callbacks_(outbuf); 1048 1049 done: 1050 EVBUFFER_UNLOCK2(inbuf, outbuf); 1051 return result; 1052 } 1053 1054 int 1055 evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) 1056 { 1057 struct evbuffer_chain *pinned, *last; 1058 size_t in_total_len, out_total_len; 1059 int result = 0; 1060 1061 EVBUFFER_LOCK2(inbuf, outbuf); 1062 1063 in_total_len = inbuf->total_len; 1064 out_total_len = outbuf->total_len; 1065 1066 if (!in_total_len || inbuf == outbuf) 1067 goto done; 1068 1069 if (outbuf->freeze_start || inbuf->freeze_start) { 1070 result = -1; 1071 goto done; 1072 } 1073 1074 if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { 1075 result = -1; 1076 goto done; 1077 } 1078 1079 if (out_total_len == 0) { 1080 /* There might be an empty chain at the start of outbuf; free 1081 * it. */ 1082 evbuffer_free_all_chains(outbuf->first); 1083 COPY_CHAIN(outbuf, inbuf); 1084 } else { 1085 PREPEND_CHAIN(outbuf, inbuf); 1086 } 1087 1088 RESTORE_PINNED(inbuf, pinned, last); 1089 1090 inbuf->n_del_for_cb += in_total_len; 1091 outbuf->n_add_for_cb += in_total_len; 1092 1093 evbuffer_invoke_callbacks_(inbuf); 1094 evbuffer_invoke_callbacks_(outbuf); 1095 done: 1096 EVBUFFER_UNLOCK2(inbuf, outbuf); 1097 return result; 1098 } 1099 1100 int 1101 evbuffer_drain(struct evbuffer *buf, size_t len) 1102 { 1103 struct evbuffer_chain *chain, *next; 1104 size_t remaining, old_len; 1105 int result = 0; 1106 1107 EVBUFFER_LOCK(buf); 1108 old_len = buf->total_len; 1109 1110 if (old_len == 0) 1111 goto done; 1112 1113 if (buf->freeze_start) { 1114 result = -1; 1115 goto done; 1116 } 1117 1118 if (len >= old_len && !HAS_PINNED_R(buf)) { 1119 len = old_len; 1120 for (chain = buf->first; chain != NULL; chain = next) { 1121 next = chain->next; 1122 evbuffer_chain_free(chain); 1123 } 1124 1125 ZERO_CHAIN(buf); 1126 } else { 1127 if (len >= old_len) 1128 len = old_len; 1129 1130 buf->total_len -= len; 1131 remaining = len; 1132 for (chain = buf->first; 1133 remaining >= chain->off; 1134 chain = next) { 1135 next = chain->next; 1136 remaining -= chain->off; 1137 1138 if (chain == *buf->last_with_datap) { 1139 buf->last_with_datap = &buf->first; 1140 } 1141 if (&chain->next == buf->last_with_datap) 1142 buf->last_with_datap = &buf->first; 1143 1144 if (CHAIN_PINNED_R(chain)) { 1145 EVUTIL_ASSERT(remaining == 0); 1146 chain->misalign += chain->off; 1147 chain->off = 0; 1148 break; 1149 } else 1150 evbuffer_chain_free(chain); 1151 } 1152 1153 buf->first = chain; 1154 EVUTIL_ASSERT(remaining <= chain->off); 1155 chain->misalign += remaining; 1156 chain->off -= remaining; 1157 } 1158 1159 buf->n_del_for_cb += len; 1160 /* Tell someone about changes in this buffer */ 1161 evbuffer_invoke_callbacks_(buf); 1162 1163 done: 1164 EVBUFFER_UNLOCK(buf); 1165 return result; 1166 } 1167 1168 /* Reads data from an event buffer and drains the bytes read */ 1169 int 1170 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen) 1171 { 1172 ev_ssize_t n; 1173 EVBUFFER_LOCK(buf); 1174 n = evbuffer_copyout_from(buf, NULL, data_out, datlen); 1175 if (n > 0) { 1176 if (evbuffer_drain(buf, n)<0) 1177 n = -1; 1178 } 1179 EVBUFFER_UNLOCK(buf); 1180 return (int)n; 1181 } 1182 1183 ev_ssize_t 1184 evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen) 1185 { 1186 return evbuffer_copyout_from(buf, NULL, data_out, datlen); 1187 } 1188 1189 ev_ssize_t 1190 evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos, 1191 void *data_out, size_t datlen) 1192 { 1193 /*XXX fails badly on sendfile case. */ 1194 struct evbuffer_chain *chain; 1195 char *data = data_out; 1196 size_t nread; 1197 ev_ssize_t result = 0; 1198 size_t pos_in_chain; 1199 1200 EVBUFFER_LOCK(buf); 1201 1202 if (pos) { 1203 if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) { 1204 result = -1; 1205 goto done; 1206 } 1207 chain = pos->internal_.chain; 1208 pos_in_chain = pos->internal_.pos_in_chain; 1209 if (datlen + pos->pos > buf->total_len) 1210 datlen = buf->total_len - pos->pos; 1211 } else { 1212 chain = buf->first; 1213 pos_in_chain = 0; 1214 if (datlen > buf->total_len) 1215 datlen = buf->total_len; 1216 } 1217 1218 1219 if (datlen == 0) 1220 goto done; 1221 1222 if (buf->freeze_start) { 1223 result = -1; 1224 goto done; 1225 } 1226 1227 nread = datlen; 1228 1229 while (datlen && datlen >= chain->off - pos_in_chain) { 1230 size_t copylen = chain->off - pos_in_chain; 1231 memcpy(data, 1232 chain->buffer + chain->misalign + pos_in_chain, 1233 copylen); 1234 data += copylen; 1235 datlen -= copylen; 1236 1237 chain = chain->next; 1238 pos_in_chain = 0; 1239 EVUTIL_ASSERT(chain || datlen==0); 1240 } 1241 1242 if (datlen) { 1243 EVUTIL_ASSERT(chain); 1244 EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off); 1245 1246 memcpy(data, chain->buffer + chain->misalign + pos_in_chain, 1247 datlen); 1248 } 1249 1250 result = nread; 1251 done: 1252 EVBUFFER_UNLOCK(buf); 1253 return result; 1254 } 1255 1256 /* reads data from the src buffer to the dst buffer, avoids memcpy as 1257 * possible. */ 1258 /* XXXX should return ev_ssize_t */ 1259 int 1260 evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst, 1261 size_t datlen) 1262 { 1263 /*XXX We should have an option to force this to be zero-copy.*/ 1264 1265 /*XXX can fail badly on sendfile case. */ 1266 struct evbuffer_chain *chain, *previous; 1267 size_t nread = 0; 1268 int result; 1269 1270 EVBUFFER_LOCK2(src, dst); 1271 1272 chain = previous = src->first; 1273 1274 if (datlen == 0 || dst == src) { 1275 result = 0; 1276 goto done; 1277 } 1278 1279 if (dst->freeze_end || src->freeze_start) { 1280 result = -1; 1281 goto done; 1282 } 1283 1284 /* short-cut if there is no more data buffered */ 1285 if (datlen >= src->total_len) { 1286 datlen = src->total_len; 1287 evbuffer_add_buffer(dst, src); 1288 result = (int)datlen; /*XXXX should return ev_ssize_t*/ 1289 goto done; 1290 } 1291 1292 /* removes chains if possible */ 1293 while (chain->off <= datlen) { 1294 /* We can't remove the last with data from src unless we 1295 * remove all chains, in which case we would have done the if 1296 * block above */ 1297 EVUTIL_ASSERT(chain != *src->last_with_datap); 1298 nread += chain->off; 1299 datlen -= chain->off; 1300 previous = chain; 1301 if (src->last_with_datap == &chain->next) 1302 src->last_with_datap = &src->first; 1303 chain = chain->next; 1304 } 1305 1306 if (chain != src->first) { 1307 /* we can remove the chain */ 1308 struct evbuffer_chain **chp; 1309 chp = evbuffer_free_trailing_empty_chains(dst); 1310 1311 if (dst->first == NULL) { 1312 dst->first = src->first; 1313 } else { 1314 *chp = src->first; 1315 } 1316 dst->last = previous; 1317 previous->next = NULL; 1318 src->first = chain; 1319 advance_last_with_data(dst); 1320 1321 dst->total_len += nread; 1322 dst->n_add_for_cb += nread; 1323 } 1324 1325 /* we know that there is more data in the src buffer than 1326 * we want to read, so we manually drain the chain */ 1327 evbuffer_add(dst, chain->buffer + chain->misalign, datlen); 1328 chain->misalign += datlen; 1329 chain->off -= datlen; 1330 nread += datlen; 1331 1332 /* You might think we would want to increment dst->n_add_for_cb 1333 * here too. But evbuffer_add above already took care of that. 1334 */ 1335 src->total_len -= nread; 1336 src->n_del_for_cb += nread; 1337 1338 if (nread) { 1339 evbuffer_invoke_callbacks_(dst); 1340 evbuffer_invoke_callbacks_(src); 1341 } 1342 result = (int)nread;/*XXXX should change return type */ 1343 1344 done: 1345 EVBUFFER_UNLOCK2(src, dst); 1346 return result; 1347 } 1348 1349 unsigned char * 1350 evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size) 1351 { 1352 struct evbuffer_chain *chain, *next, *tmp, *last_with_data; 1353 unsigned char *buffer, *result = NULL; 1354 ev_ssize_t remaining; 1355 int removed_last_with_data = 0; 1356 int removed_last_with_datap = 0; 1357 1358 EVBUFFER_LOCK(buf); 1359 1360 chain = buf->first; 1361 1362 if (size < 0) 1363 size = buf->total_len; 1364 /* if size > buf->total_len, we cannot guarantee to the user that she 1365 * is going to have a long enough buffer afterwards; so we return 1366 * NULL */ 1367 if (size == 0 || (size_t)size > buf->total_len) 1368 goto done; 1369 1370 /* No need to pull up anything; the first size bytes are 1371 * already here. */ 1372 if (chain->off >= (size_t)size) { 1373 result = chain->buffer + chain->misalign; 1374 goto done; 1375 } 1376 1377 /* Make sure that none of the chains we need to copy from is pinned. */ 1378 remaining = size - chain->off; 1379 EVUTIL_ASSERT(remaining >= 0); 1380 for (tmp=chain->next; tmp; tmp=tmp->next) { 1381 if (CHAIN_PINNED(tmp)) 1382 goto done; 1383 if (tmp->off >= (size_t)remaining) 1384 break; 1385 remaining -= tmp->off; 1386 } 1387 1388 if (CHAIN_PINNED(chain)) { 1389 size_t old_off = chain->off; 1390 if (CHAIN_SPACE_LEN(chain) < size - chain->off) { 1391 /* not enough room at end of chunk. */ 1392 goto done; 1393 } 1394 buffer = CHAIN_SPACE_PTR(chain); 1395 tmp = chain; 1396 tmp->off = size; 1397 size -= old_off; 1398 chain = chain->next; 1399 } else if (chain->buffer_len - chain->misalign >= (size_t)size) { 1400 /* already have enough space in the first chain */ 1401 size_t old_off = chain->off; 1402 buffer = chain->buffer + chain->misalign + chain->off; 1403 tmp = chain; 1404 tmp->off = size; 1405 size -= old_off; 1406 chain = chain->next; 1407 } else { 1408 if ((tmp = evbuffer_chain_new(size)) == NULL) { 1409 event_warn("%s: out of memory", __func__); 1410 goto done; 1411 } 1412 buffer = tmp->buffer; 1413 tmp->off = size; 1414 buf->first = tmp; 1415 } 1416 1417 /* TODO(niels): deal with buffers that point to NULL like sendfile */ 1418 1419 /* Copy and free every chunk that will be entirely pulled into tmp */ 1420 last_with_data = *buf->last_with_datap; 1421 for (; chain != NULL && (size_t)size >= chain->off; chain = next) { 1422 next = chain->next; 1423 1424 if (chain->buffer) { 1425 memcpy(buffer, chain->buffer + chain->misalign, chain->off); 1426 size -= chain->off; 1427 buffer += chain->off; 1428 } 1429 if (chain == last_with_data) 1430 removed_last_with_data = 1; 1431 if (&chain->next == buf->last_with_datap) 1432 removed_last_with_datap = 1; 1433 1434 evbuffer_chain_free(chain); 1435 } 1436 1437 if (chain != NULL) { 1438 memcpy(buffer, chain->buffer + chain->misalign, size); 1439 chain->misalign += size; 1440 chain->off -= size; 1441 } else { 1442 buf->last = tmp; 1443 } 1444 1445 tmp->next = chain; 1446 1447 if (removed_last_with_data) { 1448 buf->last_with_datap = &buf->first; 1449 } else if (removed_last_with_datap) { 1450 if (buf->first->next && buf->first->next->off) 1451 buf->last_with_datap = &buf->first->next; 1452 else 1453 buf->last_with_datap = &buf->first; 1454 } 1455 1456 result = (tmp->buffer + tmp->misalign); 1457 1458 done: 1459 EVBUFFER_UNLOCK(buf); 1460 return result; 1461 } 1462 1463 /* 1464 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'. 1465 * The returned buffer needs to be freed by the called. 1466 */ 1467 char * 1468 evbuffer_readline(struct evbuffer *buffer) 1469 { 1470 return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY); 1471 } 1472 1473 static inline ev_ssize_t 1474 evbuffer_strchr(struct evbuffer_ptr *it, const char chr) 1475 { 1476 struct evbuffer_chain *chain = it->internal_.chain; 1477 size_t i = it->internal_.pos_in_chain; 1478 while (chain != NULL) { 1479 char *buffer = (char *)chain->buffer + chain->misalign; 1480 char *cp = memchr(buffer+i, chr, chain->off-i); 1481 if (cp) { 1482 it->internal_.chain = chain; 1483 it->internal_.pos_in_chain = cp - buffer; 1484 it->pos += (cp - buffer - i); 1485 return it->pos; 1486 } 1487 it->pos += chain->off - i; 1488 i = 0; 1489 chain = chain->next; 1490 } 1491 1492 return (-1); 1493 } 1494 1495 static inline char * 1496 find_eol_char(char *s, size_t len) 1497 { 1498 #define CHUNK_SZ 128 1499 /* Lots of benchmarking found this approach to be faster in practice 1500 * than doing two memchrs over the whole buffer, doin a memchr on each 1501 * char of the buffer, or trying to emulate memchr by hand. */ 1502 char *s_end, *cr, *lf; 1503 s_end = s+len; 1504 while (s < s_end) { 1505 size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s); 1506 cr = memchr(s, '\r', chunk); 1507 lf = memchr(s, '\n', chunk); 1508 if (cr) { 1509 if (lf && lf < cr) 1510 return lf; 1511 return cr; 1512 } else if (lf) { 1513 return lf; 1514 } 1515 s += CHUNK_SZ; 1516 } 1517 1518 return NULL; 1519 #undef CHUNK_SZ 1520 } 1521 1522 static ev_ssize_t 1523 evbuffer_find_eol_char(struct evbuffer_ptr *it) 1524 { 1525 struct evbuffer_chain *chain = it->internal_.chain; 1526 size_t i = it->internal_.pos_in_chain; 1527 while (chain != NULL) { 1528 char *buffer = (char *)chain->buffer + chain->misalign; 1529 char *cp = find_eol_char(buffer+i, chain->off-i); 1530 if (cp) { 1531 it->internal_.chain = chain; 1532 it->internal_.pos_in_chain = cp - buffer; 1533 it->pos += (cp - buffer) - i; 1534 return it->pos; 1535 } 1536 it->pos += chain->off - i; 1537 i = 0; 1538 chain = chain->next; 1539 } 1540 1541 return (-1); 1542 } 1543 1544 static inline size_t 1545 evbuffer_strspn( 1546 struct evbuffer_ptr *ptr, const char *chrset) 1547 { 1548 size_t count = 0; 1549 struct evbuffer_chain *chain = ptr->internal_.chain; 1550 size_t i = ptr->internal_.pos_in_chain; 1551 1552 if (!chain) 1553 return 0; 1554 1555 while (1) { 1556 char *buffer = (char *)chain->buffer + chain->misalign; 1557 for (; i < chain->off; ++i) { 1558 const char *p = chrset; 1559 while (*p) { 1560 if (buffer[i] == *p++) 1561 goto next; 1562 } 1563 ptr->internal_.chain = chain; 1564 ptr->internal_.pos_in_chain = i; 1565 ptr->pos += count; 1566 return count; 1567 next: 1568 ++count; 1569 } 1570 i = 0; 1571 1572 if (! chain->next) { 1573 ptr->internal_.chain = chain; 1574 ptr->internal_.pos_in_chain = i; 1575 ptr->pos += count; 1576 return count; 1577 } 1578 1579 chain = chain->next; 1580 } 1581 } 1582 1583 1584 static inline int 1585 evbuffer_getchr(struct evbuffer_ptr *it) 1586 { 1587 struct evbuffer_chain *chain = it->internal_.chain; 1588 size_t off = it->internal_.pos_in_chain; 1589 1590 if (chain == NULL) 1591 return -1; 1592 1593 return (unsigned char)chain->buffer[chain->misalign + off]; 1594 } 1595 1596 struct evbuffer_ptr 1597 evbuffer_search_eol(struct evbuffer *buffer, 1598 struct evbuffer_ptr *start, size_t *eol_len_out, 1599 enum evbuffer_eol_style eol_style) 1600 { 1601 struct evbuffer_ptr it, it2; 1602 size_t extra_drain = 0; 1603 int ok = 0; 1604 1605 /* Avoid locking in trivial edge cases */ 1606 if (start && start->internal_.chain == NULL) { 1607 PTR_NOT_FOUND(&it); 1608 if (eol_len_out) 1609 *eol_len_out = extra_drain; 1610 return it; 1611 } 1612 1613 EVBUFFER_LOCK(buffer); 1614 1615 if (start) { 1616 memcpy(&it, start, sizeof(it)); 1617 } else { 1618 it.pos = 0; 1619 it.internal_.chain = buffer->first; 1620 it.internal_.pos_in_chain = 0; 1621 } 1622 1623 /* the eol_style determines our first stop character and how many 1624 * characters we are going to drain afterwards. */ 1625 switch (eol_style) { 1626 case EVBUFFER_EOL_ANY: 1627 if (evbuffer_find_eol_char(&it) < 0) 1628 goto done; 1629 memcpy(&it2, &it, sizeof(it)); 1630 extra_drain = evbuffer_strspn(&it2, "\r\n"); 1631 break; 1632 case EVBUFFER_EOL_CRLF_STRICT: { 1633 it = evbuffer_search(buffer, "\r\n", 2, &it); 1634 if (it.pos < 0) 1635 goto done; 1636 extra_drain = 2; 1637 break; 1638 } 1639 case EVBUFFER_EOL_CRLF: { 1640 ev_ssize_t start_pos = it.pos; 1641 /* Look for a LF ... */ 1642 if (evbuffer_strchr(&it, '\n') < 0) 1643 goto done; 1644 extra_drain = 1; 1645 /* ... optionally preceeded by a CR. */ 1646 if (it.pos == start_pos) 1647 break; /* If the first character is \n, don't back up */ 1648 /* This potentially does an extra linear walk over the first 1649 * few chains. Probably, that's not too expensive unless you 1650 * have a really pathological setup. */ 1651 memcpy(&it2, &it, sizeof(it)); 1652 if (evbuffer_ptr_subtract(buffer, &it2, 1)<0) 1653 break; 1654 if (evbuffer_getchr(&it2) == '\r') { 1655 memcpy(&it, &it2, sizeof(it)); 1656 extra_drain = 2; 1657 } 1658 break; 1659 } 1660 case EVBUFFER_EOL_LF: 1661 if (evbuffer_strchr(&it, '\n') < 0) 1662 goto done; 1663 extra_drain = 1; 1664 break; 1665 case EVBUFFER_EOL_NUL: 1666 if (evbuffer_strchr(&it, '\0') < 0) 1667 goto done; 1668 extra_drain = 1; 1669 break; 1670 default: 1671 goto done; 1672 } 1673 1674 ok = 1; 1675 done: 1676 EVBUFFER_UNLOCK(buffer); 1677 1678 if (!ok) 1679 PTR_NOT_FOUND(&it); 1680 if (eol_len_out) 1681 *eol_len_out = extra_drain; 1682 1683 return it; 1684 } 1685 1686 char * 1687 evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out, 1688 enum evbuffer_eol_style eol_style) 1689 { 1690 struct evbuffer_ptr it; 1691 char *line; 1692 size_t n_to_copy=0, extra_drain=0; 1693 char *result = NULL; 1694 1695 EVBUFFER_LOCK(buffer); 1696 1697 if (buffer->freeze_start) { 1698 goto done; 1699 } 1700 1701 it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style); 1702 if (it.pos < 0) 1703 goto done; 1704 n_to_copy = it.pos; 1705 1706 if ((line = mm_malloc(n_to_copy+1)) == NULL) { 1707 event_warn("%s: out of memory", __func__); 1708 goto done; 1709 } 1710 1711 evbuffer_remove(buffer, line, n_to_copy); 1712 line[n_to_copy] = '\0'; 1713 1714 evbuffer_drain(buffer, extra_drain); 1715 result = line; 1716 done: 1717 EVBUFFER_UNLOCK(buffer); 1718 1719 if (n_read_out) 1720 *n_read_out = result ? n_to_copy : 0; 1721 1722 return result; 1723 } 1724 1725 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096 1726 1727 /* Adds data to an event buffer */ 1728 1729 int 1730 evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen) 1731 { 1732 struct evbuffer_chain *chain, *tmp; 1733 const unsigned char *data = data_in; 1734 size_t remain, to_alloc; 1735 int result = -1; 1736 1737 EVBUFFER_LOCK(buf); 1738 1739 if (buf->freeze_end) { 1740 goto done; 1741 } 1742 /* Prevent buf->total_len overflow */ 1743 if (datlen > EV_SIZE_MAX - buf->total_len) { 1744 goto done; 1745 } 1746 1747 if (*buf->last_with_datap == NULL) { 1748 chain = buf->last; 1749 } else { 1750 chain = *buf->last_with_datap; 1751 } 1752 1753 /* If there are no chains allocated for this buffer, allocate one 1754 * big enough to hold all the data. */ 1755 if (chain == NULL) { 1756 chain = evbuffer_chain_new(datlen); 1757 if (!chain) 1758 goto done; 1759 evbuffer_chain_insert(buf, chain); 1760 } 1761 1762 if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { 1763 /* Always true for mutable buffers */ 1764 EVUTIL_ASSERT(chain->misalign >= 0 && 1765 (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX); 1766 remain = chain->buffer_len - (size_t)chain->misalign - chain->off; 1767 if (remain >= datlen) { 1768 /* there's enough space to hold all the data in the 1769 * current last chain */ 1770 memcpy(chain->buffer + chain->misalign + chain->off, 1771 data, datlen); 1772 chain->off += datlen; 1773 buf->total_len += datlen; 1774 buf->n_add_for_cb += datlen; 1775 goto out; 1776 } else if (!CHAIN_PINNED(chain) && 1777 evbuffer_chain_should_realign(chain, datlen)) { 1778 /* we can fit the data into the misalignment */ 1779 evbuffer_chain_align(chain); 1780 1781 memcpy(chain->buffer + chain->off, data, datlen); 1782 chain->off += datlen; 1783 buf->total_len += datlen; 1784 buf->n_add_for_cb += datlen; 1785 goto out; 1786 } 1787 } else { 1788 /* we cannot write any data to the last chain */ 1789 remain = 0; 1790 } 1791 1792 /* we need to add another chain */ 1793 to_alloc = chain->buffer_len; 1794 if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2) 1795 to_alloc <<= 1; 1796 if (datlen > to_alloc) 1797 to_alloc = datlen; 1798 tmp = evbuffer_chain_new(to_alloc); 1799 if (tmp == NULL) 1800 goto done; 1801 1802 if (remain) { 1803 memcpy(chain->buffer + chain->misalign + chain->off, 1804 data, remain); 1805 chain->off += remain; 1806 buf->total_len += remain; 1807 buf->n_add_for_cb += remain; 1808 } 1809 1810 data += remain; 1811 datlen -= remain; 1812 1813 memcpy(tmp->buffer, data, datlen); 1814 tmp->off = datlen; 1815 evbuffer_chain_insert(buf, tmp); 1816 buf->n_add_for_cb += datlen; 1817 1818 out: 1819 evbuffer_invoke_callbacks_(buf); 1820 result = 0; 1821 done: 1822 EVBUFFER_UNLOCK(buf); 1823 return result; 1824 } 1825 1826 int 1827 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen) 1828 { 1829 struct evbuffer_chain *chain, *tmp; 1830 int result = -1; 1831 1832 EVBUFFER_LOCK(buf); 1833 1834 if (datlen == 0) { 1835 result = 0; 1836 goto done; 1837 } 1838 if (buf->freeze_start) { 1839 goto done; 1840 } 1841 if (datlen > EV_SIZE_MAX - buf->total_len) { 1842 goto done; 1843 } 1844 1845 chain = buf->first; 1846 1847 if (chain == NULL) { 1848 chain = evbuffer_chain_new(datlen); 1849 if (!chain) 1850 goto done; 1851 evbuffer_chain_insert(buf, chain); 1852 } 1853 1854 /* we cannot touch immutable buffers */ 1855 if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { 1856 /* Always true for mutable buffers */ 1857 EVUTIL_ASSERT(chain->misalign >= 0 && 1858 (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX); 1859 1860 /* If this chain is empty, we can treat it as 1861 * 'empty at the beginning' rather than 'empty at the end' */ 1862 if (chain->off == 0) 1863 chain->misalign = chain->buffer_len; 1864 1865 if ((size_t)chain->misalign >= datlen) { 1866 /* we have enough space to fit everything */ 1867 memcpy(chain->buffer + chain->misalign - datlen, 1868 data, datlen); 1869 chain->off += datlen; 1870 chain->misalign -= datlen; 1871 buf->total_len += datlen; 1872 buf->n_add_for_cb += datlen; 1873 goto out; 1874 } else if (chain->misalign) { 1875 /* we can only fit some of the data. */ 1876 memcpy(chain->buffer, 1877 (char*)data + datlen - chain->misalign, 1878 (size_t)chain->misalign); 1879 chain->off += (size_t)chain->misalign; 1880 buf->total_len += (size_t)chain->misalign; 1881 buf->n_add_for_cb += (size_t)chain->misalign; 1882 datlen -= (size_t)chain->misalign; 1883 chain->misalign = 0; 1884 } 1885 } 1886 1887 /* we need to add another chain */ 1888 if ((tmp = evbuffer_chain_new(datlen)) == NULL) 1889 goto done; 1890 buf->first = tmp; 1891 if (buf->last_with_datap == &buf->first && chain->off) 1892 buf->last_with_datap = &tmp->next; 1893 1894 tmp->next = chain; 1895 1896 tmp->off = datlen; 1897 EVUTIL_ASSERT(datlen <= tmp->buffer_len); 1898 tmp->misalign = tmp->buffer_len - datlen; 1899 1900 memcpy(tmp->buffer + tmp->misalign, data, datlen); 1901 buf->total_len += datlen; 1902 buf->n_add_for_cb += datlen; 1903 1904 out: 1905 evbuffer_invoke_callbacks_(buf); 1906 result = 0; 1907 done: 1908 EVBUFFER_UNLOCK(buf); 1909 return result; 1910 } 1911 1912 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */ 1913 static void 1914 evbuffer_chain_align(struct evbuffer_chain *chain) 1915 { 1916 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); 1917 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); 1918 memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); 1919 chain->misalign = 0; 1920 } 1921 1922 #define MAX_TO_COPY_IN_EXPAND 4096 1923 #define MAX_TO_REALIGN_IN_EXPAND 2048 1924 1925 /** Helper: return true iff we should realign chain to fit datalen bytes of 1926 data in it. */ 1927 static int 1928 evbuffer_chain_should_realign(struct evbuffer_chain *chain, 1929 size_t datlen) 1930 { 1931 return chain->buffer_len - chain->off >= datlen && 1932 (chain->off < chain->buffer_len / 2) && 1933 (chain->off <= MAX_TO_REALIGN_IN_EXPAND); 1934 } 1935 1936 /* Expands the available space in the event buffer to at least datlen, all in 1937 * a single chunk. Return that chunk. */ 1938 static struct evbuffer_chain * 1939 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen) 1940 { 1941 struct evbuffer_chain *chain, **chainp; 1942 struct evbuffer_chain *result = NULL; 1943 ASSERT_EVBUFFER_LOCKED(buf); 1944 1945 chainp = buf->last_with_datap; 1946 1947 /* XXX If *chainp is no longer writeable, but has enough space in its 1948 * misalign, this might be a bad idea: we could still use *chainp, not 1949 * (*chainp)->next. */ 1950 if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0) 1951 chainp = &(*chainp)->next; 1952 1953 /* 'chain' now points to the first chain with writable space (if any) 1954 * We will either use it, realign it, replace it, or resize it. */ 1955 chain = *chainp; 1956 1957 if (chain == NULL || 1958 (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { 1959 /* We can't use the last_with_data chain at all. Just add a 1960 * new one that's big enough. */ 1961 goto insert_new; 1962 } 1963 1964 /* If we can fit all the data, then we don't have to do anything */ 1965 if (CHAIN_SPACE_LEN(chain) >= datlen) { 1966 result = chain; 1967 goto ok; 1968 } 1969 1970 /* If the chain is completely empty, just replace it by adding a new 1971 * empty chain. */ 1972 if (chain->off == 0) { 1973 goto insert_new; 1974 } 1975 1976 /* If the misalignment plus the remaining space fulfills our data 1977 * needs, we could just force an alignment to happen. Afterwards, we 1978 * have enough space. But only do this if we're saving a lot of space 1979 * and not moving too much data. Otherwise the space savings are 1980 * probably offset by the time lost in copying. 1981 */ 1982 if (evbuffer_chain_should_realign(chain, datlen)) { 1983 evbuffer_chain_align(chain); 1984 result = chain; 1985 goto ok; 1986 } 1987 1988 /* At this point, we can either resize the last chunk with space in 1989 * it, use the next chunk after it, or If we add a new chunk, we waste 1990 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. If we 1991 * resize, we have to copy chain->off bytes. 1992 */ 1993 1994 /* Would expanding this chunk be affordable and worthwhile? */ 1995 if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 || 1996 chain->off > MAX_TO_COPY_IN_EXPAND || 1997 datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) { 1998 /* It's not worth resizing this chain. Can the next one be 1999 * used? */ 2000 if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { 2001 /* Yes, we can just use the next chain (which should 2002 * be empty. */ 2003 result = chain->next; 2004 goto ok; 2005 } else { 2006 /* No; append a new chain (which will free all 2007 * terminal empty chains.) */ 2008 goto insert_new; 2009 } 2010 } else { 2011 /* Okay, we're going to try to resize this chain: Not doing so 2012 * would waste at least 1/8 of its current allocation, and we 2013 * can do so without having to copy more than 2014 * MAX_TO_COPY_IN_EXPAND bytes. */ 2015 /* figure out how much space we need */ 2016 size_t length = chain->off + datlen; 2017 struct evbuffer_chain *tmp = evbuffer_chain_new(length); 2018 if (tmp == NULL) 2019 goto err; 2020 2021 /* copy the data over that we had so far */ 2022 tmp->off = chain->off; 2023 memcpy(tmp->buffer, chain->buffer + chain->misalign, 2024 chain->off); 2025 /* fix up the list */ 2026 EVUTIL_ASSERT(*chainp == chain); 2027 result = *chainp = tmp; 2028 2029 if (buf->last == chain) 2030 buf->last = tmp; 2031 2032 tmp->next = chain->next; 2033 evbuffer_chain_free(chain); 2034 goto ok; 2035 } 2036 2037 insert_new: 2038 result = evbuffer_chain_insert_new(buf, datlen); 2039 if (!result) 2040 goto err; 2041 ok: 2042 EVUTIL_ASSERT(result); 2043 EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen); 2044 err: 2045 return result; 2046 } 2047 2048 /* Make sure that datlen bytes are available for writing in the last n 2049 * chains. Never copies or moves data. */ 2050 int 2051 evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n) 2052 { 2053 struct evbuffer_chain *chain = buf->last, *tmp, *next; 2054 size_t avail; 2055 int used; 2056 2057 ASSERT_EVBUFFER_LOCKED(buf); 2058 EVUTIL_ASSERT(n >= 2); 2059 2060 if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { 2061 /* There is no last chunk, or we can't touch the last chunk. 2062 * Just add a new chunk. */ 2063 chain = evbuffer_chain_new(datlen); 2064 if (chain == NULL) 2065 return (-1); 2066 2067 evbuffer_chain_insert(buf, chain); 2068 return (0); 2069 } 2070 2071 used = 0; /* number of chains we're using space in. */ 2072 avail = 0; /* how much space they have. */ 2073 /* How many bytes can we stick at the end of buffer as it is? Iterate 2074 * over the chains at the end of the buffer, tring to see how much 2075 * space we have in the first n. */ 2076 for (chain = *buf->last_with_datap; chain; chain = chain->next) { 2077 if (chain->off) { 2078 size_t space = (size_t) CHAIN_SPACE_LEN(chain); 2079 EVUTIL_ASSERT(chain == *buf->last_with_datap); 2080 if (space) { 2081 avail += space; 2082 ++used; 2083 } 2084 } else { 2085 /* No data in chain; realign it. */ 2086 chain->misalign = 0; 2087 avail += chain->buffer_len; 2088 ++used; 2089 } 2090 if (avail >= datlen) { 2091 /* There is already enough space. Just return */ 2092 return (0); 2093 } 2094 if (used == n) 2095 break; 2096 } 2097 2098 /* There wasn't enough space in the first n chains with space in 2099 * them. Either add a new chain with enough space, or replace all 2100 * empty chains with one that has enough space, depending on n. */ 2101 if (used < n) { 2102 /* The loop ran off the end of the chains before it hit n 2103 * chains; we can add another. */ 2104 EVUTIL_ASSERT(chain == NULL); 2105 2106 tmp = evbuffer_chain_new(datlen - avail); 2107 if (tmp == NULL) 2108 return (-1); 2109 2110 buf->last->next = tmp; 2111 buf->last = tmp; 2112 /* (we would only set last_with_data if we added the first 2113 * chain. But if the buffer had no chains, we would have 2114 * just allocated a new chain earlier) */ 2115 return (0); 2116 } else { 2117 /* Nuke _all_ the empty chains. */ 2118 int rmv_all = 0; /* True iff we removed last_with_data. */ 2119 chain = *buf->last_with_datap; 2120 if (!chain->off) { 2121 EVUTIL_ASSERT(chain == buf->first); 2122 rmv_all = 1; 2123 avail = 0; 2124 } else { 2125 /* can't overflow, since only mutable chains have 2126 * huge misaligns. */ 2127 avail = (size_t) CHAIN_SPACE_LEN(chain); 2128 chain = chain->next; 2129 } 2130 2131 2132 for (; chain; chain = next) { 2133 next = chain->next; 2134 EVUTIL_ASSERT(chain->off == 0); 2135 evbuffer_chain_free(chain); 2136 } 2137 EVUTIL_ASSERT(datlen >= avail); 2138 tmp = evbuffer_chain_new(datlen - avail); 2139 if (tmp == NULL) { 2140 if (rmv_all) { 2141 ZERO_CHAIN(buf); 2142 } else { 2143 buf->last = *buf->last_with_datap; 2144 (*buf->last_with_datap)->next = NULL; 2145 } 2146 return (-1); 2147 } 2148 2149 if (rmv_all) { 2150 buf->first = buf->last = tmp; 2151 buf->last_with_datap = &buf->first; 2152 } else { 2153 (*buf->last_with_datap)->next = tmp; 2154 buf->last = tmp; 2155 } 2156 return (0); 2157 } 2158 } 2159 2160 int 2161 evbuffer_expand(struct evbuffer *buf, size_t datlen) 2162 { 2163 struct evbuffer_chain *chain; 2164 2165 EVBUFFER_LOCK(buf); 2166 chain = evbuffer_expand_singlechain(buf, datlen); 2167 EVBUFFER_UNLOCK(buf); 2168 return chain ? 0 : -1; 2169 } 2170 2171 /* 2172 * Reads data from a file descriptor into a buffer. 2173 */ 2174 2175 #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32) 2176 #define USE_IOVEC_IMPL 2177 #endif 2178 2179 #ifdef USE_IOVEC_IMPL 2180 2181 #ifdef EVENT__HAVE_SYS_UIO_H 2182 /* number of iovec we use for writev, fragmentation is going to determine 2183 * how much we end up writing */ 2184 2185 #define DEFAULT_WRITE_IOVEC 128 2186 2187 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC 2188 #define NUM_WRITE_IOVEC UIO_MAXIOV 2189 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC 2190 #define NUM_WRITE_IOVEC IOV_MAX 2191 #else 2192 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC 2193 #endif 2194 2195 #define IOV_TYPE struct iovec 2196 #define IOV_PTR_FIELD iov_base 2197 #define IOV_LEN_FIELD iov_len 2198 #define IOV_LEN_TYPE size_t 2199 #else 2200 #define NUM_WRITE_IOVEC 16 2201 #define IOV_TYPE WSABUF 2202 #define IOV_PTR_FIELD buf 2203 #define IOV_LEN_FIELD len 2204 #define IOV_LEN_TYPE unsigned long 2205 #endif 2206 #endif 2207 #define NUM_READ_IOVEC 4 2208 2209 #define EVBUFFER_MAX_READ 4096 2210 2211 /** Helper function to figure out which space to use for reading data into 2212 an evbuffer. Internal use only. 2213 2214 @param buf The buffer to read into 2215 @param howmuch How much we want to read. 2216 @param vecs An array of two or more iovecs or WSABUFs. 2217 @param n_vecs_avail The length of vecs 2218 @param chainp A pointer to a variable to hold the first chain we're 2219 reading into. 2220 @param exact Boolean: if true, we do not provide more than 'howmuch' 2221 space in the vectors, even if more space is available. 2222 @return The number of buffers we're using. 2223 */ 2224 int 2225 evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, 2226 struct evbuffer_iovec *vecs, int n_vecs_avail, 2227 struct evbuffer_chain ***chainp, int exact) 2228 { 2229 struct evbuffer_chain *chain; 2230 struct evbuffer_chain **firstchainp; 2231 size_t so_far; 2232 int i; 2233 ASSERT_EVBUFFER_LOCKED(buf); 2234 2235 if (howmuch < 0) 2236 return -1; 2237 2238 so_far = 0; 2239 /* Let firstchain be the first chain with any space on it */ 2240 firstchainp = buf->last_with_datap; 2241 EVUTIL_ASSERT(*firstchainp); 2242 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 2243 firstchainp = &(*firstchainp)->next; 2244 } 2245 2246 chain = *firstchainp; 2247 EVUTIL_ASSERT(chain); 2248 for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { 2249 size_t avail = (size_t) CHAIN_SPACE_LEN(chain); 2250 if (avail > (howmuch - so_far) && exact) 2251 avail = howmuch - so_far; 2252 vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain); 2253 vecs[i].iov_len = avail; 2254 so_far += avail; 2255 chain = chain->next; 2256 } 2257 2258 *chainp = firstchainp; 2259 return i; 2260 } 2261 2262 static int 2263 get_n_bytes_readable_on_socket(evutil_socket_t fd) 2264 { 2265 #if defined(FIONREAD) && defined(_WIN32) 2266 unsigned long lng = EVBUFFER_MAX_READ; 2267 if (ioctlsocket(fd, FIONREAD, &lng) < 0) 2268 return -1; 2269 /* Can overflow, but mostly harmlessly. XXXX */ 2270 return (int)lng; 2271 #elif defined(FIONREAD) 2272 int n = EVBUFFER_MAX_READ; 2273 if (ioctl(fd, FIONREAD, &n) < 0) 2274 return -1; 2275 return n; 2276 #else 2277 return EVBUFFER_MAX_READ; 2278 #endif 2279 } 2280 2281 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t 2282 * as howmuch? */ 2283 int 2284 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 2285 { 2286 struct evbuffer_chain **chainp; 2287 int n; 2288 int result; 2289 2290 #ifdef USE_IOVEC_IMPL 2291 int nvecs, i, remaining; 2292 #else 2293 struct evbuffer_chain *chain; 2294 unsigned char *p; 2295 #endif 2296 2297 EVBUFFER_LOCK(buf); 2298 2299 if (buf->freeze_end) { 2300 result = -1; 2301 goto done; 2302 } 2303 2304 n = get_n_bytes_readable_on_socket(fd); 2305 if (n <= 0 || n > EVBUFFER_MAX_READ) 2306 n = EVBUFFER_MAX_READ; 2307 if (howmuch < 0 || howmuch > n) 2308 howmuch = n; 2309 2310 #ifdef USE_IOVEC_IMPL 2311 /* Since we can use iovecs, we're willing to use the last 2312 * NUM_READ_IOVEC chains. */ 2313 if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { 2314 result = -1; 2315 goto done; 2316 } else { 2317 IOV_TYPE vecs[NUM_READ_IOVEC]; 2318 #ifdef EVBUFFER_IOVEC_IS_NATIVE_ 2319 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2320 NUM_READ_IOVEC, &chainp, 1); 2321 #else 2322 /* We aren't using the native struct iovec. Therefore, 2323 we are on win32. */ 2324 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2325 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, 2326 &chainp, 1); 2327 2328 for (i=0; i < nvecs; ++i) 2329 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2330 #endif 2331 2332 #ifdef _WIN32 2333 { 2334 DWORD bytesRead; 2335 DWORD flags=0; 2336 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2337 /* The read failed. It might be a close, 2338 * or it might be an error. */ 2339 if (WSAGetLastError() == WSAECONNABORTED) 2340 n = 0; 2341 else 2342 n = -1; 2343 } else 2344 n = bytesRead; 2345 } 2346 #else 2347 n = readv(fd, vecs, nvecs); 2348 #endif 2349 } 2350 2351 #else /*!USE_IOVEC_IMPL*/ 2352 /* If we don't have FIONREAD, we might waste some space here */ 2353 /* XXX we _will_ waste some space here if there is any space left 2354 * over on buf->last. */ 2355 if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { 2356 result = -1; 2357 goto done; 2358 } 2359 2360 /* We can append new data at this point */ 2361 p = chain->buffer + chain->misalign + chain->off; 2362 2363 #ifndef _WIN32 2364 n = read(fd, p, howmuch); 2365 #else 2366 n = recv(fd, p, howmuch, 0); 2367 #endif 2368 #endif /* USE_IOVEC_IMPL */ 2369 2370 if (n == -1) { 2371 result = -1; 2372 goto done; 2373 } 2374 if (n == 0) { 2375 result = 0; 2376 goto done; 2377 } 2378 2379 #ifdef USE_IOVEC_IMPL 2380 remaining = n; 2381 for (i=0; i < nvecs; ++i) { 2382 /* can't overflow, since only mutable chains have 2383 * huge misaligns. */ 2384 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); 2385 /* XXXX This is a kludge that can waste space in perverse 2386 * situations. */ 2387 if (space > EVBUFFER_CHAIN_MAX) 2388 space = EVBUFFER_CHAIN_MAX; 2389 if ((ev_ssize_t)space < remaining) { 2390 (*chainp)->off += space; 2391 remaining -= (int)space; 2392 } else { 2393 (*chainp)->off += remaining; 2394 buf->last_with_datap = chainp; 2395 break; 2396 } 2397 chainp = &(*chainp)->next; 2398 } 2399 #else 2400 chain->off += n; 2401 advance_last_with_data(buf); 2402 #endif 2403 buf->total_len += n; 2404 buf->n_add_for_cb += n; 2405 2406 /* Tell someone about changes in this buffer */ 2407 evbuffer_invoke_callbacks_(buf); 2408 result = n; 2409 done: 2410 EVBUFFER_UNLOCK(buf); 2411 return result; 2412 } 2413 2414 #ifdef USE_IOVEC_IMPL 2415 static inline int 2416 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, 2417 ev_ssize_t howmuch) 2418 { 2419 IOV_TYPE iov[NUM_WRITE_IOVEC]; 2420 struct evbuffer_chain *chain = buffer->first; 2421 int n, i = 0; 2422 2423 if (howmuch < 0) 2424 return -1; 2425 2426 ASSERT_EVBUFFER_LOCKED(buffer); 2427 /* XXX make this top out at some maximal data length? if the 2428 * buffer has (say) 1MB in it, split over 128 chains, there's 2429 * no way it all gets written in one go. */ 2430 while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { 2431 #ifdef USE_SENDFILE 2432 /* we cannot write the file info via writev */ 2433 if (chain->flags & EVBUFFER_SENDFILE) 2434 break; 2435 #endif 2436 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); 2437 if ((size_t)howmuch >= chain->off) { 2438 /* XXXcould be problematic when windows supports mmap*/ 2439 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; 2440 howmuch -= chain->off; 2441 } else { 2442 /* XXXcould be problematic when windows supports mmap*/ 2443 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; 2444 break; 2445 } 2446 chain = chain->next; 2447 } 2448 if (! i) 2449 return 0; 2450 2451 #ifdef _WIN32 2452 { 2453 DWORD bytesSent; 2454 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) 2455 n = -1; 2456 else 2457 n = bytesSent; 2458 } 2459 #else 2460 n = writev(fd, iov, i); 2461 #endif 2462 return (n); 2463 } 2464 #endif 2465 2466 #ifdef USE_SENDFILE 2467 static inline int 2468 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, 2469 ev_ssize_t howmuch) 2470 { 2471 struct evbuffer_chain *chain = buffer->first; 2472 struct evbuffer_chain_file_segment *info = 2473 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, 2474 chain); 2475 const int source_fd = info->segment->fd; 2476 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) 2477 int res; 2478 ev_off_t len = chain->off; 2479 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) 2480 ev_ssize_t res; 2481 off_t offset = chain->misalign; 2482 #endif 2483 2484 ASSERT_EVBUFFER_LOCKED(buffer); 2485 2486 #if defined(SENDFILE_IS_MACOSX) 2487 res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); 2488 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2489 return (-1); 2490 2491 return (len); 2492 #elif defined(SENDFILE_IS_FREEBSD) 2493 res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); 2494 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2495 return (-1); 2496 2497 return (len); 2498 #elif defined(SENDFILE_IS_LINUX) 2499 /* TODO(niels): implement splice */ 2500 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2501 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2502 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ 2503 return (0); 2504 } 2505 return (res); 2506 #elif defined(SENDFILE_IS_SOLARIS) 2507 { 2508 const off_t offset_orig = offset; 2509 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2510 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2511 if (offset - offset_orig) 2512 return offset - offset_orig; 2513 /* if this is EAGAIN or EINTR and no bytes were 2514 * written, return 0 */ 2515 return (0); 2516 } 2517 return (res); 2518 } 2519 #endif 2520 } 2521 #endif 2522 2523 int 2524 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, 2525 ev_ssize_t howmuch) 2526 { 2527 int n = -1; 2528 2529 EVBUFFER_LOCK(buffer); 2530 2531 if (buffer->freeze_start) { 2532 goto done; 2533 } 2534 2535 if (howmuch < 0 || (size_t)howmuch > buffer->total_len) 2536 howmuch = buffer->total_len; 2537 2538 if (howmuch > 0) { 2539 #ifdef USE_SENDFILE 2540 struct evbuffer_chain *chain = buffer->first; 2541 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) 2542 n = evbuffer_write_sendfile(buffer, fd, howmuch); 2543 else { 2544 #endif 2545 #ifdef USE_IOVEC_IMPL 2546 n = evbuffer_write_iovec(buffer, fd, howmuch); 2547 #elif defined(_WIN32) 2548 /* XXX(nickm) Don't disable this code until we know if 2549 * the WSARecv code above works. */ 2550 void *p = evbuffer_pullup(buffer, howmuch); 2551 EVUTIL_ASSERT(p || !howmuch); 2552 n = send(fd, p, howmuch, 0); 2553 #else 2554 void *p = evbuffer_pullup(buffer, howmuch); 2555 EVUTIL_ASSERT(p || !howmuch); 2556 n = write(fd, p, howmuch); 2557 #endif 2558 #ifdef USE_SENDFILE 2559 } 2560 #endif 2561 } 2562 2563 if (n > 0) 2564 evbuffer_drain(buffer, n); 2565 2566 done: 2567 EVBUFFER_UNLOCK(buffer); 2568 return (n); 2569 } 2570 2571 int 2572 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd) 2573 { 2574 return evbuffer_write_atmost(buffer, fd, -1); 2575 } 2576 2577 unsigned char * 2578 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len) 2579 { 2580 unsigned char *search; 2581 struct evbuffer_ptr ptr; 2582 2583 EVBUFFER_LOCK(buffer); 2584 2585 ptr = evbuffer_search(buffer, (const char *)what, len, NULL); 2586 if (ptr.pos < 0) { 2587 search = NULL; 2588 } else { 2589 search = evbuffer_pullup(buffer, ptr.pos + len); 2590 if (search) 2591 search += ptr.pos; 2592 } 2593 EVBUFFER_UNLOCK(buffer); 2594 return search; 2595 } 2596 2597 /* Subract <b>howfar</b> from the position of <b>pos</b> within 2598 * <b>buf</b>. Returns 0 on success, -1 on failure. 2599 * 2600 * This isn't exposed yet, because of potential inefficiency issues. 2601 * Maybe it should be. */ 2602 static int 2603 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, 2604 size_t howfar) 2605 { 2606 if (pos->pos < 0) 2607 return -1; 2608 if (howfar > (size_t)pos->pos) 2609 return -1; 2610 if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) { 2611 pos->internal_.pos_in_chain -= howfar; 2612 pos->pos -= howfar; 2613 return 0; 2614 } else { 2615 const size_t newpos = pos->pos - howfar; 2616 /* Here's the inefficient part: it walks over the 2617 * chains until we hit newpos. */ 2618 return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET); 2619 } 2620 } 2621 2622 int 2623 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos, 2624 size_t position, enum evbuffer_ptr_how how) 2625 { 2626 size_t left = position; 2627 struct evbuffer_chain *chain = NULL; 2628 int result = 0; 2629 2630 EVBUFFER_LOCK(buf); 2631 2632 switch (how) { 2633 case EVBUFFER_PTR_SET: 2634 chain = buf->first; 2635 pos->pos = position; 2636 position = 0; 2637 break; 2638 case EVBUFFER_PTR_ADD: 2639 /* this avoids iterating over all previous chains if 2640 we just want to advance the position */ 2641 if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) { 2642 EVBUFFER_UNLOCK(buf); 2643 return -1; 2644 } 2645 chain = pos->internal_.chain; 2646 pos->pos += position; 2647 position = pos->internal_.pos_in_chain; 2648 break; 2649 } 2650 2651 EVUTIL_ASSERT(EV_SIZE_MAX - left >= position); 2652 while (chain && position + left >= chain->off) { 2653 left -= chain->off - position; 2654 chain = chain->next; 2655 position = 0; 2656 } 2657 if (chain) { 2658 pos->internal_.chain = chain; 2659 pos->internal_.pos_in_chain = position + left; 2660 } else if (left == 0) { 2661 /* The first byte in the (nonexistent) chain after the last chain */ 2662 pos->internal_.chain = NULL; 2663 pos->internal_.pos_in_chain = 0; 2664 } else { 2665 PTR_NOT_FOUND(pos); 2666 result = -1; 2667 } 2668 2669 EVBUFFER_UNLOCK(buf); 2670 2671 return result; 2672 } 2673 2674 /** 2675 Compare the bytes in buf at position pos to the len bytes in mem. Return 2676 less than 0, 0, or greater than 0 as memcmp. 2677 */ 2678 static int 2679 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, 2680 const char *mem, size_t len) 2681 { 2682 struct evbuffer_chain *chain; 2683 size_t position; 2684 int r; 2685 2686 ASSERT_EVBUFFER_LOCKED(buf); 2687 2688 if (pos->pos < 0 || 2689 EV_SIZE_MAX - len < (size_t)pos->pos || 2690 pos->pos + len > buf->total_len) 2691 return -1; 2692 2693 chain = pos->internal_.chain; 2694 position = pos->internal_.pos_in_chain; 2695 while (len && chain) { 2696 size_t n_comparable; 2697 if (len + position > chain->off) 2698 n_comparable = chain->off - position; 2699 else 2700 n_comparable = len; 2701 r = memcmp(chain->buffer + chain->misalign + position, mem, 2702 n_comparable); 2703 if (r) 2704 return r; 2705 mem += n_comparable; 2706 len -= n_comparable; 2707 position = 0; 2708 chain = chain->next; 2709 } 2710 2711 return 0; 2712 } 2713 2714 struct evbuffer_ptr 2715 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) 2716 { 2717 return evbuffer_search_range(buffer, what, len, start, NULL); 2718 } 2719 2720 struct evbuffer_ptr 2721 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) 2722 { 2723 struct evbuffer_ptr pos; 2724 struct evbuffer_chain *chain, *last_chain = NULL; 2725 const unsigned char *p; 2726 char first; 2727 2728 EVBUFFER_LOCK(buffer); 2729 2730 if (start) { 2731 memcpy(&pos, start, sizeof(pos)); 2732 chain = pos.internal_.chain; 2733 } else { 2734 pos.pos = 0; 2735 chain = pos.internal_.chain = buffer->first; 2736 pos.internal_.pos_in_chain = 0; 2737 } 2738 2739 if (end) 2740 last_chain = end->internal_.chain; 2741 2742 if (!len || len > EV_SSIZE_MAX) 2743 goto done; 2744 2745 first = what[0]; 2746 2747 while (chain) { 2748 const unsigned char *start_at = 2749 chain->buffer + chain->misalign + 2750 pos.internal_.pos_in_chain; 2751 p = memchr(start_at, first, 2752 chain->off - pos.internal_.pos_in_chain); 2753 if (p) { 2754 pos.pos += p - start_at; 2755 pos.internal_.pos_in_chain += p - start_at; 2756 if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { 2757 if (end && pos.pos + (ev_ssize_t)len > end->pos) 2758 goto not_found; 2759 else 2760 goto done; 2761 } 2762 ++pos.pos; 2763 ++pos.internal_.pos_in_chain; 2764 if (pos.internal_.pos_in_chain == chain->off) { 2765 chain = pos.internal_.chain = chain->next; 2766 pos.internal_.pos_in_chain = 0; 2767 } 2768 } else { 2769 if (chain == last_chain) 2770 goto not_found; 2771 pos.pos += chain->off - pos.internal_.pos_in_chain; 2772 chain = pos.internal_.chain = chain->next; 2773 pos.internal_.pos_in_chain = 0; 2774 } 2775 } 2776 2777 not_found: 2778 PTR_NOT_FOUND(&pos); 2779 done: 2780 EVBUFFER_UNLOCK(buffer); 2781 return pos; 2782 } 2783 2784 int 2785 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, 2786 struct evbuffer_ptr *start_at, 2787 struct evbuffer_iovec *vec, int n_vec) 2788 { 2789 struct evbuffer_chain *chain; 2790 int idx = 0; 2791 ev_ssize_t len_so_far = 0; 2792 2793 /* Avoid locking in trivial edge cases */ 2794 if (start_at && start_at->internal_.chain == NULL) 2795 return 0; 2796 2797 EVBUFFER_LOCK(buffer); 2798 2799 if (start_at) { 2800 chain = start_at->internal_.chain; 2801 len_so_far = chain->off 2802 - start_at->internal_.pos_in_chain; 2803 idx = 1; 2804 if (n_vec > 0) { 2805 vec[0].iov_base = (void *)(chain->buffer + chain->misalign 2806 + start_at->internal_.pos_in_chain); 2807 vec[0].iov_len = len_so_far; 2808 } 2809 chain = chain->next; 2810 } else { 2811 chain = buffer->first; 2812 } 2813 2814 if (n_vec == 0 && len < 0) { 2815 /* If no vectors are provided and they asked for "everything", 2816 * pretend they asked for the actual available amount. */ 2817 len = buffer->total_len; 2818 if (start_at) { 2819 len -= start_at->pos; 2820 } 2821 } 2822 2823 while (chain) { 2824 if (len >= 0 && len_so_far >= len) 2825 break; 2826 if (idx<n_vec) { 2827 vec[idx].iov_base = (void *)(chain->buffer + chain->misalign); 2828 vec[idx].iov_len = chain->off; 2829 } else if (len<0) { 2830 break; 2831 } 2832 ++idx; 2833 len_so_far += chain->off; 2834 chain = chain->next; 2835 } 2836 2837 EVBUFFER_UNLOCK(buffer); 2838 2839 return idx; 2840 } 2841 2842 2843 int 2844 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) 2845 { 2846 char *buffer; 2847 size_t space; 2848 int sz, result = -1; 2849 va_list aq; 2850 struct evbuffer_chain *chain; 2851 2852 2853 EVBUFFER_LOCK(buf); 2854 2855 if (buf->freeze_end) { 2856 goto done; 2857 } 2858 2859 /* make sure that at least some space is available */ 2860 if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) 2861 goto done; 2862 2863 for (;;) { 2864 #if 0 2865 size_t used = chain->misalign + chain->off; 2866 buffer = (char *)chain->buffer + chain->misalign + chain->off; 2867 EVUTIL_ASSERT(chain->buffer_len >= used); 2868 space = chain->buffer_len - used; 2869 #endif 2870 buffer = (char*) CHAIN_SPACE_PTR(chain); 2871 space = (size_t) CHAIN_SPACE_LEN(chain); 2872 2873 #ifndef va_copy 2874 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) 2875 #endif 2876 va_copy(aq, ap); 2877 2878 sz = evutil_vsnprintf(buffer, space, fmt, aq); 2879 2880 va_end(aq); 2881 2882 if (sz < 0) 2883 goto done; 2884 if (INT_MAX >= EVBUFFER_CHAIN_MAX && 2885 (size_t)sz >= EVBUFFER_CHAIN_MAX) 2886 goto done; 2887 if ((size_t)sz < space) { 2888 chain->off += sz; 2889 buf->total_len += sz; 2890 buf->n_add_for_cb += sz; 2891 2892 advance_last_with_data(buf); 2893 evbuffer_invoke_callbacks_(buf); 2894 result = sz; 2895 goto done; 2896 } 2897 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) 2898 goto done; 2899 } 2900 /* NOTREACHED */ 2901 2902 done: 2903 EVBUFFER_UNLOCK(buf); 2904 return result; 2905 } 2906 2907 int 2908 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) 2909 { 2910 int res = -1; 2911 va_list ap; 2912 2913 va_start(ap, fmt); 2914 res = evbuffer_add_vprintf(buf, fmt, ap); 2915 va_end(ap); 2916 2917 return (res); 2918 } 2919 2920 int 2921 evbuffer_add_reference(struct evbuffer *outbuf, 2922 const void *data, size_t datlen, 2923 evbuffer_ref_cleanup_cb cleanupfn, void *extra) 2924 { 2925 struct evbuffer_chain *chain; 2926 struct evbuffer_chain_reference *info; 2927 int result = -1; 2928 2929 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); 2930 if (!chain) 2931 return (-1); 2932 chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; 2933 chain->buffer = (unsigned char *)data; 2934 chain->buffer_len = datlen; 2935 chain->off = datlen; 2936 2937 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); 2938 info->cleanupfn = cleanupfn; 2939 info->extra = extra; 2940 2941 EVBUFFER_LOCK(outbuf); 2942 if (outbuf->freeze_end) { 2943 /* don't call chain_free; we do not want to actually invoke 2944 * the cleanup function */ 2945 mm_free(chain); 2946 goto done; 2947 } 2948 evbuffer_chain_insert(outbuf, chain); 2949 outbuf->n_add_for_cb += datlen; 2950 2951 evbuffer_invoke_callbacks_(outbuf); 2952 2953 result = 0; 2954 done: 2955 EVBUFFER_UNLOCK(outbuf); 2956 2957 return result; 2958 } 2959 2960 /* TODO(niels): we may want to add to automagically convert to mmap, in 2961 * case evbuffer_remove() or evbuffer_pullup() are being used. 2962 */ 2963 struct evbuffer_file_segment * 2964 evbuffer_file_segment_new( 2965 int fd, ev_off_t offset, ev_off_t length, unsigned flags) 2966 { 2967 struct evbuffer_file_segment *seg = 2968 mm_calloc(sizeof(struct evbuffer_file_segment), 1); 2969 if (!seg) 2970 return NULL; 2971 seg->refcnt = 1; 2972 seg->fd = fd; 2973 seg->flags = flags; 2974 seg->file_offset = offset; 2975 seg->cleanup_cb = NULL; 2976 seg->cleanup_cb_arg = NULL; 2977 #ifdef _WIN32 2978 #ifndef lseek 2979 #define lseek _lseeki64 2980 #endif 2981 #ifndef fstat 2982 #define fstat _fstat 2983 #endif 2984 #ifndef stat 2985 #define stat _stat 2986 #endif 2987 #endif 2988 if (length == -1) { 2989 struct stat st; 2990 if (fstat(fd, &st) < 0) 2991 goto err; 2992 length = st.st_size; 2993 } 2994 seg->length = length; 2995 2996 if (offset < 0 || length < 0 || 2997 ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) || 2998 (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length)) 2999 goto err; 3000 3001 #if defined(USE_SENDFILE) 3002 if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { 3003 seg->can_sendfile = 1; 3004 goto done; 3005 } 3006 #endif 3007 3008 if (evbuffer_file_segment_materialize(seg)<0) 3009 goto err; 3010 3011 #if defined(USE_SENDFILE) 3012 done: 3013 #endif 3014 if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { 3015 EVTHREAD_ALLOC_LOCK(seg->lock, 0); 3016 } 3017 return seg; 3018 err: 3019 mm_free(seg); 3020 return NULL; 3021 } 3022 3023 #ifdef EVENT__HAVE_MMAP 3024 static long 3025 get_page_size(void) 3026 { 3027 #ifdef SC_PAGE_SIZE 3028 return sysconf(SC_PAGE_SIZE); 3029 #elif defined(_SC_PAGE_SIZE) 3030 return sysconf(_SC_PAGE_SIZE); 3031 #else 3032 return 1; 3033 #endif 3034 } 3035 #endif 3036 3037 /* DOCDOC */ 3038 /* Requires lock */ 3039 static int 3040 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) 3041 { 3042 const unsigned flags = seg->flags; 3043 const int fd = seg->fd; 3044 const ev_off_t length = seg->length; 3045 const ev_off_t offset = seg->file_offset; 3046 3047 if (seg->contents) 3048 return 0; /* already materialized */ 3049 3050 #if defined(EVENT__HAVE_MMAP) 3051 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 3052 off_t offset_rounded = 0, offset_leftover = 0; 3053 void *mapped; 3054 if (offset) { 3055 /* mmap implementations don't generally like us 3056 * to have an offset that isn't a round */ 3057 long page_size = get_page_size(); 3058 if (page_size == -1) 3059 goto err; 3060 offset_leftover = offset % page_size; 3061 offset_rounded = offset - offset_leftover; 3062 } 3063 mapped = mmap(NULL, length + offset_leftover, 3064 PROT_READ, 3065 #ifdef MAP_NOCACHE 3066 MAP_NOCACHE | /* ??? */ 3067 #endif 3068 #ifdef MAP_FILE 3069 MAP_FILE | 3070 #endif 3071 MAP_PRIVATE, 3072 fd, offset_rounded); 3073 if (mapped == MAP_FAILED) { 3074 event_warn("%s: mmap(%d, %d, %zu) failed", 3075 __func__, fd, 0, (size_t)(offset + length)); 3076 } else { 3077 seg->mapping = mapped; 3078 seg->contents = (char*)mapped+offset_leftover; 3079 seg->mmap_offset = 0; 3080 seg->is_mapping = 1; 3081 goto done; 3082 } 3083 } 3084 #endif 3085 #ifdef _WIN32 3086 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 3087 intptr_t h = _get_osfhandle(fd); 3088 HANDLE m; 3089 ev_uint64_t total_size = length+offset; 3090 if ((HANDLE)h == INVALID_HANDLE_VALUE) 3091 goto err; 3092 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, 3093 (total_size >> 32), total_size & 0xfffffffful, 3094 NULL); 3095 if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */ 3096 seg->mapping_handle = m; 3097 seg->mmap_offset = offset; 3098 seg->is_mapping = 1; 3099 goto done; 3100 } 3101 } 3102 #endif 3103 { 3104 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; 3105 ev_off_t read_so_far = 0; 3106 char *mem; 3107 int e; 3108 ev_ssize_t n = 0; 3109 if (!(mem = mm_malloc(length))) 3110 goto err; 3111 if (start_pos < 0) { 3112 mm_free(mem); 3113 goto err; 3114 } 3115 if (lseek(fd, offset, SEEK_SET) < 0) { 3116 mm_free(mem); 3117 goto err; 3118 } 3119 while (read_so_far < length) { 3120 n = read(fd, mem+read_so_far, length-read_so_far); 3121 if (n <= 0) 3122 break; 3123 read_so_far += n; 3124 } 3125 3126 e = errno; 3127 pos = lseek(fd, start_pos, SEEK_SET); 3128 if (n < 0 || (n == 0 && length > read_so_far)) { 3129 mm_free(mem); 3130 errno = e; 3131 goto err; 3132 } else if (pos < 0) { 3133 mm_free(mem); 3134 goto err; 3135 } 3136 3137 seg->contents = mem; 3138 } 3139 3140 done: 3141 return 0; 3142 err: 3143 return -1; 3144 } 3145 3146 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg, 3147 evbuffer_file_segment_cleanup_cb cb, void* arg) 3148 { 3149 EVUTIL_ASSERT(seg->refcnt > 0); 3150 seg->cleanup_cb = cb; 3151 seg->cleanup_cb_arg = arg; 3152 } 3153 3154 void 3155 evbuffer_file_segment_free(struct evbuffer_file_segment *seg) 3156 { 3157 int refcnt; 3158 EVLOCK_LOCK(seg->lock, 0); 3159 refcnt = --seg->refcnt; 3160 EVLOCK_UNLOCK(seg->lock, 0); 3161 if (refcnt > 0) 3162 return; 3163 EVUTIL_ASSERT(refcnt == 0); 3164 3165 if (seg->is_mapping) { 3166 #ifdef _WIN32 3167 CloseHandle(seg->mapping_handle); 3168 #elif defined (EVENT__HAVE_MMAP) 3169 off_t offset_leftover; 3170 offset_leftover = seg->file_offset % get_page_size(); 3171 if (munmap(seg->mapping, seg->length + offset_leftover) == -1) 3172 event_warn("%s: munmap failed", __func__); 3173 #endif 3174 } else if (seg->contents) { 3175 mm_free(seg->contents); 3176 } 3177 3178 if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { 3179 close(seg->fd); 3180 } 3181 3182 if (seg->cleanup_cb) { 3183 (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg, 3184 seg->flags, seg->cleanup_cb_arg); 3185 seg->cleanup_cb = NULL; 3186 seg->cleanup_cb_arg = NULL; 3187 } 3188 3189 EVTHREAD_FREE_LOCK(seg->lock, 0); 3190 mm_free(seg); 3191 } 3192 3193 int 3194 evbuffer_add_file_segment(struct evbuffer *buf, 3195 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 3196 { 3197 struct evbuffer_chain *chain; 3198 struct evbuffer_chain_file_segment *extra; 3199 int can_use_sendfile = 0; 3200 3201 EVBUFFER_LOCK(buf); 3202 EVLOCK_LOCK(seg->lock, 0); 3203 if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { 3204 can_use_sendfile = 1; 3205 } else { 3206 if (!seg->contents) { 3207 if (evbuffer_file_segment_materialize(seg)<0) { 3208 EVLOCK_UNLOCK(seg->lock, 0); 3209 EVBUFFER_UNLOCK(buf); 3210 return -1; 3211 } 3212 } 3213 } 3214 EVLOCK_UNLOCK(seg->lock, 0); 3215 3216 if (buf->freeze_end) 3217 goto err; 3218 3219 if (length < 0) { 3220 if (offset > seg->length) 3221 goto err; 3222 length = seg->length - offset; 3223 } 3224 3225 /* Can we actually add this? */ 3226 if (offset+length > seg->length) 3227 goto err; 3228 3229 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 3230 if (!chain) 3231 goto err; 3232 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 3233 3234 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 3235 if (can_use_sendfile && seg->can_sendfile) { 3236 chain->flags |= EVBUFFER_SENDFILE; 3237 chain->misalign = seg->file_offset + offset; 3238 chain->off = length; 3239 chain->buffer_len = chain->misalign + length; 3240 } else if (seg->is_mapping) { 3241 #ifdef _WIN32 3242 ev_uint64_t total_offset = seg->mmap_offset+offset; 3243 ev_uint64_t offset_rounded=0, offset_remaining=0; 3244 LPVOID data; 3245 if (total_offset) { 3246 SYSTEM_INFO si; 3247 memset(&si, 0, sizeof(si)); /* cargo cult */ 3248 GetSystemInfo(&si); 3249 offset_remaining = total_offset % si.dwAllocationGranularity; 3250 offset_rounded = total_offset - offset_remaining; 3251 } 3252 data = MapViewOfFile( 3253 seg->mapping_handle, 3254 FILE_MAP_READ, 3255 offset_rounded >> 32, 3256 offset_rounded & 0xfffffffful, 3257 length + offset_remaining); 3258 if (data == NULL) { 3259 mm_free(chain); 3260 goto err; 3261 } 3262 chain->buffer = (unsigned char*) data; 3263 chain->buffer_len = length+offset_remaining; 3264 chain->misalign = offset_remaining; 3265 chain->off = length; 3266 #else 3267 chain->buffer = (unsigned char*)(seg->contents + offset); 3268 chain->buffer_len = length; 3269 chain->off = length; 3270 #endif 3271 } else { 3272 chain->buffer = (unsigned char*)(seg->contents + offset); 3273 chain->buffer_len = length; 3274 chain->off = length; 3275 } 3276 3277 EVLOCK_LOCK(seg->lock, 0); 3278 ++seg->refcnt; 3279 EVLOCK_UNLOCK(seg->lock, 0); 3280 extra->segment = seg; 3281 buf->n_add_for_cb += length; 3282 evbuffer_chain_insert(buf, chain); 3283 3284 evbuffer_invoke_callbacks_(buf); 3285 3286 EVBUFFER_UNLOCK(buf); 3287 3288 return 0; 3289 err: 3290 EVBUFFER_UNLOCK(buf); 3291 evbuffer_file_segment_free(seg); /* Lowers the refcount */ 3292 return -1; 3293 } 3294 3295 int 3296 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 3297 { 3298 struct evbuffer_file_segment *seg; 3299 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 3300 int r; 3301 3302 seg = evbuffer_file_segment_new(fd, offset, length, flags); 3303 if (!seg) 3304 return -1; 3305 r = evbuffer_add_file_segment(buf, seg, 0, length); 3306 if (r == 0) 3307 evbuffer_file_segment_free(seg); 3308 return r; 3309 } 3310 3311 int 3312 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 3313 { 3314 EVBUFFER_LOCK(buffer); 3315 3316 if (!LIST_EMPTY(&buffer->callbacks)) 3317 evbuffer_remove_all_callbacks(buffer); 3318 3319 if (cb) { 3320 struct evbuffer_cb_entry *ent = 3321 evbuffer_add_cb(buffer, NULL, cbarg); 3322 if (!ent) { 3323 EVBUFFER_UNLOCK(buffer); 3324 return -1; 3325 } 3326 ent->cb.cb_obsolete = cb; 3327 ent->flags |= EVBUFFER_CB_OBSOLETE; 3328 } 3329 EVBUFFER_UNLOCK(buffer); 3330 return 0; 3331 } 3332 3333 struct evbuffer_cb_entry * 3334 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3335 { 3336 struct evbuffer_cb_entry *e; 3337 if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) 3338 return NULL; 3339 EVBUFFER_LOCK(buffer); 3340 e->cb.cb_func = cb; 3341 e->cbarg = cbarg; 3342 e->flags = EVBUFFER_CB_ENABLED; 3343 LIST_INSERT_HEAD(&buffer->callbacks, e, next); 3344 EVBUFFER_UNLOCK(buffer); 3345 return e; 3346 } 3347 3348 int 3349 evbuffer_remove_cb_entry(struct evbuffer *buffer, 3350 struct evbuffer_cb_entry *ent) 3351 { 3352 EVBUFFER_LOCK(buffer); 3353 LIST_REMOVE(ent, next); 3354 EVBUFFER_UNLOCK(buffer); 3355 mm_free(ent); 3356 return 0; 3357 } 3358 3359 int 3360 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3361 { 3362 struct evbuffer_cb_entry *cbent; 3363 int result = -1; 3364 EVBUFFER_LOCK(buffer); 3365 LIST_FOREACH(cbent, &buffer->callbacks, next) { 3366 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { 3367 result = evbuffer_remove_cb_entry(buffer, cbent); 3368 goto done; 3369 } 3370 } 3371 done: 3372 EVBUFFER_UNLOCK(buffer); 3373 return result; 3374 } 3375 3376 int 3377 evbuffer_cb_set_flags(struct evbuffer *buffer, 3378 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3379 { 3380 /* the user isn't allowed to mess with these. */ 3381 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3382 EVBUFFER_LOCK(buffer); 3383 cb->flags |= flags; 3384 EVBUFFER_UNLOCK(buffer); 3385 return 0; 3386 } 3387 3388 int 3389 evbuffer_cb_clear_flags(struct evbuffer *buffer, 3390 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3391 { 3392 /* the user isn't allowed to mess with these. */ 3393 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3394 EVBUFFER_LOCK(buffer); 3395 cb->flags &= ~flags; 3396 EVBUFFER_UNLOCK(buffer); 3397 return 0; 3398 } 3399 3400 int 3401 evbuffer_freeze(struct evbuffer *buffer, int start) 3402 { 3403 EVBUFFER_LOCK(buffer); 3404 if (start) 3405 buffer->freeze_start = 1; 3406 else 3407 buffer->freeze_end = 1; 3408 EVBUFFER_UNLOCK(buffer); 3409 return 0; 3410 } 3411 3412 int 3413 evbuffer_unfreeze(struct evbuffer *buffer, int start) 3414 { 3415 EVBUFFER_LOCK(buffer); 3416 if (start) 3417 buffer->freeze_start = 0; 3418 else 3419 buffer->freeze_end = 0; 3420 EVBUFFER_UNLOCK(buffer); 3421 return 0; 3422 } 3423 3424 #if 0 3425 void 3426 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3427 { 3428 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 3429 cb->size_before_suspend = evbuffer_get_length(buffer); 3430 cb->flags |= EVBUFFER_CB_SUSPENDED; 3431 } 3432 } 3433 3434 void 3435 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3436 { 3437 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 3438 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 3439 size_t sz = cb->size_before_suspend; 3440 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 3441 EVBUFFER_CB_CALL_ON_UNSUSPEND); 3442 cb->size_before_suspend = 0; 3443 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 3444 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 3445 } 3446 } 3447 } 3448 #endif 3449 3450 int 3451 evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, 3452 int max_cbs) 3453 { 3454 int r = 0; 3455 EVBUFFER_LOCK(buffer); 3456 if (buffer->deferred_cbs) { 3457 if (max_cbs < 1) { 3458 r = -1; 3459 goto done; 3460 } 3461 cbs[0] = &buffer->deferred; 3462 r = 1; 3463 } 3464 done: 3465 EVBUFFER_UNLOCK(buffer); 3466 return r; 3467 } 3468