/*-
 * Copyright (c) 2003-2011 Tim Kientzle
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the "essential" portions of the read API, that
 * is, stuff that will probably always be used by any client that
 * actually needs to read an archive.  Optional pieces have been, as
 * far as possible, separated out into separate files to avoid
 * needlessly bloating statically-linked clients.
 */

#include "archive_platform.h"

#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include <stdio.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "archive.h"
#include "archive_entry.h"
#include "archive_private.h"
#include "archive_read_private.h"

#define minimum(a, b) (a < b ? a : b)

static int	choose_filters(struct archive_read *);
static int	choose_format(struct archive_read *);
static int	close_filters(struct archive_read *);
static int64_t	_archive_filter_bytes(struct archive *, int);
static int	_archive_filter_code(struct archive *, int);
static const char *_archive_filter_name(struct archive *, int);
static int	_archive_filter_count(struct archive *);
static int	_archive_read_close(struct archive *);
static int	_archive_read_data_block(struct archive *,
		    const void **, size_t *, int64_t *);
static int	_archive_read_free(struct archive *);
static int	_archive_read_next_header(struct archive *,
		    struct archive_entry **);
static int	_archive_read_next_header2(struct archive *,
		    struct archive_entry *);
static int64_t	advance_file_pointer(struct archive_read_filter *, int64_t);

static const struct archive_vtable
archive_read_vtable = {
	.archive_filter_bytes = _archive_filter_bytes,
	.archive_filter_code = _archive_filter_code,
	.archive_filter_name = _archive_filter_name,
	.archive_filter_count = _archive_filter_count,
	.archive_read_data_block = _archive_read_data_block,
	.archive_read_next_header = _archive_read_next_header,
	.archive_read_next_header2 = _archive_read_next_header2,
	.archive_free = _archive_read_free,
	.archive_close = _archive_read_close,
};

/*
 * Allocate, initialize and return a struct archive object.
 */
struct archive *
archive_read_new(void)
{
	struct archive_read *a;

	a = calloc(1, sizeof(*a));
	if (a == NULL)
		return (NULL);
	a->archive.magic = ARCHIVE_READ_MAGIC;

	a->archive.state = ARCHIVE_STATE_NEW;
	a->entry = archive_entry_new2(&a->archive);
	a->archive.vtable = &archive_read_vtable;

	a->passphrases.last = &a->passphrases.first;

	return (&a->archive);
}

/*
 * Record the do-not-extract-to file. This belongs in archive_read_extract.c.
 */
void
archive_read_extract_set_skip_file(struct archive *_a, la_int64_t d,
    la_int64_t i)
{
	struct archive_read *a = (struct archive_read *)_a;

	if (ARCHIVE_OK != __archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_ANY, "archive_read_extract_set_skip_file"))
		return;
	a->skip_file_set = 1;
	a->skip_file_dev = d;
	a->skip_file_ino = i;
}

/*
 * Open the archive
 */
int
archive_read_open(struct archive *a, void *client_data,
    archive_open_callback *client_opener, archive_read_callback *client_reader,
    archive_close_callback *client_closer)
{
	/* Old archive_read_open() is just a thin shell around
	 * archive_read_open1. */
	archive_read_set_open_callback(a, client_opener);
	archive_read_set_read_callback(a, client_reader);
	archive_read_set_close_callback(a, client_closer);
	archive_read_set_callback_data(a, client_data);
	return archive_read_open1(a);
}
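
/*
 * Illustration only (not part of the library): a minimal sketch of the
 * kind of client callbacks archive_read_open() expects, reading from a
 * stdio FILE.  The struct, callback names, and buffer size here are made
 * up for this example; real clients choose their own.
 *
 *	struct my_client { FILE *f; char buff[8192]; };
 *
 *	static la_ssize_t
 *	my_read(struct archive *a, void *client_data, const void **buff)
 *	{
 *		struct my_client *c = client_data;
 *		*buff = c->buff;
 *		return fread(c->buff, 1, sizeof(c->buff), c->f);
 *	}
 *
 *	static int
 *	my_close(struct archive *a, void *client_data)
 *	{
 *		struct my_client *c = client_data;
 *		fclose(c->f);
 *		return (ARCHIVE_OK);
 *	}
 *
 *	// ... archive_read_open(a, c, NULL, my_read, my_close);
 */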

int
archive_read_open2(struct archive *a, void *client_data,
    archive_open_callback *client_opener,
    archive_read_callback *client_reader,
    archive_skip_callback *client_skipper,
    archive_close_callback *client_closer)
{
	/* Old archive_read_open2() is just a thin shell around
	 * archive_read_open1. */
	archive_read_set_callback_data(a, client_data);
	archive_read_set_open_callback(a, client_opener);
	archive_read_set_read_callback(a, client_reader);
	archive_read_set_skip_callback(a, client_skipper);
	archive_read_set_close_callback(a, client_closer);
	return archive_read_open1(a);
}

static ssize_t
client_read_proxy(struct archive_read_filter *self, const void **buff)
{
	ssize_t r;
	r = (self->archive->client.reader)(&self->archive->archive,
	    self->data, buff);
	return (r);
}

static int64_t
client_skip_proxy(struct archive_read_filter *self, int64_t request)
{
	if (request < 0)
		__archive_errx(1, "Negative skip requested.");
	if (request == 0)
		return 0;

	if (self->archive->client.skipper != NULL) {
		int64_t total = 0;
		for (;;) {
			int64_t get, ask = request;
			get = (self->archive->client.skipper)
				(&self->archive->archive, self->data, ask);
			total += get;
			if (get == 0 || get == request)
				return (total);
			if (get > request)
				return ARCHIVE_FATAL;
			request -= get;
		}
	} else if (self->archive->client.seeker != NULL
	    && request > 64 * 1024) {
		/* If the client provided a seeker but not a skipper,
		 * we can use the seeker to skip forward.
		 *
		 * Note: This isn't always a good idea.  The client
		 * skipper is allowed to skip by less than requested
		 * if it needs to maintain block alignment.  The
		 * seeker is not allowed to play such games, so using
		 * the seeker here may be a performance loss compared
		 * to just reading and discarding.  That's why we
		 * only do this for skips of over 64k.
		 */
		int64_t before = self->position;
		int64_t after = (self->archive->client.seeker)
		    (&self->archive->archive, self->data, request, SEEK_CUR);
		if (after != before + request)
			return ARCHIVE_FATAL;
		return after - before;
	}
	return 0;
}

static int64_t
client_seek_proxy(struct archive_read_filter *self, int64_t offset, int whence)
{
	/* DO NOT use the skipper here!  If we transparently handled
	 * forward seek here by using the skipper, that will break
	 * other libarchive code that assumes a successful forward
	 * seek means it can also seek backwards.
	 */
	if (self->archive->client.seeker == NULL) {
		archive_set_error(&self->archive->archive, ARCHIVE_ERRNO_MISC,
		    "Current client reader does not support seeking a device");
		return (ARCHIVE_FAILED);
	}
	return (self->archive->client.seeker)(&self->archive->archive,
	    self->data, offset, whence);
}

static int
read_client_close_proxy(struct archive_read *a)
{
	int r = ARCHIVE_OK, r2;
	unsigned int i;

	if (a->client.closer == NULL)
		return (r);
	for (i = 0; i < a->client.nodes; i++)
	{
		r2 = (a->client.closer)
			((struct archive *)a, a->client.dataset[i].data);
		if (r > r2)
			r = r2;
	}
	return (r);
}

static int
client_close_proxy(struct archive_read_filter *self)
{
	return read_client_close_proxy(self->archive);
}

static int
client_open_proxy(struct archive_read_filter *self)
{
	int r = ARCHIVE_OK;
	if (self->archive->client.opener != NULL)
		r = (self->archive->client.opener)(
		    (struct archive *)self->archive, self->data);
	return (r);
}

static int
client_switch_proxy(struct archive_read_filter *self, unsigned int iindex)
{
	int r1 = ARCHIVE_OK, r2 = ARCHIVE_OK;
	void *data2 = NULL;

	/* Don't do anything if already in the specified data node */
	if (self->archive->client.cursor == iindex)
		return (ARCHIVE_OK);

	self->archive->client.cursor = iindex;
	data2 = self->archive->client.dataset[self->archive->client.cursor].data;
	if (self->archive->client.switcher != NULL)
	{
		r1 = r2 = (self->archive->client.switcher)
			((struct archive *)self->archive, self->data, data2);
		self->data = data2;
	}
	else
	{
		/* Attempt to call close and open instead */
		if (self->archive->client.closer != NULL)
			r1 = (self->archive->client.closer)
				((struct archive *)self->archive, self->data);
		self->data = data2;
		r2 = client_open_proxy(self);
	}
	return (r1 < r2) ? r1 : r2;
}

int
archive_read_set_open_callback(struct archive *_a,
    archive_open_callback *client_opener)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_open_callback");
	a->client.opener = client_opener;
	return ARCHIVE_OK;
}

int
archive_read_set_read_callback(struct archive *_a,
    archive_read_callback *client_reader)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_read_callback");
	a->client.reader = client_reader;
	return ARCHIVE_OK;
}

int
archive_read_set_skip_callback(struct archive *_a,
    archive_skip_callback *client_skipper)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_skip_callback");
	a->client.skipper = client_skipper;
	return ARCHIVE_OK;
}

int
archive_read_set_seek_callback(struct archive *_a,
    archive_seek_callback *client_seeker)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_seek_callback");
	a->client.seeker = client_seeker;
	return ARCHIVE_OK;
}

int
archive_read_set_close_callback(struct archive *_a,
    archive_close_callback *client_closer)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_close_callback");
	a->client.closer = client_closer;
	return ARCHIVE_OK;
}

int
archive_read_set_switch_callback(struct archive *_a,
    archive_switch_callback *client_switcher)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_switch_callback");
	a->client.switcher = client_switcher;
	return ARCHIVE_OK;
}

int
archive_read_set_callback_data(struct archive *_a, void *client_data)
{
	return archive_read_set_callback_data2(_a, client_data, 0);
}

int
archive_read_set_callback_data2(struct archive *_a, void *client_data,
    unsigned int iindex)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_set_callback_data2");

	if (a->client.nodes == 0)
	{
		a->client.dataset = (struct archive_read_data_node *)
		    calloc(1, sizeof(*a->client.dataset));
		if (a->client.dataset == NULL)
		{
			archive_set_error(&a->archive, ENOMEM,
				"No memory.");
			return ARCHIVE_FATAL;
		}
		a->client.nodes = 1;
	}

	if (iindex > a->client.nodes - 1)
	{
		archive_set_error(&a->archive, EINVAL,
			"Invalid index specified.");
		return ARCHIVE_FATAL;
	}
	a->client.dataset[iindex].data = client_data;
	a->client.dataset[iindex].begin_position = -1;
	a->client.dataset[iindex].total_size = -1;
	return ARCHIVE_OK;
}

int
archive_read_add_callback_data(struct archive *_a, void *client_data,
    unsigned int iindex)
{
	struct archive_read *a = (struct archive_read *)_a;
	void *p;
	unsigned int i;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_add_callback_data");
	if (iindex > a->client.nodes) {
		archive_set_error(&a->archive, EINVAL,
			"Invalid index specified.");
		return ARCHIVE_FATAL;
	}
	p = realloc(a->client.dataset, sizeof(*a->client.dataset)
		* (++(a->client.nodes)));
	if (p == NULL) {
		archive_set_error(&a->archive, ENOMEM,
			"No memory.");
		return ARCHIVE_FATAL;
	}
	a->client.dataset = (struct archive_read_data_node *)p;
	for (i = a->client.nodes - 1; i > iindex; i--) {
		a->client.dataset[i].data = a->client.dataset[i-1].data;
		a->client.dataset[i].begin_position = -1;
		a->client.dataset[i].total_size = -1;
	}
	a->client.dataset[iindex].data = client_data;
	a->client.dataset[iindex].begin_position = -1;
	a->client.dataset[iindex].total_size = -1;
	return ARCHIVE_OK;
}

int
archive_read_append_callback_data(struct archive *_a, void *client_data)
{
	struct archive_read *a = (struct archive_read *)_a;
	return archive_read_add_callback_data(_a, client_data, a->client.nodes);
}

int
archive_read_prepend_callback_data(struct archive *_a, void *client_data)
{
	return archive_read_add_callback_data(_a, client_data, 0);
}
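
/*
 * Illustration only (not part of the library): a minimal sketch of how a
 * client might register several data nodes for a multivolume archive.
 * Each appended node becomes one volume; the filter layer switches to the
 * next node when the current one reports end-of-data.  The callbacks and
 * the part_data array are hypothetical names for this example.
 *
 *	archive_read_set_read_callback(a, my_read);
 *	archive_read_set_close_callback(a, my_close);
 *	archive_read_set_switch_callback(a, my_switch);
 *	for (i = 0; i < n_parts; i++)
 *		archive_read_append_callback_data(a, part_data[i]);
 *	archive_read_open1(a);
 */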

static const struct archive_read_filter_vtable
none_reader_vtable = {
	.read = client_read_proxy,
	.close = client_close_proxy,
};

int
archive_read_open1(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct archive_read_filter *filter, *tmp;
	int slot, e = ARCHIVE_OK;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_open");
	archive_clear_error(&a->archive);

	if (a->client.reader == NULL) {
		archive_set_error(&a->archive, EINVAL,
		    "No reader function provided to archive_read_open");
		a->archive.state = ARCHIVE_STATE_FATAL;
		return (ARCHIVE_FATAL);
	}

	/* Open data source. */
	if (a->client.opener != NULL) {
		e = (a->client.opener)(&a->archive, a->client.dataset[0].data);
		if (e != 0) {
			/* If the open failed, call the closer to clean up. */
			read_client_close_proxy(a);
			return (e);
		}
	}

	filter = calloc(1, sizeof(*filter));
	if (filter == NULL)
		return (ARCHIVE_FATAL);
	filter->bidder = NULL;
	filter->upstream = NULL;
	filter->archive = a;
	filter->data = a->client.dataset[0].data;
	filter->vtable = &none_reader_vtable;
	filter->name = "none";
	filter->code = ARCHIVE_FILTER_NONE;
	filter->can_skip = 1;
	filter->can_seek = 1;

	a->client.dataset[0].begin_position = 0;
	if (!a->filter || !a->bypass_filter_bidding)
	{
		a->filter = filter;
		/* Build out the input pipeline. */
		e = choose_filters(a);
		if (e < ARCHIVE_WARN) {
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
	}
	else
	{
		/* Need to add "NONE" type filter at the end of the filter chain */
		tmp = a->filter;
		while (tmp->upstream)
			tmp = tmp->upstream;
		tmp->upstream = filter;
	}

	if (!a->format)
	{
		slot = choose_format(a);
		if (slot < 0) {
			close_filters(a);
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
		a->format = &(a->formats[slot]);
	}

	a->archive.state = ARCHIVE_STATE_HEADER;

	/* Ensure libarchive starts from the first node in a multivolume set */
	client_switch_proxy(a->filter, 0);
	return (e);
}

/*
 * Allow each registered stream transform to bid on whether
 * it wants to handle this stream.  Repeat until we've finished
 * building the pipeline.
 */

/* We won't build a filter pipeline with more stages than this. */
#define MAX_NUMBER_FILTERS	25

static int
choose_filters(struct archive_read *a)
{
	int number_bidders, i, bid, best_bid, number_filters;
	struct archive_read_filter_bidder *bidder, *best_bidder;
	struct archive_read_filter *filter;
	ssize_t avail;
	int r;

	for (number_filters = 0; number_filters < MAX_NUMBER_FILTERS; ++number_filters) {
		number_bidders = sizeof(a->bidders) / sizeof(a->bidders[0]);

		best_bid = 0;
		best_bidder = NULL;

		bidder = a->bidders;
		for (i = 0; i < number_bidders; i++, bidder++) {
			if (bidder->vtable == NULL)
				continue;
			bid = (bidder->vtable->bid)(bidder, a->filter);
			if (bid > best_bid) {
				best_bid = bid;
				best_bidder = bidder;
			}
		}

		/* If no bidder, we're done. */
		if (best_bidder == NULL) {
			/* Verify the filter by asking it for some data. */
			__archive_read_filter_ahead(a->filter, 1, &avail);
			if (avail < 0) {
				__archive_read_free_filters(a);
				return (ARCHIVE_FATAL);
			}
			return (ARCHIVE_OK);
		}

		filter = calloc(1, sizeof(*filter));
		if (filter == NULL)
			return (ARCHIVE_FATAL);
		filter->bidder = best_bidder;
		filter->archive = a;
		filter->upstream = a->filter;
		a->filter = filter;
		r = (best_bidder->vtable->init)(a->filter);
		if (r != ARCHIVE_OK) {
			__archive_read_free_filters(a);
			return (ARCHIVE_FATAL);
		}
	}
	archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
	    "Input requires too many filters for decoding");
	return (ARCHIVE_FATAL);
}

int
__archive_read_header(struct archive_read *a, struct archive_entry *entry)
{
	if (!a->filter->vtable->read_header)
		return (ARCHIVE_OK);
	return a->filter->vtable->read_header(a->filter, entry);
}

/*
 * Read header of next entry.
 */
static int
_archive_read_next_header2(struct archive *_a, struct archive_entry *entry)
{
	struct archive_read *a = (struct archive_read *)_a;
	int r1 = ARCHIVE_OK, r2;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA,
	    "archive_read_next_header");

	archive_entry_clear(entry);
	archive_clear_error(&a->archive);

	/*
	 * If client didn't consume entire data, skip any remainder
	 * (This is especially important for GNU incremental directories.)
	 */
	if (a->archive.state == ARCHIVE_STATE_DATA) {
		r1 = archive_read_data_skip(&a->archive);
		if (r1 == ARCHIVE_EOF)
			archive_set_error(&a->archive, EIO,
			    "Premature end-of-file.");
		if (r1 == ARCHIVE_EOF || r1 == ARCHIVE_FATAL) {
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
	}

	/* Record start-of-header offset in uncompressed stream. */
	a->header_position = a->filter->position;

	++_a->file_count;
	r2 = (a->format->read_header)(a, entry);

	/*
	 * EOF and FATAL are persistent at this layer.  By
	 * modifying the state, we guarantee that future calls to
	 * read a header or read data will fail.
	 */
	switch (r2) {
	case ARCHIVE_EOF:
		a->archive.state = ARCHIVE_STATE_EOF;
		--_a->file_count;/* Revert a file counter. */
		break;
	case ARCHIVE_OK:
		a->archive.state = ARCHIVE_STATE_DATA;
		break;
	case ARCHIVE_WARN:
		a->archive.state = ARCHIVE_STATE_DATA;
		break;
	case ARCHIVE_RETRY:
		break;
	case ARCHIVE_FATAL:
		a->archive.state = ARCHIVE_STATE_FATAL;
		break;
	}

	__archive_reset_read_data(&a->archive);

	a->data_start_node = a->client.cursor;
	/* EOF always wins; otherwise return the worst error. */
	return (r2 < r1 || r2 == ARCHIVE_EOF) ? r2 : r1;
}

static int
_archive_read_next_header(struct archive *_a, struct archive_entry **entryp)
{
	int ret;
	struct archive_read *a = (struct archive_read *)_a;
	*entryp = NULL;
	ret = _archive_read_next_header2(_a, a->entry);
	*entryp = a->entry;
	return ret;
}

/*
 * Allow each registered format to bid on whether it wants to handle
 * the next entry.  Return index of winning bidder.
 */
static int
choose_format(struct archive_read *a)
{
	int slots;
	int i;
	int bid, best_bid;
	int best_bid_slot;

	slots = sizeof(a->formats) / sizeof(a->formats[0]);
	best_bid = -1;
	best_bid_slot = -1;

	/* Set up a->format for convenience of bidders. */
	a->format = &(a->formats[0]);
	for (i = 0; i < slots; i++, a->format++) {
		if (a->format->bid) {
			bid = (a->format->bid)(a, best_bid);
			if (bid == ARCHIVE_FATAL)
				return (ARCHIVE_FATAL);
			if (a->filter->position != 0)
				__archive_read_seek(a, 0, SEEK_SET);
			if ((bid > best_bid) || (best_bid_slot < 0)) {
				best_bid = bid;
				best_bid_slot = i;
			}
		}
	}

	/*
	 * There were no bidders; this is a serious programmer error
	 * and demands a quick and definitive abort.
	 */
	if (best_bid_slot < 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "No formats registered");
		return (ARCHIVE_FATAL);
	}

	/*
	 * There were bidders, but no non-zero bids; this means we
	 * can't support this stream.
	 */
	if (best_bid < 1) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unrecognized archive format");
		return (ARCHIVE_FATAL);
	}

	return (best_bid_slot);
}

/*
 * Return the file offset (within the uncompressed data stream) where
 * the last header started.
 */
la_int64_t
archive_read_header_position(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_ANY, "archive_read_header_position");
	return (a->header_position);
}

/*
 * Returns 1 if the archive contains at least one encrypted entry.
 * If the archive format does not support encryption at all,
 * ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED is returned.
 * If for any other reason (e.g. not enough data read so far)
 * we cannot say whether there are encrypted entries, then
 * ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW is returned.
 * In general, this function will return values below zero when the
 * reader is uncertain or totally incapable of encryption support.
 * When this function returns 0 you can be sure that the reader
 * supports encryption detection but no encrypted entries have
 * been found yet.
 *
 * NOTE: If the metadata/header of an archive is also encrypted, you
 * cannot rely on the number of encrypted entries.  That is why this
 * function does not return the number of encrypted entries but
 * just shows that there are some.
 */
int
archive_read_has_encrypted_entries(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	int format_supports_encryption = archive_read_format_capabilities(_a)
	    & (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA | ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA);

	if (!_a || !format_supports_encryption) {
		/* Format in general doesn't support encryption */
		return ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED;
	}

	/* A reader potentially has read enough data now. */
	if (a->format && a->format->has_encrypted_entries) {
		return (a->format->has_encrypted_entries)(a);
	}

	/* For any other reason we cannot say how many entries are there. */
	return ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW;
}

/*
 * Returns a bitmask of capabilities that are supported by the archive format reader.
 * If the reader has no special capabilities, ARCHIVE_READ_FORMAT_CAPS_NONE is returned.
 */
int
archive_read_format_capabilities(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	if (a && a->format && a->format->format_capabilties) {
		return (a->format->format_capabilties)(a);
	}
	return ARCHIVE_READ_FORMAT_CAPS_NONE;
}
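
/*
 * Illustration only (not part of the library): a minimal sketch of how a
 * client might interpret the return values documented above after reading
 * one or more headers.  Negative values mean "unsupported" or "don't know
 * yet"; only 0 and 1 are definitive answers.
 *
 *	switch (archive_read_has_encrypted_entries(a)) {
 *	case 1:
 *		// at least one encrypted entry seen so far
 *		break;
 *	case 0:
 *		// detection supported, nothing encrypted found yet
 *		break;
 *	default:
 *		// ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED or
 *		// ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW
 *		break;
 *	}
 */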

/*
 * Read data from an archive entry, using a read(2)-style interface.
 * This is a convenience routine that just calls
 * archive_read_data_block and copies the results into the client
 * buffer, filling any gaps with zero bytes.  Clients using this
 * API can be completely ignorant of sparse-file issues; sparse files
 * will simply be padded with nulls.
 *
 * DO NOT intermingle calls to this function and archive_read_data_block
 * to read a single entry body.
 */
la_ssize_t
archive_read_data(struct archive *_a, void *buff, size_t s)
{
	struct archive *a = (struct archive *)_a;
	char	*dest;
	const void *read_buf;
	size_t	 bytes_read;
	size_t	 len;
	int	 r;

	bytes_read = 0;
	dest = (char *)buff;

	while (s > 0) {
		if (a->read_data_offset == a->read_data_output_offset &&
		    a->read_data_remaining == 0) {
			read_buf = a->read_data_block;
			a->read_data_is_posix_read = 1;
			a->read_data_requested = s;
			r = archive_read_data_block(a, &read_buf,
			    &a->read_data_remaining, &a->read_data_offset);
			a->read_data_block = read_buf;
			if (r == ARCHIVE_EOF &&
			    a->read_data_offset == a->read_data_output_offset &&
			    a->read_data_remaining == 0)
				return (bytes_read);
			/*
			 * Error codes are all negative, so the status
			 * return here cannot be confused with a valid
			 * byte count.  (ARCHIVE_OK is zero.)
			 */
			if (r < ARCHIVE_OK)
				return (r);
		}

		if (a->read_data_offset < a->read_data_output_offset) {
			archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
			    "Encountered out-of-order sparse blocks");
			return (ARCHIVE_RETRY);
		}

		/* Compute the amount of zero padding needed. */
		if (a->read_data_output_offset + (int64_t)s <
		    a->read_data_offset) {
			len = s;
		} else if (a->read_data_output_offset <
		    a->read_data_offset) {
			len = (size_t)(a->read_data_offset -
			    a->read_data_output_offset);
		} else
			len = 0;

		/* Add zeroes. */
		memset(dest, 0, len);
		s -= len;
		a->read_data_output_offset += len;
		dest += len;
		bytes_read += len;

		/* Copy data if there is any space left. */
		if (s > 0) {
			len = a->read_data_remaining;
			if (len > s)
				len = s;
			if (len) {
				memcpy(dest, a->read_data_block, len);
				s -= len;
				a->read_data_block += len;
				a->read_data_remaining -= len;
				a->read_data_output_offset += len;
				a->read_data_offset += len;
				dest += len;
				bytes_read += len;
			}
		}
	}
	a->read_data_is_posix_read = 0;
	a->read_data_requested = 0;
	return (bytes_read);
}

/*
 * Reset the read_data_* variables, used for starting a new entry.
 */
void __archive_reset_read_data(struct archive * a)
{
	a->read_data_output_offset = 0;
	a->read_data_remaining = 0;
	a->read_data_is_posix_read = 0;
	a->read_data_requested = 0;

	/* extra resets, from rar.c */
	a->read_data_block = NULL;
	a->read_data_offset = 0;
}

/*
 * Skip over all remaining data in this entry.
 */
int
archive_read_data_skip(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	int r;
	const void *buff;
	size_t size;
	int64_t offset;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
	    "archive_read_data_skip");

	if (a->format->read_data_skip != NULL)
		r = (a->format->read_data_skip)(a);
	else {
		while ((r = archive_read_data_block(&a->archive,
			    &buff, &size, &offset))
		    == ARCHIVE_OK)
			;
	}

	if (r == ARCHIVE_EOF)
		r = ARCHIVE_OK;

	a->archive.state = ARCHIVE_STATE_HEADER;
	return (r);
}

la_int64_t
archive_seek_data(struct archive *_a, int64_t offset, int whence)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
	    "archive_seek_data_block");

	if (a->format->seek_data == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Internal error: "
		    "No format_seek_data_block function registered");
		return (ARCHIVE_FATAL);
	}

	return (a->format->seek_data)(a, offset, whence);
}

/*
 * Read the next block of entry data from the archive.
 * This is a zero-copy interface; the client receives a pointer,
 * size, and file offset of the next available block of data.
 *
 * Returns ARCHIVE_OK if the operation is successful, ARCHIVE_EOF if
 * the end of entry is encountered.
 */
static int
_archive_read_data_block(struct archive *_a,
    const void **buff, size_t *size, int64_t *offset)
{
	struct archive_read *a = (struct archive_read *)_a;
	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
	    "archive_read_data_block");

	if (a->format->read_data == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Internal error: "
		    "No format->read_data function registered");
		return (ARCHIVE_FATAL);
	}

	return (a->format->read_data)(a, buff, size, offset);
}
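
/*
 * Illustration only (not part of the library): the usual zero-copy loop a
 * client would run against the interface above for one entry body.  The
 * handle_block() and handle_error() consumers are hypothetical names for
 * this sketch.
 *
 *	const void *buff;
 *	size_t size;
 *	int64_t offset;
 *	int r;
 *
 *	while ((r = archive_read_data_block(a, &buff, &size, &offset))
 *	    == ARCHIVE_OK)
 *		handle_block(buff, size, offset);
 *	if (r != ARCHIVE_EOF)
 *		handle_error(archive_error_string(a));
 */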

static int
close_filters(struct archive_read *a)
{
	struct archive_read_filter *f = a->filter;
	int r = ARCHIVE_OK;
	/* Close each filter in the pipeline. */
	while (f != NULL) {
		struct archive_read_filter *t = f->upstream;
		if (!f->closed && f->vtable != NULL) {
			int r1 = (f->vtable->close)(f);
			f->closed = 1;
			if (r1 < r)
				r = r1;
		}
		free(f->buffer);
		f->buffer = NULL;
		f = t;
	}
	return r;
}

void
__archive_read_free_filters(struct archive_read *a)
{
	/* Make sure filters are closed and their buffers are freed */
	close_filters(a);

	while (a->filter != NULL) {
		struct archive_read_filter *t = a->filter->upstream;
		free(a->filter);
		a->filter = t;
	}
}

/*
 * return the count of # of filters in use
 */
static int
_archive_filter_count(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct archive_read_filter *p = a->filter;
	int count = 0;
	while(p) {
		count++;
		p = p->upstream;
	}
	return count;
}

/*
 * Close the file and all I/O.
 */
static int
_archive_read_close(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	int r = ARCHIVE_OK, r1 = ARCHIVE_OK;

	archive_check_magic(&a->archive, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_close");
	if (a->archive.state == ARCHIVE_STATE_CLOSED)
		return (ARCHIVE_OK);
	archive_clear_error(&a->archive);
	a->archive.state = ARCHIVE_STATE_CLOSED;

	/* TODO: Clean up the formatters. */

	/* Release the filter objects. */
	r1 = close_filters(a);
	if (r1 < r)
		r = r1;

	return (r);
}

/*
 * Release memory and other resources.
 */
static int
_archive_read_free(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct archive_read_passphrase *p;
	int i, n;
	int slots;
	int r = ARCHIVE_OK;

	if (_a == NULL)
		return (ARCHIVE_OK);
	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_free");
	if (a->archive.state != ARCHIVE_STATE_CLOSED
	    && a->archive.state != ARCHIVE_STATE_FATAL)
		r = archive_read_close(&a->archive);

	/* Call cleanup functions registered by optional components. */
	if (a->cleanup_archive_extract != NULL)
		r = (a->cleanup_archive_extract)(a);

	/* Cleanup format-specific data. */
	slots = sizeof(a->formats) / sizeof(a->formats[0]);
	for (i = 0; i < slots; i++) {
		a->format = &(a->formats[i]);
		if (a->formats[i].cleanup)
			(a->formats[i].cleanup)(a);
	}

	/* Free the filters */
	__archive_read_free_filters(a);

	/* Release the bidder objects. */
	n = sizeof(a->bidders)/sizeof(a->bidders[0]);
	for (i = 0; i < n; i++) {
		if (a->bidders[i].vtable == NULL ||
		    a->bidders[i].vtable->free == NULL)
			continue;
		(a->bidders[i].vtable->free)(&a->bidders[i]);
	}

	/* Release passphrase list. */
	p = a->passphrases.first;
	while (p != NULL) {
		struct archive_read_passphrase *np = p->next;

		/* A passphrase should be cleaned. */
		memset(p->passphrase, 0, strlen(p->passphrase));
		free(p->passphrase);
		free(p);
		p = np;
	}

	archive_string_free(&a->archive.error_string);
	archive_entry_free(a->entry);
	a->archive.magic = 0;
	__archive_clean(&a->archive);
	free(a->client.dataset);
	free(a);
	return (r);
}

static struct archive_read_filter *
get_filter(struct archive *_a, int n)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct archive_read_filter *f = a->filter;
	/* We use n == -1 for 'the last filter', which is always the
	 * client proxy. */
	if (n == -1 && f != NULL) {
		struct archive_read_filter *last = f;
		f = f->upstream;
		while (f != NULL) {
			last = f;
			f = f->upstream;
		}
		return (last);
	}
	if (n < 0)
		return NULL;
	while (n > 0 && f != NULL) {
		f = f->upstream;
		--n;
	}
	return (f);
}

static int
_archive_filter_code(struct archive *_a, int n)
{
	struct archive_read_filter *f = get_filter(_a, n);
	return f == NULL ? -1 : f->code;
}

static const char *
_archive_filter_name(struct archive *_a, int n)
{
	struct archive_read_filter *f = get_filter(_a, n);
	return f != NULL ? f->name : NULL;
}

static int64_t
_archive_filter_bytes(struct archive *_a, int n)
{
	struct archive_read_filter *f = get_filter(_a, n);
	return f == NULL ? -1 : f->position;
}
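
/*
 * Illustration only (not part of the library): the vtable entries above
 * back the public archive_filter_count()/archive_filter_name()/
 * archive_filter_bytes() calls, so a client can walk the detected filter
 * chain after opening an archive, e.g. to report that "gzip" wraps the
 * raw data:
 *
 *	int i, n = archive_filter_count(a);
 *	for (i = 0; i < n; i++)
 *		printf("filter %d: %s (%jd bytes)\n", i,
 *		    archive_filter_name(a, i),
 *		    (intmax_t)archive_filter_bytes(a, i));
 */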

/*
 * Used internally by read format handlers to register their bid and
 * initialization functions.
 */
int
__archive_read_register_format(struct archive_read *a,
    void *format_data,
    const char *name,
    int (*bid)(struct archive_read *, int),
    int (*options)(struct archive_read *, const char *, const char *),
    int (*read_header)(struct archive_read *, struct archive_entry *),
    int (*read_data)(struct archive_read *, const void **, size_t *, int64_t *),
    int (*read_data_skip)(struct archive_read *),
    int64_t (*seek_data)(struct archive_read *, int64_t, int),
    int (*cleanup)(struct archive_read *),
    int (*format_capabilities)(struct archive_read *),
    int (*has_encrypted_entries)(struct archive_read *))
{
	int i, number_slots;

	archive_check_magic(&a->archive,
	    ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "__archive_read_register_format");

	number_slots = sizeof(a->formats) / sizeof(a->formats[0]);

	for (i = 0; i < number_slots; i++) {
		if (a->formats[i].bid == bid)
			return (ARCHIVE_WARN); /* We've already installed */
		if (a->formats[i].bid == NULL) {
			a->formats[i].bid = bid;
			a->formats[i].options = options;
			a->formats[i].read_header = read_header;
			a->formats[i].read_data = read_data;
			a->formats[i].read_data_skip = read_data_skip;
			a->formats[i].seek_data = seek_data;
			a->formats[i].cleanup = cleanup;
			a->formats[i].data = format_data;
			a->formats[i].name = name;
			a->formats[i].format_capabilties = format_capabilities;
			a->formats[i].has_encrypted_entries = has_encrypted_entries;
			return (ARCHIVE_OK);
		}
	}

	archive_set_error(&a->archive, ENOMEM,
	    "Not enough slots for format registration");
	return (ARCHIVE_FATAL);
}

/*
 * Used internally by decompression routines to register their bid and
 * initialization functions.
 */
int
__archive_read_register_bidder(struct archive_read *a,
    void *bidder_data,
    const char *name,
    const struct archive_read_filter_bidder_vtable *vtable)
{
	struct archive_read_filter_bidder *bidder;
	int i, number_slots;

	archive_check_magic(&a->archive, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_NEW, "__archive_read_register_bidder");

	number_slots = sizeof(a->bidders) / sizeof(a->bidders[0]);

	for (i = 0; i < number_slots; i++) {
		if (a->bidders[i].vtable != NULL)
			continue;
		memset(a->bidders + i, 0, sizeof(a->bidders[0]));
		bidder = (a->bidders + i);
		bidder->data = bidder_data;
		bidder->name = name;
		bidder->vtable = vtable;
		if (bidder->vtable->bid == NULL || bidder->vtable->init == NULL) {
			archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
			    "Internal error: "
			    "no bid/init for filter bidder");
			return (ARCHIVE_FATAL);
		}

		return (ARCHIVE_OK);
	}

	archive_set_error(&a->archive, ENOMEM,
	    "Not enough slots for filter registration");
	return (ARCHIVE_FATAL);
}

/*
 * The next section implements the peek/consume internal I/O
 * system used by archive readers.  This system allows simple
 * read-ahead for consumers while preserving zero-copy operation
 * most of the time.
 *
 * The two key operations:
 *  * The read-ahead function returns a pointer to a block of data
 *    that satisfies a minimum request.
 *  * The consume function advances the file pointer.
 *
 * In the ideal case, filters generate blocks of data
 * and __archive_read_ahead() just returns pointers directly into
 * those blocks.  Then __archive_read_consume() just bumps those
 * pointers.  Only if your request would span blocks does the I/O
 * layer use a copy buffer to provide you with a contiguous block of
 * data.
 *
 * A couple of useful idioms:
 *  * "I just want some data."  Ask for 1 byte and pay attention to
 *    the "number of bytes available" from __archive_read_ahead().
 *    Consume whatever you actually use.
 *  * "I want to output a large block of data."  As above, ask for 1 byte,
 *    emit all that's available (up to whatever limit you have), consume
 *    it all, then repeat until you're done.  This effectively means that
 *    you're passing along the blocks that came from your provider.
 *  * "I want to peek ahead by a large amount."  Ask for 4k or so, then
 *    double and repeat until you get an error or have enough.  Note
 *    that the I/O layer will likely end up expanding its copy buffer
 *    to fit your request, so use this technique cautiously.  This
 *    technique is used, for example, by some of the format tasting
 *    code that has uncertain look-ahead needs.
 */
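
/*
 * Illustration only (not part of the library): a minimal sketch of the
 * first idiom above, as a format reader might use it to pass an entry
 * body along block by block.  "remaining" and deliver_block() are
 * hypothetical names for this example.
 *
 *	while (remaining > 0) {
 *		ssize_t avail;
 *		const void *p = __archive_read_ahead(a, 1, &avail);
 *		if (p == NULL)
 *			return (ARCHIVE_FATAL);
 *		if ((int64_t)avail > remaining)
 *			avail = (ssize_t)remaining;
 *		deliver_block(p, avail);
 *		__archive_read_consume(a, avail);
 *		remaining -= avail;
 *	}
 */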

/*
 * Looks ahead in the input stream:
 *  * If 'avail' pointer is provided, that returns number of bytes available
 *    in the current buffer, which may be much larger than requested.
 *  * If end-of-file, *avail gets set to zero.
 *  * If error, *avail gets error code.
 *  * If request can be met, returns pointer to data.
 *  * If minimum request cannot be met, returns NULL.
 *
 * Note: If you just want "some data", ask for 1 byte and pay attention
 * to *avail, which will have the actual amount available.  If you
 * know exactly how many bytes you need, just ask for that and treat
 * a NULL return as an error.
 *
 * Important:  This does NOT move the file pointer.  See
 * __archive_read_consume() below.
 */
const void *
__archive_read_ahead(struct archive_read *a, size_t min, ssize_t *avail)
{
	return (__archive_read_filter_ahead(a->filter, min, avail));
}

const void *
__archive_read_filter_ahead(struct archive_read_filter *filter,
    size_t min, ssize_t *avail)
{
	ssize_t bytes_read;
	size_t tocopy;

	if (filter->fatal) {
		if (avail)
			*avail = ARCHIVE_FATAL;
		return (NULL);
	}

	/*
	 * Keep pulling more data until we can satisfy the request.
	 */
	for (;;) {

		/*
		 * If we can satisfy from the copy buffer (and the
		 * copy buffer isn't empty), we're done.  In particular,
		 * note that min == 0 is a perfectly well-defined
		 * request.
		 */
		if (filter->avail >= min && filter->avail > 0) {
			if (avail != NULL)
				*avail = filter->avail;
			return (filter->next);
		}

		/*
		 * We can satisfy directly from client buffer if everything
		 * currently in the copy buffer is still in the client buffer.
		 */
		if (filter->client_total >= filter->client_avail + filter->avail
		    && filter->client_avail + filter->avail >= min) {
			/* "Roll back" to client buffer. */
			filter->client_avail += filter->avail;
			filter->client_next -= filter->avail;
			/* Copy buffer is now empty. */
			filter->avail = 0;
			filter->next = filter->buffer;
			/* Return data from client buffer. */
			if (avail != NULL)
				*avail = filter->client_avail;
			return (filter->client_next);
		}

		/* Move data forward in copy buffer if necessary. */
		if (filter->next > filter->buffer &&
		    filter->next + min > filter->buffer + filter->buffer_size) {
			if (filter->avail > 0)
				memmove(filter->buffer, filter->next,
				    filter->avail);
			filter->next = filter->buffer;
		}

		/* If we've used up the client data, get more. */
		if (filter->client_avail <= 0) {
			if (filter->end_of_file) {
				if (avail != NULL)
					*avail = filter->avail;
				return (NULL);
			}
			bytes_read = (filter->vtable->read)(filter,
			    &filter->client_buff);
			if (bytes_read < 0) {		/* Read error. */
				filter->client_total = filter->client_avail = 0;
				filter->client_next =
				    filter->client_buff = NULL;
				filter->fatal = 1;
				if (avail != NULL)
					*avail = ARCHIVE_FATAL;
				return (NULL);
			}
			if (bytes_read == 0) {
				/* Check for another client object first */
				if (filter->archive->client.cursor !=
				    filter->archive->client.nodes - 1) {
					if (client_switch_proxy(filter,
					    filter->archive->client.cursor + 1)
					    == ARCHIVE_OK)
						continue;
				}
				/* Premature end-of-file. */
				filter->client_total = filter->client_avail = 0;
				filter->client_next =
				    filter->client_buff = NULL;
				filter->end_of_file = 1;
				/* Return whatever we do have. */
				if (avail != NULL)
					*avail = filter->avail;
				return (NULL);
			}
			filter->client_total = bytes_read;
			filter->client_avail = filter->client_total;
			filter->client_next = filter->client_buff;
		} else {
			/*
			 * We can't satisfy the request from the copy
			 * buffer or the existing client data, so we
			 * need to copy more client data over to the
			 * copy buffer.
			 */

			/* Ensure the buffer is big enough. */
			if (min > filter->buffer_size) {
				size_t s, t;
				char *p;

				/* Double the buffer; watch for overflow. */
				s = t = filter->buffer_size;
				if (s == 0)
					s = min;
				while (s < min) {
					t *= 2;
					if (t <= s) { /* Integer overflow! */
						archive_set_error(
						    &filter->archive->archive,
						    ENOMEM,
						    "Unable to allocate copy"
						    " buffer");
						filter->fatal = 1;
						if (avail != NULL)
							*avail = ARCHIVE_FATAL;
						return (NULL);
					}
					s = t;
				}
				/* Now s >= min, so allocate a new buffer. */
				p = malloc(s);
				if (p == NULL) {
					archive_set_error(
					    &filter->archive->archive,
					    ENOMEM,
					    "Unable to allocate copy buffer");
					filter->fatal = 1;
					if (avail != NULL)
						*avail = ARCHIVE_FATAL;
					return (NULL);
				}
				/* Move data into newly-enlarged buffer. */
				if (filter->avail > 0)
					memmove(p, filter->next, filter->avail);
				free(filter->buffer);
				filter->next = filter->buffer = p;
				filter->buffer_size = s;
			}

			/* We can add client data to copy buffer. */
			/* First estimate: copy to fill rest of buffer. */
			tocopy = (filter->buffer + filter->buffer_size)
			    - (filter->next + filter->avail);
			/* Don't waste time buffering more than we need to. */
			if (tocopy + filter->avail > min)
				tocopy = min - filter->avail;
			/* Don't copy more than is available. */
			if (tocopy > filter->client_avail)
				tocopy = filter->client_avail;

			memcpy(filter->next + filter->avail,
			    filter->client_next, tocopy);
			/* Remove this data from client buffer. */
			filter->client_next += tocopy;
			filter->client_avail -= tocopy;
			/* add it to copy buffer. */
			filter->avail += tocopy;
		}
	}
}

/*
 * Move the file pointer forward.
 */
int64_t
__archive_read_consume(struct archive_read *a, int64_t request)
{
	return (__archive_read_filter_consume(a->filter, request));
}

int64_t
__archive_read_filter_consume(struct archive_read_filter * filter,
    int64_t request)
{
	int64_t skipped;

	if (request < 0)
		return ARCHIVE_FATAL;
	if (request == 0)
		return 0;

	skipped = advance_file_pointer(filter, request);
	if (skipped == request)
		return (skipped);
	/* We hit EOF before we satisfied the skip request. */
	if (skipped < 0)  /* Map error code to 0 for error message below. */
		skipped = 0;
	archive_set_error(&filter->archive->archive,
	    ARCHIVE_ERRNO_MISC,
	    "Truncated input file (needed %jd bytes, only %jd available)",
	    (intmax_t)request, (intmax_t)skipped);
	return (ARCHIVE_FATAL);
}

/*
 * Advance the file pointer by the amount requested.
 * Returns the amount actually advanced, which may be less than the
 * request if EOF is encountered first.
 * Returns a negative value if there's an I/O error.
 */
static int64_t
advance_file_pointer(struct archive_read_filter *filter, int64_t request)
{
	int64_t bytes_skipped, total_bytes_skipped = 0;
	ssize_t bytes_read;
	size_t min;

	if (filter->fatal)
		return (-1);

	/* Use up the copy buffer first. */
	if (filter->avail > 0) {
		min = (size_t)minimum(request, (int64_t)filter->avail);
		filter->next += min;
		filter->avail -= min;
		request -= min;
		filter->position += min;
		total_bytes_skipped += min;
	}

	/* Then use up the client buffer. */
	if (filter->client_avail > 0) {
		min = (size_t)minimum(request, (int64_t)filter->client_avail);
		filter->client_next += min;
		filter->client_avail -= min;
		request -= min;
		filter->position += min;
		total_bytes_skipped += min;
	}
	if (request == 0)
		return (total_bytes_skipped);

	/* If there's an optimized skip function, use it. */
	if (filter->can_skip != 0) {
		bytes_skipped = client_skip_proxy(filter, request);
		if (bytes_skipped < 0) {	/* error */
			filter->fatal = 1;
			return (bytes_skipped);
		}
		filter->position += bytes_skipped;
		total_bytes_skipped += bytes_skipped;
		request -= bytes_skipped;
		if (request == 0)
			return (total_bytes_skipped);
	}

	/* Use ordinary reads as necessary to complete the request. */
	for (;;) {
		bytes_read = (filter->vtable->read)(filter, &filter->client_buff);
		if (bytes_read < 0) {
			filter->client_buff = NULL;
			filter->fatal = 1;
			return (bytes_read);
		}

		if (bytes_read == 0) {
			if (filter->archive->client.cursor !=
			    filter->archive->client.nodes - 1) {
				if (client_switch_proxy(filter,
				    filter->archive->client.cursor + 1)
				    == ARCHIVE_OK)
					continue;
			}
			filter->client_buff = NULL;
			filter->end_of_file = 1;
			return (total_bytes_skipped);
		}

		if (bytes_read >= request) {
			filter->client_next =
			    ((const char *)filter->client_buff) + request;
			filter->client_avail = (size_t)(bytes_read - request);
			filter->client_total = bytes_read;
			total_bytes_skipped += request;
			filter->position += request;
			return (total_bytes_skipped);
		}

		filter->position += bytes_read;
		total_bytes_skipped += bytes_read;
		request -= bytes_read;
	}
}
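
/*
 * Illustration only (not part of the library): a format reader that needs
 * random access (e.g. to find a trailer at the end of the file) would
 * typically probe the seek helper defined below before relying on it.
 * The trailer_offset value is a hypothetical name for this sketch.
 *
 *	int64_t pos = __archive_read_seek(a, trailer_offset, SEEK_SET);
 *	if (pos < 0)
 *		return (ARCHIVE_FATAL);	// or fall back to streaming reads
 */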
1615 */ 1616 int64_t 1617 __archive_read_seek(struct archive_read *a, int64_t offset, int whence) 1618 { 1619 return __archive_read_filter_seek(a->filter, offset, whence); 1620 } 1621 1622 int64_t 1623 __archive_read_filter_seek(struct archive_read_filter *filter, int64_t offset, 1624 int whence) 1625 { 1626 struct archive_read_client *client; 1627 int64_t r; 1628 unsigned int cursor; 1629 1630 if (filter->closed || filter->fatal) 1631 return (ARCHIVE_FATAL); 1632 if (filter->can_seek == 0) 1633 return (ARCHIVE_FAILED); 1634 1635 client = &(filter->archive->client); 1636 switch (whence) { 1637 case SEEK_CUR: 1638 /* Adjust the offset and use SEEK_SET instead */ 1639 offset += filter->position; 1640 __LA_FALLTHROUGH; 1641 case SEEK_SET: 1642 cursor = 0; 1643 while (1) 1644 { 1645 if (client->dataset[cursor].begin_position < 0 || 1646 client->dataset[cursor].total_size < 0 || 1647 client->dataset[cursor].begin_position + 1648 client->dataset[cursor].total_size - 1 > offset || 1649 cursor + 1 >= client->nodes) 1650 break; 1651 r = client->dataset[cursor].begin_position + 1652 client->dataset[cursor].total_size; 1653 client->dataset[++cursor].begin_position = r; 1654 } 1655 while (1) { 1656 r = client_switch_proxy(filter, cursor); 1657 if (r != ARCHIVE_OK) 1658 return r; 1659 if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0) 1660 return r; 1661 client->dataset[cursor].total_size = r; 1662 if (client->dataset[cursor].begin_position + 1663 client->dataset[cursor].total_size - 1 > offset || 1664 cursor + 1 >= client->nodes) 1665 break; 1666 r = client->dataset[cursor].begin_position + 1667 client->dataset[cursor].total_size; 1668 client->dataset[++cursor].begin_position = r; 1669 } 1670 offset -= client->dataset[cursor].begin_position; 1671 if (offset < 0 1672 || offset > client->dataset[cursor].total_size) 1673 return ARCHIVE_FATAL; 1674 if ((r = client_seek_proxy(filter, offset, SEEK_SET)) < 0) 1675 return r; 1676 break; 1677 1678 case SEEK_END: 1679 cursor = 0; 1680 while (1) { 1681 if (client->dataset[cursor].begin_position < 0 || 1682 client->dataset[cursor].total_size < 0 || 1683 cursor + 1 >= client->nodes) 1684 break; 1685 r = client->dataset[cursor].begin_position + 1686 client->dataset[cursor].total_size; 1687 client->dataset[++cursor].begin_position = r; 1688 } 1689 while (1) { 1690 r = client_switch_proxy(filter, cursor); 1691 if (r != ARCHIVE_OK) 1692 return r; 1693 if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0) 1694 return r; 1695 client->dataset[cursor].total_size = r; 1696 r = client->dataset[cursor].begin_position + 1697 client->dataset[cursor].total_size; 1698 if (cursor + 1 >= client->nodes) 1699 break; 1700 client->dataset[++cursor].begin_position = r; 1701 } 1702 while (1) { 1703 if (r + offset >= 1704 client->dataset[cursor].begin_position) 1705 break; 1706 offset += client->dataset[cursor].total_size; 1707 if (cursor == 0) 1708 break; 1709 cursor--; 1710 r = client->dataset[cursor].begin_position + 1711 client->dataset[cursor].total_size; 1712 } 1713 offset = (r + offset) - client->dataset[cursor].begin_position; 1714 if ((r = client_switch_proxy(filter, cursor)) != ARCHIVE_OK) 1715 return r; 1716 r = client_seek_proxy(filter, offset, SEEK_SET); 1717 if (r < ARCHIVE_OK) 1718 return r; 1719 break; 1720 1721 default: 1722 return (ARCHIVE_FATAL); 1723 } 1724 r += client->dataset[cursor].begin_position; 1725 1726 if (r >= 0) { 1727 /* 1728 * Ouch. Clearing the buffer like this hurts, especially 1729 * at bid time. 
		 * from having bidders reuse the data we've already read.
		 *
		 * TODO: If the seek request is in data we already
		 * have, then don't call the seek callback.
		 *
		 * TODO: Zip seeks to end-of-file at bid time.  If
		 * other formats also start doing this, we may need to
		 * find a way for clients to fudge the seek offset to
		 * a block boundary.
		 *
		 * Hmmm... If whence was SEEK_END, we know the file
		 * size is (r - offset).  Can we use that to simplify
		 * the TODO items above?
		 */
		filter->avail = filter->client_avail = 0;
		filter->next = filter->buffer;
		filter->position = r;
		filter->end_of_file = 0;
	}
	return r;
}