xref: /freebsd/contrib/libarchive/libarchive/archive_read.c (revision 7d536dc855c85c15bf45f033d108a61b1f3cecc3)
1 /*-
2  * Copyright (c) 2003-2011 Tim Kientzle
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 /*
27  * This file contains the "essential" portions of the read API, that
28  * is, stuff that will probably always be used by any client that
29  * actually needs to read an archive.  Optional pieces have been, as
30  * far as possible, separated out into separate files to avoid
31  * needlessly bloating statically-linked clients.
32  */
33 
34 #include "archive_platform.h"
35 __FBSDID("$FreeBSD$");
36 
37 #ifdef HAVE_ERRNO_H
38 #include <errno.h>
39 #endif
40 #include <stdio.h>
41 #ifdef HAVE_STDLIB_H
42 #include <stdlib.h>
43 #endif
44 #ifdef HAVE_STRING_H
45 #include <string.h>
46 #endif
47 #ifdef HAVE_UNISTD_H
48 #include <unistd.h>
49 #endif
50 
51 #include "archive.h"
52 #include "archive_entry.h"
53 #include "archive_private.h"
54 #include "archive_read_private.h"
55 
/* Parenthesize both arguments and the expansion so operator
 * precedence in the caller's expressions cannot corrupt the result
 * (e.g. minimum(1 << 2, 3) previously expanded incorrectly). */
#define minimum(a, b) ((a) < (b) ? (a) : (b))
57 
58 static int	choose_filters(struct archive_read *);
59 static int	choose_format(struct archive_read *);
60 static struct archive_vtable *archive_read_vtable(void);
61 static int64_t	_archive_filter_bytes(struct archive *, int);
62 static int	_archive_filter_code(struct archive *, int);
63 static const char *_archive_filter_name(struct archive *, int);
64 static int  _archive_filter_count(struct archive *);
65 static int	_archive_read_close(struct archive *);
66 static int	_archive_read_data_block(struct archive *,
67 		    const void **, size_t *, int64_t *);
68 static int	_archive_read_free(struct archive *);
69 static int	_archive_read_next_header(struct archive *,
70 		    struct archive_entry **);
71 static int	_archive_read_next_header2(struct archive *,
72 		    struct archive_entry *);
73 static int64_t  advance_file_pointer(struct archive_read_filter *, int64_t);
74 
75 static struct archive_vtable *
76 archive_read_vtable(void)
77 {
78 	static struct archive_vtable av;
79 	static int inited = 0;
80 
81 	if (!inited) {
82 		av.archive_filter_bytes = _archive_filter_bytes;
83 		av.archive_filter_code = _archive_filter_code;
84 		av.archive_filter_name = _archive_filter_name;
85 		av.archive_filter_count = _archive_filter_count;
86 		av.archive_read_data_block = _archive_read_data_block;
87 		av.archive_read_next_header = _archive_read_next_header;
88 		av.archive_read_next_header2 = _archive_read_next_header2;
89 		av.archive_free = _archive_read_free;
90 		av.archive_close = _archive_read_close;
91 		inited = 1;
92 	}
93 	return (&av);
94 }
95 
96 /*
97  * Allocate, initialize and return a struct archive object.
98  */
99 struct archive *
100 archive_read_new(void)
101 {
102 	struct archive_read *a;
103 
104 	a = (struct archive_read *)malloc(sizeof(*a));
105 	if (a == NULL)
106 		return (NULL);
107 	memset(a, 0, sizeof(*a));
108 	a->archive.magic = ARCHIVE_READ_MAGIC;
109 
110 	a->archive.state = ARCHIVE_STATE_NEW;
111 	a->entry = archive_entry_new2(&a->archive);
112 	a->archive.vtable = archive_read_vtable();
113 
114 	return (&a->archive);
115 }
116 
117 /*
118  * Record the do-not-extract-to file. This belongs in archive_read_extract.c.
119  */
120 void
121 archive_read_extract_set_skip_file(struct archive *_a, int64_t d, int64_t i)
122 {
123 	struct archive_read *a = (struct archive_read *)_a;
124 
125 	if (ARCHIVE_OK != __archive_check_magic(_a, ARCHIVE_READ_MAGIC,
126 		ARCHIVE_STATE_ANY, "archive_read_extract_set_skip_file"))
127 		return;
128 	a->skip_file_set = 1;
129 	a->skip_file_dev = d;
130 	a->skip_file_ino = i;
131 }
132 
133 /*
134  * Open the archive
135  */
136 int
137 archive_read_open(struct archive *a, void *client_data,
138     archive_open_callback *client_opener, archive_read_callback *client_reader,
139     archive_close_callback *client_closer)
140 {
141 	/* Old archive_read_open() is just a thin shell around
142 	 * archive_read_open1. */
143 	archive_read_set_open_callback(a, client_opener);
144 	archive_read_set_read_callback(a, client_reader);
145 	archive_read_set_close_callback(a, client_closer);
146 	archive_read_set_callback_data(a, client_data);
147 	return archive_read_open1(a);
148 }
149 
150 
151 int
152 archive_read_open2(struct archive *a, void *client_data,
153     archive_open_callback *client_opener,
154     archive_read_callback *client_reader,
155     archive_skip_callback *client_skipper,
156     archive_close_callback *client_closer)
157 {
158 	/* Old archive_read_open2() is just a thin shell around
159 	 * archive_read_open1. */
160 	archive_read_set_callback_data(a, client_data);
161 	archive_read_set_open_callback(a, client_opener);
162 	archive_read_set_read_callback(a, client_reader);
163 	archive_read_set_skip_callback(a, client_skipper);
164 	archive_read_set_close_callback(a, client_closer);
165 	return archive_read_open1(a);
166 }
167 
168 static ssize_t
169 client_read_proxy(struct archive_read_filter *self, const void **buff)
170 {
171 	ssize_t r;
172 	r = (self->archive->client.reader)(&self->archive->archive,
173 	    self->data, buff);
174 	return (r);
175 }
176 
177 static int64_t
178 client_skip_proxy(struct archive_read_filter *self, int64_t request)
179 {
180 	if (request < 0)
181 		__archive_errx(1, "Negative skip requested.");
182 	if (request == 0)
183 		return 0;
184 
185 	if (self->archive->client.skipper != NULL) {
186 		/* Seek requests over 1GiB are broken down into
187 		 * multiple seeks.  This avoids overflows when the
188 		 * requests get passed through 32-bit arguments. */
189 		int64_t skip_limit = (int64_t)1 << 30;
190 		int64_t total = 0;
191 		for (;;) {
192 			int64_t get, ask = request;
193 			if (ask > skip_limit)
194 				ask = skip_limit;
195 			get = (self->archive->client.skipper)
196 				(&self->archive->archive, self->data, ask);
197 			if (get == 0)
198 				return (total);
199 			request -= get;
200 			total += get;
201 		}
202 	} else if (self->archive->client.seeker != NULL
203 		&& request > 64 * 1024) {
204 		/* If the client provided a seeker but not a skipper,
205 		 * we can use the seeker to skip forward.
206 		 *
207 		 * Note: This isn't always a good idea.  The client
208 		 * skipper is allowed to skip by less than requested
209 		 * if it needs to maintain block alignment.  The
210 		 * seeker is not allowed to play such games, so using
211 		 * the seeker here may be a performance loss compared
212 		 * to just reading and discarding.  That's why we
213 		 * only do this for skips of over 64k.
214 		 */
215 		int64_t before = self->position;
216 		int64_t after = (self->archive->client.seeker)
217 		    (&self->archive->archive, self->data, request, SEEK_CUR);
218 		if (after != before + request)
219 			return ARCHIVE_FATAL;
220 		return after - before;
221 	}
222 	return 0;
223 }
224 
225 static int64_t
226 client_seek_proxy(struct archive_read_filter *self, int64_t offset, int whence)
227 {
228 	/* DO NOT use the skipper here!  If we transparently handled
229 	 * forward seek here by using the skipper, that will break
230 	 * other libarchive code that assumes a successful forward
231 	 * seek means it can also seek backwards.
232 	 */
233 	if (self->archive->client.seeker == NULL)
234 		return (ARCHIVE_FAILED);
235 	return (self->archive->client.seeker)(&self->archive->archive,
236 	    self->data, offset, whence);
237 }
238 
239 static int
240 client_close_proxy(struct archive_read_filter *self)
241 {
242 	int r = ARCHIVE_OK, r2;
243 	unsigned int i;
244 
245 	if (self->archive->client.closer == NULL)
246 		return (r);
247 	for (i = 0; i < self->archive->client.nodes; i++)
248 	{
249 		r2 = (self->archive->client.closer)
250 			((struct archive *)self->archive,
251 				self->archive->client.dataset[i].data);
252 		if (r > r2)
253 			r = r2;
254 	}
255 	return (r);
256 }
257 
258 static int
259 client_open_proxy(struct archive_read_filter *self)
260 {
261   int r = ARCHIVE_OK;
262 	if (self->archive->client.opener != NULL)
263 		r = (self->archive->client.opener)(
264 		    (struct archive *)self->archive, self->data);
265 	return (r);
266 }
267 
268 static int
269 client_switch_proxy(struct archive_read_filter *self, unsigned int iindex)
270 {
271   int r1 = ARCHIVE_OK, r2 = ARCHIVE_OK;
272 	void *data2 = NULL;
273 
274 	/* Don't do anything if already in the specified data node */
275 	if (self->archive->client.cursor == iindex)
276 		return (ARCHIVE_OK);
277 
278 	self->archive->client.cursor = iindex;
279 	data2 = self->archive->client.dataset[self->archive->client.cursor].data;
280 	if (self->archive->client.switcher != NULL)
281 	{
282 		r1 = r2 = (self->archive->client.switcher)
283 			((struct archive *)self->archive, self->data, data2);
284 		self->data = data2;
285 	}
286 	else
287 	{
288 		/* Attempt to call close and open instead */
289 		if (self->archive->client.closer != NULL)
290 			r1 = (self->archive->client.closer)
291 				((struct archive *)self->archive, self->data);
292 		self->data = data2;
293 		if (self->archive->client.opener != NULL)
294 			r2 = (self->archive->client.opener)
295 				((struct archive *)self->archive, self->data);
296 	}
297 	return (r1 < r2) ? r1 : r2;
298 }
299 
300 int
301 archive_read_set_open_callback(struct archive *_a,
302     archive_open_callback *client_opener)
303 {
304 	struct archive_read *a = (struct archive_read *)_a;
305 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
306 	    "archive_read_set_open_callback");
307 	a->client.opener = client_opener;
308 	return ARCHIVE_OK;
309 }
310 
311 int
312 archive_read_set_read_callback(struct archive *_a,
313     archive_read_callback *client_reader)
314 {
315 	struct archive_read *a = (struct archive_read *)_a;
316 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
317 	    "archive_read_set_read_callback");
318 	a->client.reader = client_reader;
319 	return ARCHIVE_OK;
320 }
321 
322 int
323 archive_read_set_skip_callback(struct archive *_a,
324     archive_skip_callback *client_skipper)
325 {
326 	struct archive_read *a = (struct archive_read *)_a;
327 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
328 	    "archive_read_set_skip_callback");
329 	a->client.skipper = client_skipper;
330 	return ARCHIVE_OK;
331 }
332 
333 int
334 archive_read_set_seek_callback(struct archive *_a,
335     archive_seek_callback *client_seeker)
336 {
337 	struct archive_read *a = (struct archive_read *)_a;
338 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
339 	    "archive_read_set_seek_callback");
340 	a->client.seeker = client_seeker;
341 	return ARCHIVE_OK;
342 }
343 
344 int
345 archive_read_set_close_callback(struct archive *_a,
346     archive_close_callback *client_closer)
347 {
348 	struct archive_read *a = (struct archive_read *)_a;
349 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
350 	    "archive_read_set_close_callback");
351 	a->client.closer = client_closer;
352 	return ARCHIVE_OK;
353 }
354 
355 int
356 archive_read_set_switch_callback(struct archive *_a,
357     archive_switch_callback *client_switcher)
358 {
359 	struct archive_read *a = (struct archive_read *)_a;
360 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
361 	    "archive_read_set_switch_callback");
362 	a->client.switcher = client_switcher;
363 	return ARCHIVE_OK;
364 }
365 
int
archive_read_set_callback_data(struct archive *_a, void *client_data)
{
	/* Equivalent to installing the data at node 0. */
	return (archive_read_set_callback_data2(_a, client_data, 0));
}
371 
372 int
373 archive_read_set_callback_data2(struct archive *_a, void *client_data,
374     unsigned int iindex)
375 {
376 	struct archive_read *a = (struct archive_read *)_a;
377 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
378 	    "archive_read_set_callback_data2");
379 
380 	if (a->client.nodes == 0)
381 	{
382 		a->client.dataset = (struct archive_read_data_node *)
383 		    calloc(1, sizeof(*a->client.dataset));
384 		if (a->client.dataset == NULL)
385 		{
386 			archive_set_error(&a->archive, ENOMEM,
387 				"No memory.");
388 			return ARCHIVE_FATAL;
389 		}
390 		a->client.nodes = 1;
391 	}
392 
393 	if (iindex > a->client.nodes - 1)
394 	{
395 		archive_set_error(&a->archive, EINVAL,
396 			"Invalid index specified.");
397 		return ARCHIVE_FATAL;
398 	}
399 	a->client.dataset[iindex].data = client_data;
400 	a->client.dataset[iindex].begin_position = -1;
401 	a->client.dataset[iindex].total_size = -1;
402 	return ARCHIVE_OK;
403 }
404 
405 int
406 archive_read_add_callback_data(struct archive *_a, void *client_data,
407     unsigned int iindex)
408 {
409 	struct archive_read *a = (struct archive_read *)_a;
410 	void *p;
411 	unsigned int i;
412 
413 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
414 	    "archive_read_add_callback_data");
415 	if (iindex > a->client.nodes) {
416 		archive_set_error(&a->archive, EINVAL,
417 			"Invalid index specified.");
418 		return ARCHIVE_FATAL;
419 	}
420 	p = realloc(a->client.dataset, sizeof(*a->client.dataset)
421 		* (++(a->client.nodes)));
422 	if (p == NULL) {
423 		archive_set_error(&a->archive, ENOMEM,
424 			"No memory.");
425 		return ARCHIVE_FATAL;
426 	}
427 	a->client.dataset = (struct archive_read_data_node *)p;
428 	for (i = a->client.nodes - 1; i > iindex && i > 0; i--) {
429 		a->client.dataset[i].data = a->client.dataset[i-1].data;
430 		a->client.dataset[i].begin_position = -1;
431 		a->client.dataset[i].total_size = -1;
432 	}
433 	a->client.dataset[iindex].data = client_data;
434 	a->client.dataset[iindex].begin_position = -1;
435 	a->client.dataset[iindex].total_size = -1;
436 	return ARCHIVE_OK;
437 }
438 
439 int
440 archive_read_append_callback_data(struct archive *_a, void *client_data)
441 {
442 	struct archive_read *a = (struct archive_read *)_a;
443 	return archive_read_add_callback_data(_a, client_data, a->client.nodes);
444 }
445 
int
archive_read_prepend_callback_data(struct archive *_a, void *client_data)
{
	/* Prepending is inserting at index 0. */
	return (archive_read_add_callback_data(_a, client_data, 0));
}
451 
/*
 * Real open: validate callbacks, invoke the client opener, install
 * the client-proxy filter at the head of the pipeline, run filter
 * bidding (unless bypassed), and choose a format (unless forced).
 * On success the archive is left in HEADER state.
 */
int
archive_read_open1(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct archive_read_filter *filter, *tmp;
	int slot;
	int e = 0;
	unsigned int i;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_open");
	archive_clear_error(&a->archive);

	/* A read callback is the one thing we cannot do without. */
	if (a->client.reader == NULL) {
		archive_set_error(&a->archive, EINVAL,
		    "No reader function provided to archive_read_open");
		a->archive.state = ARCHIVE_STATE_FATAL;
		return (ARCHIVE_FATAL);
	}

	/* Open data source. */
	if (a->client.opener != NULL) {
		e = (a->client.opener)(&a->archive, a->client.dataset[0].data);
		if (e != 0) {
			/* If the open failed, call the closer to clean up. */
			if (a->client.closer) {
				for (i = 0; i < a->client.nodes; i++)
					(a->client.closer)(&a->archive,
					    a->client.dataset[i].data);
			}
			return (e);
		}
	}

	/* The client-proxy filter: the "source" end of the pipeline
	 * that forwards read/skip/seek/close/switch to the client
	 * callbacks. */
	filter = calloc(1, sizeof(*filter));
	if (filter == NULL)
		return (ARCHIVE_FATAL);
	filter->bidder = NULL;
	filter->upstream = NULL;
	filter->archive = a;
	filter->data = a->client.dataset[0].data;
	filter->open = client_open_proxy;
	filter->read = client_read_proxy;
	filter->skip = client_skip_proxy;
	filter->seek = client_seek_proxy;
	filter->close = client_close_proxy;
	filter->sswitch = client_switch_proxy;
	filter->name = "none";
	filter->code = ARCHIVE_FILTER_NONE;

	a->client.dataset[0].begin_position = 0;
	if (!a->filter || !a->bypass_filter_bidding)
	{
		a->filter = filter;
		/* Build out the input pipeline. */
		e = choose_filters(a);
		if (e < ARCHIVE_WARN) {
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
	}
	else
	{
		/* Filter bidding was bypassed: a filter chain was
		 * preinstalled, so just attach the client proxy at
		 * the upstream end of it. */
		/* Need to add "NONE" type filter at the end of the filter chain */
		tmp = a->filter;
		while (tmp->upstream)
			tmp = tmp->upstream;
		tmp->upstream = filter;
	}

	if (!a->format)
	{
		/* No format was forced; let registered formats bid. */
		slot = choose_format(a);
		if (slot < 0) {
			__archive_read_close_filters(a);
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
		a->format = &(a->formats[slot]);
	}

	a->archive.state = ARCHIVE_STATE_HEADER;

	/* Ensure libarchive starts from the first node in a multivolume set */
	client_switch_proxy(a->filter, 0);
	return (e);
}
539 
540 /*
541  * Allow each registered stream transform to bid on whether
542  * it wants to handle this stream.  Repeat until we've finished
543  * building the pipeline.
544  */
545 static int
546 choose_filters(struct archive_read *a)
547 {
548 	int number_bidders, i, bid, best_bid, n;
549 	struct archive_read_filter_bidder *bidder, *best_bidder;
550 	struct archive_read_filter *filter;
551 	ssize_t avail;
552 	int r;
553 
554 	for (n = 0; n < 25; ++n) {
555 		number_bidders = sizeof(a->bidders) / sizeof(a->bidders[0]);
556 
557 		best_bid = 0;
558 		best_bidder = NULL;
559 
560 		bidder = a->bidders;
561 		for (i = 0; i < number_bidders; i++, bidder++) {
562 			if (bidder->bid != NULL) {
563 				bid = (bidder->bid)(bidder, a->filter);
564 				if (bid > best_bid) {
565 					best_bid = bid;
566 					best_bidder = bidder;
567 				}
568 			}
569 		}
570 
571 		/* If no bidder, we're done. */
572 		if (best_bidder == NULL) {
573 			/* Verify the filter by asking it for some data. */
574 			__archive_read_filter_ahead(a->filter, 1, &avail);
575 			if (avail < 0) {
576 				__archive_read_close_filters(a);
577 				__archive_read_free_filters(a);
578 				return (ARCHIVE_FATAL);
579 			}
580 			a->archive.compression_name = a->filter->name;
581 			a->archive.compression_code = a->filter->code;
582 			return (ARCHIVE_OK);
583 		}
584 
585 		filter
586 		    = (struct archive_read_filter *)calloc(1, sizeof(*filter));
587 		if (filter == NULL)
588 			return (ARCHIVE_FATAL);
589 		filter->bidder = best_bidder;
590 		filter->archive = a;
591 		filter->upstream = a->filter;
592 		a->filter = filter;
593 		r = (best_bidder->init)(a->filter);
594 		if (r != ARCHIVE_OK) {
595 			__archive_read_close_filters(a);
596 			__archive_read_free_filters(a);
597 			return (ARCHIVE_FATAL);
598 		}
599 	}
600 	archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
601 	    "Input requires too many filters for decoding");
602 	return (ARCHIVE_FATAL);
603 }
604 
605 /*
606  * Read header of next entry.
607  */
static int
_archive_read_next_header2(struct archive *_a, struct archive_entry *entry)
{
	struct archive_read *a = (struct archive_read *)_a;
	int r1 = ARCHIVE_OK, r2;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA,
	    "archive_read_next_header");

	archive_entry_clear(entry);
	archive_clear_error(&a->archive);

	/*
	 * If client didn't consume entire data, skip any remainder
	 * (This is especially important for GNU incremental directories.)
	 */
	if (a->archive.state == ARCHIVE_STATE_DATA) {
		r1 = archive_read_data_skip(&a->archive);
		if (r1 == ARCHIVE_EOF)
			archive_set_error(&a->archive, EIO,
			    "Premature end-of-file.");
		if (r1 == ARCHIVE_EOF || r1 == ARCHIVE_FATAL) {
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
	}

	/* Record start-of-header offset in uncompressed stream. */
	a->header_position = a->filter->position;

	/* Count the file optimistically; reverted below on EOF. */
	++_a->file_count;
	r2 = (a->format->read_header)(a, entry);

	/*
	 * EOF and FATAL are persistent at this layer.  By
	 * modifying the state, we guarantee that future calls to
	 * read a header or read data will fail.
	 */
	switch (r2) {
	case ARCHIVE_EOF:
		a->archive.state = ARCHIVE_STATE_EOF;
		--_a->file_count;/* Revert a file counter. */
		break;
	case ARCHIVE_OK:
		a->archive.state = ARCHIVE_STATE_DATA;
		break;
	case ARCHIVE_WARN:
		/* A warning still yields a usable header. */
		a->archive.state = ARCHIVE_STATE_DATA;
		break;
	case ARCHIVE_RETRY:
		/* State unchanged: the caller may try again. */
		break;
	case ARCHIVE_FATAL:
		a->archive.state = ARCHIVE_STATE_FATAL;
		break;
	}

	/* Reset per-entry read-data bookkeeping for the new entry. */
	a->read_data_output_offset = 0;
	a->read_data_remaining = 0;
	a->read_data_is_posix_read = 0;
	a->read_data_requested = 0;
	a->data_start_node = a->client.cursor;
	/* EOF always wins; otherwise return the worst error. */
	return (r2 < r1 || r2 == ARCHIVE_EOF) ? r2 : r1;
}
673 
674 int
675 _archive_read_next_header(struct archive *_a, struct archive_entry **entryp)
676 {
677 	int ret;
678 	struct archive_read *a = (struct archive_read *)_a;
679 	*entryp = NULL;
680 	ret = _archive_read_next_header2(_a, a->entry);
681 	*entryp = a->entry;
682 	return ret;
683 }
684 
685 /*
686  * Allow each registered format to bid on whether it wants to handle
687  * the next entry.  Return index of winning bidder.
688  */
static int
choose_format(struct archive_read *a)
{
	int slots;
	int i;
	int bid, best_bid;
	int best_bid_slot;

	slots = sizeof(a->formats) / sizeof(a->formats[0]);
	best_bid = -1;
	best_bid_slot = -1;

	/* Set up a->format for convenience of bidders. */
	a->format = &(a->formats[0]);
	for (i = 0; i < slots; i++, a->format++) {
		if (a->format->bid) {
			bid = (a->format->bid)(a, best_bid);
			if (bid == ARCHIVE_FATAL)
				return (ARCHIVE_FATAL);
			/* Rewind so the next bidder (and the winner's
			 * read_header) sees the stream from the start. */
			if (a->filter->position != 0)
				__archive_read_seek(a, 0, SEEK_SET);
			/* First bidder wins ties via the best_bid_slot < 0
			 * check; later bidders must strictly exceed. */
			if ((bid > best_bid) || (best_bid_slot < 0)) {
				best_bid = bid;
				best_bid_slot = i;
			}
		}
	}

	/*
	 * There were no bidders; this is a serious programmer error
	 * and demands a quick and definitive abort.
	 */
	if (best_bid_slot < 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "No formats registered");
		return (ARCHIVE_FATAL);
	}

	/*
	 * There were bidders, but no non-zero bids; this means we
	 * can't support this stream.
	 */
	if (best_bid < 1) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unrecognized archive format");
		return (ARCHIVE_FATAL);
	}

	return (best_bid_slot);
}
739 
740 /*
741  * Return the file offset (within the uncompressed data stream) where
742  * the last header started.
743  */
744 int64_t
745 archive_read_header_position(struct archive *_a)
746 {
747 	struct archive_read *a = (struct archive_read *)_a;
748 	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
749 	    ARCHIVE_STATE_ANY, "archive_read_header_position");
750 	return (a->header_position);
751 }
752 
753 /*
754  * Read data from an archive entry, using a read(2)-style interface.
755  * This is a convenience routine that just calls
756  * archive_read_data_block and copies the results into the client
757  * buffer, filling any gaps with zero bytes.  Clients using this
758  * API can be completely ignorant of sparse-file issues; sparse files
759  * will simply be padded with nulls.
760  *
761  * DO NOT intermingle calls to this function and archive_read_data_block
762  * to read a single entry body.
763  */
ssize_t
archive_read_data(struct archive *_a, void *buff, size_t s)
{
	struct archive_read *a = (struct archive_read *)_a;
	char	*dest;
	const void *read_buf;
	size_t	 bytes_read;
	size_t	 len;
	int	 r;

	bytes_read = 0;
	dest = (char *)buff;

	while (s > 0) {
		/* Refill from the format handler when the cached block
		 * has been fully consumed. */
		if (a->read_data_remaining == 0) {
			read_buf = a->read_data_block;
			a->read_data_is_posix_read = 1;
			a->read_data_requested = s;
			r = _archive_read_data_block(&a->archive, &read_buf,
			    &a->read_data_remaining, &a->read_data_offset);
			a->read_data_block = read_buf;
			if (r == ARCHIVE_EOF)
				return (bytes_read);
			/*
			 * Error codes are all negative, so the status
			 * return here cannot be confused with a valid
			 * byte count.  (ARCHIVE_OK is zero.)
			 */
			if (r < ARCHIVE_OK)
				return (r);
		}

		/* Sparse blocks must arrive in increasing file order. */
		if (a->read_data_offset < a->read_data_output_offset) {
			archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
			    "Encountered out-of-order sparse blocks");
			return (ARCHIVE_RETRY);
		}

		/* Compute the amount of zero padding needed to fill the
		 * sparse gap between what we've emitted so far and where
		 * the current block starts (capped at the caller's s). */
		if (a->read_data_output_offset + (int64_t)s <
		    a->read_data_offset) {
			len = s;
		} else if (a->read_data_output_offset <
		    a->read_data_offset) {
			len = (size_t)(a->read_data_offset -
			    a->read_data_output_offset);
		} else
			len = 0;

		/* Add zeroes. */
		memset(dest, 0, len);
		s -= len;
		a->read_data_output_offset += len;
		dest += len;
		bytes_read += len;

		/* Copy data if there is any space left. */
		if (s > 0) {
			len = a->read_data_remaining;
			if (len > s)
				len = s;
			memcpy(dest, a->read_data_block, len);
			s -= len;
			/* Advance the cached block cursor so leftover
			 * bytes are delivered on the next call. */
			a->read_data_block += len;
			a->read_data_remaining -= len;
			a->read_data_output_offset += len;
			a->read_data_offset += len;
			dest += len;
			bytes_read += len;
		}
	}
	a->read_data_is_posix_read = 0;
	a->read_data_requested = 0;
	return (bytes_read);
}
839 
840 /*
841  * Skip over all remaining data in this entry.
842  */
843 int
844 archive_read_data_skip(struct archive *_a)
845 {
846 	struct archive_read *a = (struct archive_read *)_a;
847 	int r;
848 	const void *buff;
849 	size_t size;
850 	int64_t offset;
851 
852 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
853 	    "archive_read_data_skip");
854 
855 	if (a->format->read_data_skip != NULL)
856 		r = (a->format->read_data_skip)(a);
857 	else {
858 		while ((r = archive_read_data_block(&a->archive,
859 			    &buff, &size, &offset))
860 		    == ARCHIVE_OK)
861 			;
862 	}
863 
864 	if (r == ARCHIVE_EOF)
865 		r = ARCHIVE_OK;
866 
867 	a->archive.state = ARCHIVE_STATE_HEADER;
868 	return (r);
869 }
870 
871 int64_t
872 archive_seek_data(struct archive *_a, int64_t offset, int whence)
873 {
874 	struct archive_read *a = (struct archive_read *)_a;
875 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
876 	    "archive_seek_data_block");
877 
878 	if (a->format->seek_data == NULL) {
879 		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
880 		    "Internal error: "
881 		    "No format_seek_data_block function registered");
882 		return (ARCHIVE_FATAL);
883 	}
884 
885 	return (a->format->seek_data)(a, offset, whence);
886 }
887 
888 /*
889  * Read the next block of entry data from the archive.
890  * This is a zero-copy interface; the client receives a pointer,
891  * size, and file offset of the next available block of data.
892  *
893  * Returns ARCHIVE_OK if the operation is successful, ARCHIVE_EOF if
894  * the end of entry is encountered.
895  */
896 static int
897 _archive_read_data_block(struct archive *_a,
898     const void **buff, size_t *size, int64_t *offset)
899 {
900 	struct archive_read *a = (struct archive_read *)_a;
901 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
902 	    "archive_read_data_block");
903 
904 	if (a->format->read_data == NULL) {
905 		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
906 		    "Internal error: "
907 		    "No format_read_data_block function registered");
908 		return (ARCHIVE_FATAL);
909 	}
910 
911 	return (a->format->read_data)(a, buff, size, offset);
912 }
913 
914 int
915 __archive_read_close_filters(struct archive_read *a)
916 {
917 	struct archive_read_filter *f = a->filter;
918 	int r = ARCHIVE_OK;
919 	/* Close each filter in the pipeline. */
920 	while (f != NULL) {
921 		struct archive_read_filter *t = f->upstream;
922 		if (!f->closed && f->close != NULL) {
923 			int r1 = (f->close)(f);
924 			f->closed = 1;
925 			if (r1 < r)
926 				r = r1;
927 		}
928 		free(f->buffer);
929 		f->buffer = NULL;
930 		f = t;
931 	}
932 	return r;
933 }
934 
935 void
936 __archive_read_free_filters(struct archive_read *a)
937 {
938 	while (a->filter != NULL) {
939 		struct archive_read_filter *t = a->filter->upstream;
940 		free(a->filter);
941 		a->filter = t;
942 	}
943 }
944 
945 /*
946  * return the count of # of filters in use
947  */
948 static int
949 _archive_filter_count(struct archive *_a)
950 {
951 	struct archive_read *a = (struct archive_read *)_a;
952 	struct archive_read_filter *p = a->filter;
953 	int count = 0;
954 	while(p) {
955 		count++;
956 		p = p->upstream;
957 	}
958 	return count;
959 }
960 
961 /*
962  * Close the file and all I/O.
963  */
964 static int
965 _archive_read_close(struct archive *_a)
966 {
967 	struct archive_read *a = (struct archive_read *)_a;
968 	int r = ARCHIVE_OK, r1 = ARCHIVE_OK;
969 
970 	archive_check_magic(&a->archive, ARCHIVE_READ_MAGIC,
971 	    ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_close");
972 	if (a->archive.state == ARCHIVE_STATE_CLOSED)
973 		return (ARCHIVE_OK);
974 	archive_clear_error(&a->archive);
975 	a->archive.state = ARCHIVE_STATE_CLOSED;
976 
977 	/* TODO: Clean up the formatters. */
978 
979 	/* Release the filter objects. */
980 	r1 = __archive_read_close_filters(a);
981 	if (r1 < r)
982 		r = r1;
983 
984 	return (r);
985 }
986 
987 /*
988  * Release memory and other resources.
989  */
static int
_archive_read_free(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	int i, n;
	int slots;
	int r = ARCHIVE_OK;

	if (_a == NULL)
		return (ARCHIVE_OK);
	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_free");
	/* Close first if the caller hasn't already (skip if a fatal
	 * error already poisoned the handle). */
	if (a->archive.state != ARCHIVE_STATE_CLOSED
	    && a->archive.state != ARCHIVE_STATE_FATAL)
		r = archive_read_close(&a->archive);

	/* Call cleanup functions registered by optional components. */
	if (a->cleanup_archive_extract != NULL)
		r = (a->cleanup_archive_extract)(a);

	/* Cleanup format-specific data. */
	slots = sizeof(a->formats) / sizeof(a->formats[0]);
	for (i = 0; i < slots; i++) {
		/* Point a->format at the slot being cleaned so the
		 * cleanup callback can find its own data. */
		a->format = &(a->formats[i]);
		if (a->formats[i].cleanup)
			(a->formats[i].cleanup)(a);
	}

	/* Free the filters (buffers were already released at close). */
	__archive_read_free_filters(a);

	/* Release the bidder objects. */
	n = sizeof(a->bidders)/sizeof(a->bidders[0]);
	for (i = 0; i < n; i++) {
		if (a->bidders[i].free != NULL) {
			int r1 = (a->bidders[i].free)(&a->bidders[i]);
			if (r1 < r)
				r = r1;
		}
	}

	/* Finally release per-archive storage and the object itself. */
	archive_string_free(&a->archive.error_string);
	if (a->entry)
		archive_entry_free(a->entry);
	a->archive.magic = 0;
	__archive_clean(&a->archive);
	free(a->client.dataset);
	free(a);
	return (r);
}
1040 
1041 static struct archive_read_filter *
1042 get_filter(struct archive *_a, int n)
1043 {
1044 	struct archive_read *a = (struct archive_read *)_a;
1045 	struct archive_read_filter *f = a->filter;
1046 	/* We use n == -1 for 'the last filter', which is always the
1047 	 * client proxy. */
1048 	if (n == -1 && f != NULL) {
1049 		struct archive_read_filter *last = f;
1050 		f = f->upstream;
1051 		while (f != NULL) {
1052 			last = f;
1053 			f = f->upstream;
1054 		}
1055 		return (last);
1056 	}
1057 	if (n < 0)
1058 		return NULL;
1059 	while (n > 0 && f != NULL) {
1060 		f = f->upstream;
1061 		--n;
1062 	}
1063 	return (f);
1064 }
1065 
1066 static int
1067 _archive_filter_code(struct archive *_a, int n)
1068 {
1069 	struct archive_read_filter *f = get_filter(_a, n);
1070 	return f == NULL ? -1 : f->code;
1071 }
1072 
1073 static const char *
1074 _archive_filter_name(struct archive *_a, int n)
1075 {
1076 	struct archive_read_filter *f = get_filter(_a, n);
1077 	return f == NULL ? NULL : f->name;
1078 }
1079 
1080 static int64_t
1081 _archive_filter_bytes(struct archive *_a, int n)
1082 {
1083 	struct archive_read_filter *f = get_filter(_a, n);
1084 	return f == NULL ? -1 : f->position;
1085 }
1086 
1087 /*
1088  * Used internally by read format handlers to register their bid and
1089  * initialization functions.
1090  */
1091 int
1092 __archive_read_register_format(struct archive_read *a,
1093     void *format_data,
1094     const char *name,
1095     int (*bid)(struct archive_read *, int),
1096     int (*options)(struct archive_read *, const char *, const char *),
1097     int (*read_header)(struct archive_read *, struct archive_entry *),
1098     int (*read_data)(struct archive_read *, const void **, size_t *, int64_t *),
1099     int (*read_data_skip)(struct archive_read *),
1100     int64_t (*seek_data)(struct archive_read *, int64_t, int),
1101     int (*cleanup)(struct archive_read *))
1102 {
1103 	int i, number_slots;
1104 
1105 	archive_check_magic(&a->archive,
1106 	    ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
1107 	    "__archive_read_register_format");
1108 
1109 	number_slots = sizeof(a->formats) / sizeof(a->formats[0]);
1110 
1111 	for (i = 0; i < number_slots; i++) {
1112 		if (a->formats[i].bid == bid)
1113 			return (ARCHIVE_WARN); /* We've already installed */
1114 		if (a->formats[i].bid == NULL) {
1115 			a->formats[i].bid = bid;
1116 			a->formats[i].options = options;
1117 			a->formats[i].read_header = read_header;
1118 			a->formats[i].read_data = read_data;
1119 			a->formats[i].read_data_skip = read_data_skip;
1120 			a->formats[i].seek_data = seek_data;
1121 			a->formats[i].cleanup = cleanup;
1122 			a->formats[i].data = format_data;
1123 			a->formats[i].name = name;
1124 			return (ARCHIVE_OK);
1125 		}
1126 	}
1127 
1128 	archive_set_error(&a->archive, ENOMEM,
1129 	    "Not enough slots for format registration");
1130 	return (ARCHIVE_FATAL);
1131 }
1132 
1133 /*
1134  * Used internally by decompression routines to register their bid and
1135  * initialization functions.
1136  */
1137 int
1138 __archive_read_get_bidder(struct archive_read *a,
1139     struct archive_read_filter_bidder **bidder)
1140 {
1141 	int i, number_slots;
1142 
1143 	number_slots = sizeof(a->bidders) / sizeof(a->bidders[0]);
1144 
1145 	for (i = 0; i < number_slots; i++) {
1146 		if (a->bidders[i].bid == NULL) {
1147 			memset(a->bidders + i, 0, sizeof(a->bidders[0]));
1148 			*bidder = (a->bidders + i);
1149 			return (ARCHIVE_OK);
1150 		}
1151 	}
1152 
1153 	archive_set_error(&a->archive, ENOMEM,
1154 	    "Not enough slots for filter registration");
1155 	return (ARCHIVE_FATAL);
1156 }
1157 
1158 /*
1159  * The next section implements the peek/consume internal I/O
1160  * system used by archive readers.  This system allows simple
1161  * read-ahead for consumers while preserving zero-copy operation
1162  * most of the time.
1163  *
1164  * The two key operations:
1165  *  * The read-ahead function returns a pointer to a block of data
1166  *    that satisfies a minimum request.
1167  *  * The consume function advances the file pointer.
1168  *
1169  * In the ideal case, filters generate blocks of data
1170  * and __archive_read_ahead() just returns pointers directly into
1171  * those blocks.  Then __archive_read_consume() just bumps those
1172  * pointers.  Only if your request would span blocks does the I/O
1173  * layer use a copy buffer to provide you with a contiguous block of
1174  * data.
1175  *
1176  * A couple of useful idioms:
1177  *  * "I just want some data."  Ask for 1 byte and pay attention to
1178  *    the "number of bytes available" from __archive_read_ahead().
1179  *    Consume whatever you actually use.
1180  *  * "I want to output a large block of data."  As above, ask for 1 byte,
1181  *    emit all that's available (up to whatever limit you have), consume
1182  *    it all, then repeat until you're done.  This effectively means that
1183  *    you're passing along the blocks that came from your provider.
1184  *  * "I want to peek ahead by a large amount."  Ask for 4k or so, then
1185  *    double and repeat until you get an error or have enough.  Note
1186  *    that the I/O layer will likely end up expanding its copy buffer
1187  *    to fit your request, so use this technique cautiously.  This
1188  *    technique is used, for example, by some of the format tasting
1189  *    code that has uncertain look-ahead needs.
1190  */
1191 
1192 /*
1193  * Looks ahead in the input stream:
1194  *  * If 'avail' pointer is provided, that returns number of bytes available
1195  *    in the current buffer, which may be much larger than requested.
1196  *  * If end-of-file, *avail gets set to zero.
1197  *  * If error, *avail gets error code.
1198  *  * If request can be met, returns pointer to data.
1199  *  * If minimum request cannot be met, returns NULL.
1200  *
1201  * Note: If you just want "some data", ask for 1 byte and pay attention
1202  * to *avail, which will have the actual amount available.  If you
1203  * know exactly how many bytes you need, just ask for that and treat
1204  * a NULL return as an error.
1205  *
1206  * Important:  This does NOT move the file pointer.  See
1207  * __archive_read_consume() below.
1208  */
1209 const void *
1210 __archive_read_ahead(struct archive_read *a, size_t min, ssize_t *avail)
1211 {
1212 	return (__archive_read_filter_ahead(a->filter, min, avail));
1213 }
1214 
const void *
__archive_read_filter_ahead(struct archive_read_filter *filter,
    size_t min, ssize_t *avail)
{
	ssize_t bytes_read;
	size_t tocopy;

	/* A previous I/O error poisons the filter permanently. */
	if (filter->fatal) {
		if (avail)
			*avail = ARCHIVE_FATAL;
		return (NULL);
	}

	/*
	 * Keep pulling more data until we can satisfy the request.
	 */
	for (;;) {

		/*
		 * If we can satisfy from the copy buffer (and the
		 * copy buffer isn't empty), we're done.  In particular,
		 * note that min == 0 is a perfectly well-defined
		 * request.
		 */
		if (filter->avail >= min && filter->avail > 0) {
			if (avail != NULL)
				*avail = filter->avail;
			return (filter->next);
		}

		/*
		 * We can satisfy directly from client buffer if everything
		 * currently in the copy buffer is still in the client buffer.
		 */
		if (filter->client_total >= filter->client_avail + filter->avail
		    && filter->client_avail + filter->avail >= min) {
			/* "Roll back" to client buffer. */
			filter->client_avail += filter->avail;
			filter->client_next -= filter->avail;
			/* Copy buffer is now empty. */
			filter->avail = 0;
			filter->next = filter->buffer;
			/* Return data from client buffer. */
			if (avail != NULL)
				*avail = filter->client_avail;
			return (filter->client_next);
		}

		/* Move data forward in copy buffer if necessary. */
		if (filter->next > filter->buffer &&
		    filter->next + min > filter->buffer + filter->buffer_size) {
			if (filter->avail > 0)
				memmove(filter->buffer, filter->next,
				    filter->avail);
			filter->next = filter->buffer;
		}

		/* If we've used up the client data, get more. */
		if (filter->client_avail <= 0) {
			if (filter->end_of_file) {
				if (avail != NULL)
					*avail = 0;
				return (NULL);
			}
			bytes_read = (filter->read)(filter,
			    &filter->client_buff);
			if (bytes_read < 0) {		/* Read error. */
				filter->client_total = filter->client_avail = 0;
				filter->client_next =
				    filter->client_buff = NULL;
				filter->fatal = 1;
				if (avail != NULL)
					*avail = ARCHIVE_FATAL;
				return (NULL);
			}
			if (bytes_read == 0) {
				/* Check for another client object first */
				if (filter->archive->client.cursor !=
				      filter->archive->client.nodes - 1) {
					if (client_switch_proxy(filter,
					    filter->archive->client.cursor + 1)
					    == ARCHIVE_OK)
						continue;
				}
				/* Premature end-of-file. */
				filter->client_total = filter->client_avail = 0;
				filter->client_next =
				    filter->client_buff = NULL;
				filter->end_of_file = 1;
				/* Return whatever we do have. */
				if (avail != NULL)
					*avail = filter->avail;
				return (NULL);
			}
			/* Fresh client block: reset the client cursors
			 * and loop back to retry the request. */
			filter->client_total = bytes_read;
			filter->client_avail = filter->client_total;
			filter->client_next = filter->client_buff;
		} else {
			/*
			 * We can't satisfy the request from the copy
			 * buffer or the existing client data, so we
			 * need to copy more client data over to the
			 * copy buffer.
			 */

			/* Ensure the buffer is big enough. */
			if (min > filter->buffer_size) {
				size_t s, t;
				char *p;

				/* Double the buffer; watch for overflow. */
				s = t = filter->buffer_size;
				if (s == 0)
					s = min;
				while (s < min) {
					t *= 2;
					if (t <= s) { /* Integer overflow! */
						archive_set_error(
						    &filter->archive->archive,
						    ENOMEM,
						    "Unable to allocate copy"
						    " buffer");
						filter->fatal = 1;
						if (avail != NULL)
							*avail = ARCHIVE_FATAL;
						return (NULL);
					}
					s = t;
				}
				/* Now s >= min, so allocate a new buffer. */
				p = (char *)malloc(s);
				if (p == NULL) {
					archive_set_error(
						&filter->archive->archive,
						ENOMEM,
					    "Unable to allocate copy buffer");
					filter->fatal = 1;
					if (avail != NULL)
						*avail = ARCHIVE_FATAL;
					return (NULL);
				}
				/* Move data into newly-enlarged buffer. */
				if (filter->avail > 0)
					memmove(p, filter->next, filter->avail);
				free(filter->buffer);
				filter->next = filter->buffer = p;
				filter->buffer_size = s;
			}

			/* We can add client data to copy buffer. */
			/* First estimate: copy to fill rest of buffer. */
			tocopy = (filter->buffer + filter->buffer_size)
			    - (filter->next + filter->avail);
			/* Don't waste time buffering more than we need to. */
			if (tocopy + filter->avail > min)
				tocopy = min - filter->avail;
			/* Don't copy more than is available. */
			if (tocopy > filter->client_avail)
				tocopy = filter->client_avail;

			memcpy(filter->next + filter->avail,
			    filter->client_next, tocopy);
			/* Remove this data from client buffer. */
			filter->client_next += tocopy;
			filter->client_avail -= tocopy;
			/* add it to copy buffer. */
			filter->avail += tocopy;
		}
	}
}
1385 
1386 /*
1387  * Move the file pointer forward.
1388  */
1389 int64_t
1390 __archive_read_consume(struct archive_read *a, int64_t request)
1391 {
1392 	return (__archive_read_filter_consume(a->filter, request));
1393 }
1394 
1395 int64_t
1396 __archive_read_filter_consume(struct archive_read_filter * filter,
1397     int64_t request)
1398 {
1399 	int64_t skipped;
1400 
1401 	if (request < 0)
1402 		return ARCHIVE_FATAL;
1403 	if (request == 0)
1404 		return 0;
1405 
1406 	skipped = advance_file_pointer(filter, request);
1407 	if (skipped == request)
1408 		return (skipped);
1409 	/* We hit EOF before we satisfied the skip request. */
1410 	if (skipped < 0)  /* Map error code to 0 for error message below. */
1411 		skipped = 0;
1412 	archive_set_error(&filter->archive->archive,
1413 	    ARCHIVE_ERRNO_MISC,
1414 	    "Truncated input file (needed %jd bytes, only %jd available)",
1415 	    (intmax_t)request, (intmax_t)skipped);
1416 	return (ARCHIVE_FATAL);
1417 }
1418 
1419 /*
1420  * Advance the file pointer by the amount requested.
1421  * Returns the amount actually advanced, which may be less than the
1422  * request if EOF is encountered first.
1423  * Returns a negative value if there's an I/O error.
1424  */
static int64_t
advance_file_pointer(struct archive_read_filter *filter, int64_t request)
{
	int64_t bytes_skipped, total_bytes_skipped = 0;
	ssize_t bytes_read;
	size_t min;

	/* Refuse to operate on a filter that already hit an I/O error. */
	if (filter->fatal)
		return (-1);

	/* Use up the copy buffer first. */
	if (filter->avail > 0) {
		min = (size_t)minimum(request, (int64_t)filter->avail);
		filter->next += min;
		filter->avail -= min;
		request -= min;
		filter->position += min;
		total_bytes_skipped += min;
	}

	/* Then use up the client buffer. */
	if (filter->client_avail > 0) {
		min = (size_t)minimum(request, (int64_t)filter->client_avail);
		filter->client_next += min;
		filter->client_avail -= min;
		request -= min;
		filter->position += min;
		total_bytes_skipped += min;
	}
	/* Buffered data alone covered the request. */
	if (request == 0)
		return (total_bytes_skipped);

	/* If there's an optimized skip function, use it. */
	if (filter->skip != NULL) {
		bytes_skipped = (filter->skip)(filter, request);
		if (bytes_skipped < 0) {	/* error */
			filter->fatal = 1;
			return (bytes_skipped);
		}
		filter->position += bytes_skipped;
		total_bytes_skipped += bytes_skipped;
		request -= bytes_skipped;
		if (request == 0)
			return (total_bytes_skipped);
		/* The skip callback may stop short; fall through to
		 * ordinary reads for the remainder. */
	}

	/* Use ordinary reads as necessary to complete the request. */
	for (;;) {
		bytes_read = (filter->read)(filter, &filter->client_buff);
		if (bytes_read < 0) {
			filter->client_buff = NULL;
			filter->fatal = 1;
			return (bytes_read);
		}

		if (bytes_read == 0) {
			/* Current client source is exhausted; try the
			 * next client object in the dataset, if any. */
			if (filter->archive->client.cursor !=
			      filter->archive->client.nodes - 1) {
				if (client_switch_proxy(filter,
				    filter->archive->client.cursor + 1)
				    == ARCHIVE_OK)
					continue;
			}
			/* True EOF: report how far we actually got. */
			filter->client_buff = NULL;
			filter->end_of_file = 1;
			return (total_bytes_skipped);
		}

		if (bytes_read >= request) {
			/* This block covers the rest of the request;
			 * keep the remainder as available client data. */
			filter->client_next =
			    ((const char *)filter->client_buff) + request;
			filter->client_avail = (size_t)(bytes_read - request);
			filter->client_total = bytes_read;
			total_bytes_skipped += request;
			filter->position += request;
			return (total_bytes_skipped);
		}

		/* Entire block consumed by the skip; keep reading. */
		filter->position += bytes_read;
		total_bytes_skipped += bytes_read;
		request -= bytes_read;
	}
}
1508 
1509 /**
1510  * Returns ARCHIVE_FAILED if seeking isn't supported.
1511  */
1512 int64_t
1513 __archive_read_seek(struct archive_read *a, int64_t offset, int whence)
1514 {
1515 	return __archive_read_filter_seek(a->filter, offset, whence);
1516 }
1517 
int64_t
__archive_read_filter_seek(struct archive_read_filter *filter, int64_t offset,
    int whence)
{
	struct archive_read_client *client;
	int64_t r;
	unsigned int cursor;

	if (filter->closed || filter->fatal)
		return (ARCHIVE_FATAL);
	/* Seeking requires an optional client callback. */
	if (filter->seek == NULL)
		return (ARCHIVE_FAILED);

	client = &(filter->archive->client);
	switch (whence) {
	case SEEK_CUR:
		/* Adjust the offset and use SEEK_SET instead */
		offset += filter->position;
		/* FALLTHROUGH */
	case SEEK_SET:
		/* Walk forward through data nodes whose sizes are
		 * already known, propagating begin positions, until we
		 * reach the node containing the target offset (or run
		 * out of known sizes). */
		cursor = 0;
		while (1)
		{
			if (client->dataset[cursor].begin_position < 0 ||
			    client->dataset[cursor].total_size < 0 ||
			    client->dataset[cursor].begin_position +
			      client->dataset[cursor].total_size - 1 > offset ||
			    cursor + 1 >= client->nodes)
				break;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			client->dataset[++cursor].begin_position = r;
		}
		/* Measure remaining nodes (via SEEK_END on each) until
		 * the target offset falls inside the current node. */
		while (1) {
			r = client_switch_proxy(filter, cursor);
			if (r != ARCHIVE_OK)
				return r;
			if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0)
				return r;
			client->dataset[cursor].total_size = r;
			if (client->dataset[cursor].begin_position +
			    client->dataset[cursor].total_size - 1 > offset ||
			    cursor + 1 >= client->nodes)
				break;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			client->dataset[++cursor].begin_position = r;
		}
		/* Convert to a node-relative offset and clamp it to the
		 * node's bounds before seeking within it. */
		offset -= client->dataset[cursor].begin_position;
		if (offset < 0)
			offset = 0;
		else if (offset > client->dataset[cursor].total_size - 1)
			offset = client->dataset[cursor].total_size - 1;
		if ((r = client_seek_proxy(filter, offset, SEEK_SET)) < 0)
			return r;
		break;

	case SEEK_END:
		/* Walk forward through nodes with known sizes,
		 * propagating begin positions. */
		cursor = 0;
		while (1) {
			if (client->dataset[cursor].begin_position < 0 ||
			    client->dataset[cursor].total_size < 0 ||
			    cursor + 1 >= client->nodes)
				break;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			client->dataset[++cursor].begin_position = r;
		}
		/* Measure every remaining node so that r ends up as the
		 * total size of the whole multi-node dataset. */
		while (1) {
			r = client_switch_proxy(filter, cursor);
			if (r != ARCHIVE_OK)
				return r;
			if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0)
				return r;
			client->dataset[cursor].total_size = r;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			if (cursor + 1 >= client->nodes)
				break;
			client->dataset[++cursor].begin_position = r;
		}
		/* offset is <= 0 relative to the end; walk backward to
		 * the node containing the target position. */
		while (1) {
			if (r + offset >=
			    client->dataset[cursor].begin_position)
				break;
			offset += client->dataset[cursor].total_size;
			if (cursor == 0)
				break;
			cursor--;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
		}
		/* Seek within the chosen node using SEEK_SET. */
		offset = (r + offset) - client->dataset[cursor].begin_position;
		if ((r = client_switch_proxy(filter, cursor)) != ARCHIVE_OK)
			return r;
		r = client_seek_proxy(filter, offset, SEEK_SET);
		if (r < ARCHIVE_OK)
			return r;
		break;

	default:
		return (ARCHIVE_FATAL);
	}
	/* Translate the node-relative result back to a dataset-wide
	 * absolute position. */
	r += client->dataset[cursor].begin_position;

	if (r >= 0) {
		/*
		 * Ouch.  Clearing the buffer like this hurts, especially
		 * at bid time.  A lot of our efficiency at bid time comes
		 * from having bidders reuse the data we've already read.
		 *
		 * TODO: If the seek request is in data we already
		 * have, then don't call the seek callback.
		 *
		 * TODO: Zip seeks to end-of-file at bid time.  If
		 * other formats also start doing this, we may need to
		 * find a way for clients to fudge the seek offset to
		 * a block boundary.
		 *
		 * Hmmm... If whence was SEEK_END, we know the file
		 * size is (r - offset).  Can we use that to simplify
		 * the TODO items above?
		 */
		filter->avail = filter->client_avail = 0;
		filter->next = filter->buffer;
		filter->position = r;
		filter->end_of_file = 0;
	}
	return r;
}
1647