xref: /freebsd/contrib/libarchive/libarchive/archive_read.c (revision 9e5787d2284e187abb5b654d924394a65772e004)
1 /*-
2  * Copyright (c) 2003-2011 Tim Kientzle
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 /*
27  * This file contains the "essential" portions of the read API, that
28  * is, stuff that will probably always be used by any client that
29  * actually needs to read an archive.  Optional pieces have been, as
30  * far as possible, separated out into separate files to avoid
31  * needlessly bloating statically-linked clients.
32  */
33 
34 #include "archive_platform.h"
35 __FBSDID("$FreeBSD$");
36 
37 #ifdef HAVE_ERRNO_H
38 #include <errno.h>
39 #endif
40 #include <stdio.h>
41 #ifdef HAVE_STDLIB_H
42 #include <stdlib.h>
43 #endif
44 #ifdef HAVE_STRING_H
45 #include <string.h>
46 #endif
47 #ifdef HAVE_UNISTD_H
48 #include <unistd.h>
49 #endif
50 
51 #include "archive.h"
52 #include "archive_entry.h"
53 #include "archive_private.h"
54 #include "archive_read_private.h"
55 
/* Smaller of two values.  Fix: parenthesize the arguments and the whole
 * expansion so low-precedence argument expressions (e.g. minimum(a & 3, b)
 * or minimum(x, y ? u : v)) parse correctly (CERT PRE01-C / PRE02-C).
 * Note: arguments are still evaluated twice; do not pass side effects. */
#define minimum(a, b) ((a) < (b) ? (a) : (b))
57 
58 static int	choose_filters(struct archive_read *);
59 static int	choose_format(struct archive_read *);
60 static int	close_filters(struct archive_read *);
61 static struct archive_vtable *archive_read_vtable(void);
62 static int64_t	_archive_filter_bytes(struct archive *, int);
63 static int	_archive_filter_code(struct archive *, int);
64 static const char *_archive_filter_name(struct archive *, int);
65 static int  _archive_filter_count(struct archive *);
66 static int	_archive_read_close(struct archive *);
67 static int	_archive_read_data_block(struct archive *,
68 		    const void **, size_t *, int64_t *);
69 static int	_archive_read_free(struct archive *);
70 static int	_archive_read_next_header(struct archive *,
71 		    struct archive_entry **);
72 static int	_archive_read_next_header2(struct archive *,
73 		    struct archive_entry *);
74 static int64_t  advance_file_pointer(struct archive_read_filter *, int64_t);
75 
76 static struct archive_vtable *
77 archive_read_vtable(void)
78 {
79 	static struct archive_vtable av;
80 	static int inited = 0;
81 
82 	if (!inited) {
83 		av.archive_filter_bytes = _archive_filter_bytes;
84 		av.archive_filter_code = _archive_filter_code;
85 		av.archive_filter_name = _archive_filter_name;
86 		av.archive_filter_count = _archive_filter_count;
87 		av.archive_read_data_block = _archive_read_data_block;
88 		av.archive_read_next_header = _archive_read_next_header;
89 		av.archive_read_next_header2 = _archive_read_next_header2;
90 		av.archive_free = _archive_read_free;
91 		av.archive_close = _archive_read_close;
92 		inited = 1;
93 	}
94 	return (&av);
95 }
96 
97 /*
98  * Allocate, initialize and return a struct archive object.
99  */
100 struct archive *
101 archive_read_new(void)
102 {
103 	struct archive_read *a;
104 
105 	a = (struct archive_read *)calloc(1, sizeof(*a));
106 	if (a == NULL)
107 		return (NULL);
108 	a->archive.magic = ARCHIVE_READ_MAGIC;
109 
110 	a->archive.state = ARCHIVE_STATE_NEW;
111 	a->entry = archive_entry_new2(&a->archive);
112 	a->archive.vtable = archive_read_vtable();
113 
114 	a->passphrases.last = &a->passphrases.first;
115 
116 	return (&a->archive);
117 }
118 
119 /*
120  * Record the do-not-extract-to file. This belongs in archive_read_extract.c.
121  */
122 void
123 archive_read_extract_set_skip_file(struct archive *_a, la_int64_t d,
124     la_int64_t i)
125 {
126 	struct archive_read *a = (struct archive_read *)_a;
127 
128 	if (ARCHIVE_OK != __archive_check_magic(_a, ARCHIVE_READ_MAGIC,
129 		ARCHIVE_STATE_ANY, "archive_read_extract_set_skip_file"))
130 		return;
131 	a->skip_file_set = 1;
132 	a->skip_file_dev = d;
133 	a->skip_file_ino = i;
134 }
135 
136 /*
137  * Open the archive
138  */
139 int
140 archive_read_open(struct archive *a, void *client_data,
141     archive_open_callback *client_opener, archive_read_callback *client_reader,
142     archive_close_callback *client_closer)
143 {
144 	/* Old archive_read_open() is just a thin shell around
145 	 * archive_read_open1. */
146 	archive_read_set_open_callback(a, client_opener);
147 	archive_read_set_read_callback(a, client_reader);
148 	archive_read_set_close_callback(a, client_closer);
149 	archive_read_set_callback_data(a, client_data);
150 	return archive_read_open1(a);
151 }
152 
153 
154 int
155 archive_read_open2(struct archive *a, void *client_data,
156     archive_open_callback *client_opener,
157     archive_read_callback *client_reader,
158     archive_skip_callback *client_skipper,
159     archive_close_callback *client_closer)
160 {
161 	/* Old archive_read_open2() is just a thin shell around
162 	 * archive_read_open1. */
163 	archive_read_set_callback_data(a, client_data);
164 	archive_read_set_open_callback(a, client_opener);
165 	archive_read_set_read_callback(a, client_reader);
166 	archive_read_set_skip_callback(a, client_skipper);
167 	archive_read_set_close_callback(a, client_closer);
168 	return archive_read_open1(a);
169 }
170 
171 static ssize_t
172 client_read_proxy(struct archive_read_filter *self, const void **buff)
173 {
174 	ssize_t r;
175 	r = (self->archive->client.reader)(&self->archive->archive,
176 	    self->data, buff);
177 	return (r);
178 }
179 
180 static int64_t
181 client_skip_proxy(struct archive_read_filter *self, int64_t request)
182 {
183 	if (request < 0)
184 		__archive_errx(1, "Negative skip requested.");
185 	if (request == 0)
186 		return 0;
187 
188 	if (self->archive->client.skipper != NULL) {
189 		/* Seek requests over 1GiB are broken down into
190 		 * multiple seeks.  This avoids overflows when the
191 		 * requests get passed through 32-bit arguments. */
192 		int64_t skip_limit = (int64_t)1 << 30;
193 		int64_t total = 0;
194 		for (;;) {
195 			int64_t get, ask = request;
196 			if (ask > skip_limit)
197 				ask = skip_limit;
198 			get = (self->archive->client.skipper)
199 				(&self->archive->archive, self->data, ask);
200 			total += get;
201 			if (get == 0 || get == request)
202 				return (total);
203 			if (get > request)
204 				return ARCHIVE_FATAL;
205 			request -= get;
206 		}
207 	} else if (self->archive->client.seeker != NULL
208 		&& request > 64 * 1024) {
209 		/* If the client provided a seeker but not a skipper,
210 		 * we can use the seeker to skip forward.
211 		 *
212 		 * Note: This isn't always a good idea.  The client
213 		 * skipper is allowed to skip by less than requested
214 		 * if it needs to maintain block alignment.  The
215 		 * seeker is not allowed to play such games, so using
216 		 * the seeker here may be a performance loss compared
217 		 * to just reading and discarding.  That's why we
218 		 * only do this for skips of over 64k.
219 		 */
220 		int64_t before = self->position;
221 		int64_t after = (self->archive->client.seeker)
222 		    (&self->archive->archive, self->data, request, SEEK_CUR);
223 		if (after != before + request)
224 			return ARCHIVE_FATAL;
225 		return after - before;
226 	}
227 	return 0;
228 }
229 
230 static int64_t
231 client_seek_proxy(struct archive_read_filter *self, int64_t offset, int whence)
232 {
233 	/* DO NOT use the skipper here!  If we transparently handled
234 	 * forward seek here by using the skipper, that will break
235 	 * other libarchive code that assumes a successful forward
236 	 * seek means it can also seek backwards.
237 	 */
238 	if (self->archive->client.seeker == NULL) {
239 		archive_set_error(&self->archive->archive, ARCHIVE_ERRNO_MISC,
240 		    "Current client reader does not support seeking a device");
241 		return (ARCHIVE_FAILED);
242 	}
243 	return (self->archive->client.seeker)(&self->archive->archive,
244 	    self->data, offset, whence);
245 }
246 
247 static int
248 client_close_proxy(struct archive_read_filter *self)
249 {
250 	int r = ARCHIVE_OK, r2;
251 	unsigned int i;
252 
253 	if (self->archive->client.closer == NULL)
254 		return (r);
255 	for (i = 0; i < self->archive->client.nodes; i++)
256 	{
257 		r2 = (self->archive->client.closer)
258 			((struct archive *)self->archive,
259 				self->archive->client.dataset[i].data);
260 		if (r > r2)
261 			r = r2;
262 	}
263 	return (r);
264 }
265 
266 static int
267 client_open_proxy(struct archive_read_filter *self)
268 {
269   int r = ARCHIVE_OK;
270 	if (self->archive->client.opener != NULL)
271 		r = (self->archive->client.opener)(
272 		    (struct archive *)self->archive, self->data);
273 	return (r);
274 }
275 
276 static int
277 client_switch_proxy(struct archive_read_filter *self, unsigned int iindex)
278 {
279   int r1 = ARCHIVE_OK, r2 = ARCHIVE_OK;
280 	void *data2 = NULL;
281 
282 	/* Don't do anything if already in the specified data node */
283 	if (self->archive->client.cursor == iindex)
284 		return (ARCHIVE_OK);
285 
286 	self->archive->client.cursor = iindex;
287 	data2 = self->archive->client.dataset[self->archive->client.cursor].data;
288 	if (self->archive->client.switcher != NULL)
289 	{
290 		r1 = r2 = (self->archive->client.switcher)
291 			((struct archive *)self->archive, self->data, data2);
292 		self->data = data2;
293 	}
294 	else
295 	{
296 		/* Attempt to call close and open instead */
297 		if (self->archive->client.closer != NULL)
298 			r1 = (self->archive->client.closer)
299 				((struct archive *)self->archive, self->data);
300 		self->data = data2;
301 		if (self->archive->client.opener != NULL)
302 			r2 = (self->archive->client.opener)
303 				((struct archive *)self->archive, self->data);
304 	}
305 	return (r1 < r2) ? r1 : r2;
306 }
307 
308 int
309 archive_read_set_open_callback(struct archive *_a,
310     archive_open_callback *client_opener)
311 {
312 	struct archive_read *a = (struct archive_read *)_a;
313 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
314 	    "archive_read_set_open_callback");
315 	a->client.opener = client_opener;
316 	return ARCHIVE_OK;
317 }
318 
319 int
320 archive_read_set_read_callback(struct archive *_a,
321     archive_read_callback *client_reader)
322 {
323 	struct archive_read *a = (struct archive_read *)_a;
324 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
325 	    "archive_read_set_read_callback");
326 	a->client.reader = client_reader;
327 	return ARCHIVE_OK;
328 }
329 
330 int
331 archive_read_set_skip_callback(struct archive *_a,
332     archive_skip_callback *client_skipper)
333 {
334 	struct archive_read *a = (struct archive_read *)_a;
335 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
336 	    "archive_read_set_skip_callback");
337 	a->client.skipper = client_skipper;
338 	return ARCHIVE_OK;
339 }
340 
341 int
342 archive_read_set_seek_callback(struct archive *_a,
343     archive_seek_callback *client_seeker)
344 {
345 	struct archive_read *a = (struct archive_read *)_a;
346 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
347 	    "archive_read_set_seek_callback");
348 	a->client.seeker = client_seeker;
349 	return ARCHIVE_OK;
350 }
351 
352 int
353 archive_read_set_close_callback(struct archive *_a,
354     archive_close_callback *client_closer)
355 {
356 	struct archive_read *a = (struct archive_read *)_a;
357 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
358 	    "archive_read_set_close_callback");
359 	a->client.closer = client_closer;
360 	return ARCHIVE_OK;
361 }
362 
363 int
364 archive_read_set_switch_callback(struct archive *_a,
365     archive_switch_callback *client_switcher)
366 {
367 	struct archive_read *a = (struct archive_read *)_a;
368 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
369 	    "archive_read_set_switch_callback");
370 	a->client.switcher = client_switcher;
371 	return ARCHIVE_OK;
372 }
373 
/* Install the client data pointer at node index 0. */
int
archive_read_set_callback_data(struct archive *_a, void *client_data)
{
	return (archive_read_set_callback_data2(_a, client_data, 0));
}
379 
380 int
381 archive_read_set_callback_data2(struct archive *_a, void *client_data,
382     unsigned int iindex)
383 {
384 	struct archive_read *a = (struct archive_read *)_a;
385 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
386 	    "archive_read_set_callback_data2");
387 
388 	if (a->client.nodes == 0)
389 	{
390 		a->client.dataset = (struct archive_read_data_node *)
391 		    calloc(1, sizeof(*a->client.dataset));
392 		if (a->client.dataset == NULL)
393 		{
394 			archive_set_error(&a->archive, ENOMEM,
395 				"No memory.");
396 			return ARCHIVE_FATAL;
397 		}
398 		a->client.nodes = 1;
399 	}
400 
401 	if (iindex > a->client.nodes - 1)
402 	{
403 		archive_set_error(&a->archive, EINVAL,
404 			"Invalid index specified.");
405 		return ARCHIVE_FATAL;
406 	}
407 	a->client.dataset[iindex].data = client_data;
408 	a->client.dataset[iindex].begin_position = -1;
409 	a->client.dataset[iindex].total_size = -1;
410 	return ARCHIVE_OK;
411 }
412 
413 int
414 archive_read_add_callback_data(struct archive *_a, void *client_data,
415     unsigned int iindex)
416 {
417 	struct archive_read *a = (struct archive_read *)_a;
418 	void *p;
419 	unsigned int i;
420 
421 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
422 	    "archive_read_add_callback_data");
423 	if (iindex > a->client.nodes) {
424 		archive_set_error(&a->archive, EINVAL,
425 			"Invalid index specified.");
426 		return ARCHIVE_FATAL;
427 	}
428 	p = realloc(a->client.dataset, sizeof(*a->client.dataset)
429 		* (++(a->client.nodes)));
430 	if (p == NULL) {
431 		archive_set_error(&a->archive, ENOMEM,
432 			"No memory.");
433 		return ARCHIVE_FATAL;
434 	}
435 	a->client.dataset = (struct archive_read_data_node *)p;
436 	for (i = a->client.nodes - 1; i > iindex; i--) {
437 		a->client.dataset[i].data = a->client.dataset[i-1].data;
438 		a->client.dataset[i].begin_position = -1;
439 		a->client.dataset[i].total_size = -1;
440 	}
441 	a->client.dataset[iindex].data = client_data;
442 	a->client.dataset[iindex].begin_position = -1;
443 	a->client.dataset[iindex].total_size = -1;
444 	return ARCHIVE_OK;
445 }
446 
447 int
448 archive_read_append_callback_data(struct archive *_a, void *client_data)
449 {
450 	struct archive_read *a = (struct archive_read *)_a;
451 	return archive_read_add_callback_data(_a, client_data, a->client.nodes);
452 }
453 
/* Insert a client data pointer in front of all existing nodes. */
int
archive_read_prepend_callback_data(struct archive *_a, void *client_data)
{
	return (archive_read_add_callback_data(_a, client_data, 0));
}
459 
/*
 * Finish opening the archive: validate the registered callbacks,
 * open the client data source, build the filter pipeline, and pick
 * a format handler.  On success the archive moves to HEADER state.
 */
int
archive_read_open1(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct archive_read_filter *filter, *tmp;
	int slot, e = ARCHIVE_OK;
	unsigned int i;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_open");
	archive_clear_error(&a->archive);

	/* A read callback is the one thing we cannot do without. */
	if (a->client.reader == NULL) {
		archive_set_error(&a->archive, EINVAL,
		    "No reader function provided to archive_read_open");
		a->archive.state = ARCHIVE_STATE_FATAL;
		return (ARCHIVE_FATAL);
	}

	/* Open data source. */
	if (a->client.opener != NULL) {
		e = (a->client.opener)(&a->archive, a->client.dataset[0].data);
		if (e != 0) {
			/* If the open failed, call the closer to clean up. */
			if (a->client.closer) {
				for (i = 0; i < a->client.nodes; i++)
					(a->client.closer)(&a->archive,
					    a->client.dataset[i].data);
			}
			return (e);
		}
	}

	/* Source-level "none" filter that proxies all I/O to the client. */
	filter = calloc(1, sizeof(*filter));
	if (filter == NULL)
		return (ARCHIVE_FATAL);
	filter->bidder = NULL;
	filter->upstream = NULL;
	filter->archive = a;
	filter->data = a->client.dataset[0].data;
	filter->open = client_open_proxy;
	filter->read = client_read_proxy;
	filter->skip = client_skip_proxy;
	filter->seek = client_seek_proxy;
	filter->close = client_close_proxy;
	filter->sswitch = client_switch_proxy;
	filter->name = "none";
	filter->code = ARCHIVE_FILTER_NONE;

	a->client.dataset[0].begin_position = 0;
	if (!a->filter || !a->bypass_filter_bidding)
	{
		a->filter = filter;
		/* Build out the input pipeline. */
		e = choose_filters(a);
		if (e < ARCHIVE_WARN) {
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
	}
	else
	{
		/* Need to add "NONE" type filter at the end of the filter chain */
		tmp = a->filter;
		while (tmp->upstream)
			tmp = tmp->upstream;
		tmp->upstream = filter;
	}

	/* Pick a format handler unless the caller forced one already. */
	if (!a->format)
	{
		slot = choose_format(a);
		if (slot < 0) {
			close_filters(a);
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
		a->format = &(a->formats[slot]);
	}

	a->archive.state = ARCHIVE_STATE_HEADER;

	/* Ensure libarchive starts from the first node in a multivolume set */
	client_switch_proxy(a->filter, 0);
	return (e);
}
546 
547 /*
548  * Allow each registered stream transform to bid on whether
549  * it wants to handle this stream.  Repeat until we've finished
550  * building the pipeline.
551  */
552 
553 /* We won't build a filter pipeline with more stages than this. */
554 #define MAX_NUMBER_FILTERS 25
555 
/*
 * Repeatedly let every registered bidder inspect the current head of
 * the filter pipeline; the highest bidder gets a new filter stage
 * pushed on top.  Stops when no bidder bids (pipeline complete) or
 * after MAX_NUMBER_FILTERS stages (treated as a hostile/corrupt
 * stream).  Returns ARCHIVE_OK or ARCHIVE_FATAL.
 */
static int
choose_filters(struct archive_read *a)
{
	int number_bidders, i, bid, best_bid, number_filters;
	struct archive_read_filter_bidder *bidder, *best_bidder;
	struct archive_read_filter *filter;
	ssize_t avail;
	int r;

	for (number_filters = 0; number_filters < MAX_NUMBER_FILTERS; ++number_filters) {
		number_bidders = sizeof(a->bidders) / sizeof(a->bidders[0]);

		best_bid = 0;
		best_bidder = NULL;

		/* Poll every bidder against the current pipeline head. */
		bidder = a->bidders;
		for (i = 0; i < number_bidders; i++, bidder++) {
			if (bidder->bid != NULL) {
				bid = (bidder->bid)(bidder, a->filter);
				if (bid > best_bid) {
					best_bid = bid;
					best_bidder = bidder;
				}
			}
		}

		/* If no bidder, we're done. */
		if (best_bidder == NULL) {
			/* Verify the filter by asking it for some data. */
			__archive_read_filter_ahead(a->filter, 1, &avail);
			if (avail < 0) {
				__archive_read_free_filters(a);
				return (ARCHIVE_FATAL);
			}
			/* Publish the outermost filter's identity. */
			a->archive.compression_name = a->filter->name;
			a->archive.compression_code = a->filter->code;
			return (ARCHIVE_OK);
		}

		/* Push a new stage for the winning bidder and init it. */
		filter
		    = (struct archive_read_filter *)calloc(1, sizeof(*filter));
		if (filter == NULL)
			return (ARCHIVE_FATAL);
		filter->bidder = best_bidder;
		filter->archive = a;
		filter->upstream = a->filter;
		a->filter = filter;
		r = (best_bidder->init)(a->filter);
		if (r != ARCHIVE_OK) {
			__archive_read_free_filters(a);
			return (ARCHIVE_FATAL);
		}
	}
	/* Too many stages: refuse rather than loop on crafted input. */
	archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
	    "Input requires too many filters for decoding");
	return (ARCHIVE_FATAL);
}
613 
614 int
615 __archive_read_header(struct archive_read *a, struct archive_entry *entry)
616 {
617 	if (a->filter->read_header)
618 		return a->filter->read_header(a->filter, entry);
619 	else
620 		return (ARCHIVE_OK);
621 }
622 
623 /*
624  * Read header of next entry.
625  */
/*
 * Read header of next entry.
 *
 * Skips any unconsumed remainder of the previous entry, records the
 * header offset, invokes the format's read_header, and updates the
 * archive state machine (EOF and FATAL are sticky).
 */
static int
_archive_read_next_header2(struct archive *_a, struct archive_entry *entry)
{
	struct archive_read *a = (struct archive_read *)_a;
	int r1 = ARCHIVE_OK, r2;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA,
	    "archive_read_next_header");

	archive_entry_clear(entry);
	archive_clear_error(&a->archive);

	/*
	 * If client didn't consume entire data, skip any remainder
	 * (This is especially important for GNU incremental directories.)
	 */
	if (a->archive.state == ARCHIVE_STATE_DATA) {
		r1 = archive_read_data_skip(&a->archive);
		if (r1 == ARCHIVE_EOF)
			archive_set_error(&a->archive, EIO,
			    "Premature end-of-file.");
		if (r1 == ARCHIVE_EOF || r1 == ARCHIVE_FATAL) {
			a->archive.state = ARCHIVE_STATE_FATAL;
			return (ARCHIVE_FATAL);
		}
	}

	/* Record start-of-header offset in uncompressed stream. */
	a->header_position = a->filter->position;

	/* Count the file optimistically; reverted below on EOF. */
	++_a->file_count;
	r2 = (a->format->read_header)(a, entry);

	/*
	 * EOF and FATAL are persistent at this layer.  By
	 * modifying the state, we guarantee that future calls to
	 * read a header or read data will fail.
	 */
	switch (r2) {
	case ARCHIVE_EOF:
		a->archive.state = ARCHIVE_STATE_EOF;
		--_a->file_count;/* Revert a file counter. */
		break;
	case ARCHIVE_OK:
		a->archive.state = ARCHIVE_STATE_DATA;
		break;
	case ARCHIVE_WARN:
		a->archive.state = ARCHIVE_STATE_DATA;
		break;
	case ARCHIVE_RETRY:
		break;
	case ARCHIVE_FATAL:
		a->archive.state = ARCHIVE_STATE_FATAL;
		break;
	}

	/* Fresh entry: discard per-entry data-read bookkeeping. */
	__archive_reset_read_data(&a->archive);

	a->data_start_node = a->client.cursor;
	/* EOF always wins; otherwise return the worst error. */
	return (r2 < r1 || r2 == ARCHIVE_EOF) ? r2 : r1;
}
689 
690 static int
691 _archive_read_next_header(struct archive *_a, struct archive_entry **entryp)
692 {
693 	int ret;
694 	struct archive_read *a = (struct archive_read *)_a;
695 	*entryp = NULL;
696 	ret = _archive_read_next_header2(_a, a->entry);
697 	*entryp = a->entry;
698 	return ret;
699 }
700 
701 /*
702  * Allow each registered format to bid on whether it wants to handle
703  * the next entry.  Return index of winning bidder.
704  */
/*
 * Allow each registered format to bid on whether it wants to handle
 * the next entry.  Return index of winning bidder.
 * Returns ARCHIVE_FATAL if a bidder fails fatally, if no formats are
 * registered, or if no format recognizes the stream.
 */
static int
choose_format(struct archive_read *a)
{
	int slots;
	int i;
	int bid, best_bid;
	int best_bid_slot;

	slots = sizeof(a->formats) / sizeof(a->formats[0]);
	best_bid = -1;
	best_bid_slot = -1;

	/* Set up a->format for convenience of bidders. */
	a->format = &(a->formats[0]);
	for (i = 0; i < slots; i++, a->format++) {
		if (a->format->bid) {
			/* Pass the current best bid so bidders can bail early. */
			bid = (a->format->bid)(a, best_bid);
			if (bid == ARCHIVE_FATAL)
				return (ARCHIVE_FATAL);
			/* Rewind so the next bidder sees the stream start. */
			if (a->filter->position != 0)
				__archive_read_seek(a, 0, SEEK_SET);
			if ((bid > best_bid) || (best_bid_slot < 0)) {
				best_bid = bid;
				best_bid_slot = i;
			}
		}
	}

	/*
	 * There were no bidders; this is a serious programmer error
	 * and demands a quick and definitive abort.
	 */
	if (best_bid_slot < 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "No formats registered");
		return (ARCHIVE_FATAL);
	}

	/*
	 * There were bidders, but no non-zero bids; this means we
	 * can't support this stream.
	 */
	if (best_bid < 1) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unrecognized archive format");
		return (ARCHIVE_FATAL);
	}

	return (best_bid_slot);
}
755 
756 /*
757  * Return the file offset (within the uncompressed data stream) where
758  * the last header started.
759  */
760 la_int64_t
761 archive_read_header_position(struct archive *_a)
762 {
763 	struct archive_read *a = (struct archive_read *)_a;
764 	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
765 	    ARCHIVE_STATE_ANY, "archive_read_header_position");
766 	return (a->header_position);
767 }
768 
769 /*
770  * Returns 1 if the archive contains at least one encrypted entry.
771  * If the archive format not support encryption at all
772  * ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED is returned.
773  * If for any other reason (e.g. not enough data read so far)
774  * we cannot say whether there are encrypted entries, then
775  * ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW is returned.
776  * In general, this function will return values below zero when the
777  * reader is uncertain or totally incapable of encryption support.
778  * When this function returns 0 you can be sure that the reader
779  * supports encryption detection but no encrypted entries have
780  * been found yet.
781  *
782  * NOTE: If the metadata/header of an archive is also encrypted, you
783  * cannot rely on the number of encrypted entries. That is why this
784  * function does not return the number of encrypted entries but#
785  * just shows that there are some.
786  */
787 int
788 archive_read_has_encrypted_entries(struct archive *_a)
789 {
790 	struct archive_read *a = (struct archive_read *)_a;
791 	int format_supports_encryption = archive_read_format_capabilities(_a)
792 			& (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA | ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA);
793 
794 	if (!_a || !format_supports_encryption) {
795 		/* Format in general doesn't support encryption */
796 		return ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED;
797 	}
798 
799 	/* A reader potentially has read enough data now. */
800 	if (a->format && a->format->has_encrypted_entries) {
801 		return (a->format->has_encrypted_entries)(a);
802 	}
803 
804 	/* For any other reason we cannot say how many entries are there. */
805 	return ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW;
806 }
807 
808 /*
809  * Returns a bitmask of capabilities that are supported by the archive format reader.
810  * If the reader has no special capabilities, ARCHIVE_READ_FORMAT_CAPS_NONE is returned.
811  */
812 int
813 archive_read_format_capabilities(struct archive *_a)
814 {
815 	struct archive_read *a = (struct archive_read *)_a;
816 	if (a && a->format && a->format->format_capabilties) {
817 		return (a->format->format_capabilties)(a);
818 	}
819 	return ARCHIVE_READ_FORMAT_CAPS_NONE;
820 }
821 
822 /*
823  * Read data from an archive entry, using a read(2)-style interface.
824  * This is a convenience routine that just calls
825  * archive_read_data_block and copies the results into the client
826  * buffer, filling any gaps with zero bytes.  Clients using this
827  * API can be completely ignorant of sparse-file issues; sparse files
828  * will simply be padded with nulls.
829  *
830  * DO NOT intermingle calls to this function and archive_read_data_block
831  * to read a single entry body.
832  */
/*
 * Read data from an archive entry, using a read(2)-style interface.
 * This is a convenience routine that just calls
 * archive_read_data_block and copies the results into the client
 * buffer, filling any gaps with zero bytes.  Clients using this
 * API can be completely ignorant of sparse-file issues; sparse files
 * will simply be padded with nulls.
 *
 * DO NOT intermingle calls to this function and archive_read_data_block
 * to read a single entry body.
 *
 * Returns the number of bytes copied into buff, 0 at end of entry,
 * or a negative ARCHIVE_* status on error.
 */
la_ssize_t
archive_read_data(struct archive *_a, void *buff, size_t s)
{
	struct archive *a = (struct archive *)_a;
	char	*dest;
	const void *read_buf;
	size_t	 bytes_read;
	size_t	 len;
	int	 r;

	bytes_read = 0;
	dest = (char *)buff;

	while (s > 0) {
		/* Current block fully consumed: fetch the next one. */
		if (a->read_data_offset == a->read_data_output_offset &&
		    a->read_data_remaining == 0) {
			read_buf = a->read_data_block;
			/* Hint to formats that this is a posix-style read. */
			a->read_data_is_posix_read = 1;
			a->read_data_requested = s;
			r = archive_read_data_block(a, &read_buf,
			    &a->read_data_remaining, &a->read_data_offset);
			a->read_data_block = read_buf;
			if (r == ARCHIVE_EOF)
				return (bytes_read);
			/*
			 * Error codes are all negative, so the status
			 * return here cannot be confused with a valid
			 * byte count.  (ARCHIVE_OK is zero.)
			 */
			if (r < ARCHIVE_OK)
				return (r);
		}

		/* Sparse blocks must arrive in increasing offset order. */
		if (a->read_data_offset < a->read_data_output_offset) {
			archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
			    "Encountered out-of-order sparse blocks");
			return (ARCHIVE_RETRY);
		}

		/* Compute the amount of zero padding needed. */
		if (a->read_data_output_offset + (int64_t)s <
		    a->read_data_offset) {
			/* Entire remaining request lies inside a hole. */
			len = s;
		} else if (a->read_data_output_offset <
		    a->read_data_offset) {
			/* Pad only up to where the block's data begins. */
			len = (size_t)(a->read_data_offset -
			    a->read_data_output_offset);
		} else
			len = 0;

		/* Add zeroes. */
		memset(dest, 0, len);
		s -= len;
		a->read_data_output_offset += len;
		dest += len;
		bytes_read += len;

		/* Copy data if there is any space left. */
		if (s > 0) {
			len = a->read_data_remaining;
			if (len > s)
				len = s;
			if (len) {
				memcpy(dest, a->read_data_block, len);
				s -= len;
				a->read_data_block += len;
				a->read_data_remaining -= len;
				a->read_data_output_offset += len;
				a->read_data_offset += len;
				dest += len;
				bytes_read += len;
			}
		}
	}
	a->read_data_is_posix_read = 0;
	a->read_data_requested = 0;
	return (bytes_read);
}
911 
912 /*
913  * Reset the read_data_* variables, used for starting a new entry.
914  */
915 void __archive_reset_read_data(struct archive * a)
916 {
917 	a->read_data_output_offset = 0;
918 	a->read_data_remaining = 0;
919 	a->read_data_is_posix_read = 0;
920 	a->read_data_requested = 0;
921 
922    /* extra resets, from rar.c */
923    a->read_data_block = NULL;
924    a->read_data_offset = 0;
925 }
926 
927 /*
928  * Skip over all remaining data in this entry.
929  */
930 int
931 archive_read_data_skip(struct archive *_a)
932 {
933 	struct archive_read *a = (struct archive_read *)_a;
934 	int r;
935 	const void *buff;
936 	size_t size;
937 	int64_t offset;
938 
939 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
940 	    "archive_read_data_skip");
941 
942 	if (a->format->read_data_skip != NULL)
943 		r = (a->format->read_data_skip)(a);
944 	else {
945 		while ((r = archive_read_data_block(&a->archive,
946 			    &buff, &size, &offset))
947 		    == ARCHIVE_OK)
948 			;
949 	}
950 
951 	if (r == ARCHIVE_EOF)
952 		r = ARCHIVE_OK;
953 
954 	a->archive.state = ARCHIVE_STATE_HEADER;
955 	return (r);
956 }
957 
958 la_int64_t
959 archive_seek_data(struct archive *_a, int64_t offset, int whence)
960 {
961 	struct archive_read *a = (struct archive_read *)_a;
962 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
963 	    "archive_seek_data_block");
964 
965 	if (a->format->seek_data == NULL) {
966 		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
967 		    "Internal error: "
968 		    "No format_seek_data_block function registered");
969 		return (ARCHIVE_FATAL);
970 	}
971 
972 	return (a->format->seek_data)(a, offset, whence);
973 }
974 
975 /*
976  * Read the next block of entry data from the archive.
977  * This is a zero-copy interface; the client receives a pointer,
978  * size, and file offset of the next available block of data.
979  *
980  * Returns ARCHIVE_OK if the operation is successful, ARCHIVE_EOF if
981  * the end of entry is encountered.
982  */
983 static int
984 _archive_read_data_block(struct archive *_a,
985     const void **buff, size_t *size, int64_t *offset)
986 {
987 	struct archive_read *a = (struct archive_read *)_a;
988 	archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA,
989 	    "archive_read_data_block");
990 
991 	if (a->format->read_data == NULL) {
992 		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
993 		    "Internal error: "
994 		    "No format->read_data function registered");
995 		return (ARCHIVE_FATAL);
996 	}
997 
998 	return (a->format->read_data)(a, buff, size, offset);
999 }
1000 
1001 static int
1002 close_filters(struct archive_read *a)
1003 {
1004 	struct archive_read_filter *f = a->filter;
1005 	int r = ARCHIVE_OK;
1006 	/* Close each filter in the pipeline. */
1007 	while (f != NULL) {
1008 		struct archive_read_filter *t = f->upstream;
1009 		if (!f->closed && f->close != NULL) {
1010 			int r1 = (f->close)(f);
1011 			f->closed = 1;
1012 			if (r1 < r)
1013 				r = r1;
1014 		}
1015 		free(f->buffer);
1016 		f->buffer = NULL;
1017 		f = t;
1018 	}
1019 	return r;
1020 }
1021 
1022 void
1023 __archive_read_free_filters(struct archive_read *a)
1024 {
1025 	/* Make sure filters are closed and their buffers are freed */
1026 	close_filters(a);
1027 
1028 	while (a->filter != NULL) {
1029 		struct archive_read_filter *t = a->filter->upstream;
1030 		free(a->filter);
1031 		a->filter = t;
1032 	}
1033 }
1034 
1035 /*
1036  * return the count of # of filters in use
1037  */
1038 static int
1039 _archive_filter_count(struct archive *_a)
1040 {
1041 	struct archive_read *a = (struct archive_read *)_a;
1042 	struct archive_read_filter *p = a->filter;
1043 	int count = 0;
1044 	while(p) {
1045 		count++;
1046 		p = p->upstream;
1047 	}
1048 	return count;
1049 }
1050 
1051 /*
1052  * Close the file and all I/O.
1053  */
1054 static int
1055 _archive_read_close(struct archive *_a)
1056 {
1057 	struct archive_read *a = (struct archive_read *)_a;
1058 	int r = ARCHIVE_OK, r1 = ARCHIVE_OK;
1059 
1060 	archive_check_magic(&a->archive, ARCHIVE_READ_MAGIC,
1061 	    ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_close");
1062 	if (a->archive.state == ARCHIVE_STATE_CLOSED)
1063 		return (ARCHIVE_OK);
1064 	archive_clear_error(&a->archive);
1065 	a->archive.state = ARCHIVE_STATE_CLOSED;
1066 
1067 	/* TODO: Clean up the formatters. */
1068 
1069 	/* Release the filter objects. */
1070 	r1 = close_filters(a);
1071 	if (r1 < r)
1072 		r = r1;
1073 
1074 	return (r);
1075 }
1076 
1077 /*
1078  * Release memory and other resources.
1079  */
1080 static int
1081 _archive_read_free(struct archive *_a)
1082 {
1083 	struct archive_read *a = (struct archive_read *)_a;
1084 	struct archive_read_passphrase *p;
1085 	int i, n;
1086 	int slots;
1087 	int r = ARCHIVE_OK;
1088 
1089 	if (_a == NULL)
1090 		return (ARCHIVE_OK);
1091 	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
1092 	    ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_read_free");
1093 	if (a->archive.state != ARCHIVE_STATE_CLOSED
1094 	    && a->archive.state != ARCHIVE_STATE_FATAL)
1095 		r = archive_read_close(&a->archive);
1096 
1097 	/* Call cleanup functions registered by optional components. */
1098 	if (a->cleanup_archive_extract != NULL)
1099 		r = (a->cleanup_archive_extract)(a);
1100 
1101 	/* Cleanup format-specific data. */
1102 	slots = sizeof(a->formats) / sizeof(a->formats[0]);
1103 	for (i = 0; i < slots; i++) {
1104 		a->format = &(a->formats[i]);
1105 		if (a->formats[i].cleanup)
1106 			(a->formats[i].cleanup)(a);
1107 	}
1108 
1109 	/* Free the filters */
1110 	__archive_read_free_filters(a);
1111 
1112 	/* Release the bidder objects. */
1113 	n = sizeof(a->bidders)/sizeof(a->bidders[0]);
1114 	for (i = 0; i < n; i++) {
1115 		if (a->bidders[i].free != NULL) {
1116 			int r1 = (a->bidders[i].free)(&a->bidders[i]);
1117 			if (r1 < r)
1118 				r = r1;
1119 		}
1120 	}
1121 
1122 	/* Release passphrase list. */
1123 	p = a->passphrases.first;
1124 	while (p != NULL) {
1125 		struct archive_read_passphrase *np = p->next;
1126 
1127 		/* A passphrase should be cleaned. */
1128 		memset(p->passphrase, 0, strlen(p->passphrase));
1129 		free(p->passphrase);
1130 		free(p);
1131 		p = np;
1132 	}
1133 
1134 	archive_string_free(&a->archive.error_string);
1135 	archive_entry_free(a->entry);
1136 	a->archive.magic = 0;
1137 	__archive_clean(&a->archive);
1138 	free(a->client.dataset);
1139 	free(a);
1140 	return (r);
1141 }
1142 
1143 static struct archive_read_filter *
1144 get_filter(struct archive *_a, int n)
1145 {
1146 	struct archive_read *a = (struct archive_read *)_a;
1147 	struct archive_read_filter *f = a->filter;
1148 	/* We use n == -1 for 'the last filter', which is always the
1149 	 * client proxy. */
1150 	if (n == -1 && f != NULL) {
1151 		struct archive_read_filter *last = f;
1152 		f = f->upstream;
1153 		while (f != NULL) {
1154 			last = f;
1155 			f = f->upstream;
1156 		}
1157 		return (last);
1158 	}
1159 	if (n < 0)
1160 		return NULL;
1161 	while (n > 0 && f != NULL) {
1162 		f = f->upstream;
1163 		--n;
1164 	}
1165 	return (f);
1166 }
1167 
1168 static int
1169 _archive_filter_code(struct archive *_a, int n)
1170 {
1171 	struct archive_read_filter *f = get_filter(_a, n);
1172 	return f == NULL ? -1 : f->code;
1173 }
1174 
1175 static const char *
1176 _archive_filter_name(struct archive *_a, int n)
1177 {
1178 	struct archive_read_filter *f = get_filter(_a, n);
1179 	return f != NULL ? f->name : NULL;
1180 }
1181 
1182 static int64_t
1183 _archive_filter_bytes(struct archive *_a, int n)
1184 {
1185 	struct archive_read_filter *f = get_filter(_a, n);
1186 	return f == NULL ? -1 : f->position;
1187 }
1188 
1189 /*
1190  * Used internally by read format handlers to register their bid and
1191  * initialization functions.
1192  */
1193 int
1194 __archive_read_register_format(struct archive_read *a,
1195     void *format_data,
1196     const char *name,
1197     int (*bid)(struct archive_read *, int),
1198     int (*options)(struct archive_read *, const char *, const char *),
1199     int (*read_header)(struct archive_read *, struct archive_entry *),
1200     int (*read_data)(struct archive_read *, const void **, size_t *, int64_t *),
1201     int (*read_data_skip)(struct archive_read *),
1202     int64_t (*seek_data)(struct archive_read *, int64_t, int),
1203     int (*cleanup)(struct archive_read *),
1204     int (*format_capabilities)(struct archive_read *),
1205     int (*has_encrypted_entries)(struct archive_read *))
1206 {
1207 	int i, number_slots;
1208 
1209 	archive_check_magic(&a->archive,
1210 	    ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
1211 	    "__archive_read_register_format");
1212 
1213 	number_slots = sizeof(a->formats) / sizeof(a->formats[0]);
1214 
1215 	for (i = 0; i < number_slots; i++) {
1216 		if (a->formats[i].bid == bid)
1217 			return (ARCHIVE_WARN); /* We've already installed */
1218 		if (a->formats[i].bid == NULL) {
1219 			a->formats[i].bid = bid;
1220 			a->formats[i].options = options;
1221 			a->formats[i].read_header = read_header;
1222 			a->formats[i].read_data = read_data;
1223 			a->formats[i].read_data_skip = read_data_skip;
1224 			a->formats[i].seek_data = seek_data;
1225 			a->formats[i].cleanup = cleanup;
1226 			a->formats[i].data = format_data;
1227 			a->formats[i].name = name;
1228 			a->formats[i].format_capabilties = format_capabilities;
1229 			a->formats[i].has_encrypted_entries = has_encrypted_entries;
1230 			return (ARCHIVE_OK);
1231 		}
1232 	}
1233 
1234 	archive_set_error(&a->archive, ENOMEM,
1235 	    "Not enough slots for format registration");
1236 	return (ARCHIVE_FATAL);
1237 }
1238 
1239 /*
1240  * Used internally by decompression routines to register their bid and
1241  * initialization functions.
1242  */
1243 int
1244 __archive_read_get_bidder(struct archive_read *a,
1245     struct archive_read_filter_bidder **bidder)
1246 {
1247 	int i, number_slots;
1248 
1249 	number_slots = sizeof(a->bidders) / sizeof(a->bidders[0]);
1250 
1251 	for (i = 0; i < number_slots; i++) {
1252 		if (a->bidders[i].bid == NULL) {
1253 			memset(a->bidders + i, 0, sizeof(a->bidders[0]));
1254 			*bidder = (a->bidders + i);
1255 			return (ARCHIVE_OK);
1256 		}
1257 	}
1258 
1259 	archive_set_error(&a->archive, ENOMEM,
1260 	    "Not enough slots for filter registration");
1261 	return (ARCHIVE_FATAL);
1262 }
1263 
1264 /*
1265  * The next section implements the peek/consume internal I/O
1266  * system used by archive readers.  This system allows simple
1267  * read-ahead for consumers while preserving zero-copy operation
1268  * most of the time.
1269  *
1270  * The two key operations:
1271  *  * The read-ahead function returns a pointer to a block of data
1272  *    that satisfies a minimum request.
1273  *  * The consume function advances the file pointer.
1274  *
1275  * In the ideal case, filters generate blocks of data
1276  * and __archive_read_ahead() just returns pointers directly into
1277  * those blocks.  Then __archive_read_consume() just bumps those
1278  * pointers.  Only if your request would span blocks does the I/O
1279  * layer use a copy buffer to provide you with a contiguous block of
1280  * data.
1281  *
1282  * A couple of useful idioms:
1283  *  * "I just want some data."  Ask for 1 byte and pay attention to
1284  *    the "number of bytes available" from __archive_read_ahead().
1285  *    Consume whatever you actually use.
1286  *  * "I want to output a large block of data."  As above, ask for 1 byte,
1287  *    emit all that's available (up to whatever limit you have), consume
1288  *    it all, then repeat until you're done.  This effectively means that
1289  *    you're passing along the blocks that came from your provider.
1290  *  * "I want to peek ahead by a large amount."  Ask for 4k or so, then
1291  *    double and repeat until you get an error or have enough.  Note
1292  *    that the I/O layer will likely end up expanding its copy buffer
1293  *    to fit your request, so use this technique cautiously.  This
1294  *    technique is used, for example, by some of the format tasting
1295  *    code that has uncertain look-ahead needs.
1296  */
1297 
1298 /*
1299  * Looks ahead in the input stream:
1300  *  * If 'avail' pointer is provided, that returns number of bytes available
1301  *    in the current buffer, which may be much larger than requested.
1302  *  * If end-of-file, *avail gets set to zero.
1303  *  * If error, *avail gets error code.
1304  *  * If request can be met, returns pointer to data.
1305  *  * If minimum request cannot be met, returns NULL.
1306  *
1307  * Note: If you just want "some data", ask for 1 byte and pay attention
1308  * to *avail, which will have the actual amount available.  If you
1309  * know exactly how many bytes you need, just ask for that and treat
1310  * a NULL return as an error.
1311  *
1312  * Important:  This does NOT move the file pointer.  See
1313  * __archive_read_consume() below.
1314  */
const void *
__archive_read_ahead(struct archive_read *a, size_t min, ssize_t *avail)
{
	/* Delegate to the head filter of the pipeline; see the comment
	 * block above for the full peek semantics. */
	return (__archive_read_filter_ahead(a->filter, min, avail));
}
1320 
/*
 * Core peek implementation for one filter.  Satisfies the request
 * from the internal copy buffer or directly from the client buffer
 * when possible; otherwise pulls more data from the upstream read
 * callback, growing the copy buffer as needed.
 */
const void *
__archive_read_filter_ahead(struct archive_read_filter *filter,
    size_t min, ssize_t *avail)
{
	ssize_t bytes_read;
	size_t tocopy;

	/* A previous fatal error permanently poisons this filter. */
	if (filter->fatal) {
		if (avail)
			*avail = ARCHIVE_FATAL;
		return (NULL);
	}

	/*
	 * Keep pulling more data until we can satisfy the request.
	 */
	for (;;) {

		/*
		 * If we can satisfy from the copy buffer (and the
		 * copy buffer isn't empty), we're done.  In particular,
		 * note that min == 0 is a perfectly well-defined
		 * request.
		 */
		if (filter->avail >= min && filter->avail > 0) {
			if (avail != NULL)
				*avail = filter->avail;
			return (filter->next);
		}

		/*
		 * We can satisfy directly from client buffer if everything
		 * currently in the copy buffer is still in the client buffer.
		 */
		if (filter->client_total >= filter->client_avail + filter->avail
		    && filter->client_avail + filter->avail >= min) {
			/* "Roll back" to client buffer. */
			filter->client_avail += filter->avail;
			filter->client_next -= filter->avail;
			/* Copy buffer is now empty. */
			filter->avail = 0;
			filter->next = filter->buffer;
			/* Return data from client buffer. */
			if (avail != NULL)
				*avail = filter->client_avail;
			return (filter->client_next);
		}

		/* Move data forward in copy buffer if necessary. */
		if (filter->next > filter->buffer &&
		    filter->next + min > filter->buffer + filter->buffer_size) {
			if (filter->avail > 0)
				memmove(filter->buffer, filter->next,
				    filter->avail);
			filter->next = filter->buffer;
		}

		/* If we've used up the client data, get more. */
		if (filter->client_avail <= 0) {
			if (filter->end_of_file) {
				if (avail != NULL)
					*avail = 0;
				return (NULL);
			}
			bytes_read = (filter->read)(filter,
			    &filter->client_buff);
			if (bytes_read < 0) {		/* Read error. */
				filter->client_total = filter->client_avail = 0;
				filter->client_next =
				    filter->client_buff = NULL;
				filter->fatal = 1;
				if (avail != NULL)
					*avail = ARCHIVE_FATAL;
				return (NULL);
			}
			if (bytes_read == 0) {
				/* Check for another client object first */
				if (filter->archive->client.cursor !=
				      filter->archive->client.nodes - 1) {
					if (client_switch_proxy(filter,
					    filter->archive->client.cursor + 1)
					    == ARCHIVE_OK)
						continue;
				}
				/* Premature end-of-file. */
				filter->client_total = filter->client_avail = 0;
				filter->client_next =
				    filter->client_buff = NULL;
				filter->end_of_file = 1;
				/* Return whatever we do have. */
				if (avail != NULL)
					*avail = filter->avail;
				return (NULL);
			}
			filter->client_total = bytes_read;
			filter->client_avail = filter->client_total;
			filter->client_next = filter->client_buff;
		} else {
			/*
			 * We can't satisfy the request from the copy
			 * buffer or the existing client data, so we
			 * need to copy more client data over to the
			 * copy buffer.
			 */

			/* Ensure the buffer is big enough. */
			if (min > filter->buffer_size) {
				size_t s, t;
				char *p;

				/* Double the buffer; watch for overflow. */
				s = t = filter->buffer_size;
				if (s == 0)
					s = min;
				while (s < min) {
					t *= 2;
					if (t <= s) { /* Integer overflow! */
						archive_set_error(
						    &filter->archive->archive,
						    ENOMEM,
						    "Unable to allocate copy"
						    " buffer");
						filter->fatal = 1;
						if (avail != NULL)
							*avail = ARCHIVE_FATAL;
						return (NULL);
					}
					s = t;
				}
				/* Now s >= min, so allocate a new buffer. */
				p = (char *)malloc(s);
				if (p == NULL) {
					archive_set_error(
						&filter->archive->archive,
						ENOMEM,
					    "Unable to allocate copy buffer");
					filter->fatal = 1;
					if (avail != NULL)
						*avail = ARCHIVE_FATAL;
					return (NULL);
				}
				/* Move data into newly-enlarged buffer. */
				if (filter->avail > 0)
					memmove(p, filter->next, filter->avail);
				free(filter->buffer);
				filter->next = filter->buffer = p;
				filter->buffer_size = s;
			}

			/* We can add client data to copy buffer. */
			/* First estimate: copy to fill rest of buffer. */
			tocopy = (filter->buffer + filter->buffer_size)
			    - (filter->next + filter->avail);
			/* Don't waste time buffering more than we need to. */
			if (tocopy + filter->avail > min)
				tocopy = min - filter->avail;
			/* Don't copy more than is available. */
			if (tocopy > filter->client_avail)
				tocopy = filter->client_avail;

			memcpy(filter->next + filter->avail,
			    filter->client_next, tocopy);
			/* Remove this data from client buffer. */
			filter->client_next += tocopy;
			filter->client_avail -= tocopy;
			/* add it to copy buffer. */
			filter->avail += tocopy;
		}
	}
}
1491 
1492 /*
1493  * Move the file pointer forward.
1494  */
1495 int64_t
1496 __archive_read_consume(struct archive_read *a, int64_t request)
1497 {
1498 	return (__archive_read_filter_consume(a->filter, request));
1499 }
1500 
1501 int64_t
1502 __archive_read_filter_consume(struct archive_read_filter * filter,
1503     int64_t request)
1504 {
1505 	int64_t skipped;
1506 
1507 	if (request < 0)
1508 		return ARCHIVE_FATAL;
1509 	if (request == 0)
1510 		return 0;
1511 
1512 	skipped = advance_file_pointer(filter, request);
1513 	if (skipped == request)
1514 		return (skipped);
1515 	/* We hit EOF before we satisfied the skip request. */
1516 	if (skipped < 0)  /* Map error code to 0 for error message below. */
1517 		skipped = 0;
1518 	archive_set_error(&filter->archive->archive,
1519 	    ARCHIVE_ERRNO_MISC,
1520 	    "Truncated input file (needed %jd bytes, only %jd available)",
1521 	    (intmax_t)request, (intmax_t)skipped);
1522 	return (ARCHIVE_FATAL);
1523 }
1524 
1525 /*
1526  * Advance the file pointer by the amount requested.
1527  * Returns the amount actually advanced, which may be less than the
1528  * request if EOF is encountered first.
1529  * Returns a negative value if there's an I/O error.
1530  */
1531 static int64_t
1532 advance_file_pointer(struct archive_read_filter *filter, int64_t request)
1533 {
1534 	int64_t bytes_skipped, total_bytes_skipped = 0;
1535 	ssize_t bytes_read;
1536 	size_t min;
1537 
1538 	if (filter->fatal)
1539 		return (-1);
1540 
1541 	/* Use up the copy buffer first. */
1542 	if (filter->avail > 0) {
1543 		min = (size_t)minimum(request, (int64_t)filter->avail);
1544 		filter->next += min;
1545 		filter->avail -= min;
1546 		request -= min;
1547 		filter->position += min;
1548 		total_bytes_skipped += min;
1549 	}
1550 
1551 	/* Then use up the client buffer. */
1552 	if (filter->client_avail > 0) {
1553 		min = (size_t)minimum(request, (int64_t)filter->client_avail);
1554 		filter->client_next += min;
1555 		filter->client_avail -= min;
1556 		request -= min;
1557 		filter->position += min;
1558 		total_bytes_skipped += min;
1559 	}
1560 	if (request == 0)
1561 		return (total_bytes_skipped);
1562 
1563 	/* If there's an optimized skip function, use it. */
1564 	if (filter->skip != NULL) {
1565 		bytes_skipped = (filter->skip)(filter, request);
1566 		if (bytes_skipped < 0) {	/* error */
1567 			filter->fatal = 1;
1568 			return (bytes_skipped);
1569 		}
1570 		filter->position += bytes_skipped;
1571 		total_bytes_skipped += bytes_skipped;
1572 		request -= bytes_skipped;
1573 		if (request == 0)
1574 			return (total_bytes_skipped);
1575 	}
1576 
1577 	/* Use ordinary reads as necessary to complete the request. */
1578 	for (;;) {
1579 		bytes_read = (filter->read)(filter, &filter->client_buff);
1580 		if (bytes_read < 0) {
1581 			filter->client_buff = NULL;
1582 			filter->fatal = 1;
1583 			return (bytes_read);
1584 		}
1585 
1586 		if (bytes_read == 0) {
1587 			if (filter->archive->client.cursor !=
1588 			      filter->archive->client.nodes - 1) {
1589 				if (client_switch_proxy(filter,
1590 				    filter->archive->client.cursor + 1)
1591 				    == ARCHIVE_OK)
1592 					continue;
1593 			}
1594 			filter->client_buff = NULL;
1595 			filter->end_of_file = 1;
1596 			return (total_bytes_skipped);
1597 		}
1598 
1599 		if (bytes_read >= request) {
1600 			filter->client_next =
1601 			    ((const char *)filter->client_buff) + request;
1602 			filter->client_avail = (size_t)(bytes_read - request);
1603 			filter->client_total = bytes_read;
1604 			total_bytes_skipped += request;
1605 			filter->position += request;
1606 			return (total_bytes_skipped);
1607 		}
1608 
1609 		filter->position += bytes_read;
1610 		total_bytes_skipped += bytes_read;
1611 		request -= bytes_read;
1612 	}
1613 }
1614 
1615 /**
1616  * Returns ARCHIVE_FAILED if seeking isn't supported.
1617  */
1618 int64_t
1619 __archive_read_seek(struct archive_read *a, int64_t offset, int whence)
1620 {
1621 	return __archive_read_filter_seek(a->filter, offset, whence);
1622 }
1623 
/*
 * Seek within a (possibly multi-part) input.  The client dataset is a
 * list of data objects; each node records its begin_position and
 * total_size so an absolute offset can be mapped to (node, offset).
 * Sizes are discovered lazily by seeking each node to its end.
 * Returns the new absolute position, or a negative status on failure.
 */
int64_t
__archive_read_filter_seek(struct archive_read_filter *filter, int64_t offset,
    int whence)
{
	struct archive_read_client *client;
	int64_t r;
	unsigned int cursor;

	/* A closed or fatally-broken filter cannot seek. */
	if (filter->closed || filter->fatal)
		return (ARCHIVE_FATAL);
	/* Seeking is optional; not every client provides a callback. */
	if (filter->seek == NULL)
		return (ARCHIVE_FAILED);

	client = &(filter->archive->client);
	switch (whence) {
	case SEEK_CUR:
		/* Adjust the offset and use SEEK_SET instead */
		offset += filter->position;
		__LA_FALLTHROUGH;
	case SEEK_SET:
		cursor = 0;
		/* First pass: walk nodes whose sizes are already known,
		 * propagating begin_position forward. */
		while (1)
		{
			if (client->dataset[cursor].begin_position < 0 ||
			    client->dataset[cursor].total_size < 0 ||
			    client->dataset[cursor].begin_position +
			      client->dataset[cursor].total_size - 1 > offset ||
			    cursor + 1 >= client->nodes)
				break;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			client->dataset[++cursor].begin_position = r;
		}
		/* Second pass: discover unknown sizes by seeking each
		 * node to its end, until the target node is found. */
		while (1) {
			r = client_switch_proxy(filter, cursor);
			if (r != ARCHIVE_OK)
				return r;
			if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0)
				return r;
			client->dataset[cursor].total_size = r;
			if (client->dataset[cursor].begin_position +
			    client->dataset[cursor].total_size - 1 > offset ||
			    cursor + 1 >= client->nodes)
				break;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			client->dataset[++cursor].begin_position = r;
		}
		/* Convert the absolute offset to a node-relative one. */
		offset -= client->dataset[cursor].begin_position;
		if (offset < 0
		    || offset > client->dataset[cursor].total_size)
			return ARCHIVE_FATAL;
		if ((r = client_seek_proxy(filter, offset, SEEK_SET)) < 0)
			return r;
		break;

	case SEEK_END:
		/* Walk forward through all nodes to learn the total
		 * extent of the dataset. */
		cursor = 0;
		while (1) {
			if (client->dataset[cursor].begin_position < 0 ||
			    client->dataset[cursor].total_size < 0 ||
			    cursor + 1 >= client->nodes)
				break;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			client->dataset[++cursor].begin_position = r;
		}
		while (1) {
			r = client_switch_proxy(filter, cursor);
			if (r != ARCHIVE_OK)
				return r;
			if ((r = client_seek_proxy(filter, 0, SEEK_END)) < 0)
				return r;
			client->dataset[cursor].total_size = r;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
			if (cursor + 1 >= client->nodes)
				break;
			client->dataset[++cursor].begin_position = r;
		}
		/* Walk backward to the node that contains the target
		 * (offset is <= 0 relative to the overall end). */
		while (1) {
			if (r + offset >=
			    client->dataset[cursor].begin_position)
				break;
			offset += client->dataset[cursor].total_size;
			if (cursor == 0)
				break;
			cursor--;
			r = client->dataset[cursor].begin_position +
				client->dataset[cursor].total_size;
		}
		offset = (r + offset) - client->dataset[cursor].begin_position;
		if ((r = client_switch_proxy(filter, cursor)) != ARCHIVE_OK)
			return r;
		r = client_seek_proxy(filter, offset, SEEK_SET);
		if (r < ARCHIVE_OK)
			return r;
		break;

	default:
		return (ARCHIVE_FATAL);
	}
	/* Translate the node-relative result back to an absolute position. */
	r += client->dataset[cursor].begin_position;

	if (r >= 0) {
		/*
		 * Ouch.  Clearing the buffer like this hurts, especially
		 * at bid time.  A lot of our efficiency at bid time comes
		 * from having bidders reuse the data we've already read.
		 *
		 * TODO: If the seek request is in data we already
		 * have, then don't call the seek callback.
		 *
		 * TODO: Zip seeks to end-of-file at bid time.  If
		 * other formats also start doing this, we may need to
		 * find a way for clients to fudge the seek offset to
		 * a block boundary.
		 *
		 * Hmmm... If whence was SEEK_END, we know the file
		 * size is (r - offset).  Can we use that to simplify
		 * the TODO items above?
		 */
		filter->avail = filter->client_avail = 0;
		filter->next = filter->buffer;
		filter->position = r;
		filter->end_of_file = 0;
	}
	return r;
}
1753