xref: /freebsd/contrib/xz/src/liblzma/common/stream_decoder_mt.c (revision 128836d304d93f2d00eb14069c27089ab46c38d4)
1 // SPDX-License-Identifier: 0BSD
2 
3 ///////////////////////////////////////////////////////////////////////////////
4 //
5 /// \file       stream_decoder_mt.c
6 /// \brief      Multithreaded .xz Stream decoder
7 //
8 //  Authors:    Sebastian Andrzej Siewior
9 //              Lasse Collin
10 //
11 ///////////////////////////////////////////////////////////////////////////////
12 
13 #include "common.h"
14 #include "block_decoder.h"
15 #include "stream_decoder.h"
16 #include "index.h"
17 #include "outqueue.h"
18 
19 
20 typedef enum {
21 	/// Waiting for work.
22 	/// Main thread may change this to THR_RUN or THR_EXIT.
23 	THR_IDLE,
24 
25 	/// Decoding is in progress.
26 	/// Main thread may change this to THR_IDLE or THR_EXIT.
27 	/// The worker thread may change this to THR_IDLE.
28 	THR_RUN,
29 
30 	/// The main thread wants the thread to exit.
31 	THR_EXIT,
32 
33 } worker_state;
34 
35 
36 typedef enum {
37 	/// Partial updates (storing of worker thread progress
38 	/// to lzma_outbuf) are disabled.
39 	PARTIAL_DISABLED,
40 
41 	/// Main thread requests partial updates to be enabled but
42 	/// no partial update has been done by the worker thread yet.
43 	///
44 	/// Changing from PARTIAL_DISABLED to PARTIAL_START requires
45 	/// use of the worker-thread mutex. Other transitions don't
46 	/// need a mutex.
47 	PARTIAL_START,
48 
49 	/// Partial updates are enabled and the worker thread has done
50 	/// at least one partial update.
51 	PARTIAL_ENABLED,
52 
53 } partial_update_mode;
54 
55 
56 struct worker_thread {
57 	/// Worker state is protected with our mutex.
58 	worker_state state;
59 
60 	/// Input buffer that will contain the whole Block except Block Header.
61 	uint8_t *in;
62 
63 	/// Amount of memory allocated for "in"
64 	size_t in_size;
65 
66 	/// Number of bytes written to "in" by the main thread
67 	size_t in_filled;
68 
69 	/// Number of bytes consumed from "in" by the worker thread.
70 	size_t in_pos;
71 
72 	/// Amount of uncompressed data that has been decoded. This local
73 	/// copy is needed because updating outbuf->pos requires locking
74 	/// the main mutex (coder->mutex).
75 	size_t out_pos;
76 
77 	/// Pointer to the main structure is needed to (1) lock the main
78 	/// mutex (coder->mutex) when updating outbuf->pos and (2) when
79 	/// putting this thread back to the stack of free threads.
80 	struct lzma_stream_coder *coder;
81 
82 	/// The allocator is set by the main thread. Since a copy of the
83 	/// pointer is kept here, the application must not change the
84 	/// allocator before calling lzma_end().
85 	const lzma_allocator *allocator;
86 
87 	/// Output queue buffer to which the uncompressed data is written.
88 	lzma_outbuf *outbuf;
89 
90 	/// Amount of compressed data that has already been decompressed.
91 	/// This is updated from in_pos when our mutex is locked.
92 	/// This is size_t, not uint64_t, because per-thread progress
93 	/// is limited to sizes of allocated buffers.
94 	size_t progress_in;
95 
96 	/// Like progress_in but for uncompressed data.
97 	size_t progress_out;
98 
99 	/// Updating outbuf->pos requires locking the main mutex
100 	/// (coder->mutex). Since the main thread will only read output
101 	/// from the oldest outbuf in the queue, only the worker thread
102 	/// that is associated with the oldest outbuf needs to update its
103 	/// outbuf->pos. This avoids useless mutex contention that would
104 	/// happen if all worker threads were frequently locking the main
105 	/// mutex to update their outbuf->pos.
106 	///
107 	/// Only when partial_update is something else than PARTIAL_DISABLED,
108 	/// this worker thread will update outbuf->pos after each call to
109 	/// the Block decoder.
110 	partial_update_mode partial_update;
111 
112 	/// Block decoder
113 	lzma_next_coder block_decoder;
114 
115 	/// Thread-specific Block options are needed because the Block
116 	/// decoder modifies the struct given to it at initialization.
117 	lzma_block block_options;
118 
119 	/// Filter chain memory usage
120 	uint64_t mem_filters;
121 
122 	/// Next structure in the stack of free worker threads.
123 	struct worker_thread *next;
124 
125 	mythread_mutex mutex;
126 	mythread_cond cond;
127 
128 	/// The ID of this thread is used to join the thread
129 	/// when it's not needed anymore.
130 	mythread thread_id;
131 };
132 
133 
134 struct lzma_stream_coder {
135 	enum {
136 		SEQ_STREAM_HEADER,
137 		SEQ_BLOCK_HEADER,
138 		SEQ_BLOCK_INIT,
139 		SEQ_BLOCK_THR_INIT,
140 		SEQ_BLOCK_THR_RUN,
141 		SEQ_BLOCK_DIRECT_INIT,
142 		SEQ_BLOCK_DIRECT_RUN,
143 		SEQ_INDEX_WAIT_OUTPUT,
144 		SEQ_INDEX_DECODE,
145 		SEQ_STREAM_FOOTER,
146 		SEQ_STREAM_PADDING,
147 		SEQ_ERROR,
148 	} sequence;
149 
150 	/// Block decoder
151 	lzma_next_coder block_decoder;
152 
153 	/// Every Block Header will be decoded into this structure.
154 	/// This is also used to initialize a Block decoder when in
155 	/// direct mode. In threaded mode, a thread-specific copy will
156 	/// be made for decoder initialization because the Block decoder
157 	/// will modify the structure given to it.
158 	lzma_block block_options;
159 
160 	/// Buffer to hold a filter chain for Block Header decoding and
161 	/// initialization. These are freed after successful Block decoder
162 	/// initialization or at stream_decoder_mt_end(). The thread-specific
163 	/// copy of block_options won't hold a pointer to filters[] after
164 	/// initialization.
165 	lzma_filter filters[LZMA_FILTERS_MAX + 1];
166 
167 	/// Stream Flags from Stream Header
168 	lzma_stream_flags stream_flags;
169 
170 	/// Index is hashed so that it can be compared to the sizes of Blocks
171 	/// with O(1) memory usage.
172 	lzma_index_hash *index_hash;
173 
174 
175 	/// Maximum wait time if cannot use all the input and cannot
176 	/// fill the output buffer. This is in milliseconds.
177 	uint32_t timeout;
178 
179 
180 	/// Error code from a worker thread.
181 	///
182 	/// \note       Use mutex.
183 	lzma_ret thread_error;
184 
185 	/// Error code to return after pending output has been copied out. If
186 	/// set in read_output_and_wait(), this is a mirror of thread_error.
187 	/// If set in stream_decode_mt() then it's, for example, error that
188 	/// occurred when decoding Block Header.
189 	lzma_ret pending_error;
190 
191 	/// Number of threads that will be created at maximum.
192 	uint32_t threads_max;
193 
194 	/// Number of thread structures that have been initialized from
195 	/// "threads", and thus the number of worker threads actually
196 	/// created so far.
197 	uint32_t threads_initialized;
198 
199 	/// Array of allocated thread-specific structures. When no threads
200 	/// are in use (direct mode) this is NULL. In threaded mode this
201 	/// points to an array of threads_max number of worker_thread structs.
202 	struct worker_thread *threads;
203 
204 	/// Stack of free threads. When a thread finishes, it puts itself
205 	/// back into this stack. This starts as empty because threads
206 	/// are created only when actually needed.
207 	///
208 	/// \note       Use mutex.
209 	struct worker_thread *threads_free;
210 
211 	/// The most recent worker thread to which the main thread writes
212 	/// the new input from the application.
213 	struct worker_thread *thr;
214 
215 	/// Output buffer queue for decompressed data from the worker threads
216 	///
217 	/// \note       Use mutex with operations that need it.
218 	lzma_outq outq;
219 
220 	mythread_mutex mutex;
221 	mythread_cond cond;
222 
223 
224 	/// Memory usage that will not be exceeded in multi-threaded mode.
225 	/// Single-threaded mode can exceed this even by a large amount.
226 	uint64_t memlimit_threading;
227 
228 	/// Memory usage limit that should never be exceeded.
229 	/// LZMA_MEMLIMIT_ERROR will be returned if decoding isn't possible
230 	/// even in single-threaded mode without exceeding this limit.
231 	uint64_t memlimit_stop;
232 
233 	/// Amount of memory in use by the direct mode decoder
234 	/// (coder->block_decoder). In threaded mode this is 0.
235 	uint64_t mem_direct_mode;
236 
237 	/// Amount of memory needed by the running worker threads.
238 	/// This doesn't include the memory needed by the output buffer.
239 	///
240 	/// \note       Use mutex.
241 	uint64_t mem_in_use;
242 
243 	/// Amount of memory used by the idle (cached) threads.
244 	///
245 	/// \note       Use mutex.
246 	uint64_t mem_cached;
247 
248 
249 	/// Amount of memory needed for the filter chain of the next Block.
250 	uint64_t mem_next_filters;
251 
252 	/// Amount of memory needed for the thread-specific input buffer
253 	/// for the next Block.
254 	uint64_t mem_next_in;
255 
256 	/// Amount of memory actually needed to decode the next Block
257 	/// in threaded mode. This is
258 	/// mem_next_filters + mem_next_in + memory needed for lzma_outbuf.
259 	uint64_t mem_next_block;
260 
261 
262 	/// Amount of compressed data in Stream Header + Blocks that have
263 	/// already been finished.
264 	///
265 	/// \note       Use mutex.
266 	uint64_t progress_in;
267 
268 	/// Amount of uncompressed data in Blocks that have already
269 	/// been finished.
270 	///
271 	/// \note       Use mutex.
272 	uint64_t progress_out;
273 
274 
275 	/// If true, LZMA_NO_CHECK is returned if the Stream has
276 	/// no integrity check.
277 	bool tell_no_check;
278 
279 	/// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
280 	/// an integrity check that isn't supported by this liblzma build.
281 	bool tell_unsupported_check;
282 
283 	/// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
284 	bool tell_any_check;
285 
286 	/// If true, we will tell the Block decoder to skip calculating
287 	/// and verifying the integrity check.
288 	bool ignore_check;
289 
290 	/// If true, we will decode concatenated Streams that possibly have
291 	/// Stream Padding between or after them. LZMA_STREAM_END is returned
292 	/// once the application isn't giving us any new input (LZMA_FINISH),
293 	/// and we aren't in the middle of a Stream, and possible
294 	/// Stream Padding is a multiple of four bytes.
295 	bool concatenated;
296 
297 	/// If true, we will return any errors immediately instead of first
298 	/// producing all output before the location of the error.
299 	bool fail_fast;
300 
301 
302 	/// When decoding concatenated Streams, this is true as long as we
303 	/// are decoding the first Stream. This is needed to avoid misleading
304 	/// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
305 	/// bytes.
306 	bool first_stream;
307 
308 	/// This is used to track if the previous call to stream_decode_mt()
309 	/// had output space (*out_pos < out_size) and managed to fill the
310 	/// output buffer (*out_pos == out_size). This may be set to true
311 	/// in read_output_and_wait(). This is read and then reset to false
312 	/// at the beginning of stream_decode_mt().
313 	///
314 	/// This is needed to support applications that call lzma_code() in
315 	/// such a way that more input is provided only when lzma_code()
316 	/// didn't fill the output buffer completely. Basically, this makes
317 	/// it easier to convert such applications from single-threaded
318 	/// decoder to multi-threaded decoder.
319 	bool out_was_filled;
320 
321 	/// Write position in buffer[] and position in Stream Padding
322 	size_t pos;
323 
324 	/// Buffer to hold Stream Header, Block Header, and Stream Footer.
325 	/// Block Header has biggest maximum size.
326 	uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
327 };
328 
329 
330 /// Enables updating of outbuf->pos. This is a callback function that is
331 /// used with lzma_outq_enable_partial_output().
332 static void
worker_enable_partial_update(void * thr_ptr)333 worker_enable_partial_update(void *thr_ptr)
334 {
335 	struct worker_thread *thr = thr_ptr;
336 
337 	mythread_sync(thr->mutex) {
338 		thr->partial_update = PARTIAL_START;
339 		mythread_cond_signal(&thr->cond);
340 	}
341 }
342 
343 
/// Worker thread main loop.
///
/// Each worker decodes one Block at a time: it sleeps in THR_IDLE until
/// the main thread assigns it work (THR_RUN), feeds its input buffer to
/// the Block decoder in small chunks (reacting quickly to stop/exit
/// requests and to partial-update requests), and finally publishes the
/// result into its lzma_outbuf. THR_EXIT makes the thread free its own
/// resources and terminate.
static MYTHREAD_RET_TYPE
worker_decoder(void *thr_ptr)
{
	struct worker_thread *thr = thr_ptr;
	size_t in_filled;
	partial_update_mode partial_update;
	lzma_ret ret;

// Entry point of one iteration: acquire our own mutex first.
next_loop_lock:

	mythread_mutex_lock(&thr->mutex);
// Jumped to when the mutex is already held (e.g. after cond_wait).
next_loop_unlocked:

	if (thr->state == THR_IDLE) {
		mythread_cond_wait(&thr->cond, &thr->mutex);
		goto next_loop_unlocked;
	}

	if (thr->state == THR_EXIT) {
		mythread_mutex_unlock(&thr->mutex);

		// The worker owns its input buffer and Block decoder;
		// free them before destroying the synchronization objects.
		lzma_free(thr->in, thr->allocator);
		lzma_next_end(&thr->block_decoder, thr->allocator);

		mythread_mutex_destroy(&thr->mutex);
		mythread_cond_destroy(&thr->cond);

		return MYTHREAD_RET_VALUE;
	}

	assert(thr->state == THR_RUN);

	// Update progress info for get_progress().
	thr->progress_in = thr->in_pos;
	thr->progress_out = thr->out_pos;

	// If we don't have any new input, wait for a signal from the main
	// thread except if partial output has just been enabled. In that
	// case we will do one normal run so that the partial output info
	// gets passed to the main thread. The call to block_decoder.code()
	// is useless but harmless as it can occur only once per Block.
	in_filled = thr->in_filled;
	partial_update = thr->partial_update;

	if (in_filled == thr->in_pos && partial_update != PARTIAL_START) {
		mythread_cond_wait(&thr->cond, &thr->mutex);
		goto next_loop_unlocked;
	}

	mythread_mutex_unlock(&thr->mutex);

	// Pass the input in small chunks to the Block decoder.
	// This way we react reasonably fast if we are told to stop/exit,
	// and (when partial update is enabled) we tell about our progress
	// to the main thread frequently enough.
	const size_t chunk_size = 16384;
	if ((in_filled - thr->in_pos) > chunk_size)
		in_filled = thr->in_pos + chunk_size;

	ret = thr->block_decoder.code(
			thr->block_decoder.coder, thr->allocator,
			thr->in, &thr->in_pos, in_filled,
			thr->outbuf->buf, &thr->out_pos,
			thr->outbuf->allocated, LZMA_RUN);

	if (ret == LZMA_OK) {
		if (partial_update != PARTIAL_DISABLED) {
			// The main thread uses thr->mutex to change from
			// PARTIAL_DISABLED to PARTIAL_START. The main thread
			// doesn't care about this variable after that so we
			// can safely change it here to PARTIAL_ENABLED
			// without a mutex.
			thr->partial_update = PARTIAL_ENABLED;

			// The main thread is reading decompressed data
			// from thr->outbuf. Tell the main thread about
			// our progress.
			//
			// NOTE: It's possible that we consumed input without
			// producing any new output so it's possible that
			// only in_pos has changed. In case of PARTIAL_START
			// it is possible that neither in_pos nor out_pos has
			// changed.
			mythread_sync(thr->coder->mutex) {
				thr->outbuf->pos = thr->out_pos;
				thr->outbuf->decoder_in_pos = thr->in_pos;
				mythread_cond_signal(&thr->coder->cond);
			}
		}

		goto next_loop_lock;
	}

	// Either we finished successfully (LZMA_STREAM_END) or an error
	// occurred.
	//
	// The sizes are in the Block Header and the Block decoder
	// checks that they match, thus we know these:
	assert(ret != LZMA_STREAM_END || thr->in_pos == thr->in_size);
	assert(ret != LZMA_STREAM_END
		|| thr->out_pos == thr->block_options.uncompressed_size);

	mythread_sync(thr->mutex) {
		// Block decoder ensures this, but do a sanity check anyway
		// because thr->in_filled < thr->in_size means that the main
		// thread is still writing to thr->in.
		if (ret == LZMA_STREAM_END && thr->in_filled != thr->in_size) {
			assert(0);
			ret = LZMA_PROG_ERROR;
		}

		if (thr->state != THR_EXIT)
			thr->state = THR_IDLE;
	}

	// Free the input buffer. Don't update in_size as we need
	// it later to update thr->coder->mem_in_use.
	//
	// This step is skipped if an error occurred because the main thread
	// might still be writing to thr->in. The memory will be freed after
	// threads_end() sets thr->state = THR_EXIT.
	if (ret == LZMA_STREAM_END) {
		lzma_free(thr->in, thr->allocator);
		thr->in = NULL;
	}

	mythread_sync(thr->coder->mutex) {
		// Move our progress info to the main thread.
		thr->coder->progress_in += thr->in_pos;
		thr->coder->progress_out += thr->out_pos;
		thr->progress_in = 0;
		thr->progress_out = 0;

		// Mark the outbuf as finished.
		thr->outbuf->pos = thr->out_pos;
		thr->outbuf->decoder_in_pos = thr->in_pos;
		thr->outbuf->finished = true;
		thr->outbuf->finish_ret = ret;
		thr->outbuf = NULL;

		// If an error occurred, tell it to the main thread.
		if (ret != LZMA_STREAM_END
				&& thr->coder->thread_error == LZMA_OK)
			thr->coder->thread_error = ret;

		// Return the worker thread to the stack of available
		// threads only if no errors occurred.
		if (ret == LZMA_STREAM_END) {
			// Update memory usage counters.
			thr->coder->mem_in_use -= thr->in_size;
			thr->coder->mem_in_use -= thr->mem_filters;
			thr->coder->mem_cached += thr->mem_filters;

			// Put this thread to the stack of free threads.
			thr->next = thr->coder->threads_free;
			thr->coder->threads_free = thr;
		}

		mythread_cond_signal(&thr->coder->cond);
	}

	goto next_loop_lock;
}
507 
508 
509 /// Tells the worker threads to exit and waits for them to terminate.
510 static void
threads_end(struct lzma_stream_coder * coder,const lzma_allocator * allocator)511 threads_end(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
512 {
513 	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
514 		mythread_sync(coder->threads[i].mutex) {
515 			coder->threads[i].state = THR_EXIT;
516 			mythread_cond_signal(&coder->threads[i].cond);
517 		}
518 	}
519 
520 	for (uint32_t i = 0; i < coder->threads_initialized; ++i)
521 		mythread_join(coder->threads[i].thread_id);
522 
523 	lzma_free(coder->threads, allocator);
524 	coder->threads_initialized = 0;
525 	coder->threads = NULL;
526 	coder->threads_free = NULL;
527 
528 	// The threads don't update these when they exit. Do it here.
529 	coder->mem_in_use = 0;
530 	coder->mem_cached = 0;
531 
532 	return;
533 }
534 
535 
536 /// Tell worker threads to stop without doing any cleaning up.
537 /// The clean up will be done when threads_exit() is called;
538 /// it's not possible to reuse the threads after threads_stop().
539 ///
540 /// This is called before returning an unrecoverable error code
541 /// to the application. It would be waste of processor time
542 /// to keep the threads running in such a situation.
543 static void
threads_stop(struct lzma_stream_coder * coder)544 threads_stop(struct lzma_stream_coder *coder)
545 {
546 	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
547 		// The threads that are in the THR_RUN state will stop
548 		// when they check the state the next time. There's no
549 		// need to signal coder->threads[i].cond.
550 		mythread_sync(coder->threads[i].mutex) {
551 			coder->threads[i].state = THR_IDLE;
552 		}
553 	}
554 
555 	return;
556 }
557 
558 
559 /// Initialize a new worker_thread structure and create a new thread.
560 static lzma_ret
initialize_new_thread(struct lzma_stream_coder * coder,const lzma_allocator * allocator)561 initialize_new_thread(struct lzma_stream_coder *coder,
562 		const lzma_allocator *allocator)
563 {
564 	// Allocate the coder->threads array if needed. It's done here instead
565 	// of when initializing the decoder because we don't need this if we
566 	// use the direct mode (we may even free coder->threads in the middle
567 	// of the file if we switch from threaded to direct mode).
568 	if (coder->threads == NULL) {
569 		coder->threads = lzma_alloc(
570 			coder->threads_max * sizeof(struct worker_thread),
571 			allocator);
572 
573 		if (coder->threads == NULL)
574 			return LZMA_MEM_ERROR;
575 	}
576 
577 	// Pick a free structure.
578 	assert(coder->threads_initialized < coder->threads_max);
579 	struct worker_thread *thr
580 			= &coder->threads[coder->threads_initialized];
581 
582 	if (mythread_mutex_init(&thr->mutex))
583 		goto error_mutex;
584 
585 	if (mythread_cond_init(&thr->cond))
586 		goto error_cond;
587 
588 	thr->state = THR_IDLE;
589 	thr->in = NULL;
590 	thr->in_size = 0;
591 	thr->allocator = allocator;
592 	thr->coder = coder;
593 	thr->outbuf = NULL;
594 	thr->block_decoder = LZMA_NEXT_CODER_INIT;
595 	thr->mem_filters = 0;
596 
597 	if (mythread_create(&thr->thread_id, worker_decoder, thr))
598 		goto error_thread;
599 
600 	++coder->threads_initialized;
601 	coder->thr = thr;
602 
603 	return LZMA_OK;
604 
605 error_thread:
606 	mythread_cond_destroy(&thr->cond);
607 
608 error_cond:
609 	mythread_mutex_destroy(&thr->mutex);
610 
611 error_mutex:
612 	return LZMA_MEM_ERROR;
613 }
614 
615 
616 static lzma_ret
get_thread(struct lzma_stream_coder * coder,const lzma_allocator * allocator)617 get_thread(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
618 {
619 	// If there is a free structure on the stack, use it.
620 	mythread_sync(coder->mutex) {
621 		if (coder->threads_free != NULL) {
622 			coder->thr = coder->threads_free;
623 			coder->threads_free = coder->threads_free->next;
624 
625 			// The thread is no longer in the cache so subtract
626 			// it from the cached memory usage. Don't add it
627 			// to mem_in_use though; the caller will handle it
628 			// since it knows how much memory it will actually
629 			// use (the filter chain might change).
630 			coder->mem_cached -= coder->thr->mem_filters;
631 		}
632 	}
633 
634 	if (coder->thr == NULL) {
635 		assert(coder->threads_initialized < coder->threads_max);
636 
637 		// Initialize a new thread.
638 		return_if_error(initialize_new_thread(coder, allocator));
639 	}
640 
641 	coder->thr->in_filled = 0;
642 	coder->thr->in_pos = 0;
643 	coder->thr->out_pos = 0;
644 
645 	coder->thr->progress_in = 0;
646 	coder->thr->progress_out = 0;
647 
648 	coder->thr->partial_update = PARTIAL_DISABLED;
649 
650 	return LZMA_OK;
651 }
652 
653 
/// Copy decoded data from the output queue to the application's output
/// buffer and, when allowed, wait for more output to appear or for input
/// to become possible.
///
/// \param       out, out_pos, out_size  Application's output buffer
/// \param       input_is_possible  If non-NULL, set to true (and return)
///              once memory usage, a free outbuf slot, and a free/creatable
///              thread allow starting the next Block.
/// \param       waiting_allowed    If false, never block on coder->cond.
/// \param       wait_abs, has_blocked  Timeout bookkeeping when
///              coder->timeout != 0.
///
/// \return      LZMA_OK, LZMA_TIMED_OUT, or an error code (from a worker
///              thread or from lzma_outq_read()). On errors other than
///              LZMA_TIMED_OUT the worker threads are told to stop.
static lzma_ret
read_output_and_wait(struct lzma_stream_coder *coder,
		const lzma_allocator *allocator,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size,
		bool *input_is_possible,
		bool waiting_allowed,
		mythread_condtime *wait_abs, bool *has_blocked)
{
	lzma_ret ret = LZMA_OK;

	mythread_sync(coder->mutex) {
		do {
			// Get as much output from the queue as is possible
			// without blocking.
			const size_t out_start = *out_pos;
			do {
				ret = lzma_outq_read(&coder->outq, allocator,
						out, out_pos, out_size,
						NULL, NULL);

				// If a Block was finished, tell the worker
				// thread of the next Block (if it is still
				// running) to start telling the main thread
				// when new output is available.
				if (ret == LZMA_STREAM_END)
					lzma_outq_enable_partial_output(
						&coder->outq,
						&worker_enable_partial_update);

				// Loop until a Block wasn't finished.
				// It's important to loop around even if
				// *out_pos == out_size because there could
				// be an empty Block that will return
				// LZMA_STREAM_END without needing any
				// output space.
			} while (ret == LZMA_STREAM_END);

			// Check if lzma_outq_read reported an error from
			// the Block decoder.
			if (ret != LZMA_OK)
				break;

			// If the output buffer is now full but it wasn't full
			// when this function was called, set out_was_filled.
			// This way the next call to stream_decode_mt() knows
			// that some output was produced and no output space
			// remained in the previous call to stream_decode_mt().
			if (*out_pos == out_size && *out_pos != out_start)
				coder->out_was_filled = true;

			// Check if any thread has indicated an error.
			if (coder->thread_error != LZMA_OK) {
				// If LZMA_FAIL_FAST was used, report errors
				// from worker threads immediately.
				if (coder->fail_fast) {
					ret = coder->thread_error;
					break;
				}

				// Otherwise set pending_error. The value we
				// set here will not actually get used other
				// than working as a flag that an error has
				// occurred. This is because in SEQ_ERROR
				// all output before the error will be read
				// first by calling this function, and once we
				// reach the location of the (first) error the
				// error code from the above lzma_outq_read()
				// will be returned to the application.
				//
				// Use LZMA_PROG_ERROR since the value should
				// never leak to the application. It's
				// possible that pending_error has already
				// been set but that doesn't matter: if we get
				// here, pending_error only works as a flag.
				coder->pending_error = LZMA_PROG_ERROR;
			}

			// Check if decoding of the next Block can be started.
			// The memusage of the active threads must be low
			// enough, there must be a free buffer slot in the
			// output queue, and there must be a free thread
			// (that can be either created or an existing one
			// reused).
			//
			// NOTE: This is checked after reading the output
			// above because reading the output can free a slot in
			// the output queue and also reduce active memusage.
			//
			// NOTE: If output queue is empty, then input will
			// always be possible.
			if (input_is_possible != NULL
					&& coder->memlimit_threading
						- coder->mem_in_use
						- coder->outq.mem_in_use
						>= coder->mem_next_block
					&& lzma_outq_has_buf(&coder->outq)
					&& (coder->threads_initialized
							< coder->threads_max
						|| coder->threads_free
							!= NULL)) {
				*input_is_possible = true;
				break;
			}

			// If the caller doesn't want us to block, return now.
			if (!waiting_allowed)
				break;

			// This check is needed only when input_is_possible
			// is NULL. We must return if we aren't waiting for
			// input to become possible and there is no more
			// output coming from the queue.
			if (lzma_outq_is_empty(&coder->outq)) {
				assert(input_is_possible == NULL);
				break;
			}

			// If there is more data available from the queue,
			// our out buffer must be full and we need to return
			// so that the application can provide more output
			// space.
			//
			// NOTE: In general lzma_outq_is_readable() can return
			// true also when there are no more bytes available.
			// This can happen when a Block has finished without
			// providing any new output. We know that this is not
			// the case because in the beginning of this loop we
			// tried to read as much as possible even when we had
			// no output space left and the mutex has been locked
			// all the time (so worker threads cannot have changed
			// anything). Thus there must be actual pending output
			// in the queue.
			if (lzma_outq_is_readable(&coder->outq)) {
				assert(*out_pos == out_size);
				break;
			}

			// If the application stops providing more input
			// in the middle of a Block, there will eventually
			// be one worker thread left that is stuck waiting for
			// more input (that might never arrive) and a matching
			// outbuf which the worker thread cannot finish due
			// to lack of input. We must detect this situation,
			// otherwise we would end up waiting indefinitely
			// (if no timeout is in use) or keep returning
			// LZMA_TIMED_OUT while making no progress. Thus, the
			// application would never get LZMA_BUF_ERROR from
			// lzma_code() which would tell the application that
			// no more progress is possible. No LZMA_BUF_ERROR
			// means that, for example, truncated .xz files could
			// cause an infinite loop.
			//
			// A worker thread doing partial updates will
			// store not only the output position in outbuf->pos
			// but also the matching input position in
			// outbuf->decoder_in_pos. Here we check if that
			// input position matches the amount of input that
			// the worker thread has been given (in_filled).
			// If so, we must return and not wait as no more
			// output will be coming without first getting more
			// input to the worker thread. If the application
			// keeps calling lzma_code() without providing more
			// input, it will eventually get LZMA_BUF_ERROR.
			//
			// NOTE: We can read partial_update and in_filled
			// without thr->mutex as only the main thread
			// modifies these variables. decoder_in_pos requires
			// coder->mutex which we are already holding.
			if (coder->thr != NULL && coder->thr->partial_update
					!= PARTIAL_DISABLED) {
				// There is exactly one outbuf in the queue.
				assert(coder->thr->outbuf == coder->outq.head);
				assert(coder->thr->outbuf == coder->outq.tail);

				if (coder->thr->outbuf->decoder_in_pos
						== coder->thr->in_filled)
					break;
			}

			// Wait for input or output to become possible.
			if (coder->timeout != 0) {
				// See the comment in stream_encoder_mt.c
				// about why mythread_condtime_set() is used
				// like this.
				//
				// FIXME?
				// In contrast to the encoder, this calls
				// _condtime_set while the mutex is locked.
				if (!*has_blocked) {
					*has_blocked = true;
					mythread_condtime_set(wait_abs,
							&coder->cond,
							coder->timeout);
				}

				if (mythread_cond_timedwait(&coder->cond,
						&coder->mutex,
						wait_abs) != 0) {
					ret = LZMA_TIMED_OUT;
					break;
				}
			} else {
				mythread_cond_wait(&coder->cond,
						&coder->mutex);
			}
		} while (ret == LZMA_OK);
	}

	// If we are returning an error, then the application cannot get
	// more output from us and thus keeping the threads running is
	// useless and waste of CPU time.
	if (ret != LZMA_OK && ret != LZMA_TIMED_OUT)
		threads_stop(coder);

	return ret;
}
871 
872 
/// Decode the Block Header of the next Block from the application's input.
/// May need to be called multiple times for one header; partial progress
/// is kept in coder->buffer and coder->pos between calls.
///
/// \return      - LZMA_OK: More input is needed (or *in_pos == in_size).
///              - LZMA_STREAM_END: coder->block_options is ready.
///              - LZMA_INDEX_DETECTED: The Index field begins instead of
///                a Block Header.
///              - Errors from lzma_block_header_decode().
static lzma_ret
decode_block_header(struct lzma_stream_coder *coder,
		const lzma_allocator *allocator, const uint8_t *restrict in,
		size_t *restrict in_pos, size_t in_size)
{
	if (*in_pos >= in_size)
		return LZMA_OK;

	if (coder->pos == 0) {
		// Detect if it's Index.
		if (in[*in_pos] == INDEX_INDICATOR)
			return LZMA_INDEX_DETECTED;

		// Calculate the size of the Block Header. Note that
		// Block Header decoder wants to see this byte too
		// so don't advance *in_pos.
		coder->block_options.header_size
				= lzma_block_header_size_decode(
					in[*in_pos]);
	}

	// Copy the Block Header to the internal buffer.
	lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
			coder->block_options.header_size);

	// Return if we didn't get the whole Block Header yet.
	if (coder->pos < coder->block_options.header_size)
		return LZMA_OK;

	coder->pos = 0;

	// Version 1 is needed to support the .ignore_check option.
	coder->block_options.version = 1;

	// Block Header decoder will initialize all members of this array
	// so we don't need to do it here.
	coder->block_options.filters = coder->filters;

	// Decode the Block Header.
	return_if_error(lzma_block_header_decode(&coder->block_options,
			allocator, coder->buffer));

	// If LZMA_IGNORE_CHECK was used, this flag needs to be set.
	// It has to be set after lzma_block_header_decode() because
	// it always resets this to false.
	coder->block_options.ignore_check = coder->ignore_check;

	// coder->block_options is ready now.
	return LZMA_STREAM_END;
}
923 
924 
925 /// Get the size of the Compressed Data + Block Padding + Check.
926 static size_t
comp_blk_size(const struct lzma_stream_coder * coder)927 comp_blk_size(const struct lzma_stream_coder *coder)
928 {
929 	return vli_ceil4(coder->block_options.compressed_size)
930 			+ lzma_check_size(coder->stream_flags.check);
931 }
932 
933 
934 /// Returns true if the size (compressed or uncompressed) is such that
935 /// threaded decompression cannot be used. Sizes that are too big compared
936 /// to SIZE_MAX must be rejected to avoid integer overflows and truncations
937 /// when lzma_vli is assigned to a size_t.
938 static bool
is_direct_mode_needed(lzma_vli size)939 is_direct_mode_needed(lzma_vli size)
940 {
941 	return size == LZMA_VLI_UNKNOWN || size > SIZE_MAX / 3;
942 }
943 
944 
945 static lzma_ret
stream_decoder_reset(struct lzma_stream_coder * coder,const lzma_allocator * allocator)946 stream_decoder_reset(struct lzma_stream_coder *coder,
947 		const lzma_allocator *allocator)
948 {
949 	// Initialize the Index hash used to verify the Index.
950 	coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
951 	if (coder->index_hash == NULL)
952 		return LZMA_MEM_ERROR;
953 
954 	// Reset the rest of the variables.
955 	coder->sequence = SEQ_STREAM_HEADER;
956 	coder->pos = 0;
957 
958 	return LZMA_OK;
959 }
960 
961 
962 static lzma_ret
stream_decode_mt(void * coder_ptr,const lzma_allocator * allocator,const uint8_t * restrict in,size_t * restrict in_pos,size_t in_size,uint8_t * restrict out,size_t * restrict out_pos,size_t out_size,lzma_action action)963 stream_decode_mt(void *coder_ptr, const lzma_allocator *allocator,
964 		 const uint8_t *restrict in, size_t *restrict in_pos,
965 		 size_t in_size,
966 		 uint8_t *restrict out, size_t *restrict out_pos,
967 		 size_t out_size, lzma_action action)
968 {
969 	struct lzma_stream_coder *coder = coder_ptr;
970 
971 	mythread_condtime wait_abs;
972 	bool has_blocked = false;
973 
974 	// Determine if in SEQ_BLOCK_HEADER and SEQ_BLOCK_THR_RUN we should
975 	// tell read_output_and_wait() to wait until it can fill the output
976 	// buffer (or a timeout occurs). Two conditions must be met:
977 	//
978 	// (1) If the caller provided no new input. The reason for this
979 	//     can be, for example, the end of the file or that there is
980 	//     a pause in the input stream and more input is available
981 	//     a little later. In this situation we should wait for output
982 	//     because otherwise we would end up in a busy-waiting loop where
983 	//     we make no progress and the application just calls us again
984 	//     without providing any new input. This would then result in
985 	//     LZMA_BUF_ERROR even though more output would be available
986 	//     once the worker threads decode more data.
987 	//
988 	// (2) Even if (1) is true, we will not wait if the previous call to
989 	//     this function managed to produce some output and the output
990 	//     buffer became full. This is for compatibility with applications
991 	//     that call lzma_code() in such a way that new input is provided
992 	//     only when the output buffer didn't become full. Without this
993 	//     trick such applications would have bad performance (bad
994 	//     parallelization due to decoder not getting input fast enough).
995 	//
996 	//     NOTE: Such loops might require that timeout is disabled (0)
997 	//     if they assume that output-not-full implies that all input has
998 	//     been consumed. If and only if timeout is enabled, we may return
999 	//     when output isn't full *and* not all input has been consumed.
1000 	//
1001 	// However, if LZMA_FINISH is used, the above is ignored and we always
1002 	// wait (timeout can still cause us to return) because we know that
1003 	// we won't get any more input. This matters if the input file is
1004 	// truncated and we are doing single-shot decoding, that is,
1005 	// timeout = 0 and LZMA_FINISH is used on the first call to
1006 	// lzma_code() and the output buffer is known to be big enough
1007 	// to hold all uncompressed data:
1008 	//
1009 	//   - If LZMA_FINISH wasn't handled specially, we could return
1010 	//     LZMA_OK before providing all output that is possible with the
1011 	//     truncated input. The rest would be available if lzma_code() was
1012 	//     called again but then it's not single-shot decoding anymore.
1013 	//
1014 	//   - By handling LZMA_FINISH specially here, the first call will
1015 	//     produce all the output, matching the behavior of the
1016 	//     single-threaded decoder.
1017 	//
1018 	// So it's a very specific corner case but also easy to avoid. Note
1019 	// that this special handling of LZMA_FINISH has no effect for
1020 	// single-shot decoding when the input file is valid (not truncated);
1021 	// premature LZMA_OK wouldn't be possible as long as timeout = 0.
1022 	const bool waiting_allowed = action == LZMA_FINISH
1023 			|| (*in_pos == in_size && !coder->out_was_filled);
1024 	coder->out_was_filled = false;
1025 
1026 	while (true)
1027 	switch (coder->sequence) {
1028 	case SEQ_STREAM_HEADER: {
1029 		// Copy the Stream Header to the internal buffer.
1030 		const size_t in_old = *in_pos;
1031 		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
1032 				LZMA_STREAM_HEADER_SIZE);
1033 		coder->progress_in += *in_pos - in_old;
1034 
1035 		// Return if we didn't get the whole Stream Header yet.
1036 		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
1037 			return LZMA_OK;
1038 
1039 		coder->pos = 0;
1040 
1041 		// Decode the Stream Header.
1042 		const lzma_ret ret = lzma_stream_header_decode(
1043 				&coder->stream_flags, coder->buffer);
1044 		if (ret != LZMA_OK)
1045 			return ret == LZMA_FORMAT_ERROR && !coder->first_stream
1046 					? LZMA_DATA_ERROR : ret;
1047 
1048 		// If we are decoding concatenated Streams, and the later
1049 		// Streams have invalid Header Magic Bytes, we give
1050 		// LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
1051 		coder->first_stream = false;
1052 
1053 		// Copy the type of the Check so that Block Header and Block
1054 		// decoders see it.
1055 		coder->block_options.check = coder->stream_flags.check;
1056 
1057 		// Even if we return LZMA_*_CHECK below, we want
1058 		// to continue from Block Header decoding.
1059 		coder->sequence = SEQ_BLOCK_HEADER;
1060 
1061 		// Detect if there's no integrity check or if it is
1062 		// unsupported if those were requested by the application.
1063 		if (coder->tell_no_check && coder->stream_flags.check
1064 				== LZMA_CHECK_NONE)
1065 			return LZMA_NO_CHECK;
1066 
1067 		if (coder->tell_unsupported_check
1068 				&& !lzma_check_is_supported(
1069 					coder->stream_flags.check))
1070 			return LZMA_UNSUPPORTED_CHECK;
1071 
1072 		if (coder->tell_any_check)
1073 			return LZMA_GET_CHECK;
1074 
1075 		FALLTHROUGH;
1076 	}
1077 
1078 	case SEQ_BLOCK_HEADER: {
1079 		const size_t in_old = *in_pos;
1080 		const lzma_ret ret = decode_block_header(coder, allocator,
1081 				in, in_pos, in_size);
1082 		coder->progress_in += *in_pos - in_old;
1083 
1084 		if (ret == LZMA_OK) {
1085 			// We didn't decode the whole Block Header yet.
1086 			//
1087 			// Read output from the queue before returning. This
1088 			// is important because it is possible that the
1089 			// application doesn't have any new input available
1090 			// immediately. If we didn't try to copy output from
1091 			// the output queue here, lzma_code() could end up
1092 			// returning LZMA_BUF_ERROR even though queued output
1093 			// is available.
1094 			//
1095 			// If the lzma_code() call provided at least one input
1096 			// byte, only copy as much data from the output queue
1097 			// as is available immediately. This way the
1098 			// application will be able to provide more input
1099 			// without a delay.
1100 			//
1101 			// On the other hand, if lzma_code() was called with
1102 			// an empty input buffer(*), treat it specially: try
1103 			// to fill the output buffer even if it requires
1104 			// waiting for the worker threads to provide output
1105 			// (timeout, if specified, can still cause us to
1106 			// return).
1107 			//
1108 			//   - This way the application will be able to get all
1109 			//     data that can be decoded from the input provided
1110 			//     so far.
1111 			//
1112 			//   - We avoid both premature LZMA_BUF_ERROR and
1113 			//     busy-waiting where the application repeatedly
1114 			//     calls lzma_code() which immediately returns
1115 			//     LZMA_OK without providing new data.
1116 			//
1117 			//   - If the queue becomes empty, we won't wait
1118 			//     anything and will return LZMA_OK immediately
1119 			//     (coder->timeout is completely ignored).
1120 			//
1121 			// (*) See the comment at the beginning of this
1122 			//     function how waiting_allowed is determined
1123 			//     and why there is an exception to the rule
1124 			//     of "called with an empty input buffer".
1125 			assert(*in_pos == in_size);
1126 
1127 			// If LZMA_FINISH was used we know that we won't get
1128 			// more input, so the file must be truncated if we
1129 			// get here. If worker threads don't detect any
1130 			// errors, eventually there will be no more output
1131 			// while we keep returning LZMA_OK which gets
1132 			// converted to LZMA_BUF_ERROR in lzma_code().
1133 			//
1134 			// If fail-fast is enabled then we will return
1135 			// immediately using LZMA_DATA_ERROR instead of
1136 			// LZMA_OK or LZMA_BUF_ERROR. Rationale for the
1137 			// error code:
1138 			//
1139 			//   - Worker threads may have a large amount of
1140 			//     not-yet-decoded input data and we don't
1141 			//     know for sure if all data is valid. Bad
1142 			//     data there would result in LZMA_DATA_ERROR
1143 			//     when fail-fast isn't used.
1144 			//
1145 			//   - Immediate LZMA_BUF_ERROR would be a bit weird
1146 			//     considering the older liblzma code. lzma_code()
1147 			//     even has an assertion to prevent coders from
1148 			//     returning LZMA_BUF_ERROR directly.
1149 			//
1150 			// The downside of this is that with fail-fast apps
1151 			// cannot always distinguish between corrupt and
1152 			// truncated files.
1153 			if (action == LZMA_FINISH && coder->fail_fast) {
1154 				// We won't produce any more output. Stop
1155 				// the unfinished worker threads so they
1156 				// won't waste CPU time.
1157 				threads_stop(coder);
1158 				return LZMA_DATA_ERROR;
1159 			}
1160 
1161 			// read_output_and_wait() will call threads_stop()
1162 			// if needed so with that we can use return_if_error.
1163 			return_if_error(read_output_and_wait(coder, allocator,
1164 				out, out_pos, out_size,
1165 				NULL, waiting_allowed,
1166 				&wait_abs, &has_blocked));
1167 
1168 			if (coder->pending_error != LZMA_OK) {
1169 				coder->sequence = SEQ_ERROR;
1170 				break;
1171 			}
1172 
1173 			return LZMA_OK;
1174 		}
1175 
1176 		if (ret == LZMA_INDEX_DETECTED) {
1177 			coder->sequence = SEQ_INDEX_WAIT_OUTPUT;
1178 			break;
1179 		}
1180 
1181 		// See if an error occurred.
1182 		if (ret != LZMA_STREAM_END) {
1183 			// NOTE: Here and in all other places where
1184 			// pending_error is set, it may overwrite the value
1185 			// (LZMA_PROG_ERROR) set by read_output_and_wait().
1186 			// That function might overwrite value set here too.
1187 			// These are fine because when read_output_and_wait()
1188 			// sets pending_error, it actually works as a flag
1189 			// variable only ("some error has occurred") and the
1190 			// actual value of pending_error is not used in
1191 			// SEQ_ERROR. In such cases SEQ_ERROR will eventually
1192 			// get the correct error code from the return value of
1193 			// a later read_output_and_wait() call.
1194 			coder->pending_error = ret;
1195 			coder->sequence = SEQ_ERROR;
1196 			break;
1197 		}
1198 
1199 		// Calculate the memory usage of the filters / Block decoder.
1200 		coder->mem_next_filters = lzma_raw_decoder_memusage(
1201 				coder->filters);
1202 
1203 		if (coder->mem_next_filters == UINT64_MAX) {
1204 			// One or more unknown Filter IDs.
1205 			coder->pending_error = LZMA_OPTIONS_ERROR;
1206 			coder->sequence = SEQ_ERROR;
1207 			break;
1208 		}
1209 
1210 		coder->sequence = SEQ_BLOCK_INIT;
1211 		FALLTHROUGH;
1212 	}
1213 
1214 	case SEQ_BLOCK_INIT: {
1215 		// Check if decoding is possible at all with the current
1216 		// memlimit_stop which we must never exceed.
1217 		//
1218 		// This needs to be the first thing in SEQ_BLOCK_INIT
1219 		// to make it possible to restart decoding after increasing
1220 		// memlimit_stop with lzma_memlimit_set().
1221 		if (coder->mem_next_filters > coder->memlimit_stop) {
1222 			// Flush pending output before returning
1223 			// LZMA_MEMLIMIT_ERROR. If the application doesn't
1224 			// want to increase the limit, at least it will get
1225 			// all the output possible so far.
1226 			return_if_error(read_output_and_wait(coder, allocator,
1227 					out, out_pos, out_size,
1228 					NULL, true, &wait_abs, &has_blocked));
1229 
1230 			if (!lzma_outq_is_empty(&coder->outq))
1231 				return LZMA_OK;
1232 
1233 			return LZMA_MEMLIMIT_ERROR;
1234 		}
1235 
1236 		// Check if the size information is available in Block Header.
1237 		// If it is, check if the sizes are small enough that we don't
1238 		// need to worry *too* much about integer overflows later in
1239 		// the code. If these conditions are not met, we must use the
1240 		// single-threaded direct mode.
1241 		if (is_direct_mode_needed(coder->block_options.compressed_size)
1242 				|| is_direct_mode_needed(
1243 				coder->block_options.uncompressed_size)) {
1244 			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1245 			break;
1246 		}
1247 
1248 		// Calculate the amount of memory needed for the input and
1249 		// output buffers in threaded mode.
1250 		//
1251 		// These cannot overflow because we already checked that
1252 		// the sizes are small enough using is_direct_mode_needed().
1253 		coder->mem_next_in = comp_blk_size(coder);
1254 		const uint64_t mem_buffers = coder->mem_next_in
1255 				+ lzma_outq_outbuf_memusage(
1256 				coder->block_options.uncompressed_size);
1257 
1258 		// Add the amount needed by the filters.
1259 		// Avoid integer overflows.
1260 		if (UINT64_MAX - mem_buffers < coder->mem_next_filters) {
1261 			// Use direct mode if the memusage would overflow.
1262 			// This is a theoretical case that shouldn't happen
1263 			// in practice unless the input file is weird (broken
1264 			// or malicious).
1265 			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1266 			break;
1267 		}
1268 
1269 		// Amount of memory needed to decode this Block in
1270 		// threaded mode:
1271 		coder->mem_next_block = coder->mem_next_filters + mem_buffers;
1272 
1273 		// If this alone would exceed memlimit_threading, then we must
1274 		// use the single-threaded direct mode.
1275 		if (coder->mem_next_block > coder->memlimit_threading) {
1276 			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1277 			break;
1278 		}
1279 
1280 		// Use the threaded mode. Free the direct mode decoder in
1281 		// case it has been initialized.
1282 		lzma_next_end(&coder->block_decoder, allocator);
1283 		coder->mem_direct_mode = 0;
1284 
1285 		// Since we already know what the sizes are supposed to be,
1286 		// we can already add them to the Index hash. The Block
1287 		// decoder will verify the values while decoding.
1288 		const lzma_ret ret = lzma_index_hash_append(coder->index_hash,
1289 				lzma_block_unpadded_size(
1290 					&coder->block_options),
1291 				coder->block_options.uncompressed_size);
1292 		if (ret != LZMA_OK) {
1293 			coder->pending_error = ret;
1294 			coder->sequence = SEQ_ERROR;
1295 			break;
1296 		}
1297 
1298 		coder->sequence = SEQ_BLOCK_THR_INIT;
1299 		FALLTHROUGH;
1300 	}
1301 
1302 	case SEQ_BLOCK_THR_INIT: {
1303 		// We need to wait for a multiple conditions to become true
1304 		// until we can initialize the Block decoder and let a worker
1305 		// thread decode it:
1306 		//
1307 		//   - Wait for the memory usage of the active threads to drop
1308 		//     so that starting the decoding of this Block won't make
1309 		//     us go over memlimit_threading.
1310 		//
1311 		//   - Wait for at least one free output queue slot.
1312 		//
1313 		//   - Wait for a free worker thread.
1314 		//
1315 		// While we wait, we must copy decompressed data to the out
1316 		// buffer and catch possible decoder errors.
1317 		//
1318 		// read_output_and_wait() does all the above.
1319 		bool block_can_start = false;
1320 
1321 		return_if_error(read_output_and_wait(coder, allocator,
1322 				out, out_pos, out_size,
1323 				&block_can_start, true,
1324 				&wait_abs, &has_blocked));
1325 
1326 		if (coder->pending_error != LZMA_OK) {
1327 			coder->sequence = SEQ_ERROR;
1328 			break;
1329 		}
1330 
1331 		if (!block_can_start) {
1332 			// It's not a timeout because return_if_error handles
1333 			// it already. Output queue cannot be empty either
1334 			// because in that case block_can_start would have
1335 			// been true. Thus the output buffer must be full and
1336 			// the queue isn't empty.
1337 			assert(*out_pos == out_size);
1338 			assert(!lzma_outq_is_empty(&coder->outq));
1339 			return LZMA_OK;
1340 		}
1341 
1342 		// We know that we can start decoding this Block without
1343 		// exceeding memlimit_threading. However, to stay below
1344 		// memlimit_threading may require freeing some of the
1345 		// cached memory.
1346 		//
1347 		// Get a local copy of variables that require locking the
1348 		// mutex. It is fine if the worker threads modify the real
1349 		// values after we read these as those changes can only be
1350 		// towards more favorable conditions (less memory in use,
1351 		// more in cache).
1352 		//
1353 		// These are initialized to silence warnings.
1354 		uint64_t mem_in_use = 0;
1355 		uint64_t mem_cached = 0;
1356 		struct worker_thread *thr = NULL;
1357 
1358 		mythread_sync(coder->mutex) {
1359 			mem_in_use = coder->mem_in_use;
1360 			mem_cached = coder->mem_cached;
1361 			thr = coder->threads_free;
1362 		}
1363 
1364 		// The maximum amount of memory that can be held by other
1365 		// threads and cached buffers while allowing us to start
1366 		// decoding the next Block.
1367 		const uint64_t mem_max = coder->memlimit_threading
1368 				- coder->mem_next_block;
1369 
1370 		// If the existing allocations are so large that starting
1371 		// to decode this Block might exceed memlimit_threads,
1372 		// try to free memory from the output queue cache first.
1373 		//
1374 		// NOTE: This math assumes the worst case. It's possible
1375 		// that the limit wouldn't be exceeded if the existing cached
1376 		// allocations are reused.
1377 		if (mem_in_use + mem_cached + coder->outq.mem_allocated
1378 				> mem_max) {
1379 			// Clear the outq cache except leave one buffer in
1380 			// the cache if its size is correct. That way we
1381 			// don't free and almost immediately reallocate
1382 			// an identical buffer.
1383 			lzma_outq_clear_cache2(&coder->outq, allocator,
1384 				coder->block_options.uncompressed_size);
1385 		}
1386 
1387 		// If there is at least one worker_thread in the cache and
1388 		// the existing allocations are so large that starting to
1389 		// decode this Block might exceed memlimit_threads, free
1390 		// memory by freeing cached Block decoders.
1391 		//
1392 		// NOTE: The comparison is different here than above.
1393 		// Here we don't care about cached buffers in outq anymore
1394 		// and only look at memory actually in use. This is because
1395 		// if there is something in outq cache, it's a single buffer
1396 		// that can be used as is. We ensured this in the above
1397 		// if-block.
1398 		uint64_t mem_freed = 0;
1399 		if (thr != NULL && mem_in_use + mem_cached
1400 				+ coder->outq.mem_in_use > mem_max) {
1401 			// Don't free the first Block decoder if its memory
1402 			// usage isn't greater than what this Block will need.
1403 			// Typically the same filter chain is used for all
1404 			// Blocks so this way the allocations can be reused
1405 			// when get_thread() picks the first worker_thread
1406 			// from the cache.
1407 			if (thr->mem_filters <= coder->mem_next_filters)
1408 				thr = thr->next;
1409 
1410 			while (thr != NULL) {
1411 				lzma_next_end(&thr->block_decoder, allocator);
1412 				mem_freed += thr->mem_filters;
1413 				thr->mem_filters = 0;
1414 				thr = thr->next;
1415 			}
1416 		}
1417 
1418 		// Update the memory usage counters. Note that coder->mem_*
1419 		// may have changed since we read them so we must subtract
1420 		// or add the changes.
1421 		mythread_sync(coder->mutex) {
1422 			coder->mem_cached -= mem_freed;
1423 
1424 			// Memory needed for the filters and the input buffer.
1425 			// The output queue takes care of its own counter so
1426 			// we don't touch it here.
1427 			//
1428 			// NOTE: After this, coder->mem_in_use +
1429 			// coder->mem_cached might count the same thing twice.
1430 			// If so, this will get corrected in get_thread() when
1431 			// a worker_thread is picked from coder->free_threads
1432 			// and its memory usage is subtracted from mem_cached.
1433 			coder->mem_in_use += coder->mem_next_in
1434 					+ coder->mem_next_filters;
1435 		}
1436 
1437 		// Allocate memory for the output buffer in the output queue.
1438 		lzma_ret ret = lzma_outq_prealloc_buf(
1439 				&coder->outq, allocator,
1440 				coder->block_options.uncompressed_size);
1441 		if (ret != LZMA_OK) {
1442 			threads_stop(coder);
1443 			return ret;
1444 		}
1445 
1446 		// Set up coder->thr.
1447 		ret = get_thread(coder, allocator);
1448 		if (ret != LZMA_OK) {
1449 			threads_stop(coder);
1450 			return ret;
1451 		}
1452 
1453 		// The new Block decoder memory usage is already counted in
1454 		// coder->mem_in_use. Store it in the thread too.
1455 		coder->thr->mem_filters = coder->mem_next_filters;
1456 
1457 		// Initialize the Block decoder.
1458 		coder->thr->block_options = coder->block_options;
1459 		ret = lzma_block_decoder_init(
1460 					&coder->thr->block_decoder, allocator,
1461 					&coder->thr->block_options);
1462 
1463 		// Free the allocated filter options since they are needed
1464 		// only to initialize the Block decoder.
1465 		lzma_filters_free(coder->filters, allocator);
1466 		coder->thr->block_options.filters = NULL;
1467 
1468 		// Check if memory usage calculation and Block encoder
1469 		// initialization succeeded.
1470 		if (ret != LZMA_OK) {
1471 			coder->pending_error = ret;
1472 			coder->sequence = SEQ_ERROR;
1473 			break;
1474 		}
1475 
1476 		// Allocate the input buffer.
1477 		coder->thr->in_size = coder->mem_next_in;
1478 		coder->thr->in = lzma_alloc(coder->thr->in_size, allocator);
1479 		if (coder->thr->in == NULL) {
1480 			threads_stop(coder);
1481 			return LZMA_MEM_ERROR;
1482 		}
1483 
1484 		// Get the preallocated output buffer.
1485 		coder->thr->outbuf = lzma_outq_get_buf(
1486 				&coder->outq, coder->thr);
1487 
1488 		// Start the decoder.
1489 		mythread_sync(coder->thr->mutex) {
1490 			assert(coder->thr->state == THR_IDLE);
1491 			coder->thr->state = THR_RUN;
1492 			mythread_cond_signal(&coder->thr->cond);
1493 		}
1494 
1495 		// Enable output from the thread that holds the oldest output
1496 		// buffer in the output queue (if such a thread exists).
1497 		mythread_sync(coder->mutex) {
1498 			lzma_outq_enable_partial_output(&coder->outq,
1499 					&worker_enable_partial_update);
1500 		}
1501 
1502 		coder->sequence = SEQ_BLOCK_THR_RUN;
1503 		FALLTHROUGH;
1504 	}
1505 
1506 	case SEQ_BLOCK_THR_RUN: {
1507 		if (action == LZMA_FINISH && coder->fail_fast) {
1508 			// We know that we won't get more input and that
1509 			// the caller wants fail-fast behavior. If we see
1510 			// that we don't have enough input to finish this
1511 			// Block, return LZMA_DATA_ERROR immediately.
1512 			// See SEQ_BLOCK_HEADER for the error code rationale.
1513 			const size_t in_avail = in_size - *in_pos;
1514 			const size_t in_needed = coder->thr->in_size
1515 					- coder->thr->in_filled;
1516 			if (in_avail < in_needed) {
1517 				threads_stop(coder);
1518 				return LZMA_DATA_ERROR;
1519 			}
1520 		}
1521 
1522 		// Copy input to the worker thread.
1523 		size_t cur_in_filled = coder->thr->in_filled;
1524 		lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
1525 				&cur_in_filled, coder->thr->in_size);
1526 
1527 		// Tell the thread how much we copied.
1528 		mythread_sync(coder->thr->mutex) {
1529 			coder->thr->in_filled = cur_in_filled;
1530 
1531 			// NOTE: Most of the time we are copying input faster
1532 			// than the thread can decode so most of the time
1533 			// calling mythread_cond_signal() is useless but
1534 			// we cannot make it conditional because thr->in_pos
1535 			// is updated without a mutex. And the overhead should
1536 			// be very much negligible anyway.
1537 			mythread_cond_signal(&coder->thr->cond);
1538 		}
1539 
1540 		// Read output from the output queue. Just like in
1541 		// SEQ_BLOCK_HEADER, we wait to fill the output buffer
1542 		// only if waiting_allowed was set to true in the beginning
1543 		// of this function (see the comment there) and there is
1544 		// no input available. In SEQ_BLOCK_HEADER, there is never
1545 		// input available when read_output_and_wait() is called,
1546 		// but here there can be when LZMA_FINISH is used, thus we
1547 		// need to check if *in_pos == in_size. Otherwise we would
1548 		// wait here instead of using the available input to start
1549 		// a new thread.
1550 		return_if_error(read_output_and_wait(coder, allocator,
1551 				out, out_pos, out_size,
1552 				NULL,
1553 				waiting_allowed && *in_pos == in_size,
1554 				&wait_abs, &has_blocked));
1555 
1556 		if (coder->pending_error != LZMA_OK) {
1557 			coder->sequence = SEQ_ERROR;
1558 			break;
1559 		}
1560 
1561 		// Return if the input didn't contain the whole Block.
1562 		//
1563 		// NOTE: When we updated coder->thr->in_filled a few lines
1564 		// above, the worker thread might by now have finished its
1565 		// work and returned itself back to the stack of free threads.
1566 		if (coder->thr->in_filled < coder->thr->in_size) {
1567 			assert(*in_pos == in_size);
1568 			return LZMA_OK;
1569 		}
1570 
1571 		// The whole Block has been copied to the thread-specific
1572 		// buffer. Continue from the next Block Header or Index.
1573 		coder->thr = NULL;
1574 		coder->sequence = SEQ_BLOCK_HEADER;
1575 		break;
1576 	}
1577 
1578 	case SEQ_BLOCK_DIRECT_INIT: {
1579 		// Wait for the threads to finish and that all decoded data
1580 		// has been copied to the output. That is, wait until the
1581 		// output queue becomes empty.
1582 		//
1583 		// NOTE: No need to check for coder->pending_error as
1584 		// we aren't consuming any input until the queue is empty
1585 		// and if there is a pending error, read_output_and_wait()
1586 		// will eventually return it before the queue is empty.
1587 		return_if_error(read_output_and_wait(coder, allocator,
1588 				out, out_pos, out_size,
1589 				NULL, true, &wait_abs, &has_blocked));
1590 		if (!lzma_outq_is_empty(&coder->outq))
1591 			return LZMA_OK;
1592 
1593 		// Free the cached output buffers.
1594 		lzma_outq_clear_cache(&coder->outq, allocator);
1595 
1596 		// Get rid of the worker threads, including the coder->threads
1597 		// array.
1598 		threads_end(coder, allocator);
1599 
1600 		// Initialize the Block decoder.
1601 		const lzma_ret ret = lzma_block_decoder_init(
1602 				&coder->block_decoder, allocator,
1603 				&coder->block_options);
1604 
1605 		// Free the allocated filter options since they are needed
1606 		// only to initialize the Block decoder.
1607 		lzma_filters_free(coder->filters, allocator);
1608 		coder->block_options.filters = NULL;
1609 
1610 		// Check if Block decoder initialization succeeded.
1611 		if (ret != LZMA_OK)
1612 			return ret;
1613 
1614 		// Make the memory usage visible to _memconfig().
1615 		coder->mem_direct_mode = coder->mem_next_filters;
1616 
1617 		coder->sequence = SEQ_BLOCK_DIRECT_RUN;
1618 		FALLTHROUGH;
1619 	}
1620 
1621 	case SEQ_BLOCK_DIRECT_RUN: {
1622 		const size_t in_old = *in_pos;
1623 		const size_t out_old = *out_pos;
1624 		const lzma_ret ret = coder->block_decoder.code(
1625 				coder->block_decoder.coder, allocator,
1626 				in, in_pos, in_size, out, out_pos, out_size,
1627 				action);
1628 		coder->progress_in += *in_pos - in_old;
1629 		coder->progress_out += *out_pos - out_old;
1630 
1631 		if (ret != LZMA_STREAM_END)
1632 			return ret;
1633 
1634 		// Block decoded successfully. Add the new size pair to
1635 		// the Index hash.
1636 		return_if_error(lzma_index_hash_append(coder->index_hash,
1637 				lzma_block_unpadded_size(
1638 					&coder->block_options),
1639 				coder->block_options.uncompressed_size));
1640 
1641 		coder->sequence = SEQ_BLOCK_HEADER;
1642 		break;
1643 	}
1644 
1645 	case SEQ_INDEX_WAIT_OUTPUT:
1646 		// Flush the output from all worker threads so that we can
1647 		// decode the Index without thinking about threading.
1648 		return_if_error(read_output_and_wait(coder, allocator,
1649 				out, out_pos, out_size,
1650 				NULL, true, &wait_abs, &has_blocked));
1651 
1652 		if (!lzma_outq_is_empty(&coder->outq))
1653 			return LZMA_OK;
1654 
1655 		coder->sequence = SEQ_INDEX_DECODE;
1656 		FALLTHROUGH;
1657 
1658 	case SEQ_INDEX_DECODE: {
1659 		// If we don't have any input, don't call
1660 		// lzma_index_hash_decode() since it would return
1661 		// LZMA_BUF_ERROR, which we must not do here.
1662 		if (*in_pos >= in_size)
1663 			return LZMA_OK;
1664 
1665 		// Decode the Index and compare it to the hash calculated
1666 		// from the sizes of the Blocks (if any).
1667 		const size_t in_old = *in_pos;
1668 		const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
1669 				in, in_pos, in_size);
1670 		coder->progress_in += *in_pos - in_old;
1671 		if (ret != LZMA_STREAM_END)
1672 			return ret;
1673 
1674 		coder->sequence = SEQ_STREAM_FOOTER;
1675 		FALLTHROUGH;
1676 	}
1677 
1678 	case SEQ_STREAM_FOOTER: {
1679 		// Copy the Stream Footer to the internal buffer.
1680 		const size_t in_old = *in_pos;
1681 		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
1682 				LZMA_STREAM_HEADER_SIZE);
1683 		coder->progress_in += *in_pos - in_old;
1684 
1685 		// Return if we didn't get the whole Stream Footer yet.
1686 		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
1687 			return LZMA_OK;
1688 
1689 		coder->pos = 0;
1690 
1691 		// Decode the Stream Footer. The decoder gives
1692 		// LZMA_FORMAT_ERROR if the magic bytes don't match,
1693 		// so convert that return code to LZMA_DATA_ERROR.
1694 		lzma_stream_flags footer_flags;
1695 		const lzma_ret ret = lzma_stream_footer_decode(
1696 				&footer_flags, coder->buffer);
1697 		if (ret != LZMA_OK)
1698 			return ret == LZMA_FORMAT_ERROR
1699 					? LZMA_DATA_ERROR : ret;
1700 
1701 		// Check that Index Size stored in the Stream Footer matches
1702 		// the real size of the Index field.
1703 		if (lzma_index_hash_size(coder->index_hash)
1704 				!= footer_flags.backward_size)
1705 			return LZMA_DATA_ERROR;
1706 
1707 		// Compare that the Stream Flags fields are identical in
1708 		// both Stream Header and Stream Footer.
1709 		return_if_error(lzma_stream_flags_compare(
1710 				&coder->stream_flags, &footer_flags));
1711 
1712 		if (!coder->concatenated)
1713 			return LZMA_STREAM_END;
1714 
1715 		coder->sequence = SEQ_STREAM_PADDING;
1716 		FALLTHROUGH;
1717 	}
1718 
1719 	case SEQ_STREAM_PADDING:
1720 		assert(coder->concatenated);
1721 
1722 		// Skip over possible Stream Padding.
1723 		while (true) {
1724 			if (*in_pos >= in_size) {
1725 				// Unless LZMA_FINISH was used, we cannot
1726 				// know if there's more input coming later.
1727 				if (action != LZMA_FINISH)
1728 					return LZMA_OK;
1729 
1730 				// Stream Padding must be a multiple of
1731 				// four bytes.
1732 				return coder->pos == 0
1733 						? LZMA_STREAM_END
1734 						: LZMA_DATA_ERROR;
1735 			}
1736 
1737 			// If the byte is not zero, it probably indicates
1738 			// beginning of a new Stream (or the file is corrupt).
1739 			if (in[*in_pos] != 0x00)
1740 				break;
1741 
1742 			++*in_pos;
1743 			++coder->progress_in;
1744 			coder->pos = (coder->pos + 1) & 3;
1745 		}
1746 
1747 		// Stream Padding must be a multiple of four bytes (empty
1748 		// Stream Padding is OK).
1749 		if (coder->pos != 0) {
1750 			++*in_pos;
1751 			++coder->progress_in;
1752 			return LZMA_DATA_ERROR;
1753 		}
1754 
1755 		// Prepare to decode the next Stream.
1756 		return_if_error(stream_decoder_reset(coder, allocator));
1757 		break;
1758 
1759 	case SEQ_ERROR:
1760 		if (!coder->fail_fast) {
1761 			// Let the application get all data before the point
1762 			// where the error was detected. This matches the
1763 			// behavior of single-threaded use.
1764 			//
1765 			// FIXME? Some errors (LZMA_MEM_ERROR) don't get here,
1766 			// they are returned immediately. Thus in rare cases
1767 			// the output will be less than in the single-threaded
1768 			// mode. Maybe this doesn't matter much in practice.
1769 			return_if_error(read_output_and_wait(coder, allocator,
1770 					out, out_pos, out_size,
1771 					NULL, true, &wait_abs, &has_blocked));
1772 
1773 			// We get here only if the error happened in the main
1774 			// thread, for example, unsupported Block Header.
1775 			if (!lzma_outq_is_empty(&coder->outq))
1776 				return LZMA_OK;
1777 		}
1778 
1779 		// We only get here if no errors were detected by the worker
1780 		// threads. Errors from worker threads would have already been
1781 		// returned by the call to read_output_and_wait() above.
1782 		return coder->pending_error;
1783 
1784 	default:
1785 		assert(0);
1786 		return LZMA_PROG_ERROR;
1787 	}
1788 
1789 	// Never reached
1790 }
1791 
1792 
1793 static void
stream_decoder_mt_end(void * coder_ptr,const lzma_allocator * allocator)1794 stream_decoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
1795 {
1796 	struct lzma_stream_coder *coder = coder_ptr;
1797 
1798 	threads_end(coder, allocator);
1799 	lzma_outq_end(&coder->outq, allocator);
1800 
1801 	lzma_next_end(&coder->block_decoder, allocator);
1802 	lzma_filters_free(coder->filters, allocator);
1803 	lzma_index_hash_end(coder->index_hash, allocator);
1804 
1805 	lzma_free(coder, allocator);
1806 	return;
1807 }
1808 
1809 
1810 static lzma_check
stream_decoder_mt_get_check(const void * coder_ptr)1811 stream_decoder_mt_get_check(const void *coder_ptr)
1812 {
1813 	const struct lzma_stream_coder *coder = coder_ptr;
1814 	return coder->stream_flags.check;
1815 }
1816 
1817 
1818 static lzma_ret
stream_decoder_mt_memconfig(void * coder_ptr,uint64_t * memusage,uint64_t * old_memlimit,uint64_t new_memlimit)1819 stream_decoder_mt_memconfig(void *coder_ptr, uint64_t *memusage,
1820 		uint64_t *old_memlimit, uint64_t new_memlimit)
1821 {
1822 	// NOTE: This function gets/sets memlimit_stop. For now,
1823 	// memlimit_threading cannot be modified after initialization.
1824 	//
1825 	// *memusage will include cached memory too. Excluding cached memory
1826 	// would be misleading and it wouldn't help the applications to
1827 	// know how much memory is actually needed to decompress the file
1828 	// because the higher the number of threads and the memlimits are
1829 	// the more memory the decoder may use.
1830 	//
1831 	// Setting a new limit includes the cached memory too and too low
1832 	// limits will be rejected. Alternative could be to free the cached
1833 	// memory immediately if that helps to bring the limit down but
1834 	// the current way is the simplest. It's unlikely that limit needs
1835 	// to be lowered in the middle of a file anyway; the typical reason
1836 	// to want a new limit is to increase after LZMA_MEMLIMIT_ERROR
1837 	// and even such use isn't common.
1838 	struct lzma_stream_coder *coder = coder_ptr;
1839 
1840 	mythread_sync(coder->mutex) {
1841 		*memusage = coder->mem_direct_mode
1842 				+ coder->mem_in_use
1843 				+ coder->mem_cached
1844 				+ coder->outq.mem_allocated;
1845 	}
1846 
1847 	// If no filter chains are allocated, *memusage may be zero.
1848 	// Always return at least LZMA_MEMUSAGE_BASE.
1849 	if (*memusage < LZMA_MEMUSAGE_BASE)
1850 		*memusage = LZMA_MEMUSAGE_BASE;
1851 
1852 	*old_memlimit = coder->memlimit_stop;
1853 
1854 	if (new_memlimit != 0) {
1855 		if (new_memlimit < *memusage)
1856 			return LZMA_MEMLIMIT_ERROR;
1857 
1858 		coder->memlimit_stop = new_memlimit;
1859 	}
1860 
1861 	return LZMA_OK;
1862 }
1863 
1864 
1865 static void
stream_decoder_mt_get_progress(void * coder_ptr,uint64_t * progress_in,uint64_t * progress_out)1866 stream_decoder_mt_get_progress(void *coder_ptr,
1867 		uint64_t *progress_in, uint64_t *progress_out)
1868 {
1869 	struct lzma_stream_coder *coder = coder_ptr;
1870 
1871 	// Lock coder->mutex to prevent finishing threads from moving their
1872 	// progress info from the worker_thread structure to lzma_stream_coder.
1873 	mythread_sync(coder->mutex) {
1874 		*progress_in = coder->progress_in;
1875 		*progress_out = coder->progress_out;
1876 
1877 		for (size_t i = 0; i < coder->threads_initialized; ++i) {
1878 			mythread_sync(coder->threads[i].mutex) {
1879 				*progress_in += coder->threads[i].progress_in;
1880 				*progress_out += coder->threads[i]
1881 						.progress_out;
1882 			}
1883 		}
1884 	}
1885 
1886 	return;
1887 }
1888 
1889 
static lzma_ret
stream_decoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
		       const lzma_mt *options)
{
	struct lzma_stream_coder *coder;

	// Validate the options before allocating anything.
	if (options->threads == 0 || options->threads > LZMA_THREADS_MAX)
		return LZMA_OPTIONS_ERROR;

	if (options->flags & ~LZMA_SUPPORTED_FLAGS)
		return LZMA_OPTIONS_ERROR;

	lzma_next_coder_init(&stream_decoder_mt_init, next, allocator);

	// One-time initialization: allocate the coder structure and set
	// up the members that must never be re-initialized when the
	// decoder is reused. On reuse (next->coder != NULL) this whole
	// branch is skipped.
	coder = next->coder;
	if (!coder) {
		coder = lzma_alloc(sizeof(struct lzma_stream_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;

		// If the condition variable cannot be created, the
		// already-created mutex must be destroyed too so that
		// nothing leaks before freeing the coder.
		if (mythread_mutex_init(&coder->mutex)) {
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		if (mythread_cond_init(&coder->cond)) {
			mythread_mutex_destroy(&coder->mutex);
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		next->code = &stream_decode_mt;
		next->end = &stream_decoder_mt_end;
		next->get_check = &stream_decoder_mt_get_check;
		next->memconfig = &stream_decoder_mt_memconfig;
		next->get_progress = &stream_decoder_mt_get_progress;

		// Mark the filter chain and the other lazily-allocated
		// members as empty so that stream_decoder_mt_end() can
		// be called safely even if init fails later.
		coder->filters[0].id = LZMA_VLI_UNKNOWN;
		memzero(&coder->outq, sizeof(coder->outq));

		coder->block_decoder = LZMA_NEXT_CODER_INIT;
		coder->mem_direct_mode = 0;

		coder->index_hash = NULL;
		coder->threads = NULL;
		coder->threads_free = NULL;
		coder->threads_initialized = 0;
	}

	// Cleanup old filter chain if one remains after unfinished decoding
	// of a previous Stream.
	lzma_filters_free(coder->filters, allocator);

	// By allocating threads from scratch we can start memory-usage
	// accounting from scratch, too. Changes in filter and block sizes may
	// affect number of threads.
	//
	// Reusing threads doesn't seem worth it. Unlike the single-threaded
	// decoder, with some types of input file combinations reusing
	// could leave quite a lot of memory allocated but unused (first
	// file could allocate a lot, the next files could use fewer
	// threads and some of the allocations from the first file would not
	// get freed unless memlimit_threading forces us to clear caches).
	//
	// NOTE: The direct mode decoder isn't freed here if one exists.
	// It will be reused or freed as needed in the main loop.
	threads_end(coder, allocator);

	// All memusage counters start at 0 (including mem_direct_mode).
	// The little extra that is needed for the structs in this file
	// get accounted well enough by the filter chain memory usage
	// which adds LZMA_MEMUSAGE_BASE for each chain. However,
	// stream_decoder_mt_memconfig() has to handle this specially so that
	// it will never return less than LZMA_MEMUSAGE_BASE as memory usage.
	coder->mem_in_use = 0;
	coder->mem_cached = 0;
	coder->mem_next_block = 0;

	coder->progress_in = 0;
	coder->progress_out = 0;

	coder->sequence = SEQ_STREAM_HEADER;
	coder->thread_error = LZMA_OK;
	coder->pending_error = LZMA_OK;
	coder->thr = NULL;

	coder->timeout = options->timeout;

	// Clamp the limits to at least 1 byte, and never let the
	// threading limit exceed the hard stop limit.
	coder->memlimit_threading = my_max(1, options->memlimit_threading);
	coder->memlimit_stop = my_max(1, options->memlimit_stop);
	if (coder->memlimit_threading > coder->memlimit_stop)
		coder->memlimit_threading = coder->memlimit_stop;

	// Convert the flag bits into booleans for quicker access.
	coder->tell_no_check = (options->flags & LZMA_TELL_NO_CHECK) != 0;
	coder->tell_unsupported_check
			= (options->flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
	coder->tell_any_check = (options->flags & LZMA_TELL_ANY_CHECK) != 0;
	coder->ignore_check = (options->flags & LZMA_IGNORE_CHECK) != 0;
	coder->concatenated = (options->flags & LZMA_CONCATENATED) != 0;
	coder->fail_fast = (options->flags & LZMA_FAIL_FAST) != 0;

	coder->first_stream = true;
	coder->out_was_filled = false;
	coder->pos = 0;

	coder->threads_max = options->threads;

	// The output queue is sized for one buffer per possible thread.
	return_if_error(lzma_outq_init(&coder->outq, allocator,
				       coder->threads_max));

	return stream_decoder_reset(coder, allocator);
}
2004 
2005 
2006 extern LZMA_API(lzma_ret)
lzma_stream_decoder_mt(lzma_stream * strm,const lzma_mt * options)2007 lzma_stream_decoder_mt(lzma_stream *strm, const lzma_mt *options)
2008 {
2009 	lzma_next_strm_init(stream_decoder_mt_init, strm, options);
2010 
2011 	strm->internal->supported_actions[LZMA_RUN] = true;
2012 	strm->internal->supported_actions[LZMA_FINISH] = true;
2013 
2014 	return LZMA_OK;
2015 }
2016