// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       range_decoder.h
/// \brief      Range Decoder
///
//  Authors:    Igor Pavlov
//              Lasse Collin
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_RANGE_DECODER_H
#define LZMA_RANGE_DECODER_H

#include "range_common.h"


// Choose the range decoder variants to use with a bitmask.
// If no bits are set, only the basic version is used.
// If more than one version is selected for the same feature,
// the last one on the list below is used.
//
// Bitwise-or of the following enables the branchless C versions:
//   0x01   normal bittrees
//   0x02   fixed-sized reverse bittrees
//   0x04   variable-sized reverse bittrees (not faster)
//   0x08   matched literal (not faster)
//
// GCC & Clang compatible x86-64 inline assembly:
//   0x010   normal bittrees
//   0x020   fixed-sized reverse bittrees
//   0x040   variable-sized reverse bittrees
//   0x080   matched literal
//   0x100   direct bits
//
// The default can be overridden at build time by defining
// LZMA_RANGE_DECODER_CONFIG to the desired mask.
//
// 2024-02-22: Feedback from benchmarks:
//   - Branchless C (0x003) can be better than the basic version on x86-64
//     but it's often slightly worse on other archs. Since the assembly is
//     much better on x86-64, branchless C isn't used at all by default.
//   - With x86-64 asm, there are slight differences between GCC and Clang
//     and different processors. Overall 0x1F0 seems to be the best choice.
#ifndef LZMA_RANGE_DECODER_CONFIG
#	if defined(__x86_64__) && !defined(__ILP32__) \
			&& !defined(__NVCOMPILER) \
			&& (defined(__GNUC__) || defined(__clang__))
#		define LZMA_RANGE_DECODER_CONFIG 0x1F0
#	else
#		define LZMA_RANGE_DECODER_CONFIG 0
#	endif
#endif
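
// For example, a build that wants to benchmark the branchless C bittree
// variants instead of the default could define the mask on the compiler
// command line (hypothetical invocation; any build system works):
//
//     cc -DLZMA_RANGE_DECODER_CONFIG=0x03 -c lzma_decoder.c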


// Negative RC_BIT_MODEL_TOTAL but the lowest RC_MOVE_BITS are flipped.
// This is useful for updating probability variables in branchless decoding:
//
//     uint32_t decoded_bit = ...;
//     uint32_t tmp = RC_BIT_MODEL_OFFSET;
//     tmp &= decoded_bit - 1;
//     prob -= (prob + tmp) >> RC_MOVE_BITS;
//
// (tmp must be 32 bits: truncating it to the 16-bit probability type would
// break the modular arithmetic that this update relies on.)
#define RC_BIT_MODEL_OFFSET \
	((UINT32_C(1) << RC_MOVE_BITS) - 1 - RC_BIT_MODEL_TOTAL)
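
// A worked example with the values from range_common.h
// (RC_BIT_MODEL_TOTAL == 2048, RC_MOVE_BITS == 5) and prob == 1024:
//
//     decoded_bit == 1:  tmp == 0, so prob -= 1024 >> 5, i.e. prob == 992,
//                        exactly rc_update_1().
//     decoded_bit == 0:  tmp == RC_BIT_MODEL_OFFSET and, in 32-bit modular
//                        arithmetic, prob -= (1024 + tmp) >> 5 leaves
//                        prob == 1056 once stored back into 16 bits,
//                        exactly rc_update_0(): 1024 + ((2048 - 1024) >> 5).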


typedef struct {
	uint32_t range;
	uint32_t code;
	uint32_t init_bytes_left;
} lzma_range_decoder;


/// Reads the first five bytes to initialize the range decoder.
static inline lzma_ret
rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,
		size_t *restrict in_pos, size_t in_size)
{
	while (rc->init_bytes_left > 0) {
		if (*in_pos == in_size)
			return LZMA_OK;

		// The first byte is always 0x00. It could have been omitted
		// in LZMA2 but it wasn't, so one byte is wasted in every
		// LZMA2 chunk.
		if (rc->init_bytes_left == 5 && in[*in_pos] != 0x00)
			return LZMA_DATA_ERROR;

		rc->code = (rc->code << 8) | in[*in_pos];
		++*in_pos;
		--rc->init_bytes_left;
	}

	return LZMA_STREAM_END;
}
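
// A caller sketch (illustrative only; the real callers are in the LZMA and
// LZMA2 decoders). LZMA_OK means more input is needed before decoding can
// start; LZMA_STREAM_END means initialization is complete:
//
//     switch (rc_read_init(&coder->rc, in, in_pos, in_size)) {
//     case LZMA_OK:
//             return LZMA_OK; // Wait for more input.
//     case LZMA_STREAM_END:
//             break; // Five bytes consumed, start decoding.
//     default:
//             return LZMA_DATA_ERROR; // First byte wasn't 0x00.
//     }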


/// Makes local copies of the range decoder and *in_pos variables. Doing this
/// improves speed significantly. The range decoder macros also expect the
/// variables 'in' and 'in_size' to be defined.
#define rc_to_local(range_decoder, in_pos, fast_mode_in_required) \
	lzma_range_decoder rc = range_decoder; \
	const uint8_t *rc_in_ptr = in + (in_pos); \
	const uint8_t *rc_in_end = in + in_size; \
	const uint8_t *rc_in_fast_end \
			= (rc_in_end - rc_in_ptr) <= (fast_mode_in_required) \
			? rc_in_ptr \
			: rc_in_end - (fast_mode_in_required); \
	(void)rc_in_fast_end; /* Silence a warning with HAVE_SMALL. */ \
	uint32_t rc_bound


/// Evaluates to true if there is enough input remaining to use fast mode.
#define rc_is_fast_allowed() (rc_in_ptr < rc_in_fast_end)


/// Stores the local copies back to the range decoder structure.
#define rc_from_local(range_decoder, in_pos) \
do { \
	range_decoder = rc; \
	in_pos = (size_t)(rc_in_ptr - in); \
} while (0)
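
// Typical wrapping pattern (a sketch, not the exact code in the decoders;
// IN_REQUIRED stands for whatever minimum the fast-mode code paths need):
//
//     rc_to_local(coder->rc, *in_pos, IN_REQUIRED);
//     while (...) {
//             // Decode bits with the macros below; "goto out" from the
//             // _safe variants lands after the loop.
//     }
// out:
//     rc_from_local(coder->rc, *in_pos);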


/// Resets the range decoder structure.
#define rc_reset(range_decoder) \
do { \
	(range_decoder).range = UINT32_MAX; \
	(range_decoder).code = 0; \
	(range_decoder).init_bytes_left = 5; \
} while (0)


/// When decoding has been properly finished, rc.code is always zero unless
/// the input stream is corrupt. So checking this can catch some corrupt
/// files especially if they don't have any other integrity check.
#define rc_is_finished(range_decoder) \
	((range_decoder).code == 0)


// Read the next input byte if needed.
#define rc_normalize() \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)


/// Like rc_normalize() but, if more input is needed and none is available,
/// "goto out" is used to jump out of the main decoder loop. The "_safe"
/// macros are used in the resumable decoder mode: the sequence is saved in
/// coder->sequence so that decoding can continue from that point later.
#define rc_normalize_safe(seq) \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		if (rc_in_ptr == rc_in_end) { \
			coder->sequence = seq; \
			goto out; \
		} \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)
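
// Resumption sketch (hypothetical, simplified from the pattern the real
// decoders use): each step is tagged with a sequence value and sits under
// a matching switch case, so a later call re-enters exactly where the
// input ran out:
//
//     switch (coder->sequence) {
//     case SEQ_FOO:
//             rc_bit_safe(coder->probs[i], , , SEQ_FOO);
//             // Reached only after the bit was fully decoded.
//             ...
//     }
// out:
//     rc_from_local(coder->rc, *in_pos);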


/// Start decoding a bit. This must be used together with rc_update_0()
/// and rc_update_1():
///
///     rc_if_0(prob) {
///         rc_update_0(prob);
///         // Do something
///     } else {
///         rc_update_1(prob);
///         // Do something else
///     }
///
#define rc_if_0(prob) \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


#define rc_if_0_safe(prob, seq) \
	rc_normalize_safe(seq); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 0.
///
/// The x86-64 assembly uses the commented method but it seems that,
/// at least on x86-64, the first version is slightly faster as C code.
#define rc_update_0(prob) \
do { \
	rc.range = rc_bound; \
	prob += (RC_BIT_MODEL_TOTAL - (prob)) >> RC_MOVE_BITS; \
	/* prob -= ((prob) + RC_BIT_MODEL_OFFSET) >> RC_MOVE_BITS; */ \
} while (0)


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 1.
#define rc_update_1(prob) \
do { \
	rc.range -= rc_bound; \
	rc.code -= rc_bound; \
	prob -= (prob) >> RC_MOVE_BITS; \
} while (0)


/// Decodes one bit and runs action0 or action1 depending on the decoded bit.
/// This macro is used as the last step in bittree reverse decoders since
/// those don't use "symbol" for anything other than indexing the probability
/// arrays.
#define rc_bit_last(prob, action0, action1) \
do { \
	rc_if_0(prob) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


#define rc_bit_last_safe(prob, action0, action1, seq) \
do { \
	rc_if_0_safe(prob, seq) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


/// Decodes one bit, updates "symbol", and runs action0 or action1 depending
/// on the decoded bit.
#define rc_bit(prob, action0, action1) \
	rc_bit_last(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1);


#define rc_bit_safe(prob, action0, action1, seq) \
	rc_bit_last_safe(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1, \
		seq);

// Unroll fixed-sized bittree decoding.
//
// A compile-time constant in final_add can be used to get rid of the high
// bit from symbol that is used for array indexing (1U << bittree_bits).
// final_add may also be used to add an offset to the result (the LZMA
// length decoder does that).
//
// The reason to have final_add here is that in the asm code the addition
// can be done for free: on x86-64 there is the SBB instruction with -1 as
// the immediate value, and final_add is combined with that value.
#define rc_bittree_bit(prob) \
	rc_bit(prob, , )

#define rc_bittree3(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree6(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree8(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)
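
// For example, after three rc_bittree_bit() steps symbol is in [8, 15],
// so a plain 3-bit value in [0, 7] could be obtained with the hypothetical
// call
//
//     rc_bittree3(probs, -(1U << 3));
//
// and a decoder that wants the result biased by some base value can fold
// that in too: final_add = base - (1U << 3).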


// Fixed-sized reverse bittree
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_bit_last(probs[symbol + 1], , symbol += 1); \
	rc_bit_last(probs[symbol + 2], , symbol += 2); \
	rc_bit_last(probs[symbol + 4], , symbol += 4); \
	rc_bit_last(probs[symbol + 8], , symbol += 8); \
} while (0)


// Decode one bit from a variable-sized reverse bittree. The loop is done
// in the code that uses this macro. This could be changed if the assembly
// version benefited from having the loop done in assembly, but it didn't
// seem so in early 2024.
//
// Also, if the loop was done here, the loop counter would likely be local
// to the macro so that it wouldn't modify yet another input variable.
// A _safe version of a macro with a loop couldn't avoid a modifiable
// input variable, though.
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_bit(probs[symbol], \
		, \
		dest += value_to_add_if_1);
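
// Illustrative caller loop (a sketch; the match distance decoder does
// something along these lines when decoding the low distance bits):
//
//     symbol = 1;
//     offset = 0;
//     do {
//             rc_bit_add_if_1(probs, rep0, UINT32_C(1) << offset);
//     } while (++offset < limit);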


// Matched literal
#define decode_with_match_bit \
		t_match_byte <<= 1; \
		t_match_bit = t_match_byte & t_offset; \
		t_subcoder_index = t_offset + t_match_bit + symbol; \
		rc_bit(probs[t_subcoder_index], \
				t_offset &= ~t_match_bit, \
				t_offset &= t_match_bit)

#define rc_matched_literal(probs_base_var, match_byte) \
do { \
	uint32_t t_match_byte = (match_byte); \
	uint32_t t_match_bit; \
	uint32_t t_subcoder_index; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
} while (0)
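
// Usage sketch (hypothetical): when the byte at the match distance is
// known, the eight literal bits are decoded against it and the low byte
// of symbol is the result:
//
//     rc_matched_literal(probs, match_byte);
//     uint8_t byte = (uint8_t)symbol;
//
// As long as the decoded bits agree with match_byte's bits, t_offset
// stays 0x100 and the probabilities come from a match-bit-specific
// subtree; after the first mismatch t_offset becomes 0 and the rest
// of the tree is shared.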


/// Decode a bit without using a probability.
//
// NOTE: GCC 13 and Clang/LLVM 16 can, at least on x86-64, optimize the bound
// calculation to use an arithmetic right shift so there's no need to provide
// the alternative code which, according to C99/C11/C23 6.3.1.3-p3, isn't
// perfectly portable: rc_bound = (uint32_t)((int32_t)rc.code >> 31);
#define rc_direct(dest, count_var) \
do { \
	dest = (dest << 1) + 1; \
	rc_normalize(); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	dest += rc_bound; \
	rc.code += rc.range & rc_bound; \
} while (--count_var > 0)
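
// How the sign trick above works: after "rc.code -= rc.range" the top bit
// of rc.code is set exactly when the decoded bit is 0 (the subtraction
// wrapped around). Then:
//
//     bit == 0:  rc_bound == 0xFFFFFFFF, so dest += rc_bound undoes the
//                "+ 1" and rc.code += rc.range undoes the subtraction.
//     bit == 1:  rc_bound == 0, so dest and rc.code are left as they are.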



#define rc_direct_safe(dest, count_var, seq) \
do { \
	rc_normalize_safe(seq); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	rc.code += rc.range & rc_bound; \
	dest = (dest << 1) + (rc_bound + 1); \
} while (--count_var > 0)


//////////////////
// Branchless C //
//////////////////

/// Decode a bit using a branchless method. This reduces the number of
/// mispredicted branches and thus can improve speed.
#define rc_c_bit(prob, action_bit, action_neg) \
do { \
	probability *p = &(prob); \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * *p; \
	uint32_t rc_mask = rc.code >= rc_bound; /* rc_mask = decoded bit */ \
	action_bit; /* action when rc_mask is 0 or 1 */ \
	/* rc_mask becomes 0 if bit is 0 and 0xFFFFFFFF if bit is 1: */ \
	rc_mask = 0U - rc_mask; \
	rc.range &= rc_mask; /* If bit 0: set rc.range = 0 */ \
	rc_bound ^= rc_mask; \
	rc_bound -= rc_mask; /* If bit 1: rc_bound = 0U - rc_bound */ \
	rc.range += rc_bound; \
	rc_bound &= rc_mask; \
	rc.code += rc_bound; \
	action_neg; /* action when rc_mask is 0 or 0xFFFFFFFF */ \
	rc_mask = ~rc_mask; /* If bit 0: all bits are set in rc_mask */ \
	rc_mask &= RC_BIT_MODEL_OFFSET; \
	*p -= (*p + rc_mask) >> RC_MOVE_BITS; \
} while (0)
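
// Net effect of the mask manipulation above, written out per decoded bit:
//
//     bit == 0:  rc.range = rc_bound;  rc.code unchanged;
//                *p += (RC_BIT_MODEL_TOTAL - *p) >> RC_MOVE_BITS;
//     bit == 1:  rc.range -= rc_bound;  rc.code -= rc_bound;
//                *p -= *p >> RC_MOVE_BITS;
//
// i.e. exactly rc_update_0() or rc_update_1() without a branch.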


// Testing on x86-64 gives the impression that only the normal bittrees and
// the fixed-sized reverse bittrees are worth the branchless C code.
// It should be tested on other archs for which there isn't assembly code
// in this file.

// Using addition in "(symbol << 1) + rc_mask" allows use of x86 LEA
// or RISC-V SH1ADD instructions. Compilers might infer it from
// "(symbol << 1) | rc_mask" too if they see that the mask is 0 or 1 but
// the use of addition doesn't require such analysis from compilers.
#if LZMA_RANGE_DECODER_CONFIG & 0x01
#undef rc_bittree_bit
#define rc_bittree_bit(prob) \
	rc_c_bit(prob, \
		symbol = (symbol << 1) + rc_mask, \
		)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x01

#if LZMA_RANGE_DECODER_CONFIG & 0x02
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_c_bit(probs[symbol + 1], symbol += rc_mask, ); \
	rc_c_bit(probs[symbol + 2], symbol += rc_mask << 1, ); \
	rc_c_bit(probs[symbol + 4], symbol += rc_mask << 2, ); \
	rc_c_bit(probs[symbol + 8], symbol += rc_mask << 3, ); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x02

#if LZMA_RANGE_DECODER_CONFIG & 0x04
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_c_bit(probs[symbol], \
		symbol = (symbol << 1) + rc_mask, \
		dest += (value_to_add_if_1) & rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x04


#if LZMA_RANGE_DECODER_CONFIG & 0x08
#undef decode_with_match_bit
#define decode_with_match_bit \
		t_match_byte <<= 1; \
		t_match_bit = t_match_byte & t_offset; \
		t_subcoder_index = t_offset + t_match_bit + symbol; \
		rc_c_bit(probs[t_subcoder_index], \
			symbol = (symbol << 1) + rc_mask, \
			t_offset &= ~t_match_bit ^ rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x08


////////////
// x86-64 //
////////////

#if LZMA_RANGE_DECODER_CONFIG & 0x1F0

// rc_asm_y and rc_asm_n are used as arguments to macros to control which
// strings to include or omit.
#define rc_asm_y(str) str
#define rc_asm_n(str)

// There are a few possible variations for normalization.
// This is the smallest variant which is also used by LZMA SDK.
//
//   - This has a partial register write (the MOV from (%[in_ptr])).
//
//   - INC saves one byte in code size over ADD. False dependency on
//     partial flags from INC shouldn't become a problem on any processor
//     because the instructions after normalization don't read the flags
//     until SUB which sets all flags.
//
#define rc_asm_normalize \
	"cmp	%[top_value], %[range]\n\t" \
	"jae	1f\n\t" \
	"shl	%[shift_bits], %[code]\n\t" \
	"mov	(%[in_ptr]), %b[code]\n\t" \
	"shl	%[shift_bits], %[range]\n\t" \
	"inc	%[in_ptr]\n" \
	"1:\n"

// rc_asm_calc(prob) is roughly equivalent to the C version of rc_if_0(prob)...
//
//     rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     if (rc.code < rc_bound)
//
// ...but the bound is stored in "range":
//
//     t0 = range;
//     range = (range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     t0 -= range;
//     t1 = code;
//     code -= range;
//
// The carry flag (CF) from the last subtraction holds the negation of
// the decoded bit (if CF==0 then the decoded bit is 1).
// The values in t0 and t1 are needed for rc_update_0(prob) and
// rc_update_1(prob). If the bit is 0, rc_update_0(prob)...
//
//     rc.range = rc_bound;
//
// ...has already been done but the "code -= range" has to be reverted using
// the old value stored in t1. (Also, prob needs to be updated.)
//
// If the bit is 1, rc_update_1(prob)...
//
//     rc.range -= rc_bound;
//     rc.code -= rc_bound;
//
// ...is already done for "code" but the value for "range" needs to be taken
// from t0. (Also, prob needs to be updated here as well.)
//
// The assignments from t0 and t1 can be done in a branchless manner with CMOV
// after the instructions from this macro. The CF from SUB tells which moves
// are needed.
#define rc_asm_calc(prob) \
		"mov	%[range], %[t0]\n\t" \
		"shr	%[bit_model_total_bits], %[range]\n\t" \
		"imul	%[" prob "], %[range]\n\t" \
		"sub	%[range], %[t0]\n\t" \
		"mov	%[code], %[t1]\n\t" \
		"sub	%[range], %[code]\n\t"

// Also, prob needs to be updated: The update math depends on the decoded bit.
// It can be expressed in a few slightly different ways but this is fairly
// convenient here:
//
//     prob -= (prob + (bit ? 0 : RC_BIT_MODEL_OFFSET)) >> RC_MOVE_BITS;
//
// To do it in a branchless way when the negation of the decoded bit is in CF,
// both "prob" and "prob + RC_BIT_MODEL_OFFSET" are needed. Then the desired
// value can be picked with CMOV. The addition can be done using LEA without
// affecting CF.
//
// (This prob update method is a tiny bit different from LZMA SDK 23.01.
// In the LZMA SDK a single register is reserved solely for a constant to
// be used with CMOV when updating prob. That is fine since there are enough
// free registers to do so. The method used here uses one fewer register,
// which is valuable with inline assembly.)
//
// * * *
//
// In bittree decoding, each (unrolled) loop iteration decodes one bit
// and needs one prob variable. To make it faster, the prob variable of
// the iteration N+1 is loaded during iteration N. There are two possible
// prob variables to choose from for N+1. Both are loaded from memory and
// the correct one is chosen with CMOV using the same CF as is used for
// other things described above.
//
// This preloading/prefetching requires an extra register. To avoid
// useless moves from "preloaded prob register" to "current prob register",
// the macros swap between the two registers for odd and even iterations.
//
// * * *
//
// Finally, the decoded bit has to be stored in "symbol". Since the negation
// of the bit is in CF, this can be done with SBB: symbol -= CF - 1. That is,
// if the decoded bit is 0 (CF==1) the operation is a no-op "symbol -= 0"
// and when the bit is 1 (CF==0) the operation is "symbol -= 0 - 1" which is
// the same as "symbol += 1".
//
// The instructions for all these things are intertwined for a few reasons:
//   - freeing temporary registers for new use
//   - not modifying CF too early
//   - instruction scheduling
//
// The first and last iterations can cheat a little. For example,
// on the first iteration "symbol" is known to start from 1 so it
// doesn't need to be read; it can even be immediately initialized
// to 2 to prepare for the second iteration of the loop.
//
// * * *
//
// a = number of the current prob variable (0 or 1)
// b = number of the next prob variable (1 or 0)
// *_only = rc_asm_y or _n to include or exclude code marked with them
#define rc_asm_bittree(a, b, first_only, middle_only, last_only) \
	first_only( \
		"movzw	2(%[probs_base]), %[prob" #a "]\n\t" \
		"mov	$2, %[symbol]\n\t" \
		"movzw	4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		/* Note the scaling of 4 instead of 2: */ \
		"movzw	(%[probs_base], %q[symbol], 4), %[prob" #b "]\n\t" \
	) \
	last_only( \
		"add	%[symbol], %[symbol]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob" #a) \
		\
		"cmovae	%[t0], %[range]\n\t" \
		\
	first_only( \
		"movzw	6(%[probs_base]), %[t0]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzw	2(%[probs_base], %q[symbol], 4), %[t0]\n\t" \
		"lea	(%q[symbol], %q[symbol]), %[symbol]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
		\
		"lea	%c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
		"mov	%[symbol], %[t1]\n\t" \
		"cmovae	%[prob" #a "], %[t0]\n\t" \
		\
	first_only( \
		"sbb	$-1, %[symbol]\n\t" \
	) \
	middle_only( \
		"sbb	$-1, %[symbol]\n\t" \
	) \
	last_only( \
		"sbb	%[last_sbb], %[symbol]\n\t" \
	) \
		\
		"shr	%[move_bits], %[t0]\n\t" \
		"sub	%[t0], %[prob" #a "]\n\t" \
		/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
		"mov	%w[prob" #a "], (%[probs_base], %q[t1], 1)\n\t"

// NOTE: The order of variables in __asm__ can affect speed and code size.
#define rc_asm_bittree_n(probs_base_var, final_add, asm_str) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob0; \
	uint32_t t_prob1; \
	\
	__asm__( \
		asm_str \
		: \
		[range]     "+&r"(rc.range), \
		[code]      "+&r"(rc.code), \
		[t0]        "=&r"(t0), \
		[t1]        "=&r"(t1), \
		[prob0]     "=&r"(t_prob0), \
		[prob1]     "=&r"(t_prob1), \
		[symbol]    "=&r"(symbol), \
		[in_ptr]    "+&r"(rc_in_ptr) \
		: \
		[probs_base]           "r"(probs_base_var), \
		[last_sbb]             "n"(-1 - (final_add)), \
		[top_value]            "n"(RC_TOP_VALUE), \
		[shift_bits]           "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset]     "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits]            "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)


#if LZMA_RANGE_DECODER_CONFIG & 0x010
#undef rc_bittree3
#define rc_bittree3(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree6
#define rc_bittree6(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree8
#define rc_bittree8(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x010


// Fixed-sized reverse bittree
//
// This uses the indexing that constructs the final value in symbol directly.
// add    = 1,  2,  4,  8
// dcur   = -,  4,  8, 16
// dnext0 = 4,  8, 16,  -
// dnext1 = 6, 12, 24,  -
#define rc_asm_bittree_rev(a, b, add, dcur, dnext0, dnext1, \
		first_only, middle_only, last_only) \
	first_only( \
		"movzw	2(%[probs_base]), %[prob" #a "]\n\t" \
		"xor	%[symbol], %[symbol]\n\t" \
		"movzw	4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzw	" #dnext0 "(%[probs_base], %q[symbol], 2), " \
			"%[prob" #b "]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob" #a) \
		\
		"cmovae	%[t0], %[range]\n\t" \
		\
	first_only( \
		"movzw	6(%[probs_base]), %[t0]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzw	" #dnext1 "(%[probs_base], %q[symbol], 2), %[t0]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
		\
		"lea	" #add "(%q[symbol]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
	middle_only( \
		"mov	%[symbol], %[t1]\n\t" \
	) \
	last_only( \
		"mov	%[symbol], %[t1]\n\t" \
	) \
		"cmovae	%[t0], %[symbol]\n\t" \
		"lea	%c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
		"cmovae	%[prob" #a "], %[t0]\n\t" \
		\
		"shr	%[move_bits], %[t0]\n\t" \
		"sub	%[t0], %[prob" #a "]\n\t" \
	first_only( \
		"mov	%w[prob" #a "], 2(%[probs_base])\n\t" \
	) \
	middle_only( \
		"mov	%w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	) \
	last_only( \
		"mov	%w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	)

#if LZMA_RANGE_DECODER_CONFIG & 0x020
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs_base_var) \
rc_asm_bittree_n(probs_base_var, 4, \
	rc_asm_bittree_rev(0, 1, 1,  -,  4,  6, rc_asm_y, rc_asm_n, rc_asm_n) \
	rc_asm_bittree_rev(1, 0, 2,  4,  8, 12, rc_asm_n, rc_asm_y, rc_asm_n) \
	rc_asm_bittree_rev(0, 1, 4,  8, 16, 24, rc_asm_n, rc_asm_y, rc_asm_n) \
	rc_asm_bittree_rev(1, 0, 8, 16,  -,  -, rc_asm_n, rc_asm_n, rc_asm_y) \
)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x020


#if LZMA_RANGE_DECODER_CONFIG & 0x040
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs_base_var, dest_var, value_to_add_if_1) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t2 = (value_to_add_if_1); \
	uint32_t t_prob; \
	uint32_t t_index; \
	\
	__asm__( \
		"movzw	(%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		"mov	%[symbol], %[index]\n\t" \
		\
		"add	%[dest], %[t2]\n\t" \
		"add	%[symbol], %[symbol]\n\t" \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae	%[t0], %[range]\n\t" \
		"lea	%c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
		"cmovae	%[prob], %[t0]\n\t" \
		\
		"cmovae	%[t2], %[dest]\n\t" \
		"sbb	$-1, %[symbol]\n\t" \
		\
		"sar	%[move_bits], %[t0]\n\t" \
		"sub	%[t0], %[prob]\n\t" \
		"mov	%w[prob], (%[probs_base], %q[index], 2)" \
		: \
		[range]     "+&r"(rc.range), \
		[code]      "+&r"(rc.code), \
		[t0]        "=&r"(t0), \
		[t1]        "=&r"(t1), \
		[prob]      "=&r"(t_prob), \
		[index]     "=&r"(t_index), \
		[symbol]    "+&r"(symbol), \
		[t2]        "+&r"(t2), \
		[dest]      "+&r"(dest_var), \
		[in_ptr]    "+&r"(rc_in_ptr) \
		: \
		[probs_base]           "r"(probs_base_var), \
		[top_value]            "n"(RC_TOP_VALUE), \
		[shift_bits]           "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset]     "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits]            "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x040


// Literal decoding uses a normal 8-bit bittree but a literal with a match
// byte is more complex in picking the probability variable from the correct
// subtree. This doesn't use preloading/prefetching of the next prob because
// there are four choices instead of two.
//
// FIXME? The first iteration starts with symbol = 1 so it could be optimized
// by a tiny amount.
#define rc_asm_matched_literal(nonlast_only) \
		"add	%[offset], %[symbol]\n\t" \
		"and	%[offset], %[match_bit]\n\t" \
		"add	%[match_bit], %[symbol]\n\t" \
		\
		"movzw	(%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		\
		"add	%[symbol], %[symbol]\n\t" \
		\
	nonlast_only( \
		"xor	%[match_bit], %[offset]\n\t" \
		"add	%[match_byte], %[match_byte]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae	%[t0], %[range]\n\t" \
		"lea	%c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
		"mov	%[symbol], %[t1]\n\t" \
		"cmovae	%[prob], %[t0]\n\t" \
		\
	nonlast_only( \
		"cmovae	%[match_bit], %[offset]\n\t" \
		"mov	%[match_byte], %[match_bit]\n\t" \
	) \
		\
		"sbb	$-1, %[symbol]\n\t" \
		\
		"shr	%[move_bits], %[t0]\n\t" \
		/* Undo symbol += match_bit + offset: */ \
		"and	$0x1FF, %[symbol]\n\t" \
		"sub	%[t0], %[prob]\n\t" \
		\
		/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
		"mov	%w[prob], (%[probs_base], %q[t1], 1)\n\t"


#if LZMA_RANGE_DECODER_CONFIG & 0x080
#undef rc_matched_literal
#define rc_matched_literal(probs_base_var, match_byte_value) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob; \
	uint32_t t_match_byte = (uint32_t)(match_byte_value) << 1; \
	uint32_t t_match_bit = t_match_byte; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	\
	__asm__( \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_n) \
		: \
		[range]       "+&r"(rc.range), \
		[code]        "+&r"(rc.code), \
		[t0]          "=&r"(t0), \
		[t1]          "=&r"(t1), \
		[prob]        "=&r"(t_prob), \
		[match_bit]   "+&r"(t_match_bit), \
		[symbol]      "+&r"(symbol), \
		[match_byte]  "+&r"(t_match_byte), \
		[offset]      "+&r"(t_offset), \
		[in_ptr]      "+&r"(rc_in_ptr) \
		: \
		[probs_base]           "r"(probs_base_var), \
		[top_value]            "n"(RC_TOP_VALUE), \
		[shift_bits]           "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset]     "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits]            "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x080


// Doing the loop in asm instead of C seems to help a little.
#if LZMA_RANGE_DECODER_CONFIG & 0x100
#undef rc_direct
#define rc_direct(dest_var, count_var) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	\
	__asm__( \
		"2:\n\t" \
		"add	%[dest], %[dest]\n\t" \
		"lea	1(%q[dest]), %[t1]\n\t" \
		\
		rc_asm_normalize \
		\
		"shr	$1, %[range]\n\t" \
		"mov	%[code], %[t0]\n\t" \
		"sub	%[range], %[code]\n\t" \
		"cmovns	%[t1], %[dest]\n\t" \
		"cmovs	%[t0], %[code]\n\t" \
		"dec	%[count]\n\t" \
		"jnz	2b\n\t" \
		: \
		[range]       "+&r"(rc.range), \
		[code]        "+&r"(rc.code), \
		[t0]          "=&r"(t0), \
		[t1]          "=&r"(t1), \
		[dest]        "+&r"(dest_var), \
		[count]       "+&r"(count_var), \
		[in_ptr]      "+&r"(rc_in_ptr) \
		: \
		[top_value]   "n"(RC_TOP_VALUE), \
		[shift_bits]  "n"(RC_SHIFT_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x100

#endif // x86_64

#endif