// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       range_decoder.h
/// \brief      Range Decoder
///
//  Authors:    Igor Pavlov
//              Lasse Collin
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_RANGE_DECODER_H
#define LZMA_RANGE_DECODER_H

#include "range_common.h"


// Choose which range decoder variants to use with a bitmask.
// If no bits are set, only the basic version is used.
// If more than one version is selected for the same feature,
// the last one on the list below is used.
//
// Bitwise-or of the following enables the branchless C versions:
//   0x01   normal bittrees
//   0x02   fixed-sized reverse bittrees
//   0x04   variable-sized reverse bittrees (not faster)
//   0x08   matched literal (not faster)
//
// GCC & Clang compatible x86-64 inline assembly:
//   0x010   normal bittrees
//   0x020   fixed-sized reverse bittrees
//   0x040   variable-sized reverse bittrees
//   0x080   matched literal
//   0x100   direct bits
//
// The default can be overridden at build time by defining
// LZMA_RANGE_DECODER_CONFIG to the desired mask.
//
// 2024-02-22: Feedback from benchmarks:
//   - Branchless C (0x003) can be better than the basic version on x86-64
//     but it's often slightly worse on other archs. Since the assembly
//     version is much better on x86-64, branchless C is not used at all.
//   - With x86-64 assembly there are slight differences between GCC and
//     Clang and between different processors. Overall 0x1F0 seems to be
//     the best choice.
#ifndef LZMA_RANGE_DECODER_CONFIG
#	if defined(__x86_64__) && !defined(__ILP32__) \
			&& (defined(__GNUC__) || defined(__clang__))
#		define LZMA_RANGE_DECODER_CONFIG 0x1F0
#	else
#		define LZMA_RANGE_DECODER_CONFIG 0
#	endif
#endif


// Negative RC_BIT_MODEL_TOTAL but with the lowest RC_MOVE_BITS bits flipped.
// This is useful for updating probability variables in branchless decoding:
//
//     uint32_t decoded_bit = ...;
//     uint32_t tmp = RC_BIT_MODEL_OFFSET;
//     tmp &= decoded_bit - 1;
//     prob -= (prob + tmp) >> RC_MOVE_BITS;
#define RC_BIT_MODEL_OFFSET \
	((UINT32_C(1) << RC_MOVE_BITS) - 1 - RC_BIT_MODEL_TOTAL)
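

// A worked example (assuming the usual constants from range_common.h,
// RC_BIT_MODEL_TOTAL == 2048 and RC_MOVE_BITS == 5): with prob == 1024 the
// branchless update above gives the same results as the branching updates
// in rc_update_0() and rc_update_1() further below:
//
//     decoded_bit == 1:  tmp == 0, so prob -= (1024 + 0) >> 5, i.e. 992,
//                        the same as "prob -= prob >> RC_MOVE_BITS".
//     decoded_bit == 0:  tmp == RC_BIT_MODEL_OFFSET; the 32-bit subtraction
//                        wraps around, but the low 16 bits stored back into
//                        the probability variable come out as 1056, the same
//                        as "prob += (2048 - 1024) >> 5".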


typedef struct {
	uint32_t range;
	uint32_t code;
	uint32_t init_bytes_left;
} lzma_range_decoder;


/// Reads the first five bytes to initialize the range decoder.
static inline lzma_ret
rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,
		size_t *restrict in_pos, size_t in_size)
{
	while (rc->init_bytes_left > 0) {
		if (*in_pos == in_size)
			return LZMA_OK;

		// The first byte is always 0x00. It could have been omitted
		// in LZMA2 but it wasn't, so one byte is wasted in every
		// LZMA2 chunk.
		if (rc->init_bytes_left == 5 && in[*in_pos] != 0x00)
			return LZMA_DATA_ERROR;

		rc->code = (rc->code << 8) | in[*in_pos];
		++*in_pos;
		--rc->init_bytes_left;
	}

	return LZMA_STREAM_END;
}
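

// A minimal usage sketch (illustrative; the real LZMA/LZMA2 decoders wrap
// this in their own state machines): feed bytes until all five init bytes
// have been consumed, resuming across calls when the input runs out.
//
//     rc_reset(coder->rc);                // sets init_bytes_left = 5
//     ...
//     switch (rc_read_init(&coder->rc, in, in_pos, in_size)) {
//     case LZMA_STREAM_END:
//         break;                          // initialized, start decoding
//     case LZMA_OK:
//         return LZMA_OK;                 // need more input, call again
//     default:
//         return LZMA_DATA_ERROR;         // the first byte wasn't 0x00
//     }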


/// Makes local copies of the range decoder and *in_pos variables. Doing this
/// improves speed significantly. The range decoder macros also expect the
/// variables 'in' and 'in_size' to be defined.
#define rc_to_local(range_decoder, in_pos, fast_mode_in_required) \
	lzma_range_decoder rc = range_decoder; \
	const uint8_t *rc_in_ptr = in + (in_pos); \
	const uint8_t *rc_in_end = in + in_size; \
	const uint8_t *rc_in_fast_end \
			= (rc_in_end - rc_in_ptr) <= (fast_mode_in_required) \
			? rc_in_ptr \
			: rc_in_end - (fast_mode_in_required); \
	(void)rc_in_fast_end; /* Silence a warning with HAVE_SMALL. */ \
	uint32_t rc_bound


/// Evaluates to true if there is enough input remaining to use fast mode.
#define rc_is_fast_allowed() (rc_in_ptr < rc_in_fast_end)


/// Stores the local copies back to the range decoder structure.
#define rc_from_local(range_decoder, in_pos) \
do { \
	range_decoder = rc; \
	in_pos = (size_t)(rc_in_ptr - in); \
} while (0)
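

// A minimal sketch of how decoder code typically uses these macros
// (illustrative names; not copied from lzma_decoder.c):
//
//     rc_to_local(coder->rc, *in_pos, loop_input_max);
//
//     while (...) {
//         // rc_if_0(), rc_bittree*(), rc_direct(), etc. are used here.
//         // They operate on the local rc, rc_in_ptr and rc_in_end.
//     }
//
// out:  // The "_safe" macros jump here when input runs out.
//     rc_from_local(coder->rc, *in_pos);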


/// Resets the range decoder structure.
#define rc_reset(range_decoder) \
do { \
	(range_decoder).range = UINT32_MAX; \
	(range_decoder).code = 0; \
	(range_decoder).init_bytes_left = 5; \
} while (0)


/// When decoding has been properly finished, rc.code is always zero unless
/// the input stream is corrupt. So checking this can catch some corrupt
/// files, especially if they don't have any other integrity check.
#define rc_is_finished(range_decoder) \
	((range_decoder).code == 0)


// Read the next input byte if needed.
#define rc_normalize() \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)


/// Like rc_normalize() but if there is no more input available, "goto out"
/// is used to jump out of the main decoder loop. The "_safe" macros are
/// used in the resumable decoder mode: the current sequence is saved in
/// coder->sequence so that decoding can continue from that point later.
#define rc_normalize_safe(seq) \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		if (rc_in_ptr == rc_in_end) { \
			coder->sequence = seq; \
			goto out; \
		} \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)


/// Start decoding a bit. This must be used together with rc_update_0()
/// and rc_update_1():
///
///     rc_if_0(prob) {
///         rc_update_0(prob);
///         // Do something
///     } else {
///         rc_update_1(prob);
///         // Do something else
///     }
///
#define rc_if_0(prob) \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


#define rc_if_0_safe(prob, seq) \
	rc_normalize_safe(seq); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 0.
///
/// The x86-64 assembly uses the commented-out method, but as C code the
/// first version seems to be slightly faster, at least on x86-64.
#define rc_update_0(prob) \
do { \
	rc.range = rc_bound; \
	prob += (RC_BIT_MODEL_TOTAL - (prob)) >> RC_MOVE_BITS; \
	/* prob -= ((prob) + RC_BIT_MODEL_OFFSET) >> RC_MOVE_BITS; */ \
} while (0)


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 1.
#define rc_update_1(prob) \
do { \
	rc.range -= rc_bound; \
	rc.code -= rc_bound; \
	prob -= (prob) >> RC_MOVE_BITS; \
} while (0)


/// Decodes one bit and runs action0 or action1 depending on the decoded bit.
/// This macro is used as the last step in bittree reverse decoders since
/// those don't use "symbol" for anything other than indexing the probability
/// arrays.
#define rc_bit_last(prob, action0, action1) \
do { \
	rc_if_0(prob) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


#define rc_bit_last_safe(prob, action0, action1, seq) \
do { \
	rc_if_0_safe(prob, seq) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


/// Decodes one bit, updates "symbol", and runs action0 or action1 depending
/// on the decoded bit.
#define rc_bit(prob, action0, action1) \
	rc_bit_last(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1);


#define rc_bit_safe(prob, action0, action1, seq) \
	rc_bit_last_safe(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1, \
		seq);

// Unroll fixed-sized bittree decoding.
//
// A compile-time constant in final_add can be used to get rid of the high
// bit from symbol that is used for the array indexing (1U << bittree_bits).
// final_add may also be used to add an offset to the result (the LZMA
// length decoder does that).
//
// The reason to have final_add here is that in the asm code the addition
// can be done for free: in x86-64 there is the SBB instruction with -1 as
// the immediate value, and final_add is combined with that value.
#define rc_bittree_bit(prob) \
	rc_bit(prob, , )

#define rc_bittree3(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree6(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree8(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)
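
// For illustration of final_add: after rc_bittree3() the three decoded bits
// sit below the leading 1 from "symbol = 1", so symbol is in [8, 15].
// A compile-time constant strips that high bit and can add an offset in the
// same step (a sketch; "base" is just an illustrative name):
//
//     rc_bittree3(probs, UINT32_C(0) - 8);     // symbol ends up in [0, 7]
//     rc_bittree3(probs, base - UINT32_C(8));  // symbol in [base, base + 7]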


// Fixed-sized reverse bittree
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_bit_last(probs[symbol + 1], , symbol += 1); \
	rc_bit_last(probs[symbol + 2], , symbol += 2); \
	rc_bit_last(probs[symbol + 4], , symbol += 4); \
	rc_bit_last(probs[symbol + 8], , symbol += 8); \
} while (0)
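
// In this reverse bittree the first decoded bit becomes the least
// significant bit of the result, so after the four steps above
// symbol == bit0 + 2 * bit1 + 4 * bit2 + 8 * bit3 (bit0 decoded first).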


// Decode one bit from a variable-sized reverse bittree. The loop is done
// in the code that uses this macro. This could be changed if the assembly
// version benefited from having the loop done in assembly, but it didn't
// seem so in early 2024.
//
// Also, if the loop were done here, the loop counter would likely be local
// to the macro so that it wouldn't modify yet another input variable.
// If a _safe version of a macro with a loop were made, a modifiable
// input variable couldn't be avoided though.
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_bit(probs[symbol], \
		, \
		dest += value_to_add_if_1);
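
// A minimal sketch of the caller-side loop (illustrative names, not the
// exact code from lzma_decoder.c):
//
//     symbol = 1;
//     offset = 0;
//     do {
//         rc_bit_add_if_1(probs, dest, UINT32_C(1) << offset);
//     } while (++offset < limit);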


// Matched literal
#define decode_with_match_bit \
		t_match_byte <<= 1; \
		t_match_bit = t_match_byte & t_offset; \
		t_subcoder_index = t_offset + t_match_bit + symbol; \
		rc_bit(probs[t_subcoder_index], \
				t_offset &= ~t_match_bit, \
				t_offset &= t_match_bit)

#define rc_matched_literal(probs_base_var, match_byte) \
do { \
	uint32_t t_match_byte = (match_byte); \
	uint32_t t_match_bit; \
	uint32_t t_subcoder_index; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
} while (0)
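
// How t_offset behaves above: it starts as 0x100 and stays 0x100 as long as
// each decoded bit equals the corresponding bit of match_byte, so the
// probability is picked from the subtree selected by the match byte's bit.
// Once a decoded bit differs from the match byte, t_offset drops to zero,
// t_match_bit stays zero from then on, and the rest of the literal is
// decoded from the plain (t_offset == 0) part of the probability array.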


/// Decode a bit without using a probability.
//
// NOTE: GCC 13 and Clang/LLVM 16 can, at least on x86-64, optimize the bound
// calculation to use an arithmetic right shift so there's no need to provide
// the alternative code which, according to C99/C11/C23 6.3.1.3-p3, isn't
// perfectly portable: rc_bound = (uint32_t)((int32_t)rc.code >> 31);
#define rc_direct(dest, count_var) \
do { \
	dest = (dest << 1) + 1; \
	rc_normalize(); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	dest += rc_bound; \
	rc.code += rc.range & rc_bound; \
} while (--count_var > 0)
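
// After the normalization above, rc_bound is 0xFFFFFFFF when the decoded bit
// is 0 (rc.code wrapped below zero, so the subtraction of rc.range is undone
// and the optimistic "+ 1" in dest is cancelled) and 0 when the bit is 1.
// A minimal usage sketch (illustrative names): append four probability-free
// bits to "dist", most significant of the new bits first:
//
//     uint32_t count = 4;
//     rc_direct(dist, count);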


#define rc_direct_safe(dest, count_var, seq) \
do { \
	rc_normalize_safe(seq); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	rc.code += rc.range & rc_bound; \
	dest = (dest << 1) + (rc_bound + 1); \
} while (--count_var > 0)


//////////////////
// Branchless C //
//////////////////

/// Decode a bit using a branchless method. This reduces the number of
/// mispredicted branches and thus can improve speed.
#define rc_c_bit(prob, action_bit, action_neg) \
do { \
	probability *p = &(prob); \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * *p; \
	uint32_t rc_mask = rc.code >= rc_bound; /* rc_mask = decoded bit */ \
	action_bit; /* action when rc_mask is 0 or 1 */ \
	/* rc_mask becomes 0 if bit is 0 and 0xFFFFFFFF if bit is 1: */ \
	rc_mask = 0U - rc_mask; \
	rc.range &= rc_mask; /* If bit 0: set rc.range = 0 */ \
	rc_bound ^= rc_mask; \
	rc_bound -= rc_mask; /* If bit 1: rc_bound = 0U - rc_bound */ \
	rc.range += rc_bound; \
	rc_bound &= rc_mask; \
	rc.code += rc_bound; \
	action_neg; /* action when rc_mask is 0 or 0xFFFFFFFF */ \
	rc_mask = ~rc_mask; /* If bit 0: all bits are set in rc_mask */ \
	rc_mask &= RC_BIT_MODEL_OFFSET; \
	*p -= (*p + rc_mask) >> RC_MOVE_BITS; \
} while (0)
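
// A short trace of the mask algebra above (not from the original comments):
// after "rc_mask = 0U - rc_mask" the mask is 0 for a 0-bit and 0xFFFFFFFF
// for a 1-bit, so the net effect is
//
//     bit == 0:  rc.range = rc_bound               (like rc_update_0())
//                rc.code  unchanged
//                *p -= (*p + RC_BIT_MODEL_OFFSET) >> RC_MOVE_BITS;
//     bit == 1:  rc.range = rc.range - rc_bound    (like rc_update_1())
//                rc.code  = rc.code - rc_bound
//                *p -= *p >> RC_MOVE_BITS;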


// Testing on x86-64 gives the impression that only the normal bittrees and
// the fixed-sized reverse bittrees are worth the branchless C code.
// It should be tested on other archs for which there isn't assembly code
// in this file.

// Using addition in "(symbol << 1) + rc_mask" allows use of x86 LEA
// or RISC-V SH1ADD instructions. Compilers might infer it from
// "(symbol << 1) | rc_mask" too if they see that the mask is 0 or 1, but
// the use of addition doesn't require such analysis from compilers.
#if LZMA_RANGE_DECODER_CONFIG & 0x01
#undef rc_bittree_bit
#define rc_bittree_bit(prob) \
	rc_c_bit(prob, \
		symbol = (symbol << 1) + rc_mask, \
		)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x01

#if LZMA_RANGE_DECODER_CONFIG & 0x02
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_c_bit(probs[symbol + 1], symbol += rc_mask, ); \
	rc_c_bit(probs[symbol + 2], symbol += rc_mask << 1, ); \
	rc_c_bit(probs[symbol + 4], symbol += rc_mask << 2, ); \
	rc_c_bit(probs[symbol + 8], symbol += rc_mask << 3, ); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x02

#if LZMA_RANGE_DECODER_CONFIG & 0x04
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_c_bit(probs[symbol], \
		symbol = (symbol << 1) + rc_mask, \
		dest += (value_to_add_if_1) & rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x04


#if LZMA_RANGE_DECODER_CONFIG & 0x08
#undef decode_with_match_bit
#define decode_with_match_bit \
		t_match_byte <<= 1; \
		t_match_bit = t_match_byte & t_offset; \
		t_subcoder_index = t_offset + t_match_bit + symbol; \
		rc_c_bit(probs[t_subcoder_index], \
			symbol = (symbol << 1) + rc_mask, \
			t_offset &= ~t_match_bit ^ rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x08


////////////
// x86-64 //
////////////

#if LZMA_RANGE_DECODER_CONFIG & 0x1F0

// rc_asm_y and rc_asm_n are used as arguments to macros to control which
// strings to include or omit.
#define rc_asm_y(str) str
#define rc_asm_n(str)

// There are a few possible variations for normalization.
// This is the smallest variant which is also used by LZMA SDK.
//
//   - This has a partial register write (the MOV from (%[in_ptr])).
//
//   - INC saves one byte in code size over ADD. False dependency on
//     partial flags from INC shouldn't become a problem on any processor
//     because the instructions after normalization don't read the flags
//     until SUB which sets all flags.
//
#define rc_asm_normalize \
	"cmp	%[top_value], %[range]\n\t" \
	"jae	1f\n\t" \
	"shl	%[shift_bits], %[code]\n\t" \
	"mov	(%[in_ptr]), %b[code]\n\t" \
	"shl	%[shift_bits], %[range]\n\t" \
	"inc	%[in_ptr]\n" \
	"1:\n"
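
// For reference, this corresponds to the C rc_normalize() earlier in this
// file; the MOV into %b[code] works because the SHL has just cleared the
// low eight bits of code:
//
//     if (rc.range < RC_TOP_VALUE) {
//         rc.range <<= RC_SHIFT_BITS;
//         rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++;
//     }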

// rc_asm_calc(prob) is roughly equivalent to the C version of rc_if_0(prob)...
//
//     rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     if (rc.code < rc_bound)
//
// ...but the bound is stored in "range":
//
//     t0 = range;
//     range = (range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     t0 -= range;
//     t1 = code;
//     code -= range;
//
// The carry flag (CF) from the last subtraction holds the negation of
// the decoded bit (if CF==0 then the decoded bit is 1).
// The values in t0 and t1 are needed for rc_update_0(prob) and
// rc_update_1(prob). If the bit is 0, rc_update_0(prob)...
//
//     rc.range = rc_bound;
//
// ...has already been done but the "code -= range" has to be reverted using
// the old value stored in t1. (Also, prob needs to be updated.)
//
// If the bit is 1, rc_update_1(prob)...
//
//     rc.range -= rc_bound;
//     rc.code -= rc_bound;
//
// ...is already done for "code" but the value for "range" needs to be taken
// from t0. (Also, prob needs to be updated here as well.)
//
// The assignments from t0 and t1 can be done in a branchless manner with CMOV
// after the instructions from this macro. The CF from SUB tells which moves
// are needed.
#define rc_asm_calc(prob) \
		"mov	%[range], %[t0]\n\t" \
		"shr	%[bit_model_total_bits], %[range]\n\t" \
		"imul	%[" prob "], %[range]\n\t" \
		"sub	%[range], %[t0]\n\t" \
		"mov	%[code], %[t1]\n\t" \
		"sub	%[range], %[code]\n\t"

// Also, prob needs to be updated: the update math depends on the decoded bit.
// It can be expressed in a few slightly different ways but this is fairly
// convenient here:
//
//     prob -= (prob + (bit ? 0 : RC_BIT_MODEL_OFFSET)) >> RC_MOVE_BITS;
//
// To do it in a branchless way when the negation of the decoded bit is in CF,
// both "prob" and "prob + RC_BIT_MODEL_OFFSET" are needed. Then the desired
// value can be picked with CMOV. The addition can be done using LEA without
// affecting CF.
//
// (This prob update method is a tiny bit different from LZMA SDK 23.01.
// In the LZMA SDK a single register is reserved solely for a constant to
// be used with CMOV when updating prob. That is fine since there are enough
// free registers to do so. The method used here uses one fewer register,
// which is valuable with inline assembly.)
//
// * * *
//
// In bittree decoding, each (unrolled) loop iteration decodes one bit
// and needs one prob variable. To make it faster, the prob variable of
// the iteration N+1 is loaded during iteration N. There are two possible
// prob variables to choose from for N+1. Both are loaded from memory and
// the correct one is chosen with CMOV using the same CF as is used for
// the other things described above.
//
// This preloading/prefetching requires an extra register. To avoid
// useless moves from the "preloaded prob register" to the "current prob
// register", the macros swap between the two registers for odd and even
// iterations.
//
// * * *
//
// Finally, the decoded bit has to be stored in "symbol". Since the negation
// of the bit is in CF, this can be done with SBB: symbol -= CF - 1. That is,
// if the decoded bit is 0 (CF==1) the operation is a no-op "symbol -= 0"
// and when the bit is 1 (CF==0) the operation is "symbol -= 0 - 1" which is
// the same as "symbol += 1".
//
// The instructions for all things are intertwined for a few reasons:
//   - freeing temporary registers for new use
//   - not modifying CF too early
//   - instruction scheduling
//
// The first and last iterations can cheat a little. For example,
// on the first iteration "symbol" is known to start from 1 so it
// doesn't need to be read; it can even be immediately initialized
// to 2 to prepare for the second iteration of the loop.
//
// * * *
//
// a = number of the current prob variable (0 or 1)
// b = number of the next prob variable (1 or 0)
// *_only = rc_asm_y or _n to include or exclude code marked with them
#define rc_asm_bittree(a, b, first_only, middle_only, last_only) \
	first_only( \
		"movzw	2(%[probs_base]), %[prob" #a "]\n\t" \
		"mov	$2, %[symbol]\n\t" \
		"movzw	4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		/* Note the scaling of 4 instead of 2: */ \
		"movzw	(%[probs_base], %q[symbol], 4), %[prob" #b "]\n\t" \
	) \
	last_only( \
		"add	%[symbol], %[symbol]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob" #a) \
		\
		"cmovae	%[t0], %[range]\n\t" \
		\
	first_only( \
		"movzw	6(%[probs_base]), %[t0]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzw	2(%[probs_base], %q[symbol], 4), %[t0]\n\t" \
		"lea	(%q[symbol], %q[symbol]), %[symbol]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
		\
		"lea	%c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
		"mov	%[symbol], %[t1]\n\t" \
		"cmovae	%[prob" #a "], %[t0]\n\t" \
		\
	first_only( \
		"sbb	$-1, %[symbol]\n\t" \
	) \
	middle_only( \
		"sbb	$-1, %[symbol]\n\t" \
	) \
	last_only( \
		"sbb	%[last_sbb], %[symbol]\n\t" \
	) \
		\
		"shr	%[move_bits], %[t0]\n\t" \
		"sub	%[t0], %[prob" #a "]\n\t" \
		/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
		"mov	%w[prob" #a "], (%[probs_base], %q[t1], 1)\n\t"

// NOTE: The order of variables in __asm__ can affect speed and code size.
#define rc_asm_bittree_n(probs_base_var, final_add, asm_str) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob0; \
	uint32_t t_prob1; \
	\
	__asm__( \
		asm_str \
		: \
		[range]     "+&r"(rc.range), \
		[code]      "+&r"(rc.code), \
		[t0]        "=&r"(t0), \
		[t1]        "=&r"(t1), \
		[prob0]     "=&r"(t_prob0), \
		[prob1]     "=&r"(t_prob1), \
		[symbol]    "=&r"(symbol), \
		[in_ptr]    "+&r"(rc_in_ptr) \
		: \
		[probs_base]           "r"(probs_base_var), \
		[last_sbb]             "n"(-1 - (final_add)), \
		[top_value]            "n"(RC_TOP_VALUE), \
		[shift_bits]           "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset]     "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits]            "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)


#if LZMA_RANGE_DECODER_CONFIG & 0x010
#undef rc_bittree3
#define rc_bittree3(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree6
#define rc_bittree6(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree8
#define rc_bittree8(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x010


// Fixed-sized reverse bittree
//
// This uses the indexing that constructs the final value in symbol directly.
// add    = 1,  2,   4,  8
// dcur   = -,  4,   8, 16
// dnext0 = 4,   8, 16,  -
// dnext1 = 6,  12, 24,  -
#define rc_asm_bittree_rev(a, b, add, dcur, dnext0, dnext1, \
		first_only, middle_only, last_only) \
	first_only( \
		"movzw	2(%[probs_base]), %[prob" #a "]\n\t" \
		"xor	%[symbol], %[symbol]\n\t" \
		"movzw	4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzw	" #dnext0 "(%[probs_base], %q[symbol], 2), " \
			"%[prob" #b "]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob" #a) \
		\
		"cmovae	%[t0], %[range]\n\t" \
		\
	first_only( \
		"movzw	6(%[probs_base]), %[t0]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzw	" #dnext1 "(%[probs_base], %q[symbol], 2), %[t0]\n\t" \
		"cmovae	%[t0], %[prob" #b "]\n\t" \
	) \
		\
		"lea	" #add "(%q[symbol]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
	middle_only( \
		"mov	%[symbol], %[t1]\n\t" \
	) \
	last_only( \
		"mov	%[symbol], %[t1]\n\t" \
	) \
		"cmovae	%[t0], %[symbol]\n\t" \
		"lea	%c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
		"cmovae	%[prob" #a "], %[t0]\n\t" \
		\
		"shr	%[move_bits], %[t0]\n\t" \
		"sub	%[t0], %[prob" #a "]\n\t" \
	first_only( \
		"mov	%w[prob" #a "], 2(%[probs_base])\n\t" \
	) \
	middle_only( \
		"mov	%w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	) \
	last_only( \
		"mov	%w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	)

#if LZMA_RANGE_DECODER_CONFIG & 0x020
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs_base_var) \
rc_asm_bittree_n(probs_base_var, 4, \
	rc_asm_bittree_rev(0, 1, 1,  -,  4,  6, rc_asm_y, rc_asm_n, rc_asm_n) \
	rc_asm_bittree_rev(1, 0, 2,  4,  8, 12, rc_asm_n, rc_asm_y, rc_asm_n) \
	rc_asm_bittree_rev(0, 1, 4,  8, 16, 24, rc_asm_n, rc_asm_y, rc_asm_n) \
	rc_asm_bittree_rev(1, 0, 8, 16,  -,  -, rc_asm_n, rc_asm_n, rc_asm_y) \
)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x020


#if LZMA_RANGE_DECODER_CONFIG & 0x040
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs_base_var, dest_var, value_to_add_if_1) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t2 = (value_to_add_if_1); \
	uint32_t t_prob; \
	uint32_t t_index; \
	\
	__asm__( \
		"movzw	(%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		"mov	%[symbol], %[index]\n\t" \
		\
		"add	%[dest], %[t2]\n\t" \
		"add	%[symbol], %[symbol]\n\t" \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae	%[t0], %[range]\n\t" \
		"lea	%c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
		"cmovae	%[prob], %[t0]\n\t" \
		\
		"cmovae	%[t2], %[dest]\n\t" \
		"sbb	$-1, %[symbol]\n\t" \
		\
		"sar	%[move_bits], %[t0]\n\t" \
		"sub	%[t0], %[prob]\n\t" \
		"mov	%w[prob], (%[probs_base], %q[index], 2)" \
		: \
		[range]     "+&r"(rc.range), \
		[code]      "+&r"(rc.code), \
		[t0]        "=&r"(t0), \
		[t1]        "=&r"(t1), \
		[prob]      "=&r"(t_prob), \
		[index]     "=&r"(t_index), \
		[symbol]    "+&r"(symbol), \
		[t2]        "+&r"(t2), \
		[dest]      "+&r"(dest_var), \
		[in_ptr]    "+&r"(rc_in_ptr) \
		: \
		[probs_base]           "r"(probs_base_var), \
		[top_value]            "n"(RC_TOP_VALUE), \
		[shift_bits]           "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset]     "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits]            "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x040


// Literal decoding uses a normal 8-bit bittree, but a literal with a match
// byte is more complex because the probability variable has to be picked
// from the correct subtree. This doesn't use preloading/prefetching of the
// next prob because there are four choices instead of two.
//
// FIXME? The first iteration starts with symbol = 1 so it could be optimized
// by a tiny amount.
#define rc_asm_matched_literal(nonlast_only) \
		"add	%[offset], %[symbol]\n\t" \
		"and	%[offset], %[match_bit]\n\t" \
		"add	%[match_bit], %[symbol]\n\t" \
		\
		"movzw	(%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		\
		"add	%[symbol], %[symbol]\n\t" \
		\
	nonlast_only( \
		"xor	%[match_bit], %[offset]\n\t" \
		"add	%[match_byte], %[match_byte]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae	%[t0], %[range]\n\t" \
		"lea	%c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb	%[t1], %[code]\n\t" \
		"mov	%[symbol], %[t1]\n\t" \
		"cmovae	%[prob], %[t0]\n\t" \
		\
	nonlast_only( \
		"cmovae	%[match_bit], %[offset]\n\t" \
		"mov	%[match_byte], %[match_bit]\n\t" \
	) \
		\
		"sbb	$-1, %[symbol]\n\t" \
		\
		"shr	%[move_bits], %[t0]\n\t" \
		/* Undo symbol += match_bit + offset: */ \
		"and	$0x1FF, %[symbol]\n\t" \
		"sub	%[t0], %[prob]\n\t" \
		\
		/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
		"mov	%w[prob], (%[probs_base], %q[t1], 1)\n\t"


#if LZMA_RANGE_DECODER_CONFIG & 0x080
#undef rc_matched_literal
#define rc_matched_literal(probs_base_var, match_byte_value) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob; \
	uint32_t t_match_byte = (uint32_t)(match_byte_value) << 1; \
	uint32_t t_match_bit = t_match_byte; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	\
	__asm__( \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_n) \
		: \
		[range]       "+&r"(rc.range), \
		[code]        "+&r"(rc.code), \
		[t0]          "=&r"(t0), \
		[t1]          "=&r"(t1), \
		[prob]        "=&r"(t_prob), \
		[match_bit]   "+&r"(t_match_bit), \
		[symbol]      "+&r"(symbol), \
		[match_byte]  "+&r"(t_match_byte), \
		[offset]      "+&r"(t_offset), \
		[in_ptr]      "+&r"(rc_in_ptr) \
		: \
		[probs_base]           "r"(probs_base_var), \
		[top_value]            "n"(RC_TOP_VALUE), \
		[shift_bits]           "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset]     "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits]            "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x080


// Doing the loop in asm instead of C seems to help a little.
#if LZMA_RANGE_DECODER_CONFIG & 0x100
#undef rc_direct
#define rc_direct(dest_var, count_var) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	\
	__asm__( \
		"2:\n\t" \
		"add	%[dest], %[dest]\n\t" \
		"lea	1(%q[dest]), %[t1]\n\t" \
		\
		rc_asm_normalize \
		\
		"shr	$1, %[range]\n\t" \
		"mov	%[code], %[t0]\n\t" \
		"sub	%[range], %[code]\n\t" \
		"cmovns	%[t1], %[dest]\n\t" \
		"cmovs	%[t0], %[code]\n\t" \
		"dec	%[count]\n\t" \
		"jnz	2b\n\t" \
		: \
		[range]       "+&r"(rc.range), \
		[code]        "+&r"(rc.code), \
		[t0]          "=&r"(t0), \
		[t1]          "=&r"(t1), \
		[dest]        "+&r"(dest_var), \
		[count]       "+&r"(count_var), \
		[in_ptr]      "+&r"(rc_in_ptr) \
		: \
		[top_value]   "n"(RC_TOP_VALUE), \
		[shift_bits]  "n"(RC_SHIFT_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x100

#endif // x86_64

#endif