/* SPDX-License-Identifier: 0BSD */

/*
 * Speed-optimized CRC64 using the slicing-by-four algorithm
 *
 * This uses only i386 instructions, but it is optimized for i686 and later
 * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
 *
 * Authors: Igor Pavlov (original CRC32 assembly code)
 *          Lasse Collin (CRC64 adaptation of the modified CRC32 code)
 *
 * This code needs lzma_crc64_table, which can be created using the
 * following C code:

uint64_t lzma_crc64_table[4][256];

void
init_table(void)
{
	// ECMA-182
	static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);

	for (size_t s = 0; s < 4; ++s) {
		for (size_t b = 0; b < 256; ++b) {
			uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];

			for (size_t i = 0; i < 8; ++i) {
				if (r & 1)
					r = (r >> 1) ^ poly64;
				else
					r >>= 1;
			}

			lzma_crc64_table[s][b] = r;
		}
	}
}

 * The prototype of the CRC64 function:
 * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
 */

/* When Intel CET is enabled, include <cet.h> in assembly code to mark
   Intel CET support. */
#ifdef __CET__
#	include <cet.h>
#else
#	define _CET_ENDBR
#endif

/*
 * On some systems, the function names need to be prefixed. The prefix is
 * usually an underscore.
 */
#ifndef __USER_LABEL_PREFIX__
#	define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)

/*
 * Solaris assembler doesn't have .p2align, and Darwin uses .align
 * differently than GNU/Linux and Solaris.
 */
#if defined(__APPLE__) || defined(__MSDOS__)
#	define ALIGN(pow2, abs) .align pow2
#else
#	define ALIGN(pow2, abs) .align abs
#endif

	.text
	.globl	LZMA_CRC64

#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
		&& !defined(__MSDOS__)
	.type	LZMA_CRC64, @function
#endif

	ALIGN(4, 16)
LZMA_CRC64:
	_CET_ENDBR
	/*
	 * Register usage:
	 *   %eax crc LSB
	 *   %edx crc MSB
	 *   %esi buf
	 *   %edi size or buf + size
	 *   %ebx lzma_crc64_table
	 *   %ebp Table index
	 *   %ecx Temporary
	 */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	movl	0x14(%esp), %esi /* buf */
	movl	0x18(%esp), %edi /* size */
	movl	0x1C(%esp), %eax /* crc LSB */
	movl	0x20(%esp), %edx /* crc MSB */
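
	/*
	 * For reference, the rest of this function computes roughly the
	 * following C code (a sketch, not part of the build). The names
	 * crc64_ref and read32le are made up for illustration; read32le()
	 * loads a 32-bit little-endian word, which matches the i386
	 * target of this file, and the loads it does below are four-byte
	 * aligned just like the loads in the assembly:

static uint32_t
read32le(const uint8_t *buf)
{
	uint32_t v;
	memcpy(&v, buf, sizeof(v)); // needs <string.h>
	return v;
}

static uint64_t
crc64_ref(const uint8_t *buf, size_t size, uint64_t crc)
{
	crc = ~crc;

	// Byte-at-a-time until buf is four-byte aligned (.L_align)
	while (size >= 8 && ((uintptr_t)buf & 3) != 0) {
		crc = lzma_crc64_table[0][(uint8_t)crc ^ *buf++]
				^ (crc >> 8);
		--size;
	}

	// Slicing-by-four main loop (.L_slice and .L_loop)
	if (size >= 8) {
		const size_t rest = size & 3;

		for (size_t n = size / 4; n > 0; --n) {
			const uint32_t tmp = (uint32_t)crc ^ read32le(buf);
			buf += 4;
			crc = (crc >> 32)
					^ lzma_crc64_table[3][tmp & 0xFF]
					^ lzma_crc64_table[2][(tmp >> 8) & 0xFF]
					^ lzma_crc64_table[1][(tmp >> 16) & 0xFF]
					^ lzma_crc64_table[0][tmp >> 24];
		}

		size = rest;
	}

	// Remaining 0-3 bytes (.L_rest)
	while (size-- > 0)
		crc = lzma_crc64_table[0][(uint8_t)crc ^ *buf++]
				^ (crc >> 8);

	return ~crc;
}

	 * The assembly keeps crc in %edx:%eax (%eax holds the low 32 bits)
	 * and does the same table lookups on the two 32-bit halves.
	 */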

	/*
	 * Store the address of lzma_crc64_table in %ebx. This is needed
	 * to get position-independent code (PIC).
	 *
	 * The PIC macro is defined by libtool, while __PIC__ is defined
	 * by GCC but only on some systems. Testing for both makes it
	 * simpler to test this code without libtool, and keeps the code
	 * working even when it is built with libtool but compiled with
	 * something other than GCC.
	 *
	 * Apparently libtool may define PIC on Windows even though the
	 * code in Windows DLLs is not PIC in the same sense as in ELF
	 * binaries, so a separate check is needed to always use the
	 * non-PIC code on Windows.
	 */
#if (!defined(PIC) && !defined(__PIC__)) \
		|| (defined(_WIN32) || defined(__CYGWIN__))
	/* Not PIC */
	movl	$ LZMA_CRC64_TABLE, %ebx
#elif defined(__APPLE__)
	/* Mach-O */
	call	.L_get_pc
.L_pic:
	leal	.L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
	movl	(%ebx), %ebx
#else
	/* ELF */
	call	.L_get_pc
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	movl	LZMA_CRC64_TABLE@GOT(%ebx), %ebx
#endif

	/* Complement the initial value. */
	notl	%eax
	notl	%edx

.L_align:
	/*
	 * Check if there is enough input to use slicing-by-four.
	 * We need eight bytes, because the main loop pre-reads
	 * four bytes.
	 */
	cmpl	$8, %edi
	jb	.L_rest

	/* Check if buf has reached four-byte alignment. */
	testl	$3, %esi
	jz	.L_slice

	/* Calculate the CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_align

.L_slice:
	/*
	 * If we get here, there are at least eight bytes of aligned
	 * input available. Make %edi a multiple of four bytes, and
	 * store the possible remainder over the "size" variable in
	 * the argument stack.
	 */
	movl	%edi, 0x18(%esp)
	andl	$-4, %edi
	subl	%edi, 0x18(%esp)

	/*
	 * Let %edi be buf + size - 4 while running the main loop. This
	 * way we can compare for equality to determine when to exit
	 * the loop.
	 */
	addl	%esi, %edi
	subl	$4, %edi

	/* Read in the first four aligned bytes. */
	movl	(%esi), %ecx

.L_loop:
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Check for end of aligned input. */
	cmpl	%edi, %esi

	/*
	 * Read the next four input bytes into %ecx. It is slightly
	 * faster to read them here than at the top of the loop, and
	 * movl doesn't modify the flags set by cmpl above.
	 */
	movl	(%esi), %ecx
	jb	.L_loop

	/*
	 * Process the remaining four bytes, which we have already
	 * read into %ecx.
	 */
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Copy the number of remaining bytes to %edi. */
	movl	0x18(%esp), %edi

.L_rest:
	/* Check for end of input. */
	testl	%edi, %edi
	jz	.L_return

	/* Calculate the CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_rest
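
	/*
	 * The CRC is kept in %edx:%eax, which is also where the i386
	 * calling convention returns a 64-bit integer, so after the
	 * final complement below the result is already in place.
	 */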
.L_return:
	/* Complement the final value. */
	notl	%eax
	notl	%edx

	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret

#if defined(PIC) || defined(__PIC__)
	ALIGN(4, 16)
.L_get_pc:
	movl	(%esp), %ebx
	ret
#endif

#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
	/* Mach-O PIC */
	.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc64_table$non_lazy_ptr:
	.indirect_symbol LZMA_CRC64_TABLE
	.long 0

#elif defined(_WIN32) || defined(__CYGWIN__)
#	ifdef DLL_EXPORT
	/* This is the equivalent of __declspec(dllexport). */
	.section .drectve
	.ascii " -export:lzma_crc64"
#	endif

#elif !defined(__MSDOS__)
	/* ELF */
	.size	LZMA_CRC64, .-LZMA_CRC64
#endif

/*
 * This is needed to support non-executable stack. It's ugly to use
 * __FreeBSD__ and __linux__ here, but I don't know a way to detect
 * when we are using the GNU assembler.
 */
#if defined(__ELF__) && (defined(__FreeBSD__) || defined(__linux__))
	.section .note.GNU-stack,"",@progbits
#endif
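
/*
 * Usage sketch (a hypothetical C caller, not part of this file).
 * In liblzma, the "crc" argument is the value returned by the previous
 * call, or zero when starting a new calculation, so large inputs can be
 * processed in pieces:

#include <stddef.h>
#include <stdint.h>

extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);

uint64_t
crc64_of_chunks(const uint8_t *a, size_t a_size,
		const uint8_t *b, size_t b_size)
{
	uint64_t crc = lzma_crc64(a, a_size, 0);
	crc = lzma_crc64(b, b_size, crc);
	return crc; // same result as CRC64 of a and b concatenated
}

 */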