/* SPDX-License-Identifier: 0BSD */

/*
 * Speed-optimized CRC32 using slicing-by-eight algorithm
 *
 * This uses only i386 instructions, but it is optimized for i686 and later
 * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). For i586
 * (e.g. Pentium), slicing-by-four would be better, and even the C version
 * of slicing-by-eight built with gcc -march=i586 tends to be a little bit
 * better than this. Very few probably run this code on i586 or older x86,
 * so this shouldn't be a problem in practice.
 *
 * Authors: Igor Pavlov (original version)
 *          Lasse Collin (AT&T syntax, PIC support, better portability)
 *
 * This code needs lzma_crc32_table, which can be created using the
 * following C code:

uint32_t lzma_crc32_table[8][256];

void
init_table(void)
{
	// IEEE-802.3
	static const uint32_t poly32 = UINT32_C(0xEDB88320);

	// Castagnoli
	// static const uint32_t poly32 = UINT32_C(0x82F63B78);

	// Koopman
	// static const uint32_t poly32 = UINT32_C(0xEB31D82E);

	for (size_t s = 0; s < 8; ++s) {
		for (size_t b = 0; b < 256; ++b) {
			uint32_t r = s == 0 ? b : lzma_crc32_table[s - 1][b];

			for (size_t i = 0; i < 8; ++i) {
				if (r & 1)
					r = (r >> 1) ^ poly32;
				else
					r >>= 1;
			}

			lzma_crc32_table[s][b] = r;
		}
	}
}

 * The prototype of the CRC32 function:
 * extern uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc);
 */
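
/*
 * For reference, a plain byte-at-a-time C version is sketched below
 * (documentation only, not part of the build). Assuming lzma_crc32_table
 * has been initialized with init_table() above, it is intended to return
 * the same value as the optimized routine in this file; the assembly gains
 * its speed by processing eight bytes per iteration with eight lookup
 * tables instead of one. The function name crc32_byte_by_byte is just an
 * illustrative placeholder.

#include <stddef.h>
#include <stdint.h>

extern uint32_t lzma_crc32_table[8][256];

uint32_t
crc32_byte_by_byte(const uint8_t *buf, size_t size, uint32_t crc)
{
	// Complement the initial value, as the assembly does on entry.
	crc = ~crc;

	// Update the CRC one input byte at a time using the first sub-table.
	while (size-- != 0)
		crc = lzma_crc32_table[0][(crc ^ *buf++) & 0xFF] ^ (crc >> 8);

	// Complement the final value, as the assembly does before returning.
	return ~crc;
}

 */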

/* When Intel CET is enabled, include <cet.h> in assembly code to mark
   Intel CET support. */
#ifdef __CET__
#	include <cet.h>
#else
#	define _CET_ENDBR
#endif

/*
 * On some systems, the functions need to be prefixed. The prefix is
 * usually an underscore.
 */
#ifndef __USER_LABEL_PREFIX__
#	define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC32 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_generic)
#define LZMA_CRC32_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_table)

/*
 * Solaris assembler doesn't have .p2align, and Darwin uses .align
 * differently than GNU/Linux and Solaris.
 */
#if defined(__APPLE__) || defined(__MSDOS__)
#	define ALIGN(pow2, abs) .align pow2
#else
#	define ALIGN(pow2, abs) .align abs
#endif

	.text
	.globl	LZMA_CRC32
#ifdef __ELF__
	.hidden	LZMA_CRC32
#endif

#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
		&& !defined(__MSDOS__)
	.type	LZMA_CRC32, @function
#endif

	ALIGN(4, 16)
LZMA_CRC32:
	_CET_ENDBR
	/*
	 * Register usage:
	 * %eax crc
	 * %esi buf
	 * %edi size or buf + size
	 * %ebx lzma_crc32_table
	 * %ebp Table index
	 * %ecx Temporary
	 * %edx Temporary
	 */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	movl	0x14(%esp), %esi /* buf */
	movl	0x18(%esp), %edi /* size */
	movl	0x1C(%esp), %eax /* crc */

	/*
	 * Store the address of lzma_crc32_table in %ebx. This is needed to
	 * get position-independent code (PIC).
	 *
	 * The PIC macro is defined by libtool, while __PIC__ is defined
	 * by GCC but only on some systems. Testing for both makes it simpler
	 * to test this code without libtool, and keeps the code working also
	 * when built with libtool but using something other than GCC.
	 *
	 * I understood that libtool may define PIC on Windows even though
	 * the code in Windows DLLs is not PIC in the same sense as in ELF
	 * binaries, so we need a separate check to always use the non-PIC
	 * code on Windows.
	 */
#if (!defined(PIC) && !defined(__PIC__)) \
		|| (defined(_WIN32) || defined(__CYGWIN__))
	/* Not PIC */
	movl	$ LZMA_CRC32_TABLE, %ebx
#elif defined(__APPLE__)
	/* Mach-O */
	call	.L_get_pc
.L_pic:
	leal	.L_lzma_crc32_table$non_lazy_ptr-.L_pic(%ebx), %ebx
	movl	(%ebx), %ebx
#else
	/* ELF */
	call	.L_get_pc
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	movl	LZMA_CRC32_TABLE@GOT(%ebx), %ebx
#endif

	/* Complement the initial value. */
	notl	%eax

	ALIGN(4, 16)
.L_align:
	/*
	 * Check if there is enough input to use slicing-by-eight.
	 * We need 16 bytes, because the loop pre-reads eight bytes.
	 */
	cmpl	$16, %edi
	jb	.L_rest

	/* Check if we have reached alignment of eight bytes. */
	testl	$7, %esi
	jz	.L_slice

	/* Calculate the CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrl	$8, %eax
	xorl	(%ebx, %ebp, 4), %eax
	decl	%edi
	jmp	.L_align
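
	/*
	 * The .L_slice/.L_loop code below is the usual little-endian
	 * slicing-by-eight step. Roughly, one iteration corresponds to the
	 * following C sketch (illustration only; load32le() stands for a
	 * hypothetical little-endian 32-bit load, and the real loop also
	 * pre-reads the next eight bytes to hide load latency):
	 *
	 *     crc ^= load32le(buf);
	 *     uint32_t lo = load32le(buf + 4);
	 *     crc = lzma_crc32_table[7][crc & 0xFF]
	 *         ^ lzma_crc32_table[6][(crc >> 8) & 0xFF]
	 *         ^ lzma_crc32_table[5][(crc >> 16) & 0xFF]
	 *         ^ lzma_crc32_table[4][crc >> 24]
	 *         ^ lzma_crc32_table[3][lo & 0xFF]
	 *         ^ lzma_crc32_table[2][(lo >> 8) & 0xFF]
	 *         ^ lzma_crc32_table[1][(lo >> 16) & 0xFF]
	 *         ^ lzma_crc32_table[0][lo >> 24];
	 *     buf += 8;
	 *
	 * The displacements in the memory operands are byte offsets into
	 * lzma_crc32_table: each 256-entry sub-table is 0x400 bytes, so
	 * 0x1C00(%ebx, %ebp, 4) indexes lzma_crc32_table[7], 0x1800 indexes
	 * [6], and so on down to (%ebx, %ebp, 4) for [0].
	 */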
	ALIGN(2, 4)
.L_slice:
	/*
	 * If we get here, there are at least 16 bytes of aligned input
	 * available. Make %edi a multiple of eight bytes. Store the possible
	 * remainder over the "size" variable in the argument stack.
	 */
	movl	%edi, 0x18(%esp)
	andl	$-8, %edi
	subl	%edi, 0x18(%esp)

	/*
	 * Let %edi be buf + size - 8 while running the main loop. This way
	 * we can compare for equality to determine when to exit the loop.
	 */
	addl	%esi, %edi
	subl	$8, %edi

	/* Read in the first eight aligned bytes. */
	xorl	(%esi), %eax
	movl	4(%esi), %ecx
	movzbl	%cl, %ebp

.L_loop:
	movl	0x0C00(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	0x0800(%ebx, %ebp, 4), %edx
	shrl	$16, %ecx
	xorl	8(%esi), %edx
	movzbl	%cl, %ebp
	xorl	0x0400(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	(%ebx, %ebp, 4), %edx
	movzbl	%al, %ebp

	/*
	 * Read the next four bytes, for which the CRC is calculated
	 * on the next iteration of the loop.
	 */
	movl	12(%esi), %ecx

	xorl	0x1C00(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	shrl	$16, %eax
	xorl	0x1800(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	movzbl	%al, %eax
	movl	0x1400(%ebx, %eax, 4), %eax
	addl	$8, %esi
	xorl	%edx, %eax
	xorl	0x1000(%ebx, %ebp, 4), %eax

	/* Check for end of aligned input. */
	cmpl	%edi, %esi
	movzbl	%cl, %ebp
	jne	.L_loop

	/*
	 * Process the remaining eight bytes, which the loop has already
	 * read into %eax (xored with the CRC) and %ecx.
	 */
	movl	0x0C00(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	0x0800(%ebx, %ebp, 4), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0400(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	(%ebx, %ebp, 4), %edx
	movzbl	%al, %ebp

	xorl	0x1C00(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	shrl	$16, %eax
	xorl	0x1800(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	movzbl	%al, %eax
	movl	0x1400(%ebx, %eax, 4), %eax
	addl	$8, %esi
	xorl	%edx, %eax
	xorl	0x1000(%ebx, %ebp, 4), %eax

	/* Copy the number of remaining bytes to %edi. */
	movl	0x18(%esp), %edi

.L_rest:
	/* Check for end of input. */
	testl	%edi, %edi
	jz	.L_return

	/* Calculate the CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrl	$8, %eax
	xorl	(%ebx, %ebp, 4), %eax
	decl	%edi
	jmp	.L_rest

.L_return:
	/* Complement the final value. */
	notl	%eax

	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret

#if defined(PIC) || defined(__PIC__)
	ALIGN(4, 16)
.L_get_pc:
	movl	(%esp), %ebx
	ret
#endif

#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
	/* Mach-O PIC */
	.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc32_table$non_lazy_ptr:
	.indirect_symbol LZMA_CRC32_TABLE
	.long 0

#elif !defined(_WIN32) && !defined(__CYGWIN__) && !defined(__MSDOS__)
	/* ELF */
	.size	LZMA_CRC32, .-LZMA_CRC32
#endif

/*
 * This is needed to support a non-executable stack. It's ugly to
 * use __FreeBSD__ and __linux__ here, but I don't know a way to detect when
 * we are using GNU assembler.
 */
#if defined(__ELF__) && (defined(__FreeBSD__) || defined(__linux__))
	.section	.note.GNU-stack,"",@progbits
#endif
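
/*
 * Usage sketch (documentation only, not part of the build): assuming
 * __USER_LABEL_PREFIX__ is empty so LZMA_CRC32 expands to a plain
 * lzma_crc32_generic symbol, that the test program is linked directly
 * against this object file, and that lzma_crc32_table has been filled with
 * init_table() from the header comment, the routine can be called from C
 * as shown below. 0xCBF43926 is the standard CRC-32 (IEEE 802.3) check
 * value for the ASCII string "123456789"; passing 0 as the initial crc
 * starts a fresh calculation.

#include <assert.h>
#include <stdint.h>
#include <string.h>

extern uint32_t lzma_crc32_generic(const uint8_t *buf, size_t size,
		uint32_t crc);

int
main(void)
{
	const char *str = "123456789";
	const uint32_t crc = lzma_crc32_generic(
			(const uint8_t *)str, strlen(str), 0);

	// The standard CRC-32 check value for "123456789".
	assert(crc == UINT32_C(0xCBF43926));
	return 0;
}

 */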