// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-only
/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
# include <stdlib.h>    /* _byteswap_ulong */
# include <intrin.h>    /* _byteswap_* */
#endif
#if defined(__GNUC__)
# define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
# define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif

/* code only tested on 32- and 64-bit systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }

/* detects whether we are being compiled under msan */
#if defined (__has_feature)
# if __has_feature(memory_sanitizer)
#  define MEMORY_SANITIZER 1
# endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h> /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
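
/* Illustrative sketch (not part of the interface above) : under msan, a
 * caller can verify that a freshly written region is fully initialized.
 * `scratch` is a hypothetical buffer; assert() requires <assert.h>.
 *
 *   unsigned char scratch[16];
 *   memset(scratch, 0, sizeof(scratch));
 *   // __msan_test_shadow() returns -1 when no byte in the range is poisoned
 *   assert(__msan_test_shadow(scratch, sizeof(scratch)) == -1);
 */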

/* detects whether we are being compiled under asan */
#if defined (ZFS_ASAN_ENABLED)
# define ADDRESS_SANITIZER 1
# define ZSTD_ASAN_DONT_POISON_WORKSPACE
#endif

#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 *       unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 *       poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
# error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char   BYTE;
#if USHRT_MAX != 65535
# error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short  U16;
  typedef   signed short  S16;
#if UINT_MAX != 4294967295
# error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int    U32;
  typedef   signed int    S32;
/* note : there are no limits defined for long long type in C90.
 * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long U64;
  typedef   signed long long S64;
#endif
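
/* Illustrative sketch : the typedefs above can be validated at compile time
 * with the same divide-by-zero trick MEM_check() uses for size_t.
 * MEM_checkBasicTypes is a hypothetical name, not part of the original
 * interface. */
MEM_STATIC void MEM_checkBasicTypes(void)
{
    MEM_STATIC_ASSERT(sizeof(BYTE) == 1);
    MEM_STATIC_ASSERT(sizeof(U16)  == 2);
    MEM_STATIC_ASSERT(sizeof(U32)  == 4);
    MEM_STATIC_ASSERT(sizeof(U64)  == 8);
}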

/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is done through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't require a compiler extension, but it violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#  define MEM_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#  define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}
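
/* Usage sketch (hypothetical buffer) : whichever method is selected below,
 * the MEM_read16/32/64 and MEM_write16/32/64 helpers present the same API,
 * so callers can load and store at unaligned offsets without branching on
 * the access method :
 *
 *   BYTE buf[8] = { 0 };
 *   MEM_write32(buf + 1, 0x12345678);   // unaligned store at offset 1
 *   U32 v = MEM_read32(buf + 1);        // unaligned load, v == 0x12345678
 */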

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard, by lying on structure alignment.
   Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
            ((in >>  8) & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}
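
/* Worked example : byte-swapping reverses byte order, so all three branches
 * of MEM_swap32/MEM_swap64 above agree on, e.g. :
 *
 *   MEM_swap32(0x11223344U)           == 0x44332211U
 *   MEM_swap64(0x0102030405060708ULL) == 0x0807060504030201ULL
 */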

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */