/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif
/* MEM_STATIC : every function in this header is a small static helper;
 * mark it unused where the compiler supports it, to silence warnings in
 * translation units that include this header but don't call everything. */
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif

/* code only tested on 32 and 64 bits systems */
/* compile-time assertion : forces a division by zero (hence a compile error)
 * when the condition is false; usable at function scope in C90. */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }

/* detects whether we are being compiled under msan
 * (clang-style feature test; __has_feature only exists on clang) */
#if defined (__has_feature)
#  if __has_feature(memory_sanitizer)
#    define MEMORY_SANITIZER 1
#  endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h> /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif

/* detects whether we are being compiled under asan
 * (clang defines it via __has_feature; gcc via __SANITIZE_ADDRESS__) */
#if defined (__has_feature)
#  if __has_feature(address_sanitizer)
#    define ADDRESS_SANITIZER 1
#  endif
#elif defined(__SANITIZE_ADDRESS__)
#  define ADDRESS_SANITIZER 1
#endif

#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif


/*-**************************************************************
*  Basic Types
*****************************************************************/
/* Prefer <stdint.h> exact-width types when available (C99 or C++);
 * otherwise fall back on C90 base types, with compile-time width checks. */
#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char      BYTE;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short      U16;
  typedef   signed short      S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int        U32;
  typedef   signed int        S32;
/* note : there are no limits defined for long long type in C90.
 * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long  U64;
  typedef   signed long long  S64;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allow to select different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violate C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif

/* Native pointer-width probes; constant-folded by any optimizing compiler. */
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

/* Runtime endianness test : returns 1 on little-endian hosts, 0 otherwise.
 * Compilers typically reduce this to a compile-time constant. */
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard, by lying on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

/* Byteswap a 32-bit value : uses a compiler intrinsic/builtin when available,
 * otherwise a portable shift-and-mask fallback. */
MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

/* Byteswap a 64-bit value : same strategy as MEM_swap32. */
MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in << 8)  & 0x000000ff00000000ULL) |
            ((in >> 8)  & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

/* Byteswap a size_t value, dispatching on native word size (asserted 4 or 8). */
MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/
/* Read/write fixed-width values stored in little-endian byte order,
 * independently of host endianness. */

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));  /* byte-wise reassembly on big-endian hosts */
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

/* 24-bit little-endian read : low 16 bits via MEM_readLE16, third byte as bits 16-23 */
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

/* 24-bit little-endian write : only the low 24 bits of val are stored */
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr) 383 { 384 if (MEM_32bits()) 385 return (size_t)MEM_readLE32(memPtr); 386 else 387 return (size_t)MEM_readLE64(memPtr); 388 } 389 390 MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) 391 { 392 if (MEM_32bits()) 393 MEM_writeLE32(memPtr, (U32)val); 394 else 395 MEM_writeLE64(memPtr, (U64)val); 396 } 397 398 /*=== Big endian r/w ===*/ 399 400 MEM_STATIC U32 MEM_readBE32(const void* memPtr) 401 { 402 if (MEM_isLittleEndian()) 403 return MEM_swap32(MEM_read32(memPtr)); 404 else 405 return MEM_read32(memPtr); 406 } 407 408 MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) 409 { 410 if (MEM_isLittleEndian()) 411 MEM_write32(memPtr, MEM_swap32(val32)); 412 else 413 MEM_write32(memPtr, val32); 414 } 415 416 MEM_STATIC U64 MEM_readBE64(const void* memPtr) 417 { 418 if (MEM_isLittleEndian()) 419 return MEM_swap64(MEM_read64(memPtr)); 420 else 421 return MEM_read64(memPtr); 422 } 423 424 MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) 425 { 426 if (MEM_isLittleEndian()) 427 MEM_write64(memPtr, MEM_swap64(val64)); 428 else 429 MEM_write64(memPtr, val64); 430 } 431 432 MEM_STATIC size_t MEM_readBEST(const void* memPtr) 433 { 434 if (MEM_32bits()) 435 return (size_t)MEM_readBE32(memPtr); 436 else 437 return (size_t)MEM_readBE64(memPtr); 438 } 439 440 MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) 441 { 442 if (MEM_32bits()) 443 MEM_writeBE32(memPtr, (U32)val); 444 else 445 MEM_writeBE64(memPtr, (U64)val); 446 } 447 448 449 #if defined (__cplusplus) 450 } 451 #endif 452 453 #endif /* MEM_H_MODULE */ 454