Lines Matching +full:separately +full:- +full:defined

5  * This source code is licensed under both the BSD-style license (found in the
8 * You may select, at your option, one of the above-listed licenses.
14 /*-*************************************
19 #if defined (__cplusplus)
23 /*-*************************************
42 /*-*************************************
54 * expect a well-formed caller to free this.
70 * - These different internal datastructures have different setup requirements:
72 * - The static objects need to be cleared once and can then be trivially
75 * - Various buffers don't need to be initialized at all--they are always
78 * - The matchstate tables have a unique requirement that they don't need
86 * - These buffers also have different alignment requirements.
88 * - We would like to reuse the objects in the workspace for multiple
92 * - We would like to be able to efficiently reuse the workspace across
102 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
105 * following categories, and are allocated separately:
107 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
113 * - Fixed size objects: these are fixed-size, fixed-count objects that are
115 * control how they're initialized separately from the broader ZSTD_CCtx.
117 * - Entropy Workspace
118 * - 2 x ZSTD_compressedBlockState_t
119 * - CDict dictionary contents
121 * - Tables: these are any of several different datastructures (hash tables,
123 * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
124 * Their sizes depend on the cparams. These tables are 64-byte aligned.
126 * - Aligned: these buffers are used for various purposes that require 4 byte
130 * - Buffers: these buffers are used for various purposes that don't require
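
The excerpt above (from the header comment of zstd's zstd_cwksp.h) describes a single-buffer workspace: fixed objects sit at the bottom, tables and aligned allocations grow upward from them, and plain buffers grow downward from the top, with reservations made in that phase order. The sketch below is illustrative only: it assumes the internal zstd_cwksp.h API (ZSTD_cwksp_init and the ZSTD_cwksp_reserve_* family) is visible to the including file, and all sizes are made-up placeholders.

    #include "zstd_cwksp.h"   /* internal zstd header: ZSTD_cwksp and the reserve functions */

    /* Carve a few allocations out of a caller-provided, pointer-aligned buffer
     * in the phase order described above. Sizes are arbitrary. */
    static size_t cwksp_layout_sketch(void* mem, size_t memSize)
    {
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);

        /* Phase 1: fixed-size objects, packed at the bottom of the workspace. */
        {   void* const fixedObject = ZSTD_cwksp_reserve_object(&ws, 512);
            (void)fixedObject;
        }
        /* Phase 2: unaligned buffers, carved from the top and growing down. */
        {   BYTE* const scratch = ZSTD_cwksp_reserve_buffer(&ws, 4096);
            (void)scratch;
        }
        /* Phase 3: 64-byte-aligned allocations and U32 tables, growing up. */
        {   void* const aligned = ZSTD_cwksp_reserve_aligned(&ws, 1024);
            U32*  const table   = (U32*)ZSTD_cwksp_reserve_table(&ws, 1024 * sizeof(U32));
            (void)aligned; (void)table;
        }
        return ZSTD_cwksp_reserve_failed(&ws) ? 0 : ZSTD_cwksp_used(&ws);
    }
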
160 /*-*************************************
168 assert(ws->workspace <= ws->objectEnd); in ZSTD_cwksp_assert_internal_consistency()
169 assert(ws->objectEnd <= ws->tableEnd); in ZSTD_cwksp_assert_internal_consistency()
170 assert(ws->objectEnd <= ws->tableValidEnd); in ZSTD_cwksp_assert_internal_consistency()
171 assert(ws->tableEnd <= ws->allocStart); in ZSTD_cwksp_assert_internal_consistency()
172 assert(ws->tableValidEnd <= ws->allocStart); in ZSTD_cwksp_assert_internal_consistency()
173 assert(ws->allocStart <= ws->workspaceEnd); in ZSTD_cwksp_assert_internal_consistency()
180 size_t const mask = align - 1; in ZSTD_cwksp_align()
200 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_alloc_size()
237 size_t const alignBytesMask = alignBytes - 1; in ZSTD_cwksp_bytes_to_align_ptr()
238 size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; in ZSTD_cwksp_bytes_to_align_ptr()
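
ZSTD_cwksp_align() rounds a size up to a power-of-two boundary, and ZSTD_cwksp_bytes_to_align_ptr() computes how many bytes push a pointer up to the next such boundary (zero if it is already aligned). A small stand-alone illustration of the same two mask tricks, not zstd code:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t round_up(size_t size, size_t align)           /* mirrors ZSTD_cwksp_align() */
    {
        size_t const mask = align - 1;
        assert((align & mask) == 0);                             /* align must be a power of 2 */
        return (size + mask) & ~mask;
    }

    static size_t bytes_to_align(void const* ptr, size_t align) /* mirrors ZSTD_cwksp_bytes_to_align_ptr() */
    {
        size_t const mask = align - 1;
        return (align - ((uintptr_t)ptr & mask)) & mask;         /* 0 when already aligned */
    }

    int main(void)
    {
        assert(round_up(100, 64) == 128);                        /* rounds up to the next 64-byte step */
        assert(round_up(128, 64) == 128);                        /* already a multiple: unchanged */
        assert(bytes_to_align((void*)(uintptr_t)65, 64)  == 63);
        assert(bytes_to_align((void*)(uintptr_t)128, 64) == 0);
        return 0;
    }
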
254 void* const alloc = (BYTE*)ws->allocStart - bytes; in ZSTD_cwksp_reserve_internal_buffer_space()
255 void* const bottom = ws->tableEnd; in ZSTD_cwksp_reserve_internal_buffer_space()
257 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); in ZSTD_cwksp_reserve_internal_buffer_space()
262 ws->allocFailed = 1; in ZSTD_cwksp_reserve_internal_buffer_space()
267 if (alloc < ws->tableValidEnd) { in ZSTD_cwksp_reserve_internal_buffer_space()
268 ws->tableValidEnd = alloc; in ZSTD_cwksp_reserve_internal_buffer_space()
270 ws->allocStart = alloc; in ZSTD_cwksp_reserve_internal_buffer_space()
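
ZSTD_cwksp_reserve_internal_buffer_space() hands out buffer space from the top of the free region: allocStart moves down by the requested size, the reservation fails once it would cross tableEnd, and tableValidEnd is pulled down when the new buffer overlaps previously valid table space. A simplified stand-alone model of that bump-down step, not zstd code:

    #include <stddef.h>

    typedef struct {
        char* tableEnd;     /* top of the upward-growing table region */
        char* allocStart;   /* bottom of the downward-growing buffer region */
        int   allocFailed;
    } toy_wksp;

    static void* toy_reserve_buffer(toy_wksp* ws, size_t bytes)
    {
        if (bytes > (size_t)(ws->allocStart - ws->tableEnd)) {   /* would collide with the tables */
            ws->allocFailed = 1;
            return NULL;
        }
        ws->allocStart -= bytes;      /* bump the top of free space downward */
        /* The real function additionally lowers tableValidEnd when the new
         * buffer dips below it, voiding the tables' value-range guarantee. */
        return ws->allocStart;
    }
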
282 assert(phase >= ws->phase); in ZSTD_cwksp_internal_advance_phase()
283 if (phase > ws->phase) { in ZSTD_cwksp_internal_advance_phase()
285 if (ws->phase < ZSTD_cwksp_alloc_buffers && in ZSTD_cwksp_internal_advance_phase()
287 ws->tableValidEnd = ws->objectEnd; in ZSTD_cwksp_internal_advance_phase()
291 if (ws->phase < ZSTD_cwksp_alloc_aligned && in ZSTD_cwksp_internal_advance_phase()
295 size_t const bytesToAlign = ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES); in ZSTD_cwksp_internal_advance_phase()
297 ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */ in ZSTD_cwksp_internal_advance_phase()
299 RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign), memory_allocation, "aligned phase - alignment initial allocation failed!"); in ZSTD_cwksp_internal_advance_phase()
302 void* const alloc = ws->objectEnd; in ZSTD_cwksp_internal_advance_phase()
306 RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, in ZSTD_cwksp_internal_advance_phase()
307 "table phase - alignment initial allocation failed!"); in ZSTD_cwksp_internal_advance_phase()
308 ws->objectEnd = objectEnd; in ZSTD_cwksp_internal_advance_phase()
309 ws->tableEnd = objectEnd; /* table area starts being empty */ in ZSTD_cwksp_internal_advance_phase()
310 if (ws->tableValidEnd < ws->tableEnd) { in ZSTD_cwksp_internal_advance_phase()
311 ws->tableValidEnd = ws->tableEnd; in ZSTD_cwksp_internal_advance_phase()
313 ws->phase = phase; in ZSTD_cwksp_internal_advance_phase()
324 return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd); in ZSTD_cwksp_owns_buffer()
338 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_reserve_internal()
339 /* over-reserve space */ in ZSTD_cwksp_reserve_internal()
345 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_reserve_internal()
350 if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { in ZSTD_cwksp_reserve_internal()
374 assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); in ZSTD_cwksp_reserve_aligned()
380 * their values remain constrained, allowing us to re-use them without
381 * memset()-ing them.
393 alloc = ws->tableEnd; in ZSTD_cwksp_reserve_table()
395 top = ws->allocStart; in ZSTD_cwksp_reserve_table()
398 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); in ZSTD_cwksp_reserve_table()
399 assert((bytes & (sizeof(U32)-1)) == 0); in ZSTD_cwksp_reserve_table()
404 ws->allocFailed = 1; in ZSTD_cwksp_reserve_table()
407 ws->tableEnd = end; in ZSTD_cwksp_reserve_table()
409 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_reserve_table()
410 if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { in ZSTD_cwksp_reserve_table()
415 assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); in ZSTD_cwksp_reserve_table()
416 assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); in ZSTD_cwksp_reserve_table()
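
The table phase holds what the header comment calls the match-state tables: U32 arrays whose sizes come from the compression parameters. A hedged sketch of how such tables are typically sized and reserved (hashLog and chainLog are fields of zstd's public ZSTD_compressionParameters; the exact call sites in zstd_compress.c differ):

    static void reserve_matchstate_tables_sketch(ZSTD_cwksp* ws,
                                                 const ZSTD_compressionParameters* cParams)
    {
        size_t const hSize     = (size_t)1 << cParams->hashLog;
        size_t const chainSize = (size_t)1 << cParams->chainLog;
        /* Both requests are multiples of sizeof(U32) (and, for valid cparams, of
         * ZSTD_CWKSP_ALIGNMENT_BYTES), and the returned pointers are 64-byte
         * aligned, matching the asserts in ZSTD_cwksp_reserve_table(). */
        U32* const hashTable  = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
        U32* const chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
        (void)hashTable; (void)chainTable;
    }
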
427 void* alloc = ws->objectEnd; in ZSTD_cwksp_reserve_object()
430 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_reserve_object()
431 /* over-reserve space */ in ZSTD_cwksp_reserve_object()
437 alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); in ZSTD_cwksp_reserve_object()
442 if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { in ZSTD_cwksp_reserve_object()
444 ws->allocFailed = 1; in ZSTD_cwksp_reserve_object()
447 ws->objectEnd = end; in ZSTD_cwksp_reserve_object()
448 ws->tableEnd = end; in ZSTD_cwksp_reserve_object()
449 ws->tableValidEnd = end; in ZSTD_cwksp_reserve_object()
451 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_reserve_object()
455 if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { in ZSTD_cwksp_reserve_object()
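
ZSTD_cwksp_reserve_object() only succeeds while the workspace is still in the object phase, so all fixed-size objects have to be reserved before the first buffer, aligned, or table reservation; sizes are rounded up to pointer-size multiples internally. A hedged sketch (the 256-byte and 8 KB sizes below are placeholders, not zstd's real struct or entropy-workspace sizes):

    static int reserve_fixed_objects_sketch(ZSTD_cwksp* ws)
    {
        void* const blockState0 = ZSTD_cwksp_reserve_object(ws, 256);      /* e.g. a block-state struct */
        void* const blockState1 = ZSTD_cwksp_reserve_object(ws, 256);
        void* const entropyWksp = ZSTD_cwksp_reserve_object(ws, 8 << 10);  /* e.g. an entropy workspace */
        (void)blockState0; (void)blockState1; (void)entropyWksp;
        /* Once another phase has started, a further ZSTD_cwksp_reserve_object()
         * call sets ws->allocFailed instead of allocating. */
        return !ZSTD_cwksp_reserve_failed(ws);
    }
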
467 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_mark_tables_dirty()
468 /* To validate that the table re-use logic is sound, and that we don't in ZSTD_cwksp_mark_tables_dirty()
469 * access table space that we haven't cleaned, we re-"poison" the table in ZSTD_cwksp_mark_tables_dirty()
472 size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; in ZSTD_cwksp_mark_tables_dirty()
473 assert(__msan_test_shadow(ws->objectEnd, size) == -1); in ZSTD_cwksp_mark_tables_dirty()
474 __msan_poison(ws->objectEnd, size); in ZSTD_cwksp_mark_tables_dirty()
478 assert(ws->tableValidEnd >= ws->objectEnd); in ZSTD_cwksp_mark_tables_dirty()
479 assert(ws->tableValidEnd <= ws->allocStart); in ZSTD_cwksp_mark_tables_dirty()
480 ws->tableValidEnd = ws->objectEnd; in ZSTD_cwksp_mark_tables_dirty()
486 assert(ws->tableValidEnd >= ws->objectEnd); in ZSTD_cwksp_mark_tables_clean()
487 assert(ws->tableValidEnd <= ws->allocStart); in ZSTD_cwksp_mark_tables_clean()
488 if (ws->tableValidEnd < ws->tableEnd) { in ZSTD_cwksp_mark_tables_clean()
489 ws->tableValidEnd = ws->tableEnd; in ZSTD_cwksp_mark_tables_clean()
499 assert(ws->tableValidEnd >= ws->objectEnd); in ZSTD_cwksp_clean_tables()
500 assert(ws->tableValidEnd <= ws->allocStart); in ZSTD_cwksp_clean_tables()
501 if (ws->tableValidEnd < ws->tableEnd) { in ZSTD_cwksp_clean_tables()
502 ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd); in ZSTD_cwksp_clean_tables()
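
Together, mark_tables_dirty / mark_tables_clean / clean_tables implement the tables' reuse contract from the header comment: table contents never need to be fully cleared, only bounded, so a caller that breaks the bound marks the tables dirty and later either restores the bound itself or zeroes the not-known-valid region. A hedged sketch of the call order only; the decision of which branch to take lives in zstd_compress.c, not here:

    static void table_reuse_sketch(ZSTD_cwksp* ws, int boundRestoredByCaller)
    {
        ZSTD_cwksp_mark_tables_dirty(ws);        /* old table values may exceed the new index bound */
        if (boundRestoredByCaller) {
            ZSTD_cwksp_mark_tables_clean(ws);    /* caller re-established the bound itself */
        } else {
            ZSTD_cwksp_clean_tables(ws);         /* zero whatever table space is not known-valid */
        }
    }
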
514 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_clear_tables()
519 if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { in ZSTD_cwksp_clear_tables()
520 size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; in ZSTD_cwksp_clear_tables()
521 __asan_poison_memory_region(ws->objectEnd, size); in ZSTD_cwksp_clear_tables()
525 ws->tableEnd = ws->objectEnd; in ZSTD_cwksp_clear_tables()
536 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_clear()
537 /* To validate that the context re-use logic is sound, and that we don't in ZSTD_cwksp_clear()
538 * access stuff that this compression hasn't initialized, we re-"poison" in ZSTD_cwksp_clear()
539 * the workspace (or at least the non-static, non-table parts of it) in ZSTD_cwksp_clear()
542 size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd; in ZSTD_cwksp_clear()
543 __msan_poison(ws->tableValidEnd, size); in ZSTD_cwksp_clear()
547 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) in ZSTD_cwksp_clear()
552 if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { in ZSTD_cwksp_clear()
553 size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; in ZSTD_cwksp_clear()
554 __asan_poison_memory_region(ws->objectEnd, size); in ZSTD_cwksp_clear()
558 ws->tableEnd = ws->objectEnd; in ZSTD_cwksp_clear()
559 ws->allocStart = ws->workspaceEnd; in ZSTD_cwksp_clear()
560 ws->allocFailed = 0; in ZSTD_cwksp_clear()
561 if (ws->phase > ZSTD_cwksp_alloc_buffers) { in ZSTD_cwksp_clear()
562 ws->phase = ZSTD_cwksp_alloc_buffers; in ZSTD_cwksp_clear()
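
ZSTD_cwksp_clear() is the between-compressions reset: buffers, aligned allocations and tables are released (and re-poisoned under MSAN/ASAN to catch stale reads), while the objects reserved in the object phase survive. A hedged sketch of reuse across several compressions, with the actual compression work elided:

    static void reuse_across_compressions_sketch(ZSTD_cwksp* ws, int rounds)
    {
        void* const persistentObject = ZSTD_cwksp_reserve_object(ws, 512);  /* survives clear() */
        int i;
        for (i = 0; i < rounds; ++i) {
            BYTE* const scratch = ZSTD_cwksp_reserve_buffer(ws, 4096);
            U32*  const table   = (U32*)ZSTD_cwksp_reserve_table(ws, 256 * sizeof(U32));
            /* ... run one compression using persistentObject, scratch, table ... */
            (void)scratch; (void)table;
            ZSTD_cwksp_clear(ws);   /* releases buffers/aligned/tables, keeps objects */
        }
        (void)persistentObject;
    }
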
570 * buffer, if present, must be separately freed).
574 assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ in ZSTD_cwksp_init()
575 ws->workspace = start; in ZSTD_cwksp_init()
576 ws->workspaceEnd = (BYTE*)start + size; in ZSTD_cwksp_init()
577 ws->objectEnd = ws->workspace; in ZSTD_cwksp_init()
578 ws->tableValidEnd = ws->objectEnd; in ZSTD_cwksp_init()
579 ws->phase = ZSTD_cwksp_alloc_objects; in ZSTD_cwksp_init()
580 ws->isStatic = isStatic; in ZSTD_cwksp_init()
582 ws->workspaceOversizedDuration = 0; in ZSTD_cwksp_init()
595 void *ptr = ws->workspace; in ZSTD_cwksp_free()
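
ZSTD_cwksp_init() only wraps a caller-owned buffer, which the workspace never frees (per the comment above), while ZSTD_cwksp_free() releases a heap-backed workspace through a ZSTD_customMem; the matching allocator is assumed here to be ZSTD_cwksp_create(ws, size, customMem). A hedged sketch of both lifetimes:

    static void workspace_lifetime_sketch(void)
    {
        /* Static workspace: the cwksp never frees this buffer; a size_t array
         * guarantees the pointer alignment asserted by ZSTD_cwksp_init(). */
        static size_t backing[(16 << 10) / sizeof(size_t)];
        ZSTD_cwksp staticWs;
        ZSTD_cwksp_init(&staticWs, backing, sizeof(backing), ZSTD_cwksp_static_alloc);

        /* Dynamic workspace: the cwksp owns the buffer and releases it in free(). */
        {   ZSTD_customMem const cMem = { NULL, NULL, NULL };   /* default malloc/free */
            ZSTD_cwksp dynWs;
            if (!ZSTD_isError(ZSTD_cwksp_create(&dynWs, 16 << 10, cMem))) {
                /* ... reserve and use ... */
                ZSTD_cwksp_free(&dynWs, cMem);
            }
        }
    }
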
603 * is left in an invalid state (src must be re-init()'ed before it's used again).
611 return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); in ZSTD_cwksp_sizeof()
615 return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) in ZSTD_cwksp_used()
616 + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); in ZSTD_cwksp_used()
620 return ws->allocFailed; in ZSTD_cwksp_reserve_failed()
623 /*-*************************************
640 return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63); in ZSTD_cwksp_estimated_space_within_bounds()
646 return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd); in ZSTD_cwksp_available_space()
660 && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION; in ZSTD_cwksp_check_wasteful()
666 ws->workspaceOversizedDuration++; in ZSTD_cwksp_bump_oversized_duration()
668 ws->workspaceOversizedDuration = 0; in ZSTD_cwksp_bump_oversized_duration()
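
check_wasteful() and bump_oversized_duration() implement a shrink-on-waste policy: the duration counter grows while the workspace stays far larger (by ZSTD_WORKSPACETOOLARGE_FACTOR) than what is needed, and once it exceeds ZSTD_WORKSPACETOOLARGE_MAXDURATION the workspace is worth reallocating at the needed size. A hedged sketch of that policy; zstd's actual resize logic lives in zstd_compress.c and also handles the too-small case and error paths:

    static void maybe_shrink_sketch(ZSTD_cwksp* ws, size_t neededSpace, ZSTD_customMem cMem)
    {
        ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);    /* count consecutive oversized uses */
        if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
            ZSTD_cwksp_free(ws, cMem);
            (void)ZSTD_cwksp_create(ws, neededSpace, cMem);     /* real code must check this result */
        }
    }
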
672 #if defined (__cplusplus)