// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-only
/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
* Dependencies
***************************************/
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
* Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/*-*************************************
* Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_free{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail. A
 * minimal sketch of the expected call sequence follows the struct definition
 * below.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    int allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
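
/*
 * A minimal usage sketch (illustrative only: the sizes below are hypothetical,
 * and real callers derive them from the compression parameters). It shows the
 * phase ordering described above (objects, then buffers, then aligned
 * allocations and tables), with failure checked once at the end:
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, buffer, bufferSize);  // buffer: caller-provided,
 *                                                // pointer-aligned memory
 *     {   void* const obj     = ZSTD_cwksp_reserve_object(&ws, 64);
 *         BYTE* const buf     = ZSTD_cwksp_reserve_buffer(&ws, 1024);
 *         U32*  const aligned = (U32*)ZSTD_cwksp_reserve_aligned(&ws, 256);
 *         U32*  const table   = (U32*)ZSTD_cwksp_reserve_table(&ws, 4096);
 *         if (ZSTD_cwksp_reserve_failed(&ws)) { ... handle failure ... }
 *     }
 */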

/*-*************************************
* Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
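
/*
 * For example (align must be a power of two, per the note above):
 * ZSTD_cwksp_align(10, 8) == 16, while ZSTD_cwksp_align(16, 8) == 16,
 * since sizes already on the boundary are unchanged and everything else
 * is rounded up to the next multiple of align.
 */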

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
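
/*
 * For example, with the default 128-byte redzone, an ASAN build computes
 * ZSTD_cwksp_alloc_size(1024) == 1024 + 2 * 128 == 1280, while a non-ASAN
 * build returns exactly 1024.
 */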

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
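            /* Worked example (assuming 4-byte U32): if allocStart ends in
             * 0x...6, then (allocStart & 3) == 2, and subtracting 2 rounds
             * the pointer down to the 4-byte boundary ending in 0x...4. */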
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/**
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}
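
/*
 * Sketch of a typical table reservation (hypothetical call; real callers
 * size tables from the cparams, e.g. a hash table of (1 << hashLog)
 * U32 entries):
 *
 *     size_t const hSize = (size_t)1 << cParams->hashLog;
 *     U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
 */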

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}
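
/*
 * Sketch of the "static objects" case from the layout description above,
 * where the enclosing context itself is carved out of the workspace first
 * (conceptually what the static-CCtx initialization path does; the details
 * here are illustrative):
 *
 *     ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
 *     if (cctx == NULL) return NULL;  // workspace too small
 */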

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}
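
/*
 * Taken together, the three functions above form the table-validity protocol:
 * mark the tables dirty when their contents may no longer respect the index
 * bound (for instance, after copying state into the workspace), then either
 * mark them clean once the bound is known to hold again, or call
 * ZSTD_cwksp_clean_tables() to zero just the stale region before reuse.
 */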

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
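
/*
 * Usage sketch (illustrative; assumes the allocator returns memory aligned on
 * sizeof(void*), which malloc() guarantees):
 *
 *     size_t const wkspSize = (size_t)1 << 20;  // hypothetical size
 *     void* const wkspBuf = malloc(wkspSize);
 *     if (wkspBuf != NULL) {
 *         ZSTD_cwksp ws;
 *         ZSTD_cwksp_init(&ws, wkspBuf, wkspSize);
 *         ... reserve objects, buffers, and tables as needed ...
 *     }
 */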

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_malloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_free(ptr, customMem);
}
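
/*
 * Sketch of the create/free pairing (minimal shape only; ZSTD_defaultCMem and
 * ZSTD_isError are assumed visible from zstd's common headers):
 *
 *     ZSTD_cwksp ws;
 *     size_t const initErr = ZSTD_cwksp_create(&ws, (size_t)1 << 20, ZSTD_defaultCMem);
 *     if (!ZSTD_isError(initErr)) {
 *         ... use the workspace ...
 *         ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *     }
 */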

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
* Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
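
/*
 * Sketch of how these checks are meant to combine (an assumed caller pattern;
 * the actual decision logic lives in the compression reset path):
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (!ZSTD_cwksp_check_available(ws, neededSpace)
 *         || ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         ... free this workspace and allocate one of exactly neededSpace ...
 *     }
 */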

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */