/*
 * BSD 3-Clause New License (https://spdx.org/licenses/BSD-3-Clause.html)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016-2018, Klara Inc.
 * Copyright (c) 2016-2018, Allan Jude
 * Copyright (c) 2018-2020, Sebastian Gottschall
 * Copyright (c) 2019-2020, Michael Niewöhner
 * Copyright (c) 2020, The FreeBSD Foundation [1]
 *
 * [1] Portions of this software were developed by Allan Jude
 *     under sponsorship from the FreeBSD Foundation.
 */

#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/zio_compress.h>
#include <sys/spa.h>
#include <sys/zstd/zstd.h>

#define	ZSTD_STATIC_LINKING_ONLY
#include "lib/zstd.h"
#include "lib/zstd_errors.h"

kstat_t *zstd_ksp = NULL;

typedef struct zstd_stats {
	kstat_named_t	zstd_stat_alloc_fail;
	kstat_named_t	zstd_stat_alloc_fallback;
	kstat_named_t	zstd_stat_com_alloc_fail;
	kstat_named_t	zstd_stat_dec_alloc_fail;
	kstat_named_t	zstd_stat_com_inval;
	kstat_named_t	zstd_stat_dec_inval;
	kstat_named_t	zstd_stat_dec_header_inval;
	kstat_named_t	zstd_stat_com_fail;
	kstat_named_t	zstd_stat_dec_fail;
	kstat_named_t	zstd_stat_buffers;
	kstat_named_t	zstd_stat_size;
} zstd_stats_t;

static zstd_stats_t zstd_stats = {
	{ "alloc_fail",			KSTAT_DATA_UINT64 },
	{ "alloc_fallback",		KSTAT_DATA_UINT64 },
	{ "compress_alloc_fail",	KSTAT_DATA_UINT64 },
	{ "decompress_alloc_fail",	KSTAT_DATA_UINT64 },
	{ "compress_level_invalid",	KSTAT_DATA_UINT64 },
	{ "decompress_level_invalid",	KSTAT_DATA_UINT64 },
	{ "decompress_header_invalid",	KSTAT_DATA_UINT64 },
	{ "compress_failed",		KSTAT_DATA_UINT64 },
	{ "decompress_failed",		KSTAT_DATA_UINT64 },
	{ "buffers",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
};
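
/*
 * Observability note (sketch, not enforced by this file): these counters
 * are registered in zstd_init() below as the "zstd" kstat. On Linux they
 * typically surface as /proc/spl/kstat/zfs/zstd; on FreeBSD, kstats are
 * usually exported through the kstat.zfs.misc.zstd.* sysctl tree. Exact
 * paths depend on the platform glue, not on this file.
 */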

/* Enum describing the allocator type specified by kmem_type in zstd_kmem */
enum zstd_kmem_type {
	ZSTD_KMEM_UNKNOWN = 0,
	/* Allocation type using vmem_alloc */
	ZSTD_KMEM_DEFAULT,
	/* Pool based allocation using zstd_mempool_alloc */
	ZSTD_KMEM_POOL,
	/* Reserved fallback memory for decompression only */
	ZSTD_KMEM_DCTX,
	ZSTD_KMEM_COUNT,
};

/* Structure for pooled memory objects */
struct zstd_pool {
	void *mem;
	size_t size;
	kmutex_t barrier;
	hrtime_t timeout;
};

/* Global structure for handling memory allocations */
struct zstd_kmem {
	enum zstd_kmem_type kmem_type;
	size_t kmem_size;
	struct zstd_pool *pool;
};

/* Fallback memory structure used for decompression only if memory runs out */
struct zstd_fallback_mem {
	size_t mem_size;
	void *mem;
	kmutex_t barrier;
};

struct zstd_levelmap {
	int16_t zstd_level;
	enum zio_zstd_levels level;
};

/*
 * ZSTD memory handlers
 *
 * For decompression we use a different handler which also provides fallback
 * memory allocation in case memory runs out.
 *
 * The ZSTD handlers are split up to keep the implementation as simple as
 * possible.
 */
static void *zstd_alloc(void *opaque, size_t size);
static void *zstd_dctx_alloc(void *opaque, size_t size);
static void zstd_free(void *opaque, void *ptr);

/* Compression memory handler */
static const ZSTD_customMem zstd_malloc = {
	zstd_alloc,
	zstd_free,
	NULL,
};

/* Decompression memory handler */
static const ZSTD_customMem zstd_dctx_malloc = {
	zstd_dctx_alloc,
	zstd_free,
	NULL,
};
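
/*
 * Reader's note on the zstd API: ZSTD_customMem is declared as
 * { customAlloc, customFree, opaque }, so the initializers above plug our
 * allocators into zstd and leave the opaque state pointer unused (NULL).
 */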

/* Level map for converting ZFS internal levels to ZSTD levels and vice versa */
static struct zstd_levelmap zstd_levels[] = {
	{ZIO_ZSTD_LEVEL_1, ZIO_ZSTD_LEVEL_1},
	{ZIO_ZSTD_LEVEL_2, ZIO_ZSTD_LEVEL_2},
	{ZIO_ZSTD_LEVEL_3, ZIO_ZSTD_LEVEL_3},
	{ZIO_ZSTD_LEVEL_4, ZIO_ZSTD_LEVEL_4},
	{ZIO_ZSTD_LEVEL_5, ZIO_ZSTD_LEVEL_5},
	{ZIO_ZSTD_LEVEL_6, ZIO_ZSTD_LEVEL_6},
	{ZIO_ZSTD_LEVEL_7, ZIO_ZSTD_LEVEL_7},
	{ZIO_ZSTD_LEVEL_8, ZIO_ZSTD_LEVEL_8},
	{ZIO_ZSTD_LEVEL_9, ZIO_ZSTD_LEVEL_9},
	{ZIO_ZSTD_LEVEL_10, ZIO_ZSTD_LEVEL_10},
	{ZIO_ZSTD_LEVEL_11, ZIO_ZSTD_LEVEL_11},
	{ZIO_ZSTD_LEVEL_12, ZIO_ZSTD_LEVEL_12},
	{ZIO_ZSTD_LEVEL_13, ZIO_ZSTD_LEVEL_13},
	{ZIO_ZSTD_LEVEL_14, ZIO_ZSTD_LEVEL_14},
	{ZIO_ZSTD_LEVEL_15, ZIO_ZSTD_LEVEL_15},
	{ZIO_ZSTD_LEVEL_16, ZIO_ZSTD_LEVEL_16},
	{ZIO_ZSTD_LEVEL_17, ZIO_ZSTD_LEVEL_17},
	{ZIO_ZSTD_LEVEL_18, ZIO_ZSTD_LEVEL_18},
	{ZIO_ZSTD_LEVEL_19, ZIO_ZSTD_LEVEL_19},
	{-1, ZIO_ZSTD_LEVEL_FAST_1},
	{-2, ZIO_ZSTD_LEVEL_FAST_2},
	{-3, ZIO_ZSTD_LEVEL_FAST_3},
	{-4, ZIO_ZSTD_LEVEL_FAST_4},
	{-5, ZIO_ZSTD_LEVEL_FAST_5},
	{-6, ZIO_ZSTD_LEVEL_FAST_6},
	{-7, ZIO_ZSTD_LEVEL_FAST_7},
	{-8, ZIO_ZSTD_LEVEL_FAST_8},
	{-9, ZIO_ZSTD_LEVEL_FAST_9},
	{-10, ZIO_ZSTD_LEVEL_FAST_10},
	{-20, ZIO_ZSTD_LEVEL_FAST_20},
	{-30, ZIO_ZSTD_LEVEL_FAST_30},
	{-40, ZIO_ZSTD_LEVEL_FAST_40},
	{-50, ZIO_ZSTD_LEVEL_FAST_50},
	{-60, ZIO_ZSTD_LEVEL_FAST_60},
	{-70, ZIO_ZSTD_LEVEL_FAST_70},
	{-80, ZIO_ZSTD_LEVEL_FAST_80},
	{-90, ZIO_ZSTD_LEVEL_FAST_90},
	{-100, ZIO_ZSTD_LEVEL_FAST_100},
	{-500, ZIO_ZSTD_LEVEL_FAST_500},
	{-1000, ZIO_ZSTD_LEVEL_FAST_1000},
};

/*
 * This variable represents the maximum count of the pool based on the number
 * of CPUs plus some buffer. We default to cpu count * 4, see zstd_init.
 */
static int pool_count = 16;

#define	ZSTD_POOL_MAX		pool_count
#define	ZSTD_POOL_TIMEOUT	60 * 2

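/*
 * Sizing example (illustrative): on a machine where boot_ncpus is 4,
 * zstd_init() sets pool_count to 16, so zstd_mempool_cctx and
 * zstd_mempool_dctx below each hold 16 slots. A slot's buffer is
 * reclaimed once it has sat unused for ZSTD_POOL_TIMEOUT (120) seconds.
 */
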
static struct zstd_fallback_mem zstd_dctx_fallback;
static struct zstd_pool *zstd_mempool_cctx;
static struct zstd_pool *zstd_mempool_dctx;

/*
 * The zstd library code expects these symbols when ADDRESS_SANITIZER is
 * defined. ASAN provides them, but KASAN defines __SANITIZE_ADDRESS__
 * without providing them. To avoid changing the external zstd code, we
 * define ADDRESS_SANITIZER ourselves and supply no-op stubs.
 */
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#define	ADDRESS_SANITIZER 1
#endif
#elif defined(__SANITIZE_ADDRESS__)
#define	ADDRESS_SANITIZER 1
#endif
#if defined(_KERNEL) && defined(ADDRESS_SANITIZER)
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
void __asan_poison_memory_region(void const volatile *addr, size_t size);
void __asan_unpoison_memory_region(void const volatile *addr, size_t size) {}
void __asan_poison_memory_region(void const volatile *addr, size_t size) {}
#endif


static void
zstd_mempool_reap(struct zstd_pool *zstd_mempool)
{
	struct zstd_pool *pool;

	if (!zstd_mempool || !ZSTDSTAT(zstd_stat_buffers)) {
		return;
	}

	/* Free obsolete slots */
	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
		pool = &zstd_mempool[i];
		if (pool->mem && mutex_tryenter(&pool->barrier)) {
			/* Free memory if unused object older than 2 minutes */
			if (pool->mem && gethrestime_sec() > pool->timeout) {
				vmem_free(pool->mem, pool->size);
				ZSTDSTAT_SUB(zstd_stat_buffers, 1);
				ZSTDSTAT_SUB(zstd_stat_size, pool->size);
				pool->mem = NULL;
				pool->size = 0;
				pool->timeout = 0;
			}
			mutex_exit(&pool->barrier);
		}
	}
}

/*
 * Try to get a cached, allocated buffer from the memory pool or allocate a
 * new one if necessary. If an object is older than 2 minutes and does not
 * fit the requested size, it will be released and a new cached entry will be
 * allocated. Other pooled objects that go unused for 2 minutes are released
 * as well.
 *
 * The concept is that high-frequency memory allocations of bigger objects are
 * expensive. So if a lot of work is going on, allocations will be kept for a
 * while and can be reused in that time frame.
 *
 * The scheduled release will be updated every time an object is reused.
 */

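/*
 * Typical lifecycle (illustrative sketch of the pool-backed case; real
 * callers go through zstd_alloc()/zstd_free() below, and zstd_free()
 * dispatches on kmem_type):
 *
 *	struct zstd_kmem *m = zstd_mempool_alloc(zstd_mempool_cctx, nbytes);
 *	if (m != NULL) {
 *		// hand the bytes after the header, (m + 1), to zstd
 *		zstd_mempool_free(m);	// drops the slot's barrier mutex
 *	}
 */
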
static void *
zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
{
	struct zstd_pool *pool;
	struct zstd_kmem *mem = NULL;

	if (!zstd_mempool) {
		return (NULL);
	}

	/* Seek for a preallocated memory slot */
	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
		pool = &zstd_mempool[i];
		/*
		 * This lock is simply a marker for a pool object being in use.
		 * If it's already held, it will be skipped.
		 *
		 * We need to create it before checking it to avoid race
		 * conditions caused by running in a threaded context.
		 *
		 * The lock is later released by zstd_mempool_free.
		 */
		if (mutex_tryenter(&pool->barrier)) {
			/*
			 * Check if the object fits the size; if so, take it
			 * and update the timestamp.
			 */
			if (pool->mem && size <= pool->size) {
				pool->timeout = gethrestime_sec() +
				    ZSTD_POOL_TIMEOUT;
				mem = pool->mem;
				return (mem);
			}
			mutex_exit(&pool->barrier);
		}
	}

	/*
	 * If no preallocated slot was found, try to fill in a new one.
	 *
	 * We run a similar algorithm twice here to avoid pool fragmentation.
	 * The first one may generate holes in the list if objects get released.
	 * We always make sure that these holes get filled instead of adding new
	 * allocations constantly at the end.
	 */
	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
		pool = &zstd_mempool[i];
		if (mutex_tryenter(&pool->barrier)) {
			/* Object is free, try to allocate new one */
			if (!pool->mem) {
				mem = vmem_alloc(size, KM_SLEEP);
				if (mem) {
					ZSTDSTAT_ADD(zstd_stat_buffers, 1);
					ZSTDSTAT_ADD(zstd_stat_size, size);
					pool->mem = mem;
					pool->size = size;
					/* Keep track for later release */
					mem->pool = pool;
					mem->kmem_type = ZSTD_KMEM_POOL;
					mem->kmem_size = size;
				}
			}

			if (size <= pool->size) {
				/* Update timestamp */
				pool->timeout = gethrestime_sec() +
				    ZSTD_POOL_TIMEOUT;

				return (pool->mem);
			}

			mutex_exit(&pool->barrier);
		}
	}

	/*
	 * If the pool is full or the allocation failed, try lazy allocation
	 * instead.
	 */
	if (!mem) {
		mem = vmem_alloc(size, KM_NOSLEEP);
		if (mem) {
			mem->pool = NULL;
			mem->kmem_type = ZSTD_KMEM_DEFAULT;
			mem->kmem_size = size;
		}
	}

	return (mem);
}

/* Mark object as released by releasing the barrier mutex */
static void
zstd_mempool_free(struct zstd_kmem *z)
{
	mutex_exit(&z->pool->barrier);
}

/* Convert ZFS internal enum to ZSTD level */
static int
zstd_enum_to_level(enum zio_zstd_levels level, int16_t *zstd_level)
{
	if (level > 0 && level <= ZIO_ZSTD_LEVEL_19) {
		*zstd_level = zstd_levels[level - 1].zstd_level;
		return (0);
	}
	if (level >= ZIO_ZSTD_LEVEL_FAST_1 &&
	    level <= ZIO_ZSTD_LEVEL_FAST_1000) {
		*zstd_level = zstd_levels[level - ZIO_ZSTD_LEVEL_FAST_1
		    + ZIO_ZSTD_LEVEL_19].zstd_level;
		return (0);
	}

	/* Invalid/unknown zfs compression enum - this should never happen. */
	return (1);
}

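/*
 * Conversion example (illustrative): the positive levels map one-to-one,
 * while the "fast" enum values map to negative native zstd levels via the
 * table above, e.g.:
 *
 *	int16_t l;
 *	VERIFY0(zstd_enum_to_level(ZIO_ZSTD_LEVEL_7, &l));	// l == 7
 *	VERIFY0(zstd_enum_to_level(ZIO_ZSTD_LEVEL_FAST_5, &l));	// l == -5
 */
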
/* Compress block using zstd */
size_t
zfs_zstd_compress(void *s_start, void *d_start, size_t s_len, size_t d_len,
    int level)
{
	size_t c_len;
	int16_t zstd_level;
	zfs_zstdhdr_t *hdr;
	ZSTD_CCtx *cctx;

	hdr = (zfs_zstdhdr_t *)d_start;

	/* Skip compression if the specified level is invalid */
	if (zstd_enum_to_level(level, &zstd_level)) {
		ZSTDSTAT_BUMP(zstd_stat_com_inval);
		return (s_len);
	}

	ASSERT3U(d_len, >=, sizeof (*hdr));
	ASSERT3U(d_len, <=, s_len);
	ASSERT3U(zstd_level, !=, 0);

	cctx = ZSTD_createCCtx_advanced(zstd_malloc);

	/*
	 * Out of kernel memory, gently fall through - this will disable
	 * compression in zio_compress_data
	 */
	if (!cctx) {
		ZSTDSTAT_BUMP(zstd_stat_com_alloc_fail);
		return (s_len);
	}

	/* Set the compression level */
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, zstd_level);

	/* Use the "magicless" zstd header which saves us 4 header bytes */
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);

	/*
	 * Disable redundant checksum calculation and content size storage since
	 * this is already done by ZFS itself.
	 */
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0);

	c_len = ZSTD_compress2(cctx,
	    hdr->data,
	    d_len - sizeof (*hdr),
	    s_start, s_len);

	ZSTD_freeCCtx(cctx);

	/* Error in the compression routine, disable compression. */
	if (ZSTD_isError(c_len)) {
		/*
		 * If we are aborting the compression because the savings are
		 * too small, that is not a failure. Everything else is a
		 * failure, so increment the compression failure counter.
		 */
		if (ZSTD_getErrorCode(c_len) != ZSTD_error_dstSize_tooSmall) {
			ZSTDSTAT_BUMP(zstd_stat_com_fail);
		}
		return (s_len);
	}

	/*
	 * Encode the compressed buffer size at the start. We'll need this in
	 * decompression to counter the effects of padding which might be added
	 * to the compressed buffer and which, if unhandled, would confuse the
	 * hell out of our decompression function.
	 */
	hdr->c_len = BE_32(c_len);

	/*
	 * Check version for overflow.
	 * The limit of 24 bits must not be exceeded. This allows a maximum
	 * version of 1677.72.15, which we never expect to be reached.
	 */
	ASSERT3U(ZSTD_VERSION_NUMBER, <=, 0xFFFFFF);

	/*
	 * Encode the compression level as well. We may need to know the
	 * original compression level if compressed_arc is disabled, to match
	 * the compression settings to write this block to the L2ARC.
	 *
	 * Encode the actual level, so if the enum changes in the future, we
	 * will be compatible.
	 *
	 * The upper 24 bits store the ZSTD version to be able to provide
	 * future compatibility, since new versions might enhance the
	 * compression algorithm in a way that changes the compressed data.
	 *
	 * As soon as such an incompatibility occurs, handling code needs to be
	 * added, differentiating between the versions.
	 */
	zfs_set_hdrversion(hdr, ZSTD_VERSION_NUMBER);
	zfs_set_hdrlevel(hdr, level);
	hdr->raw_version_level = BE_32(hdr->raw_version_level);

	return (c_len + sizeof (*hdr));
}

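/*
 * Resulting on-disk layout (sketch; the exact field packing lives in
 * sys/zstd/zstd.h, and the version/level split shown here follows the
 * comment above):
 *
 *	+--------------------+--------------------------------------+
 *	| c_len (32-bit, BE) | raw_version_level (32-bit, BE):      |
 *	|                    |   upper 24 bits: zstd version        |
 *	|                    |   lower 8 bits: compression level    |
 *	+--------------------+--------------------------------------+
 *	| c_len bytes of "magicless" zstd frame data ...            |
 *	+-----------------------------------------------------------+
 */
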
/* Decompress block using zstd and return its stored level */
int
zfs_zstd_decompress_level(void *s_start, void *d_start, size_t s_len,
    size_t d_len, uint8_t *level)
{
	ZSTD_DCtx *dctx;
	size_t result;
	int16_t zstd_level;
	uint32_t c_len;
	const zfs_zstdhdr_t *hdr;
	zfs_zstdhdr_t hdr_copy;

	hdr = (const zfs_zstdhdr_t *)s_start;
	c_len = BE_32(hdr->c_len);

	/*
	 * Make a copy instead of directly converting the header, since we must
	 * not modify the original data that may be used again later.
	 */
	hdr_copy.raw_version_level = BE_32(hdr->raw_version_level);
	uint8_t curlevel = zfs_get_hdrlevel(&hdr_copy);

	/*
	 * NOTE: We ignore the ZSTD version for now. As soon as any
	 * incompatibility occurs, it has to be handled accordingly.
	 * The version can be accessed via `hdr_copy.version`.
	 */

	/*
	 * Convert and check the level.
	 * An invalid level is a strong indicator of data corruption! In such
	 * a case, return an error so the upper layers can try to fix it.
	 */
	if (zstd_enum_to_level(curlevel, &zstd_level)) {
		ZSTDSTAT_BUMP(zstd_stat_dec_inval);
		return (1);
	}

	ASSERT3U(d_len, >=, s_len);
	ASSERT3U(curlevel, !=, ZIO_COMPLEVEL_INHERIT);

	/* Invalid compressed buffer size encoded at start */
	if (c_len + sizeof (*hdr) > s_len) {
		ZSTDSTAT_BUMP(zstd_stat_dec_header_inval);
		return (1);
	}

	dctx = ZSTD_createDCtx_advanced(zstd_dctx_malloc);
	if (!dctx) {
		ZSTDSTAT_BUMP(zstd_stat_dec_alloc_fail);
		return (1);
	}

	/* Set header type to "magicless" */
	ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);

	/* Decompress the data and release the context */
	result = ZSTD_decompressDCtx(dctx, d_start, d_len, hdr->data, c_len);
	ZSTD_freeDCtx(dctx);

	/*
	 * Returns 0 on success (the decompression function returned a valid
	 * frame size) and non-zero on failure (ZSTD_isError() reports an
	 * error code in the returned value).
	 */
	if (ZSTD_isError(result)) {
		ZSTDSTAT_BUMP(zstd_stat_dec_fail);
		return (1);
	}

	if (level) {
		*level = curlevel;
	}

	return (0);
}

/* Decompress datablock using zstd */
int
zfs_zstd_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len,
    int level __maybe_unused)
{
	return (zfs_zstd_decompress_level(s_start, d_start, s_len, d_len,
	    NULL));
}

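/*
 * Round-trip usage sketch (illustrative; buffer management is the caller's
 * responsibility and ZIO_ZSTD_LEVEL_3 is just an example level):
 *
 *	size_t psize = zfs_zstd_compress(src, dst, s_len, d_len,
 *	    ZIO_ZSTD_LEVEL_3);
 *	if (psize == s_len) {
 *		// incompressible (or error): callers keep the block raw
 *	} else if (zfs_zstd_decompress(dst, out, psize, s_len, 0) == 0) {
 *		// out now holds the original s_len bytes again
 *	}
 */
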
/* Allocator for zstd compression context using zstd_mempool_alloc */
static void *
zstd_alloc(void *opaque __maybe_unused, size_t size)
{
	size_t nbytes = sizeof (struct zstd_kmem) + size;
	struct zstd_kmem *z = NULL;

	z = (struct zstd_kmem *)zstd_mempool_alloc(zstd_mempool_cctx, nbytes);

	if (!z) {
		ZSTDSTAT_BUMP(zstd_stat_alloc_fail);
		return (NULL);
	}

	return ((void*)z + (sizeof (struct zstd_kmem)));
}

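/*
 * Memory layout of a single allocation (sketch):
 *
 *	[ struct zstd_kmem | ... size bytes handed to zstd ... ]
 *	^ bookkeeping header ^ pointer returned to the caller
 *
 * zstd_free() steps back by sizeof (struct zstd_kmem) to recover the
 * bookkeeping header and dispatch on its kmem_type.
 */
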
/*
 * Allocator for zstd decompression context using zstd_mempool_alloc with
 * fallback to reserved memory if allocation fails
 */
static void *
zstd_dctx_alloc(void *opaque __maybe_unused, size_t size)
{
	size_t nbytes = sizeof (struct zstd_kmem) + size;
	struct zstd_kmem *z = NULL;
	enum zstd_kmem_type type = ZSTD_KMEM_DEFAULT;

	z = (struct zstd_kmem *)zstd_mempool_alloc(zstd_mempool_dctx, nbytes);
	if (!z) {
		/* Try harder, decompression shall not fail */
		z = vmem_alloc(nbytes, KM_SLEEP);
		if (z) {
			z->pool = NULL;
		}
		ZSTDSTAT_BUMP(zstd_stat_alloc_fail);
	} else {
		return ((void*)z + (sizeof (struct zstd_kmem)));
	}

	/* Fallback if everything fails */
	if (!z) {
		/*
		 * Barrier since we can only handle it in a single thread. All
		 * other following threads need to wait here until decompression
		 * is completed. zstd_free will release this barrier later.
		 */
		mutex_enter(&zstd_dctx_fallback.barrier);

		z = zstd_dctx_fallback.mem;
		type = ZSTD_KMEM_DCTX;
		ZSTDSTAT_BUMP(zstd_stat_alloc_fallback);
	}

	/* Allocation should always be successful */
	if (!z) {
		return (NULL);
	}

	z->kmem_type = type;
	z->kmem_size = nbytes;

	return ((void*)z + (sizeof (struct zstd_kmem)));
}

/* Free allocated memory by its specific type */
static void
zstd_free(void *opaque __maybe_unused, void *ptr)
{
	struct zstd_kmem *z = (ptr - sizeof (struct zstd_kmem));
	enum zstd_kmem_type type;

	ASSERT3U(z->kmem_type, <, ZSTD_KMEM_COUNT);
	ASSERT3U(z->kmem_type, >, ZSTD_KMEM_UNKNOWN);

	type = z->kmem_type;
	switch (type) {
	case ZSTD_KMEM_DEFAULT:
		vmem_free(z, z->kmem_size);
		break;
	case ZSTD_KMEM_POOL:
		zstd_mempool_free(z);
		break;
	case ZSTD_KMEM_DCTX:
		mutex_exit(&zstd_dctx_fallback.barrier);
		break;
	default:
		break;
	}
}

/* Allocate fallback memory to ensure safe decompression */
static void __init
create_fallback_mem(struct zstd_fallback_mem *mem, size_t size)
{
	mem->mem_size = size;
	mem->mem = vmem_zalloc(mem->mem_size, KM_SLEEP);
	mutex_init(&mem->barrier, NULL, MUTEX_DEFAULT, NULL);
}

/* Initialize memory pool barrier mutexes */
static void __init
zstd_mempool_init(void)
{
	zstd_mempool_cctx = (struct zstd_pool *)
	    kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
	zstd_mempool_dctx = (struct zstd_pool *)
	    kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);

	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
		mutex_init(&zstd_mempool_cctx[i].barrier, NULL,
		    MUTEX_DEFAULT, NULL);
		mutex_init(&zstd_mempool_dctx[i].barrier, NULL,
		    MUTEX_DEFAULT, NULL);
	}
}

/* Initialize zstd-related memory handling */
static int __init
zstd_meminit(void)
{
	zstd_mempool_init();

	/*
	 * Estimate the size of the fallback decompression context.
	 * The expected size on x64 with current ZSTD should be about 160 KB.
	 */
	create_fallback_mem(&zstd_dctx_fallback,
	    P2ROUNDUP(ZSTD_estimateDCtxSize() + sizeof (struct zstd_kmem),
	    PAGESIZE));

	return (0);
}

/* Release object from pool and free memory */
static void __exit
release_pool(struct zstd_pool *pool)
{
	mutex_destroy(&pool->barrier);
	vmem_free(pool->mem, pool->size);
	pool->mem = NULL;
	pool->size = 0;
}

/* Release memory pool objects */
static void __exit
zstd_mempool_deinit(void)
{
	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
		release_pool(&zstd_mempool_cctx[i]);
		release_pool(&zstd_mempool_dctx[i]);
	}

	kmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
	kmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
	zstd_mempool_dctx = NULL;
	zstd_mempool_cctx = NULL;
}

/* Release unused memory from the pools */
void
zfs_zstd_cache_reap_now(void)
{
	/*
	 * Short-circuit if there are no buffers to begin with.
	 */
	if (ZSTDSTAT(zstd_stat_buffers) == 0)
		return;

	/*
	 * zstd_mempool_reap() scans a pool and releases objects that have
	 * gone unused for longer than ZSTD_POOL_TIMEOUT.
	 */
	zstd_mempool_reap(zstd_mempool_cctx);
	zstd_mempool_reap(zstd_mempool_dctx);
}

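/*
 * Usage sketch for zfs_zstd_cache_reap_now() (hypothetical caller; the
 * real hook lives outside this file, typically somewhere in the
 * memory-pressure/reclaim path):
 *
 *	if (memory_is_low())
 *		zfs_zstd_cache_reap_now();
 */
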
extern int __init
zstd_init(void)
{
	/* Set pool size by using maximum sane thread count * 4 */
	pool_count = (boot_ncpus * 4);
	zstd_meminit();

	/* Initialize kstat */
	zstd_ksp = kstat_create("zfs", 0, "zstd", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zstd_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (zstd_ksp != NULL) {
		zstd_ksp->ks_data = &zstd_stats;
		kstat_install(zstd_ksp);
	}

	return (0);
}

extern void __exit
zstd_fini(void)
{
	/* Deinitialize kstat */
	if (zstd_ksp != NULL) {
		kstat_delete(zstd_ksp);
		zstd_ksp = NULL;
	}

	/* Release fallback memory */
	vmem_free(zstd_dctx_fallback.mem, zstd_dctx_fallback.mem_size);
	mutex_destroy(&zstd_dctx_fallback.barrier);

	/* Deinit memory pool */
	zstd_mempool_deinit();
}

#if defined(_KERNEL)
module_init(zstd_init);
module_exit(zstd_fini);

ZFS_MODULE_DESCRIPTION("ZSTD Compression for ZFS");
ZFS_MODULE_LICENSE("Dual BSD/GPL");
ZFS_MODULE_VERSION(ZSTD_VERSION_STRING "a");

EXPORT_SYMBOL(zfs_zstd_compress);
EXPORT_SYMBOL(zfs_zstd_decompress_level);
EXPORT_SYMBOL(zfs_zstd_decompress);
EXPORT_SYMBOL(zfs_zstd_cache_reap_now);
#endif