// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
 */

#include <linux/bio.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "misc.h"
#include "fs.h"
#include "btrfs_inode.h"
#include "compression.h"
#include "super.h"

#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
#define ZSTD_BTRFS_MIN_LEVEL -15
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)

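/*
 * Derive the zstd parameters for the given level and source length, clamping
 * the window log to ZSTD_BTRFS_MAX_WINDOWLOG to bound workspace memory usage.
 */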
static zstd_parameters zstd_get_btrfs_parameters(int level,
						 size_t src_len)
{
	zstd_parameters params = zstd_get_params(level, src_len);

	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
	return params;
}

struct workspace {
	void *mem;		/* zstd compression/decompression workspace */
	size_t size;		/* size of @mem */
	char *buf;		/* one sector sized bounce buffer for decompression */
	int level;		/* clipped level, index into idle_ws and zstd_ws_mem_sizes */
	int req_level;		/* level requested by the current user, 0 when idle */
	unsigned long last_used; /* jiffies */
	struct list_head list;	/* entry in the per-level idle_ws list */
	struct list_head lru_list; /* entry in the manager's global LRU */
	zstd_in_buffer in_buf;
	zstd_out_buffer out_buf;
	zstd_parameters params;
};

/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru. Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces and scanning upwards from the requested level.
 * This lets us recycle higher level workspaces because of the monotonic memory
 * guarantee. A workspace's last_used is only updated if it is being used at
 * the corresponding memory level. Putting a workspace involves adding it back
 * to the appropriate places and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */

struct zstd_workspace_manager {
	spinlock_t lock;
	struct list_head lru_list;	/* global LRU of idle workspaces */
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL]; /* per-level idle lists */
	unsigned long active_map;	/* bitmap of levels with idle workspaces */
	wait_queue_head_t wait;		/* waiters for a workspace under memory pressure */
	struct timer_list timer;	/* reclaim timer for unused workspaces */
};

/* Monotonic per-level workspace memory requirements. */
static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];

static inline struct workspace *list_to_workspace(struct list_head *list)
{
	return container_of(list, struct workspace, list);
}

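/*
 * Map a compression level to an index into the per-level tables: level 1 and
 * all negative (fast mode) levels share index 0, level N maps to index N - 1.
 */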
static inline int clip_level(int level)
{
	return max(0, level - 1);
}

/*
 * Timer callback to free unused workspaces.
 *
 * @timer: timer that expired
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 *
 * The context is softirq and does not need the _bh locking primitives.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	struct zstd_workspace_manager *zwsm =
			container_of(timer, struct zstd_workspace_manager, timer);
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock(&zwsm->lock);

	if (list_empty(&zwsm->lru_list)) {
		spin_unlock(&zwsm->lock);
		return;
	}

	list_for_each_prev_safe(pos, next, &zwsm->lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		int level;

		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		if (list_empty(&zwsm->idle_ws[level]))
			clear_bit(level, &zwsm->active_map);
	}

	if (!list_empty(&zwsm->lru_list))
		mod_timer(&zwsm->timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock(&zwsm->lock);
}

/*
 * Calculate monotonic memory bounds.
 *
 * It is possible based on the level configurations that a higher level
 * workspace uses less memory than a lower level workspace. In order to reuse
 * workspaces, this must be made a monotonic relationship. This precomputes
 * the required memory for each level and enforces the monotonicity between
 * level and memory required.
 */
static void zstd_calc_ws_mem_sizes(void)
{
	size_t max_size = 0;
	int level;

	for (level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
		if (level == 0)
			continue;
		zstd_parameters params =
			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
		size_t level_size =
			max_t(size_t,
			      zstd_cstream_workspace_bound(&params.cParams),
			      zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));

		max_size = max_t(size_t, max_size, level_size);
		/* Use level 1 workspace size for all the fast mode negative levels. */
		zstd_ws_mem_sizes[clip_level(level)] = max_size;
	}
}

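/*
 * Allocate and initialize the zstd workspace manager for @fs_info and attempt
 * to preallocate one max level workspace (best effort) to help guarantee
 * forward progress.
 */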
int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info)
{
	struct zstd_workspace_manager *zwsm;
	struct list_head *ws;

	ASSERT(fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] == NULL);
	zwsm = kzalloc(sizeof(*zwsm), GFP_KERNEL);
	if (!zwsm)
		return -ENOMEM;
	zstd_calc_ws_mem_sizes();
	spin_lock_init(&zwsm->lock);
	init_waitqueue_head(&zwsm->wait);
	timer_setup(&zwsm->timer, zstd_reclaim_timer_fn, 0);

	INIT_LIST_HEAD(&zwsm->lru_list);
	for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&zwsm->idle_ws[i]);
	fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = zwsm;

	ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
	if (IS_ERR(ws)) {
		btrfs_warn(NULL, "cannot preallocate zstd compression workspace");
	} else {
		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &zwsm->active_map);
		list_add(ws, &zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
	}
	return 0;
}

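/*
 * Tear down the zstd workspace manager of @fs_info: free all idle workspaces,
 * stop the reclaim timer and release the manager itself.
 */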
void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct workspace *workspace;

	if (!zwsm)
		return;
	fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = NULL;
	spin_lock_bh(&zwsm->lock);
	for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&zwsm->idle_ws[i])) {
			workspace = container_of(zwsm->idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&zwsm->lock);
	timer_delete_sync(&zwsm->timer);
	kfree(zwsm);
}

/*
 * Find workspace for given level.
 *
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 */
static struct list_head *zstd_find_workspace(struct btrfs_fs_info *fs_info, int level)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct list_head *ws;
	struct workspace *workspace;
	int i = clip_level(level);

	ASSERT(zwsm);
	spin_lock_bh(&zwsm->lock);
	for_each_set_bit_from(i, &zwsm->active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&zwsm->idle_ws[i])) {
			ws = zwsm->idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its place if it's a lower level using this */
			workspace->req_level = level;
			if (clip_level(level) == workspace->level)
				list_del(&workspace->lru_list);
			if (list_empty(&zwsm->idle_ws[i]))
				clear_bit(i, &zwsm->active_map);
			spin_unlock_bh(&zwsm->lock);
			return ws;
		}
	}
	spin_unlock_bh(&zwsm->lock);

	return NULL;
}

/*
 * Zstd get_workspace for level.
 *
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used. Therefore, we begin
 * scanning from 1. We first scan for an already allocated workspace and only
 * then attempt to allocate a new one. If the allocation fails due to memory
 * pressure, go to sleep waiting for the max level workspace to free up.
 */
struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct list_head *ws;
	unsigned int nofs_flag;

	ASSERT(zwsm);

	/* level == 0 means we can use any workspace */
	if (!level)
		level = 1;

again:
	ws = zstd_find_workspace(fs_info, level);
	if (ws)
		return ws;

	nofs_flag = memalloc_nofs_save();
	ws = zstd_alloc_workspace(fs_info, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(ws)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&zwsm->wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&zwsm->wait, &wait);

		goto again;
	}

	return ws;
}

/*
 * Zstd put_workspace.
 *
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only a max level workspace wakes up any
 * waiters.
 */
void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct workspace *workspace = list_to_workspace(ws);

	ASSERT(zwsm);
	spin_lock_bh(&zwsm->lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (clip_level(workspace->req_level) == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &zwsm->lru_list);
			if (!timer_pending(&zwsm->timer))
				mod_timer(&zwsm->timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	set_bit(workspace->level, &zwsm->active_map);
	list_add(&workspace->list, &zwsm->idle_ws[workspace->level]);
	workspace->req_level = 0;

	spin_unlock_bh(&zwsm->lock);

	if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL))
		cond_wake_up(&zwsm->wait);
}

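/* Free a single workspace and the buffers it owns. */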
void zstd_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->mem);
	kfree(workspace->buf);
	kfree(workspace);
}

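/*
 * Allocate a new workspace for @level, sized according to the precomputed
 * monotonic memory bounds, together with a one block sized bounce buffer.
 */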
struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
{
	const u32 blocksize = fs_info->sectorsize;
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	/* Use level 1 workspace size for all the fast mode negative levels. */
	workspace->size = zstd_ws_mem_sizes[clip_level(level)];
	workspace->level = clip_level(level);
	workspace->req_level = level;
	workspace->last_used = jiffies;
	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
	workspace->buf = kmalloc(blocksize, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);
	INIT_LIST_HEAD(&workspace->lru_list);

	return &workspace->list;
fail:
	zstd_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

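/*
 * Compress the file range starting at @start of @inode into the preallocated
 * @folios array. On success, *out_folios, *total_in and *total_out hold the
 * number of folios used and the bytes consumed and produced. -E2BIG means the
 * data did not compress well enough to be worth storing compressed.
 */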
int zstd_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
			 u64 start, struct folio **folios, unsigned long *out_folios,
			 unsigned long *total_in, unsigned long *total_out)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	zstd_cstream *stream;
	int ret = 0;
	int nr_folios = 0;
	struct folio *in_folio = NULL;	/* The current folio to read. */
	struct folio *out_folio = NULL;	/* The current folio to write to. */
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	unsigned long len = *total_out;
	const unsigned long nr_dest_folios = *out_folios;
	const u64 orig_end = start + len;
	const u32 blocksize = fs_info->sectorsize;
	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
	unsigned long max_out = nr_dest_folios * min_folio_size;
	unsigned int cur_len;

	workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
	*out_folios = 0;
	*total_out = 0;
	*total_in = 0;

	/* Initialize the stream */
	stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
				   workspace->size);
	if (unlikely(!stream)) {
		btrfs_err(fs_info,
		"zstd compression init level %d failed, root %llu inode %llu offset %llu",
			  workspace->req_level, btrfs_root_id(inode->root),
			  btrfs_ino(inode), start);
		ret = -EIO;
		goto out;
	}

	/* map in the first page of input data */
	ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
	if (ret < 0)
		goto out;
	cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
	workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start));
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = cur_len;

	/* Allocate and map in the output buffer */
	out_folio = btrfs_alloc_compr_folio(fs_info);
	if (out_folio == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	folios[nr_folios++] = out_folio;
	workspace->out_buf.dst = folio_address(out_folio);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);

	while (1) {
		size_t ret2;

		ret2 = zstd_compress_stream(stream, &workspace->out_buf,
					    &workspace->in_buf);
		if (unlikely(zstd_is_error(ret2))) {
			btrfs_warn(fs_info,
		"zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
				   workspace->req_level, zstd_get_error_code(ret2),
				   btrfs_root_id(inode->root), btrfs_ino(inode),
				   start);
			ret = -EIO;
			goto out;
		}

		/* Check to see if we are making it bigger */
		if (tot_in + workspace->in_buf.pos > blocksize * 2 &&
		    tot_in + workspace->in_buf.pos <
		    tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* We've reached the end of our output range */
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space */
		if (workspace->out_buf.pos == workspace->out_buf.size) {
			tot_out += min_folio_size;
			max_out -= min_folio_size;
			if (nr_folios == nr_dest_folios) {
				ret = -E2BIG;
				goto out;
			}
			out_folio = btrfs_alloc_compr_folio(fs_info);
			if (out_folio == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			folios[nr_folios++] = out_folio;
			workspace->out_buf.dst = folio_address(out_folio);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
		}

		/* We've reached the end of the input */
		if (workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			tot_in += workspace->in_buf.size;
			kunmap_local(workspace->in_buf.src);
			workspace->in_buf.src = NULL;
			folio_put(in_folio);
			start += cur_len;
			len -= cur_len;
			ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
			if (ret < 0)
				goto out;
			cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
			workspace->in_buf.src = kmap_local_folio(in_folio,
							offset_in_folio(in_folio, start));
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = cur_len;
		}
	}
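	/* Flush the remaining buffered data and write the frame epilogue. */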
	while (1) {
		size_t ret2;

		ret2 = zstd_end_stream(stream, &workspace->out_buf);
		if (unlikely(zstd_is_error(ret2))) {
			btrfs_err(fs_info,
		"zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
				  workspace->req_level, zstd_get_error_code(ret2),
				  btrfs_root_id(inode->root), btrfs_ino(inode),
				  start);
			ret = -EIO;
			goto out;
		}
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			break;
		}
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		tot_out += min_folio_size;
		max_out -= min_folio_size;
		if (nr_folios == nr_dest_folios) {
			ret = -E2BIG;
			goto out;
		}
		out_folio = btrfs_alloc_compr_folio(fs_info);
		if (out_folio == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		folios[nr_folios++] = out_folio;
		workspace->out_buf.dst = folio_address(out_folio);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_t(size_t, max_out, min_folio_size);
	}

	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_in = tot_in;
	*total_out = tot_out;
out:
	*out_folios = nr_folios;
	if (workspace->in_buf.src) {
		kunmap_local(workspace->in_buf.src);
		folio_put(in_folio);
	}
	return ret;
}

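/*
 * Decompress an entire compressed extent described by @cb, copying the
 * decompressed data into the pages of the original bio.
 */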
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct folio **folios_in = cb->compressed_folios;
	size_t srclen = cb->compressed_len;
	zstd_dstream *stream;
	int ret = 0;
	const u32 blocksize = fs_info->sectorsize;
	const unsigned int min_folio_size = btrfs_min_folio_size(fs_info);
	unsigned long folio_in_index = 0;
	unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
	unsigned long buf_start;
	unsigned long total_out = 0;

	stream = zstd_init_dstream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (unlikely(!stream)) {
		struct btrfs_inode *inode = cb->bbio.inode;

		btrfs_err(inode->root->fs_info,
		"zstd decompression init failed, root %llu inode %llu offset %llu",
			  btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
		ret = -EIO;
		goto done;
	}

	workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = blocksize;

	while (1) {
		size_t ret2;

		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
					      &workspace->in_buf);
		if (unlikely(zstd_is_error(ret2))) {
			struct btrfs_inode *inode = cb->bbio.inode;

			btrfs_err(inode->root->fs_info,
		"zstd decompression failed, error %d root %llu inode %llu offset %llu",
				  zstd_get_error_code(ret2), btrfs_root_id(inode->root),
				  btrfs_ino(inode), cb->start);
			ret = -EIO;
			goto done;
		}
		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
				total_out - buf_start, cb, buf_start);
		if (ret == 0)
			break;

		if (workspace->in_buf.pos >= srclen)
			break;

		/* Check if we've hit the end of a frame */
		if (ret2 == 0)
			break;

		if (workspace->in_buf.pos == workspace->in_buf.size) {
			kunmap_local(workspace->in_buf.src);
			folio_in_index++;
			if (unlikely(folio_in_index >= total_folios_in)) {
				workspace->in_buf.src = NULL;
				ret = -EIO;
				goto done;
			}
			srclen -= min_folio_size;
			workspace->in_buf.src =
				kmap_local_folio(folios_in[folio_in_index], 0);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);
		}
	}
	ret = 0;
done:
	if (workspace->in_buf.src)
		kunmap_local(workspace->in_buf.src);
	return ret;
}

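/*
 * Decompress a single block where both the compressed input and the
 * decompressed output fit within one sector. The result is copied into
 * @dest_folio at @dest_pgoff and any shortfall is zero filled.
 */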
int zstd_decompress(struct list_head *ws, const u8 *data_in,
		struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	zstd_dstream *stream;
	int ret = 0;
	unsigned long to_copy = 0;

	stream = zstd_init_dstream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (unlikely(!stream)) {
		struct btrfs_inode *inode = folio_to_inode(dest_folio);

		btrfs_err(inode->root->fs_info,
		"zstd decompression init failed, root %llu inode %llu offset %llu",
			  btrfs_root_id(inode->root), btrfs_ino(inode),
			  folio_pos(dest_folio));
		ret = -EIO;
		goto finish;
	}

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = sectorsize;

	/*
	 * Since both input and output buffers should not exceed one sector,
	 * one call should end the decompression.
	 */
	ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
	if (unlikely(zstd_is_error(ret))) {
		struct btrfs_inode *inode = folio_to_inode(dest_folio);

		btrfs_err(inode->root->fs_info,
		"zstd decompression failed, error %d root %llu inode %llu offset %llu",
			  zstd_get_error_code(ret), btrfs_root_id(inode->root),
			  btrfs_ino(inode), folio_pos(dest_folio));
		goto finish;
	}
	to_copy = workspace->out_buf.pos;
	memcpy_to_folio(dest_folio, dest_pgoff, workspace->out_buf.dst, to_copy);
finish:
	/* Error or early end. */
	if (unlikely(to_copy < destlen)) {
		ret = -EIO;
		folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
	}
	return ret;
}

const struct btrfs_compress_levels btrfs_zstd_compress = {
	.min_level = ZSTD_BTRFS_MIN_LEVEL,
	.max_level = ZSTD_BTRFS_MAX_LEVEL,
	.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};