// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
 */

#include <linux/bio.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "compression.h"
#include "ctree.h"

#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)

static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
						 size_t src_len)
{
	ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);

	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
	return params;
}

struct workspace {
	void *mem;
	size_t size;
	char *buf;
	unsigned int level;
	unsigned int req_level;
	unsigned long last_used; /* jiffies */
	struct list_head list;
	struct list_head lru_list;
	ZSTD_inBuffer in_buf;
	ZSTD_outBuffer out_buf;
};

/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru. Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces and scanning upward from the requested level. This
 * lets us recycle higher level workspaces because of the monotonic memory
 * guarantee. A workspace's last_used is only updated if it is being used by
 * the corresponding memory level. Putting a workspace involves adding it back
 * to the appropriate places and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */

struct zstd_workspace_manager {
	const struct btrfs_compress_op *ops;
	spinlock_t lock;
	struct list_head lru_list;
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
	unsigned long active_map;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static struct zstd_workspace_manager wsm;
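
/*
 * Pre-computed workspace memory requirement for each compression level,
 * indexed by level - 1 and filled in by zstd_calc_ws_mem_sizes().
 */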
static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];

static inline struct workspace *list_to_workspace(struct list_head *list)
{
	return container_of(list, struct workspace, list);
}

static void zstd_free_workspace(struct list_head *ws);
static struct list_head *zstd_alloc_workspace(unsigned int level);

/*
 * zstd_reclaim_timer_fn - reclaim timer
 * @timer: timer
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock_bh(&wsm.lock);

	if (list_empty(&wsm.lru_list)) {
		spin_unlock_bh(&wsm.lock);
		return;
	}

	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		unsigned int level;

		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		if (list_empty(&wsm.idle_ws[level - 1]))
			clear_bit(level - 1, &wsm.active_map);

	}

	if (!list_empty(&wsm.lru_list))
		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock_bh(&wsm.lock);
}

/*
 * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
 *
 * It is possible based on the level configurations that a higher level
 * workspace uses less memory than a lower level workspace. In order to reuse
 * workspaces, this must be made a monotonic relationship. This precomputes
 * the required memory for each level and enforces the monotonicity between
 * level and memory required.
 */
static void zstd_calc_ws_mem_sizes(void)
{
	size_t max_size = 0;
	unsigned int level;

	for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
		ZSTD_parameters params =
			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
		size_t level_size =
			max_t(size_t,
			      ZSTD_CStreamWorkspaceBound(params.cParams),
			      ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));

		max_size = max_t(size_t, max_size, level_size);
		zstd_ws_mem_sizes[level - 1] = max_size;
	}
}
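
/*
 * zstd_init_workspace_manager - initialize the zstd workspace manager
 *
 * Computes the per-level workspace sizes, sets up the idle lists, lru, wait
 * queue and reclaim timer, and preallocates one max level workspace so that
 * forward progress can always be made.
 */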
static void zstd_init_workspace_manager(void)
{
	struct list_head *ws;
	int i;

	zstd_calc_ws_mem_sizes();

	wsm.ops = &btrfs_zstd_compress;
	spin_lock_init(&wsm.lock);
	init_waitqueue_head(&wsm.wait);
	timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);

	INIT_LIST_HEAD(&wsm.lru_list);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&wsm.idle_ws[i]);

	ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
	if (IS_ERR(ws)) {
		pr_warn("BTRFS: cannot preallocate zstd compression workspace\n");
	} else {
		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
		list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
	}
}

static void zstd_cleanup_workspace_manager(void)
{
	struct workspace *workspace;
	int i;

	spin_lock_bh(&wsm.lock);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&wsm.idle_ws[i])) {
			workspace = container_of(wsm.idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&wsm.lock);

	del_timer_sync(&wsm.timer);
}

/*
 * zstd_find_workspace - find workspace
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 */
static struct list_head *zstd_find_workspace(unsigned int level)
{
	struct list_head *ws;
	struct workspace *workspace;
	int i = level - 1;

	spin_lock_bh(&wsm.lock);
	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&wsm.idle_ws[i])) {
			ws = wsm.idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its place if it's a lower level using this */
			workspace->req_level = level;
			if (level == workspace->level)
				list_del(&workspace->lru_list);
			if (list_empty(&wsm.idle_ws[i]))
				clear_bit(i, &wsm.active_map);
			spin_unlock_bh(&wsm.lock);
			return ws;
		}
	}
	spin_unlock_bh(&wsm.lock);

	return NULL;
}

/*
 * zstd_get_workspace - zstd's get_workspace
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used. Therefore, we begin
 * scanning from 1. We first scan through the existing workspaces and only then
 * attempt to allocate a new one. If that allocation fails due to memory
 * pressure, go to sleep waiting for the max level workspace to free up.
 */
static struct list_head *zstd_get_workspace(unsigned int level)
{
	struct list_head *ws;
	unsigned int nofs_flag;

	/* level == 0 means we can use any workspace */
	if (!level)
		level = 1;

again:
	ws = zstd_find_workspace(level);
	if (ws)
		return ws;

	nofs_flag = memalloc_nofs_save();
	ws = zstd_alloc_workspace(level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(ws)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&wsm.wait, &wait);

		goto again;
	}

	return ws;
}
/*
 * zstd_put_workspace - zstd put_workspace
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only the max level workspace wakes up
 * waiters.
 */
static void zstd_put_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_to_workspace(ws);

	spin_lock_bh(&wsm.lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (workspace->req_level == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &wsm.lru_list);
			if (!timer_pending(&wsm.timer))
				mod_timer(&wsm.timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	set_bit(workspace->level - 1, &wsm.active_map);
	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
	workspace->req_level = 0;

	spin_unlock_bh(&wsm.lock);

	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
		cond_wake_up(&wsm.wait);
}

static void zstd_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->mem);
	kfree(workspace->buf);
	kfree(workspace);
}

static struct list_head *zstd_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->size = zstd_ws_mem_sizes[level - 1];
	workspace->level = level;
	workspace->req_level = level;
	workspace->last_used = jiffies;
	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);
	INIT_LIST_HEAD(&workspace->lru_list);

	return &workspace->list;
fail:
	zstd_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}
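
/*
 * zstd_compress_pages - compress a file range into @pages
 *
 * Streams the data starting at @start in @mapping through the zstd compressor
 * one page at a time, allocating destination pages as needed. Returns 0 on
 * success with *total_in, *total_out and *out_pages updated, -E2BIG if the
 * data does not compress down, or another negative errno on failure.
 */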
static int zstd_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start,
			       struct page **pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	ZSTD_CStream *stream;
	int ret = 0;
	int nr_pages = 0;
	struct page *in_page = NULL;  /* The current page to read */
	struct page *out_page = NULL; /* The current page to write to */
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	unsigned long len = *total_out;
	const unsigned long nr_dest_pages = *out_pages;
	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
	ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
							   len);

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/* Initialize the stream */
	stream = ZSTD_initCStream(params, len, workspace->mem,
			workspace->size);
	if (!stream) {
		pr_warn("BTRFS: ZSTD_initCStream failed\n");
		ret = -EIO;
		goto out;
	}

	/* map in the first page of input data */
	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	workspace->in_buf.src = kmap(in_page);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

	/* Allocate and map in the output buffer */
	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pages[nr_pages++] = out_page;
	workspace->out_buf.dst = kmap(out_page);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

	while (1) {
		size_t ret2;

		ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto out;
		}

		/* Check to see if we are making it bigger */
		if (tot_in + workspace->in_buf.pos > 8192 &&
				tot_in + workspace->in_buf.pos <
				tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* We've reached the end of our output range */
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space */
		if (workspace->out_buf.pos == workspace->out_buf.size) {
			tot_out += PAGE_SIZE;
			max_out -= PAGE_SIZE;
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			pages[nr_pages++] = out_page;
			workspace->out_buf.dst = kmap(out_page);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_t(size_t, max_out,
							PAGE_SIZE);
		}

		/* We've reached the end of the input */
		if (workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			tot_in += PAGE_SIZE;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			len -= PAGE_SIZE;
			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
			workspace->in_buf.src = kmap(in_page);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
		}
	}
	while (1) {
		size_t ret2;

		ret2 = ZSTD_endStream(stream, &workspace->out_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_endStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto out;
		}
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			break;
		}
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		tot_out += PAGE_SIZE;
		max_out -= PAGE_SIZE;
		kunmap(out_page);
		if (nr_pages == nr_dest_pages) {
			out_page = NULL;
			ret = -E2BIG;
			goto out;
		}
		out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (out_page == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		pages[nr_pages++] = out_page;
		workspace->out_buf.dst = kmap(out_page);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
	}

	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_in = tot_in;
	*total_out = tot_out;
out:
	*out_pages = nr_pages;
	/* Cleanup */
	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	if (out_page)
		kunmap(out_page);
	return ret;
}
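
/*
 * zstd_decompress_bio - decompress a whole compressed bio
 *
 * Walks the compressed pages of @cb, feeding them through the zstd stream
 * decompressor and copying the output back into the pages of the original
 * bio. Returns 0 on success or -EIO on a decompression error or truncated
 * input.
 */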
static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;
	size_t srclen = cb->compressed_len;
	ZSTD_DStream *stream;
	int ret = 0;
	unsigned long page_in_index = 0;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	unsigned long total_out = 0;

	stream = ZSTD_initDStream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_debug("BTRFS: ZSTD_initDStream failed\n");
		ret = -EIO;
		goto done;
	}

	workspace->in_buf.src = kmap(pages_in[page_in_index]);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;

	while (1) {
		size_t ret2;

		ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto done;
		}
		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
				buf_start, total_out, disk_start, orig_bio);
		if (ret == 0)
			break;

		if (workspace->in_buf.pos >= srclen)
			break;

		/* Check if we've hit the end of a frame */
		if (ret2 == 0)
			break;

		if (workspace->in_buf.pos == workspace->in_buf.size) {
			kunmap(pages_in[page_in_index++]);
			if (page_in_index >= total_pages_in) {
				workspace->in_buf.src = NULL;
				ret = -EIO;
				goto done;
			}
			srclen -= PAGE_SIZE;
			workspace->in_buf.src = kmap(pages_in[page_in_index]);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
		}
	}
	ret = 0;
	zero_fill_bio(orig_bio);
done:
	if (workspace->in_buf.src)
		kunmap(pages_in[page_in_index]);
	return ret;
}
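
/*
 * zstd_decompress - decompress into a single destination page
 *
 * Decompresses @srclen bytes from @data_in, discards output up to @start_byte,
 * and copies at most @destlen bytes into @dest_page. Any remaining part of the
 * page is zero filled.
 */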
static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page,
		unsigned long start_byte,
		size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	ZSTD_DStream *stream;
	int ret = 0;
	size_t ret2;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	stream = ZSTD_initDStream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_warn("BTRFS: ZSTD_initDStream failed\n");
		ret = -EIO;
		goto finish;
	}

	destlen = min_t(size_t, destlen, PAGE_SIZE);

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;

	ret2 = 1;
	while (pg_offset < destlen
	       && workspace->in_buf.pos < workspace->in_buf.size) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		/* Check if the frame is over and we still need more input */
		if (ret2 == 0) {
			pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
			ret = -EIO;
			goto finish;
		}
		ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto finish;
		}

		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		if (total_out <= start_byte)
			continue;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min_t(unsigned long, destlen - pg_offset,
			      workspace->out_buf.size - buf_offset);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->out_buf.dst + buf_offset,
		       bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
	}
	ret = 0;
finish:
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}

static unsigned int zstd_set_level(unsigned int level)
{
	if (!level)
		return ZSTD_BTRFS_DEFAULT_LEVEL;

	return min_t(unsigned int, level, ZSTD_BTRFS_MAX_LEVEL);
}

const struct btrfs_compress_op btrfs_zstd_compress = {
	.init_workspace_manager = zstd_init_workspace_manager,
	.cleanup_workspace_manager = zstd_cleanup_workspace_manager,
	.get_workspace = zstd_get_workspace,
	.put_workspace = zstd_put_workspace,
	.alloc_workspace = zstd_alloc_workspace,
	.free_workspace = zstd_free_workspace,
	.compress_pages = zstd_compress_pages,
	.decompress_bio = zstd_decompress_bio,
	.decompress = zstd_decompress,
	.set_level = zstd_set_level,
};