// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"
#include "fs.h"

/*
 * This contains the logic to handle async discard.
 *
 * Async discard manages trimming of free space outside of transaction commit.
 * Discarding is done by managing the block_groups on an LRU list based on free
 * space recency. Two passes are used: the first prioritizes discarding
 * extents, and the second handles the bitmaps, giving untrimmed regions the
 * best opportunity to coalesce first. The block_groups are maintained on
 * multiple lists to allow for multiple passes with different discard filter
 * requirements. A delayed work item is used to manage discarding, with its
 * timeout determined by the maximum of the delay incurred by the iops rate
 * limit, the byte rate limit, and the max delay of BTRFS_DISCARD_MAX_DELAY_MSEC.
 *
 * Note, this only keeps track of block_groups that are explicitly for data.
 * Mixed block_groups are not supported.
 *
 * The first list is special to manage discarding of fully free block groups.
 * This is necessary because we issue a final trim for a fully free block group
 * after forgetting it. When a block group becomes unused, instead of directly
 * being added to the unused_bgs list, we add it to this first list. Then
 * from there, if it becomes fully discarded, we place it onto the unused_bgs
 * list.
 *
 * The in-memory free space cache serves as the backing state for discard.
 * Consequently, this means there is no persistence. We opt to load all the
 * block groups in as not discarded, so the mount case degenerates to the
 * crashing case.
 *
 * As the free space cache uses bitmaps, there exists a tradeoff between
 * ease/efficiency for find_free_extent() and the accuracy of discard state.
 * Here we opt to let untrimmed regions merge with everything while only letting
 * trimmed regions merge with other trimmed regions. This can cause
 * overtrimming, but the coalescing benefit seems to be worth it. Additionally,
 * bitmap state is tracked as a whole. If we're able to fully trim a bitmap,
 * the trimmed flag is set on the bitmap. Otherwise, if an allocation comes in,
 * this resets the state and we will retry trimming the whole bitmap. This is a
 * tradeoff between discard state accuracy and the cost of accounting.
 */

/* This is an initial delay to give some chance for block reuse */
#define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)

/* Target completion latency of discarding all discardable extents */
#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
#define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(10U)

/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER
};

static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
	return &discard_ctl->discard_list[block_group->discard_index];
}

static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				  struct btrfs_block_group *block_group)
{
	if (!btrfs_run_discard_work(discard_ctl))
		return;

	if (list_empty(&block_group->discard_list) ||
	    block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED)
			block_group->discard_index = BTRFS_DISCARD_INDEX_START;
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);
		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	}

	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));
}

static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	if (!btrfs_is_block_group_data_only(block_group))
		return;

	spin_lock(&discard_ctl->lock);
	__add_to_discard_list(discard_ctl, block_group);
	spin_unlock(&discard_ctl->lock);
}

static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	spin_lock(&discard_ctl->lock);

	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	list_del_init(&block_group->discard_list);

	block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
	block_group->discard_eligible_time = (ktime_get_ns() +
					      BTRFS_DISCARD_UNUSED_DELAY);
	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	list_add_tail(&block_group->discard_list,
		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);

	spin_unlock(&discard_ctl->lock);
}

static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;

	spin_lock(&discard_ctl->lock);

	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	list_del_init(&block_group->discard_list);

	spin_unlock(&discard_ctl->lock);

	return running;
}

/*
 * Find block_group that's up next for discarding.
 *
 * @discard_ctl: discard control
 * @now:         current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding, checking the discard_eligible_time of each block_group.
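 *
 * Only the block group at the head of each list is considered. If any of
 * those heads are already past their discard_eligible_time, the one on the
 * lowest-index list wins; otherwise the head with the earliest
 * discard_eligible_time is returned.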
 */
static struct btrfs_block_group *find_next_block_group(
					struct btrfs_discard_ctl *discard_ctl,
					u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

			if (!ret_block_group)
				ret_block_group = block_group;

			if (ret_block_group->discard_eligible_time < now)
				break;

			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}

/*
 * Look up next block group and set it for use.
 *
 * @discard_ctl:   discard control
 * @discard_state: the discard_state of the block_group after state management
 * @discard_index: the discard_index of the block_group after state management
 * @now:           time when discard was invoked, in ns
 *
 * Wrap find_next_block_group() and set the block_group to be in use.
 * @discard_state's control flow is managed here. Variables related to
 * @discard_state are reset here as needed (e.g. @discard_cursor).
 * @discard_state and @discard_index are remembered as they may change while
 * we're discarding, but we want the discard to execute in the context
 * determined here.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl,
					enum btrfs_discard_state *discard_state,
					int *discard_index, u64 now)
{
	struct btrfs_block_group *block_group;

	spin_lock(&discard_ctl->lock);
again:
	block_group = find_next_block_group(discard_ctl, now);

	if (block_group && now >= block_group->discard_eligible_time) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
		    block_group->used != 0) {
			if (btrfs_is_block_group_data_only(block_group))
				__add_to_discard_list(discard_ctl, block_group);
			else
				list_del_init(&block_group->discard_list);
			goto again;
		}
		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
			block_group->discard_cursor = block_group->start;
			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
		}
		discard_ctl->block_group = block_group;
	}
	if (block_group) {
		*discard_state = block_group->discard_state;
		*discard_index = block_group->discard_index;
	}
	spin_unlock(&discard_ctl->lock);

	return block_group;
}

/*
 * Update a block group's filters.
 *
 * @block_group: block group of interest
 * @bytes:       recently freed region size after coalescing
 *
 * Async discard maintains multiple lists with progressively smaller filters
 * to prioritize discarding based on size. Should a free space that matches
 * a larger filter be returned to the free_space_cache, prioritize that discard
 * by moving @block_group to the proper filter.
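 *
 * For example, if @block_group has already progressed past
 * BTRFS_DISCARD_INDEX_START and a coalesced free region of at least
 * BTRFS_ASYNC_DISCARD_MAX_FILTER bytes comes back, the block group is moved
 * back onto the BTRFS_DISCARD_INDEX_START list.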
 */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
				u64 bytes)
{
	struct btrfs_discard_ctl *discard_ctl;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
	    bytes >= discard_minlen[block_group->discard_index - 1]) {
		int i;

		remove_from_discard_list(discard_ctl, block_group);

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
		     i++) {
			if (bytes >= discard_minlen[i]) {
				block_group->discard_index = i;
				add_to_discard_list(discard_ctl, block_group);
				break;
			}
		}
	}
}

/*
 * Move a block group along the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Increment @block_group's discard_index. If it falls off the list, let it be.
 * Otherwise add it back to the appropriate list.
 */
static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	block_group->discard_index++;
	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
		block_group->discard_index = 1;
		return;
	}

	add_to_discard_list(discard_ctl, block_group);
}

/*
 * Remove a block_group from the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Remove @block_group from the discard lists. If necessary, wait on the
 * current work and then reschedule the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}

/*
 * Handles queuing the block_groups.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Maintain the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	if (block_group->used == 0)
		add_to_discard_unused_list(discard_ctl, block_group);
	else
		add_to_discard_list(discard_ctl, block_group);

	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}

static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
					  u64 now, bool override)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_run_discard_work(discard_ctl))
		return;
	if (!override && delayed_work_pending(&discard_ctl->work))
		return;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		u64 delay = discard_ctl->delay_ms * NSEC_PER_MSEC;
		u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit);

		/*
		 * A single delayed workqueue item is responsible for
		 * discarding, so we can manage the bytes rate limit by keeping
		 * track of the previous discard.
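		 *
		 * For example, with kbps_limit set to 100 and a previous
		 * discard of 1MiB, bps_delay works out to
		 * 1048576 * NSEC_PER_SEC / (100 * 1024) ~= 10.24s, which then
		 * dominates the iops-based delay via the max() below.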
		 */
		if (kbps_limit && discard_ctl->prev_discard) {
			u64 bps_limit = ((u64)kbps_limit) * SZ_1K;
			u64 bps_delay = div64_u64(discard_ctl->prev_discard *
						  NSEC_PER_SEC, bps_limit);

			delay = max(delay, bps_delay);
		}

		/*
		 * This timeout is to hopefully prevent immediate discarding
		 * in a recently allocated block group.
		 */
		if (now < block_group->discard_eligible_time) {
			u64 bg_timeout = block_group->discard_eligible_time - now;

			delay = max(delay, bg_timeout);
		}

		if (override && discard_ctl->prev_discard) {
			u64 elapsed = now - discard_ctl->prev_discard_time;

			if (delay > elapsed)
				delay -= elapsed;
			else
				delay = 0;
		}

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, nsecs_to_jiffies(delay));
	}
}

/*
 * Responsible for scheduling the discard work.
 *
 * @discard_ctl: discard control
 * @override:    override the current timer
 *
 * Discards are issued by a delayed workqueue item. @override is used to
 * update the current delay as the baseline delay interval is reevaluated on
 * transaction commit. This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);
	__btrfs_discard_schedule_work(discard_ctl, now, override);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine next step of a block_group.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Determine the next step for a block group after it's finished going through
 * a pass on a discard list. If it is unused and fully trimmed, we can mark it
 * unused and send it to the unused_bgs path. Otherwise, pass it onto the
 * appropriate filter list or let it fall off.
 */
static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
				      struct btrfs_block_group *block_group)
{
	remove_from_discard_list(discard_ctl, block_group);

	if (block_group->used == 0) {
		if (btrfs_is_free_space_trimmed(block_group))
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
	} else {
		btrfs_update_discard_index(discard_ctl, block_group);
	}
}

/*
 * Discard work queue callback
 *
 * @work: work
 *
 * Find the next block_group to start discarding and then discard a single
 * region. It does this in a two-pass fashion: first extents and second
 * bitmaps. Completely discarded block groups are sent to the unused_bgs path.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
	int discard_index = 0;
	u64 trimmed = 0;
	u64 minlen = 0;
	u64 now = ktime_get_ns();

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl, &discard_state,
					&discard_index, now);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;
	if (now < block_group->discard_eligible_time) {
		btrfs_discard_schedule_work(discard_ctl, false);
		return;
	}

	/* Perform discarding */
	minlen = discard_minlen[discard_index];

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/*
		 * Use the previous level's minimum discard length as the max
		 * length filter. In the case something is added to make a
		 * region go beyond the max filter, the entire bitmap is set
		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
		 */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
					       block_group->discard_cursor,
					       btrfs_block_group_end(block_group),
					       minlen, maxlen, true);
		discard_ctl->discard_bitmap_bytes += trimmed;
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
					       block_group->discard_cursor,
					       btrfs_block_group_end(block_group),
					       minlen, true);
		discard_ctl->discard_extent_bytes += trimmed;
	}

	/* Determine next steps for a block_group */
	if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
		if (discard_state == BTRFS_DISCARD_BITMAPS) {
			btrfs_finish_discard_pass(discard_ctl, block_group);
		} else {
			block_group->discard_cursor = block_group->start;
			spin_lock(&discard_ctl->lock);
			if (block_group->discard_state !=
			    BTRFS_DISCARD_RESET_CURSOR)
				block_group->discard_state =
							BTRFS_DISCARD_BITMAPS;
			spin_unlock(&discard_ctl->lock);
		}
	}

	now = ktime_get_ns();
	spin_lock(&discard_ctl->lock);
	discard_ctl->prev_discard = trimmed;
	discard_ctl->prev_discard_time = now;
	discard_ctl->block_group = NULL;
	__btrfs_discard_schedule_work(discard_ctl, now, false);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine if async discard should be running.
 *
 * @discard_ctl: discard control
 *
 * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}

/*
 * Recalculate the base delay.
 *
 * @discard_ctl: discard control
 *
 * Recalculate the base delay which is based off the total number of
 * discardable_extents. Clamp this between the lower_limit (iops_limit or 1ms)
 * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC).
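 *
 * For example, with the default iops_limit of 10 the base delay is
 * MSEC_PER_SEC / 10 = 100ms per extent. With iops_limit set to 0 and 100,000
 * discardable extents, the delay is BTRFS_DISCARD_TARGET_MSEC / 100000 =
 * 216ms, which falls within the [1ms, 1000ms] clamp.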
 */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
{
	s32 discardable_extents;
	s64 discardable_bytes;
	u32 iops_limit;
	unsigned long delay;

	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
	if (!discardable_extents)
		return;

	spin_lock(&discard_ctl->lock);

	/*
	 * The following is to fix a potential -1 discrepancy that we're not
	 * sure how to reproduce. But given that this is the only place that
	 * utilizes these numbers and this is only called from
	 * btrfs_finish_extent_commit(), which is synchronized, we can correct
	 * it here.
	 */
	if (discardable_extents < 0)
		atomic_add(-discardable_extents,
			   &discard_ctl->discardable_extents);

	discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes);
	if (discardable_bytes < 0)
		atomic64_add(-discardable_bytes,
			     &discard_ctl->discardable_bytes);

	if (discardable_extents <= 0) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	iops_limit = READ_ONCE(discard_ctl->iops_limit);
	if (iops_limit)
		delay = MSEC_PER_SEC / iops_limit;
	else
		delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;

	delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
		      BTRFS_DISCARD_MAX_DELAY_MSEC);
	discard_ctl->delay_ms = delay;

	spin_unlock(&discard_ctl->lock);
}

/*
 * Propagate discard counters.
 *
 * @block_group: block_group of interest
 *
 * Propagate deltas of counters up to the discard_ctl. It maintains a current
 * counter and a previous counter, passing the delta up to the global stat.
 * Then the current counter value becomes the previous counter value.
 */
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl;
	struct btrfs_discard_ctl *discard_ctl;
	s32 extents_delta;
	s64 bytes_delta;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
	    !btrfs_is_block_group_data_only(block_group))
		return;

	ctl = block_group->free_space_ctl;
	discard_ctl = &block_group->fs_info->discard_ctl;

	lockdep_assert_held(&ctl->tree_lock);
	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
			ctl->discardable_extents[BTRFS_STAT_PREV];
	if (extents_delta) {
		atomic_add(extents_delta, &discard_ctl->discardable_extents);
		ctl->discardable_extents[BTRFS_STAT_PREV] =
			ctl->discardable_extents[BTRFS_STAT_CURR];
	}

	bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] -
		      ctl->discardable_bytes[BTRFS_STAT_PREV];
	if (bytes_delta) {
		atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
		ctl->discardable_bytes[BTRFS_STAT_PREV] =
			ctl->discardable_bytes[BTRFS_STAT_CURR];
	}
}

/*
 * Punt unused_bgs list to discard lists.
 *
 * @fs_info: fs_info of interest
 *
 * The unused_bgs list needs to be punted to the discard lists because the
 * order of operations is changed. In the normal synchronous discard path, the
 * block groups are trimmed via a single large trim in transaction commit. This
 * is ultimately what we are trying to avoid with asynchronous discard. Thus,
 * it must be done before going down the unused_bgs path.
 */
void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group, *next;

	spin_lock(&fs_info->unused_bgs_lock);
	/* We enabled async discard, so punt all to the queue */
	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
				 bg_list) {
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * Purge discard lists.
 *
 * @discard_ctl: discard control
 *
 * If we are disabling async discard, we may have intercepted block groups that
 * are completely free and ready for the unused_bgs path. As discarding will
 * now happen in transaction commit or not at all, we can safely mark the
 * corresponding block groups as unused and they will be sent on their merry
 * way to the unused_bgs list.
 */
static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group, *next;
	int i;

	spin_lock(&discard_ctl->lock);
	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		list_for_each_entry_safe(block_group, next,
					 &discard_ctl->discard_list[i],
					 discard_list) {
			list_del_init(&block_group->discard_list);
			spin_unlock(&discard_ctl->lock);
			if (block_group->used == 0)
				btrfs_mark_bg_unused(block_group);
			spin_lock(&discard_ctl->lock);
		}
	}
	spin_unlock(&discard_ctl->lock);
}

void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_cleanup(fs_info);
		return;
	}

	btrfs_discard_punt_unused_bgs_list(fs_info);

	set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_init(struct btrfs_fs_info *fs_info)
{
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	int i;

	spin_lock_init(&discard_ctl->lock);
	INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);

	discard_ctl->prev_discard = 0;
	discard_ctl->prev_discard_time = 0;
	atomic_set(&discard_ctl->discardable_extents, 0);
	atomic64_set(&discard_ctl->discardable_bytes, 0);
	discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE;
	discard_ctl->delay_ms = BTRFS_DISCARD_MAX_DELAY_MSEC;
	discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
	discard_ctl->kbps_limit = 0;
	discard_ctl->discard_extent_bytes = 0;
	discard_ctl->discard_bitmap_bytes = 0;
	atomic64_set(&discard_ctl->discard_bytes_saved, 0);
}

void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
	btrfs_discard_purge_list(&fs_info->discard_ctl);
}