fs/btrfs/space-info.c — lines matching "non", "-", "urgent"
1 // SPDX-License-Identifier: GPL-2.0
7 #include "space-info.h"
10 #include "free-space-cache.h"
11 #include "ordered-data.h"
13 #include "block-group.h"
16 #include "extent-tree.h"
31 * reservations we care about total_bytes - SUM(space_info->bytes_) when
37 * code on the rules for each type, but generally block_rsv->reserved is how
38 * much space is accounted for in space_info->bytes_may_use.
52 * ->reserve
53 * space_info->bytes_may_use += num_bytes
55 * ->extent allocation
57 * space_info->bytes_may_use -= num_bytes
58 * space_info->bytes_reserved += extent_bytes
60 * ->insert reference
62 * space_info->bytes_reserved -= extent_bytes
63 * space_info->bytes_used += extent_bytes
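
The three transitions above are easiest to see as plain arithmetic. Below is a minimal userspace sketch (editor's illustration, not kernel code; the struct, helper names and byte values are invented) of how a worst-case metadata reservation migrates across the three counters:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct model_space_info {
	uint64_t bytes_may_use;   /* optimistic, worst-case reservations */
	uint64_t bytes_reserved;  /* allocated extents, not yet referenced */
	uint64_t bytes_used;      /* extents with on-disk references */
};

int main(void)
{
	struct model_space_info si = { 0, 0, 0 };
	uint64_t num_bytes = 1048576;   /* worst-case reservation */
	uint64_t extent_bytes = 16384;  /* what the allocation actually took */

	/* ->reserve */
	si.bytes_may_use += num_bytes;

	/* ->extent allocation: drop the worst case, account the real extent */
	si.bytes_may_use -= num_bytes;
	si.bytes_reserved += extent_bytes;

	/* ->insert reference: the extent is now really used */
	si.bytes_reserved -= extent_bytes;
	si.bytes_used += extent_bytes;

	assert(si.bytes_may_use == 0 && si.bytes_reserved == 0);
	printf("bytes_used = %llu\n", (unsigned long long)si.bytes_used);
	return 0;
}
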
65 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
70 * -> __reserve_bytes
71 * create a reserve_ticket with ->bytes set to our reservation, add it to
72 * the tail of space_info->tickets, kick async flush thread
74 * ->handle_reserve_ticket
75 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
78 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
81 * -> btrfs_try_granting_tickets()
83 * space_info->bytes_may_use, ->bytes_pinned, etc., or adds to the
84 * space_info->total_bytes. This loops through the ->priority_tickets and
85 * then the ->tickets list checking to see if the reservation can be
86 * completed. If it can the space is added to space_info->bytes_may_use and
89 * -> ticket wakeup
90 * Check if ->bytes == 0, if it does we got our reservation and we can carry
97 * space_info->priority_tickets, and we do not use ticket->wait, we simply
152 * out of a pre-tickets era where we could end up committing the transaction
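
As a rough illustration of the ticketing described above, here is an editor's userspace sketch. The array-based queue and the numbers are invented; the real code walks space_info->priority_tickets before space_info->tickets under space_info->lock and also consults btrfs_can_overcommit(), both elided here:

#include <stdint.h>
#include <stdio.h>

struct model_ticket {
	uint64_t bytes;	/* remaining to satisfy; 0 means granted */
};

/* FIFO granting: stop at the first ticket that does not fit, so a
 * large reservation at the head cannot be starved by smaller ones. */
static void model_try_granting(struct model_ticket *t, int n,
			       uint64_t *bytes_may_use, uint64_t total_bytes)
{
	for (int i = 0; i < n; i++) {
		if (t[i].bytes == 0)
			continue;
		if (*bytes_may_use + t[i].bytes > total_bytes)
			break;
		*bytes_may_use += t[i].bytes;
		t[i].bytes = 0;	/* the real code wakes ticket->wait here */
	}
}

int main(void)
{
	struct model_ticket tickets[3] = { { 60 }, { 30 }, { 30 } };
	uint64_t bytes_may_use = 0;

	model_try_granting(tickets, 3, &bytes_may_use, 100);
	for (int i = 0; i < 3; i++)	/* 60 and 30 fit; the last 30 waits */
		printf("ticket %d remaining: %llu\n", i,
		       (unsigned long long)tickets[i].bytes);
	return 0;
}
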
179 return s_info->bytes_used + s_info->bytes_reserved + in btrfs_space_info_used()
180 s_info->bytes_pinned + s_info->bytes_readonly + in btrfs_space_info_used()
181 s_info->bytes_zone_unusable + in btrfs_space_info_used()
182 (may_use_included ? s_info->bytes_may_use : 0); in btrfs_space_info_used()
191 struct list_head *head = &info->space_info; in btrfs_clear_space_info_full()
195 found->full = 0; in btrfs_clear_space_info_full()
212 return fs_info->zone_size; in calc_chunk_size()
222 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) in calc_chunk_size()
234 WRITE_ONCE(space_info->chunk_size, chunk_size); in btrfs_update_space_info_chunk_size()
240 space_info->fs_info = info; in init_space_info()
242 INIT_LIST_HEAD(&space_info->block_groups[i]); in init_space_info()
243 init_rwsem(&space_info->groups_sem); in init_space_info()
244 spin_lock_init(&space_info->lock); in init_space_info()
245 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; in init_space_info()
246 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in init_space_info()
247 INIT_LIST_HEAD(&space_info->ro_bgs); in init_space_info()
248 INIT_LIST_HEAD(&space_info->tickets); in init_space_info()
249 INIT_LIST_HEAD(&space_info->priority_tickets); in init_space_info()
250 space_info->clamp = 1; in init_space_info()
252 space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY; in init_space_info()
255 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; in init_space_info()
261 struct btrfs_fs_info *fs_info = parent->fs_info; in create_space_info_sub_group()
265 ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY); in create_space_info_sub_group()
270 return -ENOMEM; in create_space_info_sub_group()
273 parent->sub_group[index] = sub_group; in create_space_info_sub_group()
274 sub_group->parent = parent; in create_space_info_sub_group()
275 sub_group->subgroup_id = id; in create_space_info_sub_group()
280 parent->sub_group[index] = NULL; in create_space_info_sub_group()
293 return -ENOMEM; in create_space_info()
315 list_add(&space_info->list, &info->space_info); in create_space_info()
317 info->data_sinfo = space_info; in create_space_info()
330 disk_super = fs_info->super_copy; in btrfs_init_space_info()
332 return -EINVAL; in btrfs_init_space_info()
362 struct btrfs_space_info *space_info = block_group->space_info; in btrfs_add_bg_to_space_info()
365 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_add_bg_to_space_info()
367 spin_lock(&space_info->lock); in btrfs_add_bg_to_space_info()
368 space_info->total_bytes += block_group->length; in btrfs_add_bg_to_space_info()
369 space_info->disk_total += block_group->length * factor; in btrfs_add_bg_to_space_info()
370 space_info->bytes_used += block_group->used; in btrfs_add_bg_to_space_info()
371 space_info->disk_used += block_group->used * factor; in btrfs_add_bg_to_space_info()
372 space_info->bytes_readonly += block_group->bytes_super; in btrfs_add_bg_to_space_info()
373 btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable); in btrfs_add_bg_to_space_info()
374 if (block_group->length > 0) in btrfs_add_bg_to_space_info()
375 space_info->full = 0; in btrfs_add_bg_to_space_info()
377 spin_unlock(&space_info->lock); in btrfs_add_bg_to_space_info()
379 block_group->space_info = space_info; in btrfs_add_bg_to_space_info()
381 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_add_bg_to_space_info()
382 down_write(&space_info->groups_sem); in btrfs_add_bg_to_space_info()
383 list_add_tail(&block_group->list, &space_info->block_groups[index]); in btrfs_add_bg_to_space_info()
384 up_write(&space_info->groups_sem); in btrfs_add_bg_to_space_info()
390 struct list_head *head = &info->space_info; in btrfs_find_space_info()
396 if (found->flags & flags) in btrfs_find_space_info()
408 * Calculate the data_chunk_size, space_info->chunk_size is the in calc_effective_data_chunk_size()
413 * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size) in calc_effective_data_chunk_size()
418 return data_sinfo->chunk_size; in calc_effective_data_chunk_size()
419 data_chunk_size = min(data_sinfo->chunk_size, in calc_effective_data_chunk_size()
420 mult_perc(fs_info->fs_devices->total_rw_bytes, 10)); in calc_effective_data_chunk_size()
433 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in calc_available_free_space()
438 avail = atomic64_read(&fs_info->free_chunk_space); in calc_available_free_space()
468 avail -= data_chunk_size; in calc_available_free_space()
482 * Returning non-zone size aligned bytes here will result in in calc_available_free_space()
484 * will over-commit too much leading to ENOSPC. Align down to the in calc_available_free_space()
488 avail = ALIGN_DOWN(avail, fs_info->zone_size); in calc_available_free_space()
501 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_can_overcommit()
507 if (used + bytes < space_info->total_bytes + avail) in btrfs_can_overcommit()
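
The inequality at line 507 is the heart of metadata overcommit: a reservation may exceed what is currently allocated into chunks as long as unallocated device space (reduced for the RAID profile by calc_available_free_space() above) could still back it. A standalone sketch with invented numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool model_can_overcommit(uint64_t used, uint64_t bytes,
				 uint64_t total_bytes, uint64_t avail)
{
	return used + bytes < total_bytes + avail;
}

int main(void)
{
	const uint64_t GiB = 1024ULL * 1024 * 1024;

	/* 8 GiB allocated, 7.5 GiB used, 4 GiB unallocated on disk:
	 * a 1 GiB reservation overcommits the chunks but is allowed. */
	printf("%s\n", model_can_overcommit(7680ULL << 20, 1 * GiB,
					    8 * GiB, 4 * GiB) ?
	       "can overcommit" : "ENOSPC path");
	return 0;
}
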
515 if (!list_empty(&ticket->list)) { in remove_ticket()
516 list_del_init(&ticket->list); in remove_ticket()
517 ASSERT(space_info->reclaim_size >= ticket->bytes); in remove_ticket()
518 space_info->reclaim_size -= ticket->bytes; in remove_ticket()
523 * This is for space we already have accounted in space_info->bytes_may_use, so
532 lockdep_assert_held(&space_info->lock); in btrfs_try_granting_tickets()
534 head = &space_info->priority_tickets; in btrfs_try_granting_tickets()
543 if ((used + ticket->bytes <= space_info->total_bytes) || in btrfs_try_granting_tickets()
544 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, in btrfs_try_granting_tickets()
546 btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes); in btrfs_try_granting_tickets()
548 ticket->bytes = 0; in btrfs_try_granting_tickets()
549 space_info->tickets_id++; in btrfs_try_granting_tickets()
550 wake_up(&ticket->wait); in btrfs_try_granting_tickets()
556 if (head == &space_info->priority_tickets) { in btrfs_try_granting_tickets()
557 head = &space_info->tickets; in btrfs_try_granting_tickets()
565 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
566 spin_lock(&__rsv->lock); \
568 __rsv->size, __rsv->reserved); \
569 spin_unlock(&__rsv->lock); \
574 switch (space_info->flags) { in space_info_flag_to_str()
601 lockdep_assert_held(&info->lock); in __btrfs_dump_space_info()
605 "space_info %s (sub-group id %d) has %lld free, is %sfull", in __btrfs_dump_space_info()
606 flag_str, info->subgroup_id, in __btrfs_dump_space_info()
607 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), in __btrfs_dump_space_info()
608 info->full ? "" : "not "); in __btrfs_dump_space_info()
611 info->total_bytes, info->bytes_used, info->bytes_pinned, in __btrfs_dump_space_info()
612 info->bytes_reserved, info->bytes_may_use, in __btrfs_dump_space_info()
613 info->bytes_readonly, info->bytes_zone_unusable); in __btrfs_dump_space_info()
624 spin_lock(&info->lock); in btrfs_dump_space_info()
627 spin_unlock(&info->lock); in btrfs_dump_space_info()
632 down_read(&info->groups_sem); in btrfs_dump_space_info()
634 list_for_each_entry(cache, &info->block_groups[index], list) { in btrfs_dump_space_info()
637 spin_lock(&cache->lock); in btrfs_dump_space_info()
638 avail = cache->length - cache->used - cache->pinned - in btrfs_dump_space_info()
639 cache->reserved - cache->bytes_super - cache->zone_unusable; in btrfs_dump_space_info()
642 cache->start, cache->length, cache->used, cache->pinned, in btrfs_dump_space_info()
643 cache->reserved, cache->delalloc_bytes, in btrfs_dump_space_info()
644 cache->bytes_super, cache->zone_unusable, in btrfs_dump_space_info()
645 avail, cache->ro ? "[readonly]" : ""); in btrfs_dump_space_info()
646 spin_unlock(&cache->lock); in btrfs_dump_space_info()
652 up_read(&info->groups_sem); in btrfs_dump_space_info()
685 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in shrink_delalloc()
686 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); in shrink_delalloc()
710 trans = current->journal_info; in shrink_delalloc()
749 async_pages = atomic_read(&fs_info->async_delalloc_pages); in shrink_delalloc()
760 async_pages -= nr_pages; in shrink_delalloc()
763 wait_event(fs_info->async_submit_wait, in shrink_delalloc()
764 atomic_read(&fs_info->async_delalloc_pages) <= in shrink_delalloc()
777 * If we are flushing for preemption we just want a one-shot of delalloc in shrink_delalloc()
784 spin_lock(&space_info->lock); in shrink_delalloc()
785 if (list_empty(&space_info->tickets) && in shrink_delalloc()
786 list_empty(&space_info->priority_tickets)) { in shrink_delalloc()
787 spin_unlock(&space_info->lock); in shrink_delalloc()
790 spin_unlock(&space_info->lock); in shrink_delalloc()
793 &fs_info->delalloc_bytes); in shrink_delalloc()
795 &fs_info->ordered_bytes); in shrink_delalloc()
808 struct btrfs_root *root = fs_info->tree_root; in flush_space()
819 nr = -1; in flush_space()
824 if (ret == -ENOENT) in flush_space()
844 if (ret == -ENOENT) in flush_space()
862 btrfs_get_alloc_profile(fs_info, space_info->flags), in flush_space()
867 if (ret > 0 || ret == -ENOSPC) in flush_space()
880 ASSERT(current->journal_info == NULL); in flush_space()
894 ret = -ENOSPC; in flush_space()
898 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, in flush_space()
908 u64 to_reclaim = space_info->reclaim_size; in btrfs_calc_reclaim_metadata_size()
910 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_metadata_size()
918 * before, and now we're well over-committed based on our current free in btrfs_calc_reclaim_metadata_size()
922 if (space_info->total_bytes + avail < used) in btrfs_calc_reclaim_metadata_size()
923 to_reclaim += used - (space_info->total_bytes + avail); in btrfs_calc_reclaim_metadata_size()
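
A worked example of the adjustment at lines 922-923 (editor's invented numbers):

/*
 * total_bytes = 8 GiB, avail = 1 GiB, used = 10 GiB
 * => over-committed by 10 - (8 + 1) = 1 GiB, so that extra 1 GiB is
 *    added to to_reclaim on top of the queued reclaim_size.
 */
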
931 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); in need_preemptive_reclaim()
936 thresh = mult_perc(space_info->total_bytes, 90); in need_preemptive_reclaim()
938 lockdep_assert_held(&space_info->lock); in need_preemptive_reclaim()
941 if ((space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
945 used = space_info->bytes_may_use + space_info->bytes_pinned; in need_preemptive_reclaim()
956 if (used - global_rsv_size <= SZ_128M) in need_preemptive_reclaim()
963 if (space_info->reclaim_size) in need_preemptive_reclaim()
980 * Our clamping range is 2^1 -> 2^8. Practically speaking that means in need_preemptive_reclaim()
997 used = space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
998 space_info->bytes_readonly + global_rsv_size; in need_preemptive_reclaim()
999 if (used < space_info->total_bytes) in need_preemptive_reclaim()
1000 thresh += space_info->total_bytes - used; in need_preemptive_reclaim()
1001 thresh >>= space_info->clamp; in need_preemptive_reclaim()
1003 used = space_info->bytes_pinned; in need_preemptive_reclaim()
1028 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; in need_preemptive_reclaim()
1029 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); in need_preemptive_reclaim()
1031 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) + in need_preemptive_reclaim()
1032 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv); in need_preemptive_reclaim()
1034 used += space_info->bytes_may_use - global_rsv_size; in need_preemptive_reclaim()
1037 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); in need_preemptive_reclaim()
1044 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in steal_from_global_rsv()
1047 if (!ticket->steal) in steal_from_global_rsv()
1050 if (global_rsv->space_info != space_info) in steal_from_global_rsv()
1053 spin_lock(&global_rsv->lock); in steal_from_global_rsv()
1054 min_bytes = mult_perc(global_rsv->size, 10); in steal_from_global_rsv()
1055 if (global_rsv->reserved < min_bytes + ticket->bytes) { in steal_from_global_rsv()
1056 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
1059 global_rsv->reserved -= ticket->bytes; in steal_from_global_rsv()
1061 ticket->bytes = 0; in steal_from_global_rsv()
1062 wake_up(&ticket->wait); in steal_from_global_rsv()
1063 space_info->tickets_id++; in steal_from_global_rsv()
1064 if (global_rsv->reserved < global_rsv->size) in steal_from_global_rsv()
1065 global_rsv->full = 0; in steal_from_global_rsv()
1066 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
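
A compact model of the stealing rule above (editor's sketch; mult_perc(global_rsv->size, 10) is the 10% floor that must remain in the global reserve):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool model_steal(uint64_t *rsv_reserved, uint64_t rsv_size,
			uint64_t ticket_bytes)
{
	uint64_t min_bytes = rsv_size / 10;	/* keep >= 10% behind */

	if (*rsv_reserved < min_bytes + ticket_bytes)
		return false;
	*rsv_reserved -= ticket_bytes;
	return true;	/* the real code zeroes ticket->bytes and wakes it */
}

int main(void)
{
	uint64_t reserved = 90, size = 100;

	printf("%d\n", model_steal(&reserved, size, 70));	/* 1: 20 left */
	printf("%d\n", model_steal(&reserved, size, 15));	/* 0: would leave 5 */
	return 0;
}
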
1074 * @fs_info: fs_info for this fs
1075 * @space_info: the space info we were flushing
1091 u64 tickets_id = space_info->tickets_id; in maybe_fail_all_tickets()
1101 while (!list_empty(&space_info->tickets) && in maybe_fail_all_tickets()
1102 tickets_id == space_info->tickets_id) { in maybe_fail_all_tickets()
1103 ticket = list_first_entry(&space_info->tickets, in maybe_fail_all_tickets()
1111 ticket->bytes); in maybe_fail_all_tickets()
1115 ticket->error = -EIO; in maybe_fail_all_tickets()
1117 ticket->error = -ENOSPC; in maybe_fail_all_tickets()
1118 wake_up(&ticket->wait); in maybe_fail_all_tickets()
1129 return (tickets_id != space_info->tickets_id); in maybe_fail_all_tickets()
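
Heavily simplified, the pattern above reduces to the sketch below (editor's model with invented types; the real loop re-checks tickets_id after each ticket because stealing from the global rsv can still grant one, and uses -EIO instead when the fs is already errored):

#include <stdbool.h>
#include <stdio.h>

#define MODEL_ENOSPC 28

struct model_ticket { int error; };

/* Fail every queued ticket only when flushing made no progress,
 * i.e. tickets_id did not move during the flush cycles. */
static bool model_maybe_fail_all(struct model_ticket *q, int n,
				 unsigned before, unsigned now)
{
	if (before != now)
		return true;	/* progress was made, keep flushing */
	for (int i = 0; i < n; i++)
		q[i].error = -MODEL_ENOSPC;
	return false;
}

int main(void)
{
	struct model_ticket q[2] = { { 0 }, { 0 } };

	printf("%d\n", model_maybe_fail_all(q, 2, 5, 5));	/* 0: all failed */
	printf("%d\n", q[0].error);				/* -28 */
	return 0;
}
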
1134 struct btrfs_fs_info *fs_info = space_info->fs_info; in do_async_reclaim_metadata_space()
1146 spin_lock(&space_info->lock); in do_async_reclaim_metadata_space()
1149 space_info->flush = 0; in do_async_reclaim_metadata_space()
1150 spin_unlock(&space_info->lock); in do_async_reclaim_metadata_space()
1153 last_tickets_id = space_info->tickets_id; in do_async_reclaim_metadata_space()
1154 spin_unlock(&space_info->lock); in do_async_reclaim_metadata_space()
1159 spin_lock(&space_info->lock); in do_async_reclaim_metadata_space()
1160 if (list_empty(&space_info->tickets)) { in do_async_reclaim_metadata_space()
1161 space_info->flush = 0; in do_async_reclaim_metadata_space()
1162 spin_unlock(&space_info->lock); in do_async_reclaim_metadata_space()
1167 if (last_tickets_id == space_info->tickets_id) { in do_async_reclaim_metadata_space()
1170 last_tickets_id = space_info->tickets_id; in do_async_reclaim_metadata_space()
1173 commit_cycles--; in do_async_reclaim_metadata_space()
1202 commit_cycles--; in do_async_reclaim_metadata_space()
1204 space_info->flush = 0; in do_async_reclaim_metadata_space()
1210 spin_unlock(&space_info->lock); in do_async_reclaim_metadata_space()
1228 if (space_info->sub_group[i]) in btrfs_async_reclaim_metadata_space()
1229 do_async_reclaim_metadata_space(space_info->sub_group[i]); in btrfs_async_reclaim_metadata_space()
1234 * This handles pre-flushing of metadata space before we get to the point that
1254 delayed_block_rsv = &fs_info->delayed_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1255 delayed_refs_rsv = &fs_info->delayed_refs_rsv; in btrfs_preempt_reclaim_metadata_space()
1256 global_rsv = &fs_info->global_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1257 trans_rsv = &fs_info->trans_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1259 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1279 if (block_rsv_size < space_info->bytes_may_use) in btrfs_preempt_reclaim_metadata_space()
1280 delalloc_size = space_info->bytes_may_use - block_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1287 block_rsv_size -= global_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1297 } else if (space_info->bytes_pinned > in btrfs_preempt_reclaim_metadata_space()
1300 to_reclaim = space_info->bytes_pinned; in btrfs_preempt_reclaim_metadata_space()
1311 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1323 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1327 if (loops == 1 && !space_info->reclaim_size) in btrfs_preempt_reclaim_metadata_space()
1328 space_info->clamp = max(1, space_info->clamp - 1); in btrfs_preempt_reclaim_metadata_space()
1330 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1342 * length to ->bytes_reserved, and subtracts the reserved space from
1343 * ->bytes_may_use.
1380 struct btrfs_fs_info *fs_info = space_info->fs_info; in do_async_reclaim_data_space()
1384 spin_lock(&space_info->lock); in do_async_reclaim_data_space()
1385 if (list_empty(&space_info->tickets)) { in do_async_reclaim_data_space()
1386 space_info->flush = 0; in do_async_reclaim_data_space()
1387 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1390 last_tickets_id = space_info->tickets_id; in do_async_reclaim_data_space()
1391 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1393 while (!space_info->full) { in do_async_reclaim_data_space()
1395 spin_lock(&space_info->lock); in do_async_reclaim_data_space()
1396 if (list_empty(&space_info->tickets)) { in do_async_reclaim_data_space()
1397 space_info->flush = 0; in do_async_reclaim_data_space()
1398 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1405 last_tickets_id = space_info->tickets_id; in do_async_reclaim_data_space()
1406 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1412 spin_lock(&space_info->lock); in do_async_reclaim_data_space()
1413 if (list_empty(&space_info->tickets)) { in do_async_reclaim_data_space()
1414 space_info->flush = 0; in do_async_reclaim_data_space()
1415 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1419 if (last_tickets_id == space_info->tickets_id) { in do_async_reclaim_data_space()
1422 last_tickets_id = space_info->tickets_id; in do_async_reclaim_data_space()
1427 if (space_info->full) { in do_async_reclaim_data_space()
1431 space_info->flush = 0; in do_async_reclaim_data_space()
1441 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1447 space_info->flush = 0; in do_async_reclaim_data_space()
1448 spin_unlock(&space_info->lock); in do_async_reclaim_data_space()
1457 space_info = fs_info->data_sinfo; in btrfs_async_reclaim_data_space()
1460 if (space_info->sub_group[i]) in btrfs_async_reclaim_data_space()
1461 do_async_reclaim_data_space(space_info->sub_group[i]); in btrfs_async_reclaim_data_space()
1466 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); in btrfs_init_async_reclaim_work()
1467 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); in btrfs_init_async_reclaim_work()
1468 INIT_WORK(&fs_info->preempt_reclaim_work, in btrfs_init_async_reclaim_work()
1501 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1506 * left non-priority tickets on the list. We would then have in priority_reclaim_metadata_space()
1507 * to_reclaim but ->bytes == 0. in priority_reclaim_metadata_space()
1509 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1510 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1515 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1519 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1520 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1521 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1530 * success to the caller if we can steal from the global rsv - this is in priority_reclaim_metadata_space()
1532 * modify the fs, making it easier to debug -ENOSPC problems. in priority_reclaim_metadata_space()
1535 ticket->error = BTRFS_FS_ERROR(fs_info); in priority_reclaim_metadata_space()
1538 ticket->error = -ENOSPC; in priority_reclaim_metadata_space()
1548 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1555 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1558 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1559 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1563 while (!space_info->full) { in priority_reclaim_data_space()
1564 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1566 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1567 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1568 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1573 ticket->error = -ENOSPC; in priority_reclaim_data_space()
1576 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1586 spin_lock(&space_info->lock); in wait_reserve_ticket()
1587 while (ticket->bytes > 0 && ticket->error == 0) { in wait_reserve_ticket()
1588 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); in wait_reserve_ticket()
1599 ticket->error = -EINTR; in wait_reserve_ticket()
1602 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1606 finish_wait(&ticket->wait, &wait); in wait_reserve_ticket()
1607 spin_lock(&space_info->lock); in wait_reserve_ticket()
1609 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1657 ret = ticket->error; in handle_reserve_ticket()
1658 ASSERT(list_empty(&ticket->list)); in handle_reserve_ticket()
1665 ASSERT(!(ticket->bytes == 0 && ticket->error)); in handle_reserve_ticket()
1666 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, in handle_reserve_ticket()
1667 start_ns, flush, ticket->error); in handle_reserve_ticket()
1684 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); in maybe_clamp_preempt()
1685 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in maybe_clamp_preempt()
1696 space_info->clamp = min(space_info->clamp + 1, 8); in maybe_clamp_preempt()
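
The clamp ties lines 980, 1001 and 1696 together: each time flushing has to loop, the preemptive-flush threshold is halved again, down to 1/256 of the slack. A standalone sketch (editor's illustration, invented threshold value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t thresh = 1024ULL << 20;	/* 1 GiB of slack */

	/* mirrors "thresh >>= space_info->clamp" with clamp in [1, 8] */
	for (int clamp = 1; clamp <= 8; clamp++)
		printf("clamp=%d -> preempt threshold=%llu MiB\n", clamp,
		       (unsigned long long)((thresh >> clamp) >> 20));
	return 0;
}
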
1738 int ret = -ENOSPC; in __reserve_bytes()
1743 * If have a transaction handle (current->journal_info != NULL), then in __reserve_bytes()
1748 if (current->journal_info) { in __reserve_bytes()
1756 async_work = &fs_info->async_data_reclaim_work; in __reserve_bytes()
1758 async_work = &fs_info->async_reclaim_work; in __reserve_bytes()
1760 spin_lock(&space_info->lock); in __reserve_bytes()
1769 pending_tickets = !list_empty(&space_info->tickets) || in __reserve_bytes()
1770 !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1772 pending_tickets = !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1775 * Carry on if we have enough space (short-circuit) OR call in __reserve_bytes()
1779 ((used + orig_bytes <= space_info->total_bytes) || in __reserve_bytes()
1792 if (used + orig_bytes <= space_info->total_bytes) { in __reserve_bytes()
1808 space_info->reclaim_size += ticket.bytes; in __reserve_bytes()
1817 list_add_tail(&ticket.list, &space_info->tickets); in __reserve_bytes()
1818 if (!space_info->flush) { in __reserve_bytes()
1828 space_info->flush = 1; in __reserve_bytes()
1830 space_info->flags, in __reserve_bytes()
1837 &space_info->priority_tickets); in __reserve_bytes()
1839 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in __reserve_bytes()
1842 * which means we won't have fs_info->fs_root set, so don't do in __reserve_bytes()
1845 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && in __reserve_bytes()
1846 !work_busy(&fs_info->preempt_reclaim_work) && in __reserve_bytes()
1848 trace_btrfs_trigger_flush(fs_info, space_info->flags, in __reserve_bytes()
1851 &fs_info->preempt_reclaim_work); in __reserve_bytes()
1854 spin_unlock(&space_info->lock); in __reserve_bytes()
1885 if (ret == -ENOSPC) { in btrfs_reserve_metadata_bytes()
1887 space_info->flags, orig_bytes, 1); in btrfs_reserve_metadata_bytes()
1908 struct btrfs_fs_info *fs_info = space_info->fs_info; in btrfs_reserve_data_bytes()
1914 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); in btrfs_reserve_data_bytes()
1917 if (ret == -ENOSPC) { in btrfs_reserve_data_bytes()
1919 space_info->flags, bytes, 1); in btrfs_reserve_data_bytes()
1932 list_for_each_entry(space_info, &fs_info->space_info, list) { in btrfs_dump_space_info_for_trans_abort()
1933 spin_lock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1935 spin_unlock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1951 if (list_empty(&sinfo->ro_bgs)) in btrfs_account_ro_block_groups_free_space()
1954 spin_lock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
1955 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { in btrfs_account_ro_block_groups_free_space()
1956 spin_lock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1958 if (!block_group->ro) { in btrfs_account_ro_block_groups_free_space()
1959 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1963 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_account_ro_block_groups_free_space()
1964 free_bytes += (block_group->length - in btrfs_account_ro_block_groups_free_space()
1965 block_group->used) * factor; in btrfs_account_ro_block_groups_free_space()
1967 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1969 spin_unlock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
2014 * value in doing trivial "relocations" of re-writing the same block group
2023 * - calculate a target unallocated amount of 5 block group sized chunks
2024 * - ratchet up the intensity of reclaim depending on how far we are from
2032 struct btrfs_fs_info *fs_info = space_info->fs_info; in calc_dynamic_reclaim_threshold()
2033 u64 unalloc = atomic64_read(&fs_info->free_chunk_space); in calc_dynamic_reclaim_threshold()
2035 u64 alloc = space_info->total_bytes; in calc_dynamic_reclaim_threshold()
2037 u64 unused = alloc - used; in calc_dynamic_reclaim_threshold()
2038 u64 want = target > unalloc ? target - unalloc : 0; in calc_dynamic_reclaim_threshold()
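
A worked example of the shortfall computation above (editor's invented numbers; "target" is the five-chunk goal from the bullet at line 2023):

/*
 * chunk size = 1 GiB  => target = 5 GiB of unallocated space
 * unalloc    = 2 GiB  => want   = 5 - 2 = 3 GiB still to free up;
 * with unalloc >= target, want is 0 and no dynamic reclaim is needed.
 */
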
2051 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_threshold()
2053 if (READ_ONCE(space_info->dynamic_reclaim)) in btrfs_calc_reclaim_threshold()
2055 return READ_ONCE(space_info->bg_reclaim_threshold); in btrfs_calc_reclaim_threshold()
2059 * Under "urgent" reclaim, we will reclaim even fresh block groups that have
2065 struct btrfs_fs_info *fs_info = space_info->fs_info; in is_reclaim_urgent()
2066 u64 unalloc = atomic64_read(&fs_info->free_chunk_space); in is_reclaim_urgent()
2077 bool urgent; in do_reclaim_sweep() local
2079 spin_lock(&space_info->lock); in do_reclaim_sweep()
2080 urgent = is_reclaim_urgent(space_info); in do_reclaim_sweep()
2082 spin_unlock(&space_info->lock); in do_reclaim_sweep()
2084 down_read(&space_info->groups_sem); in do_reclaim_sweep()
2086 list_for_each_entry(bg, &space_info->block_groups[raid], list) { in do_reclaim_sweep()
2091 spin_lock(&bg->lock); in do_reclaim_sweep()
2092 thresh = mult_perc(bg->length, thresh_pct); in do_reclaim_sweep()
2093 if (bg->used < thresh && bg->reclaim_mark) { in do_reclaim_sweep()
2097 bg->reclaim_mark++; in do_reclaim_sweep()
2098 spin_unlock(&bg->lock); in do_reclaim_sweep()
2111 if (try_again && urgent) { in do_reclaim_sweep()
2116 up_read(&space_info->groups_sem); in do_reclaim_sweep()
2121 u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info); in btrfs_space_info_update_reclaimable()
2123 lockdep_assert_held(&space_info->lock); in btrfs_space_info_update_reclaimable()
2124 space_info->reclaimable_bytes += bytes; in btrfs_space_info_update_reclaimable()
2126 if (space_info->reclaimable_bytes >= chunk_sz) in btrfs_space_info_update_reclaimable()
2132 lockdep_assert_held(&space_info->lock); in btrfs_set_periodic_reclaim_ready()
2133 if (!READ_ONCE(space_info->periodic_reclaim)) in btrfs_set_periodic_reclaim_ready()
2135 if (ready != space_info->periodic_reclaim_ready) { in btrfs_set_periodic_reclaim_ready()
2136 space_info->periodic_reclaim_ready = ready; in btrfs_set_periodic_reclaim_ready()
2138 space_info->reclaimable_bytes = 0; in btrfs_set_periodic_reclaim_ready()
2146 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in btrfs_should_periodic_reclaim()
2148 if (!READ_ONCE(space_info->periodic_reclaim)) in btrfs_should_periodic_reclaim()
2151 spin_lock(&space_info->lock); in btrfs_should_periodic_reclaim()
2152 ret = space_info->periodic_reclaim_ready; in btrfs_should_periodic_reclaim()
2154 spin_unlock(&space_info->lock); in btrfs_should_periodic_reclaim()
2164 list_for_each_entry(space_info, &fs_info->space_info, list) { in btrfs_reclaim_sweep()
2174 struct btrfs_fs_info *fs_info = space_info->fs_info; in btrfs_return_free_space()
2175 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in btrfs_return_free_space()
2177 lockdep_assert_held(&space_info->lock); in btrfs_return_free_space()
2180 if (global_rsv->space_info != space_info) in btrfs_return_free_space()
2183 spin_lock(&global_rsv->lock); in btrfs_return_free_space()
2184 if (!global_rsv->full) { in btrfs_return_free_space()
2185 u64 to_add = min(len, global_rsv->size - global_rsv->reserved); in btrfs_return_free_space()
2187 global_rsv->reserved += to_add; in btrfs_return_free_space()
2189 if (global_rsv->reserved >= global_rsv->size) in btrfs_return_free_space()
2190 global_rsv->full = 1; in btrfs_return_free_space()
2191 len -= to_add; in btrfs_return_free_space()
2193 spin_unlock(&global_rsv->lock); in btrfs_return_free_space()
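
A minimal model of the top-up above (editor's sketch): returned space first refills the global reserve up to its size, and only the remainder falls through to the space_info counters.

#include <stdint.h>
#include <stdio.h>

static uint64_t model_refill_global_rsv(uint64_t *reserved, uint64_t size,
					uint64_t len)
{
	uint64_t room = size - *reserved;
	uint64_t to_add = len < room ? len : room;

	*reserved += to_add;
	return len - to_add;	/* what is left for the space_info */
}

int main(void)
{
	uint64_t reserved = 70, size = 100;

	/* returning 50: 30 tops up the reserve, 20 falls through */
	printf("%llu\n", (unsigned long long)
	       model_refill_global_rsv(&reserved, size, 50));
	return 0;
}
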