Lines matching +full:1 +full:ac in fs/ext4/mballoc.c

102  *  [ group 0 bitmap][ group 0 buddy] [ group 1 bitmap][ group 1 buddy]...
135 * 1) Array of largest free order xarrays (sbi->s_mb_largest_free_orders)
150 * average fragment size >= 2^i and < 2^(i+1). The average fragment size
166 * in the data structure (1) above where largest_free_order = order of the
169 * lookup in O(1) time.
174 * size group lists (data structure 2) in O(1) time.
183 * suitable block group in O(1) time and results in faster allocation at the
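
The comment block above (lines 135-183) is the key design note: groups are indexed both by largest-free-order and by average-fragment-size order, so a suitable group is found without a linear sweep. Below is a minimal userspace model of the idea; MAX_ORDER, NGROUPS, and the one-head-per-bucket simplification are assumptions here, while the kernel keeps whole lists of groups per order in xarrays:

```c
#include <stdio.h>

#define MAX_ORDER 14	/* MB_NUM_ORDERS for a 4k block size, an assumption */
#define NGROUPS   8

static int largest_free_order[NGROUPS] = { 3, 0, 7, 5, 3, 12, 1, 7 };
static int bucket_head[MAX_ORDER];	/* first group per order, -1 if none */

static void build_buckets(void)
{
	for (int i = 0; i < MAX_ORDER; i++)
		bucket_head[i] = -1;
	/* walk backwards so the lowest-numbered group wins each bucket */
	for (int g = NGROUPS - 1; g >= 0; g--)
		bucket_head[largest_free_order[g]] = g;
}

/* a request needing a free extent of `order` scans buckets upward */
static int pick_group(int order)
{
	for (int i = order; i < MAX_ORDER; i++)
		if (bucket_head[i] >= 0)
			return bucket_head[i];
	return -1;
}

int main(void)
{
	build_buckets();
	printf("order-4 request -> group %d\n", pick_group(4));	/* group 3 */
	return 0;
}
```
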
280 * 1) if buddy is referenced, it's already initialized
426 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
428 static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
437 * 1. We sample the percpu discard_pa_seq counter before trying for block
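
Line 437 opens the comment on the discard_pa_seq protocol: sample the counter before allocating, and retry only if it moved while the allocation was failing. A compilable sketch of that sample-then-compare retry, with a single atomic standing in for the kernel's summed percpu counters:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong discard_pa_seq;	/* bumped whenever a PA is discarded */

static bool try_alloc(void)		/* placeholder that always "fails" */
{
	return false;
}

static bool alloc_with_retry(void)
{
	unsigned long seq = atomic_load(&discard_pa_seq);

	if (try_alloc())
		return true;
	/* Retry only if a preallocation was discarded since we sampled:
	 * those freed blocks may now satisfy this request. */
	return atomic_load(&discard_pa_seq) != seq && try_alloc();
}

int main(void)
{
	printf("allocated: %d\n", alloc_with_retry());
	return 0;
}
```
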
539 if (order > e4b->bd_blkbits + 1) { in mb_find_buddy()
546 *max = 1 << (e4b->bd_blkbits + 3); in mb_find_buddy()
689 int order = e4b->bd_blkbits + 1; in __mb_check_buddy()
706 while (order > 1) { in __mb_check_buddy()
709 buddy2 = mb_find_buddy(e4b, order - 1, &max2); in __mb_check_buddy()
719 if (!mb_test_bit(i << 1, buddy2)) { in __mb_check_buddy()
721 mb_test_bit((i<<1)+1, buddy2)); in __mb_check_buddy()
726 /* both bits in buddy2 must be 1 */ in __mb_check_buddy()
727 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); in __mb_check_buddy()
728 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); in __mb_check_buddy()
730 for (j = 0; j < (1 << order); j++) { in __mb_check_buddy()
731 k = (i * (1 << order)) + j; in __mb_check_buddy()
741 fstart = -1; in __mb_check_buddy()
746 if (fstart == -1) { in __mb_check_buddy()
752 fstart = -1; in __mb_check_buddy()
754 for (j = 0; j < e4b->bd_blkbits + 1; j++) { in __mb_check_buddy()
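
The bounds visible above (order capped at bd_blkbits + 1 in mb_find_buddy(), order-0 size of 1 << (bd_blkbits + 3) bits) fix the buddy geometry: a group has 8 * blocksize clusters and each higher order halves the bitmap. A quick illustration, assuming a 4k block size; order 0 is the block bitmap itself:

```c
#include <stdio.h>

int main(void)
{
	int blkbits = 12;			/* 4k blocks, an assumption */
	int nclusters = 1 << (blkbits + 3);	/* 8 bitmap bits per byte */

	printf("clusters per group: %d\n", nclusters);
	for (int order = 1; order <= blkbits + 1; order++)
		printf("order %2d buddy map: %5d bits\n",
		       order, nclusters >> order);
	return 0;
}
```
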
806 max = ffs(first | border) - 1; in ext4_mb_mark_free_simple()
809 min = fls(len) - 1; in ext4_mb_mark_free_simple()
813 chunk = 1 << min; in ext4_mb_mark_free_simple()
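
Lines 806-813 are the heart of ext4_mb_mark_free_simple(): each chunk is the largest power of two that is both aligned to the current start (the ffs() limit) and no longer than what remains (the fls() limit). A standalone trace of that split; fls_int() and the border value are local stand-ins, the source derives its border from the block size:

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int fls_int(int x)	/* local stand-in for the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	int first = 6, len = 13;	/* mark the free extent [6, 19) */
	int border = 1 << 15;	/* stand-in bound so ffs() is defined at first == 0 */

	while (len > 0) {
		int max = ffs(first | border) - 1;	/* alignment limit */
		int min = fls_int(len) - 1;		/* remaining-length limit */
		int chunk;

		if (max < min)
			min = max;
		chunk = 1 << min;
		printf("order %d chunk of %2d at %2d\n", min, chunk, first);
		first += chunk;
		len -= chunk;
	}
	return 0;	/* chunks: 2@6, 8@8, 2@16, 1@18 */
}
```
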
831 * We don't bother with special lists for groups with only 1 block free in mb_avg_fragment_size_order()
840 order = MB_NUM_ORDERS(sb) - 1; in mb_avg_fragment_size_order()
855 new = grp->bb_fragments == 0 ? -1 : in mb_update_avg_fragment_size()
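
A compilable model of the bucket choice above, following the source's fls()-based computation; the MB_NUM_ORDERS value is an assumption for a 4k block size:

```c
#include <stdio.h>

#define MB_NUM_ORDERS 14	/* assumption: 4k block size */

static int fls_int(int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static int avg_fragment_size_order(int len)
{
	int order = fls_int(len) - 2;	/* list 0 also absorbs 1-block extents */

	if (order < 0)
		return 0;
	if (order >= MB_NUM_ORDERS)
		order = MB_NUM_ORDERS - 1;
	return order;
}

int main(void)
{
	/* 1000 free clusters in 7 fragments: average fragment of 142 */
	printf("list %d\n", avg_fragment_size_order(1000 / 7));
	return 0;
}
```
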
878 static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_xa_range() argument
882 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_groups_xa_range()
884 enum criteria cr = ac->ac_criteria; in ext4_mb_scan_groups_xa_range()
892 xa_for_each_range(xa, group, grp, start, end - 1) { in ext4_mb_scan_groups_xa_range()
898 err = ext4_mb_scan_group(ac, grp->bb_group); in ext4_mb_scan_groups_xa_range()
899 if (err || ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_scan_groups_xa_range()
912 ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_largest_free_order_range() argument
916 struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order]; in ext4_mb_scan_groups_largest_free_order_range()
921 return ext4_mb_scan_groups_xa_range(ac, xa, start, end); in ext4_mb_scan_groups_largest_free_order_range()
928 static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_p2_aligned() argument
931 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_scan_groups_p2_aligned()
937 end = ext4_get_groups_count(ac->ac_sb); in ext4_mb_scan_groups_p2_aligned()
939 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_scan_groups_p2_aligned()
940 ret = ext4_mb_scan_groups_largest_free_order_range(ac, i, in ext4_mb_scan_groups_p2_aligned()
942 if (ret || ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_scan_groups_p2_aligned()
952 atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); in ext4_mb_scan_groups_p2_aligned()
955 ac->ac_criteria = CR_GOAL_LEN_FAST; in ext4_mb_scan_groups_p2_aligned()
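
Each scanner above walks its xarray in two legs, [start, ngroups) and then [0, start), so the search wraps around the goal group. The same pattern reduced to plain loops; scan_one() is a stand-in predicate:

```c
#include <stdbool.h>
#include <stdio.h>

static bool scan_one(unsigned int g)	/* stand-in: pretend group 2 fits */
{
	return g == 2;
}

static int scan_wrapped(unsigned int start, unsigned int ngroups)
{
	for (unsigned int g = start; g < ngroups; g++)
		if (scan_one(g))
			return g;
	for (unsigned int g = 0; g < start; g++)
		if (scan_one(g))
			return g;
	return -1;
}

int main(void)
{
	printf("found group %d\n", scan_wrapped(5, 8));	/* wraps, finds 2 */
	return 0;
}
```
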
963 ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_avg_frag_order_range() argument
967 struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order]; in ext4_mb_scan_groups_avg_frag_order_range()
972 return ext4_mb_scan_groups_xa_range(ac, xa, start, end); in ext4_mb_scan_groups_avg_frag_order_range()
979 static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_goal_fast() argument
982 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_scan_groups_goal_fast()
987 end = ext4_get_groups_count(ac->ac_sb); in ext4_mb_scan_groups_goal_fast()
989 i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); in ext4_mb_scan_groups_goal_fast()
990 for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_scan_groups_goal_fast()
991 ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i, in ext4_mb_scan_groups_goal_fast()
993 if (ret || ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_scan_groups_goal_fast()
1003 atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); in ext4_mb_scan_groups_goal_fast()
1012 if (ac->ac_flags & EXT4_MB_HINT_DATA) in ext4_mb_scan_groups_goal_fast()
1013 ac->ac_criteria = CR_BEST_AVAIL_LEN; in ext4_mb_scan_groups_goal_fast()
1015 ac->ac_criteria = CR_GOAL_LEN_SLOW; in ext4_mb_scan_groups_goal_fast()
1029 static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_best_avail() argument
1033 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_scan_groups_best_avail()
1040 * retrieving back the length using (1 << order) inaccurate. Hence, use in ext4_mb_scan_groups_best_avail()
1044 order = fls(ac->ac_g_ex.fe_len) - 1; in ext4_mb_scan_groups_best_avail()
1045 if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb))) in ext4_mb_scan_groups_best_avail()
1046 order = MB_NUM_ORDERS(ac->ac_sb); in ext4_mb_scan_groups_best_avail()
1057 if (1 << min_order < num_stripe_clusters) in ext4_mb_scan_groups_best_avail()
1059 * We consider 1 order less because later we round in ext4_mb_scan_groups_best_avail()
1062 min_order = fls(num_stripe_clusters) - 1; in ext4_mb_scan_groups_best_avail()
1065 if (1 << min_order < ac->ac_o_ex.fe_len) in ext4_mb_scan_groups_best_avail()
1066 min_order = fls(ac->ac_o_ex.fe_len); in ext4_mb_scan_groups_best_avail()
1069 end = ext4_get_groups_count(ac->ac_sb); in ext4_mb_scan_groups_best_avail()
1078 ac->ac_g_ex.fe_len = 1 << i; in ext4_mb_scan_groups_best_avail()
1086 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, in ext4_mb_scan_groups_best_avail()
1090 frag_order = mb_avg_fragment_size_order(ac->ac_sb, in ext4_mb_scan_groups_best_avail()
1091 ac->ac_g_ex.fe_len); in ext4_mb_scan_groups_best_avail()
1093 ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order, in ext4_mb_scan_groups_best_avail()
1095 if (ret || ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_scan_groups_best_avail()
1105 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_scan_groups_best_avail()
1107 atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); in ext4_mb_scan_groups_best_avail()
1108 ac->ac_criteria = CR_GOAL_LEN_SLOW; in ext4_mb_scan_groups_best_avail()
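
Lines 1029-1108 implement CR_BEST_AVAIL_LEN: starting at the goal's order, the goal is trimmed by powers of two, never below the originally requested length, and each trimmed goal is looked up in the matching average-fragment-size bucket. A sketch of the trimming with invented lengths; the stripe rounding and the trim-order limit are omitted:

```c
#include <stdio.h>

static int fls_int(int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	int goal_len = 1000;	/* normalized goal, in clusters (invented) */
	int orig_len = 180;	/* originally requested length (invented) */
	int order = fls_int(goal_len) - 1;
	int min_order = fls_int(orig_len);	/* keep 1 << i >= orig_len */

	for (int i = order; i >= min_order; i--)
		printf("try trimmed goal of %4d clusters\n", 1 << i);
	return 0;	/* tries 512, then 256; never below 180 */
}
```
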
1113 static inline int should_optimize_scan(struct ext4_allocation_context *ac) in should_optimize_scan() argument
1115 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) in should_optimize_scan()
1117 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) in should_optimize_scan()
1119 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) in should_optimize_scan()
1121 return 1; in should_optimize_scan()
1133 *group = *group + 1 >= ngroups ? 0 : *group + 1; in next_linear_group()
1136 static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac, in ext4_mb_scan_groups_linear() argument
1140 enum criteria cr = ac->ac_criteria; in ext4_mb_scan_groups_linear()
1141 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_groups_linear()
1146 ret = ext4_mb_scan_group(ac, group); in ext4_mb_scan_groups_linear()
1147 if (ret || ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_scan_groups_linear()
1154 ac->ac_criteria++; in ext4_mb_scan_groups_linear()
1163 static int ext4_mb_scan_groups(struct ext4_allocation_context *ac) in ext4_mb_scan_groups() argument
1167 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_scan_groups()
1168 ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb); in ext4_mb_scan_groups()
1171 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) in ext4_mb_scan_groups()
1175 start = ac->ac_g_ex.fe_group; in ext4_mb_scan_groups()
1176 ac->ac_prefetch_grp = start; in ext4_mb_scan_groups()
1177 ac->ac_prefetch_nr = 0; in ext4_mb_scan_groups()
1179 if (!should_optimize_scan(ac)) in ext4_mb_scan_groups()
1180 return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups); in ext4_mb_scan_groups()
1188 ret = ext4_mb_scan_groups_linear(ac, ngroups, &start, in ext4_mb_scan_groups()
1190 if (ret || ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_scan_groups()
1193 switch (ac->ac_criteria) { in ext4_mb_scan_groups()
1195 return ext4_mb_scan_groups_p2_aligned(ac, start); in ext4_mb_scan_groups()
1197 return ext4_mb_scan_groups_goal_fast(ac, start); in ext4_mb_scan_groups()
1199 return ext4_mb_scan_groups_best_avail(ac, start); in ext4_mb_scan_groups()
1206 WARN_ON(1); in ext4_mb_scan_groups()
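
The switch above dispatches on the allocation-criteria ladder; each pass that fails bumps ac_criteria to a cheaper-to-satisfy but slower-to-scan mode. The enum order below matches the source, while the loop is only a stand-in for the real dispatch:

```c
#include <stdio.h>

enum criteria {
	CR_POWER2_ALIGNED,	/* one extent of exactly 2^order */
	CR_GOAL_LEN_FAST,	/* average fragment covers the goal */
	CR_BEST_AVAIL_LEN,	/* trim the goal, retry the fast path */
	CR_GOAL_LEN_SLOW,	/* enough free space, fragments be damned */
	CR_ANY_FREE,		/* take anything */
	EXT4_MB_NUM_CRS,
};

int main(void)
{
	static const char *names[] = {
		"CR_POWER2_ALIGNED", "CR_GOAL_LEN_FAST", "CR_BEST_AVAIL_LEN",
		"CR_GOAL_LEN_SLOW", "CR_ANY_FREE",
	};

	/* each failed pass escalates to the next criterion */
	for (int cr = CR_POWER2_ALIGNED; cr < EXT4_MB_NUM_CRS; cr++)
		printf("pass %d: %s\n", cr, names[cr]);
	return 0;
}
```
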
1222 for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--) in mb_set_largest_free_order()
1276 if (len > 1) in ext4_mb_generate_buddy()
1311 int order = 1; in mb_regenerate_buddy()
1333 * [ group 0 bitmap][ group 0 buddy] [ group 1 bitmap][ group 1 buddy]...
1372 groups_per_page = blocks_per_page >> 1; in ext4_mb_init_cache()
1374 groups_per_page = 1; in ext4_mb_init_cache()
1377 if (groups_per_page > 1) { in ext4_mb_init_cache()
1428 group = (first_block + i) >> 1; in ext4_mb_init_cache()
1459 if ((first_block + i) & 1) { in ext4_mb_init_cache()
1551 /* blocks_per_page == 1, hence we need another page for the buddy */ in ext4_mb_get_buddy_page_lock()
1552 folio = __filemap_get_folio(inode->i_mapping, block + 1, in ext4_mb_get_buddy_page_lock()
1816 int order = 1, max; in mb_find_order_for_block()
1820 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); in mb_find_order_for_block()
1822 while (order <= e4b->bd_blkbits + 1) { in mb_find_order_for_block()
1852 * returns the first zero bit found, if any; -1 otherwise
1857 int zero_bit = -1; in mb_test_and_clear_bits()
1864 if (*addr != (__u32)(-1) && zero_bit == -1) in mb_test_and_clear_bits()
1870 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) in mb_test_and_clear_bits()
1901 return 1; in mb_buddy_adjust_border()
1906 return -1; in mb_buddy_adjust_border()
1913 int order = 1; in mb_buddy_mark_free()
1930 * | 1 | 1 | 1 | 1 | in mb_buddy_mark_free()
1932 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | in mb_buddy_mark_free()
1934 * 0 1 2 3 4 5 6 7 in mb_buddy_mark_free()
1937 * Neither [1] nor [6] is aligned to above layer. in mb_buddy_mark_free()
1948 if (first & 1) in mb_buddy_mark_free()
1949 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); in mb_buddy_mark_free()
1950 if (!(last & 1)) in mb_buddy_mark_free()
1951 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); in mb_buddy_mark_free()
1958 mb_clear_bits(buddy, first, last - first + 1); in mb_buddy_mark_free()
1959 e4b->bd_info->bb_counters[order - 1] += last - first + 1; in mb_buddy_mark_free()
1962 first >>= 1; in mb_buddy_mark_free()
1963 last >>= 1; in mb_buddy_mark_free()
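
mb_buddy_mark_free() (lines 1913-1963 above) peels unaligned border bits at each order and promotes the aligned middle to the next order; the [1..6] range in the diagram exercises both borders. A positions-only sketch of that walk; the real code also tests each border's buddy bit, via mb_buddy_adjust_border(), to decide whether the border merges upward, which this omits:

```c
#include <stdio.h>

int main(void)
{
	int first = 1, last = 6;	/* the [1..6] range from the diagram */
	int order = 1;

	while (first <= last) {
		if (first & 1)		/* left edge not buddy-aligned */
			printf("order %d: handle border bit %d\n", order, first++);
		if (!(last & 1))	/* right edge not buddy-aligned */
			printf("order %d: handle border bit %d\n", order, last--);
		if (first > last)
			break;
		printf("order %d: aligned middle [%d..%d] promoted\n",
		       order, first, last);
		first >>= 1;
		last >>= 1;
		order++;
	}
	return 0;
}
```
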
1974 int last = first + count - 1; in mb_free_blocks()
1992 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); in mb_free_blocks()
1994 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) in mb_free_blocks()
1995 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); in mb_free_blocks()
1997 if (unlikely(block != -1)) { in mb_free_blocks()
2038 if (first & 1) { in mb_free_blocks()
2040 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; in mb_free_blocks()
2042 if (!(last & 1)) { in mb_free_blocks()
2044 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; in mb_free_blocks()
2048 mb_buddy_mark_free(e4b, first >> 1, last >> 1); in mb_free_blocks()
2078 ex->fe_len = (1 << order) - (block & ((1 << order) - 1)); in mb_find_extent()
2087 if (block + 1 >= max) in mb_find_extent()
2090 next = (block + 1) * (1 << order); in mb_find_extent()
2097 ex->fe_len += 1 << order; in mb_find_extent()
2102 WARN_ON(1); in mb_find_extent()
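
Line 2078 computes how much of an order-sized free chunk lies at or after the starting block: the low bits of the block number are its offset into the chunk, and the remainder is the usable run. A worked example:

```c
#include <stdio.h>

int main(void)
{
	int block = 21, order = 3;	/* block 21 sits in the free chunk [16..23] */
	int len = (1 << order) - (block & ((1 << order) - 1));

	printf("run of %d blocks: [%d..%d]\n", len, block, block + len - 1);
	return 0;	/* prints: run of 3 blocks: [21..23] */
}
```
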
2140 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); in mb_mark_used()
2152 if (((start >> ord) << ord) == start && len >= (1 << ord)) { in mb_mark_used()
2154 mlen = 1 << ord; in mb_mark_used()
2175 ord_end = ord_start + (1 << ord); in mb_mark_used()
2205 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, in ext4_mb_use_best_found() argument
2208 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_best_found()
2211 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); in ext4_mb_use_best_found()
2212 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_use_best_found()
2214 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); in ext4_mb_use_best_found()
2215 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; in ext4_mb_use_best_found()
2216 ret = mb_mark_used(e4b, &ac->ac_b_ex); in ext4_mb_use_best_found()
2220 ac->ac_f_ex = ac->ac_b_ex; in ext4_mb_use_best_found()
2222 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_best_found()
2223 ac->ac_tail = ret & 0xffff; in ext4_mb_use_best_found()
2224 ac->ac_buddy = ret >> 16; in ext4_mb_use_best_found()
2233 ac->ac_bitmap_folio = e4b->bd_bitmap_folio; in ext4_mb_use_best_found()
2234 folio_get(ac->ac_bitmap_folio); in ext4_mb_use_best_found()
2235 ac->ac_buddy_folio = e4b->bd_buddy_folio; in ext4_mb_use_best_found()
2236 folio_get(ac->ac_buddy_folio); in ext4_mb_use_best_found()
2238 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_use_best_found()
2239 int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals; in ext4_mb_use_best_found()
2241 WRITE_ONCE(sbi->s_mb_last_groups[hash], ac->ac_f_ex.fe_group); in ext4_mb_use_best_found()
2249 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) in ext4_mb_use_best_found()
2250 ext4_mb_new_preallocation(ac); in ext4_mb_use_best_found()
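
Lines 2238-2241 update the stream-allocation goal: inode numbers hash into a small table of last-used groups so streaming writers resume near their previous allocation (the read side is at lines 3004-3008). A self-contained model; the table size is assumed, ext4 sizes s_mb_nr_global_goals per superblock:

```c
#include <stdio.h>

#define NR_GLOBAL_GOALS 16	/* assumption; sized per-sb in ext4 */

static unsigned int last_groups[NR_GLOBAL_GOALS];

static void remember(unsigned long ino, unsigned int group)
{
	last_groups[ino % NR_GLOBAL_GOALS] = group;
}

static unsigned int hint(unsigned long ino)
{
	return last_groups[ino % NR_GLOBAL_GOALS];
}

int main(void)
{
	remember(1234, 77);	/* last allocation landed in group 77 */
	printf("next goal for inode 1234: group %u\n", hint(1234));
	return 0;
}
```
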
2254 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, in ext4_mb_check_limits() argument
2258 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_check_limits()
2259 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_check_limits()
2260 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_check_limits()
2262 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_check_limits()
2267 if (ac->ac_found > sbi->s_mb_max_to_scan && in ext4_mb_check_limits()
2268 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_check_limits()
2269 ac->ac_status = AC_STATUS_BREAK; in ext4_mb_check_limits()
2279 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) in ext4_mb_check_limits()
2280 ext4_mb_use_best_found(ac, e4b); in ext4_mb_check_limits()
2307 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, in ext4_mb_measure_extent() argument
2311 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_measure_extent()
2312 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_measure_extent()
2315 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2316 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2317 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); in ext4_mb_measure_extent()
2319 ac->ac_found++; in ext4_mb_measure_extent()
2320 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_measure_extent()
2325 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_measure_extent()
2327 ext4_mb_use_best_found(ac, e4b); in ext4_mb_measure_extent()
2336 ext4_mb_use_best_found(ac, e4b); in ext4_mb_measure_extent()
2364 ext4_mb_check_limits(ac, e4b, 0); in ext4_mb_measure_extent()
2368 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, in ext4_mb_try_best_found() argument
2371 struct ext4_free_extent ex = ac->ac_b_ex; in ext4_mb_try_best_found()
2377 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_try_best_found()
2381 ext4_lock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2388 ac->ac_b_ex = ex; in ext4_mb_try_best_found()
2389 ext4_mb_use_best_found(ac, e4b); in ext4_mb_try_best_found()
2393 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2398 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, in ext4_mb_find_by_goal() argument
2401 ext4_group_t group = ac->ac_g_ex.fe_group; in ext4_mb_find_by_goal()
2404 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_by_goal()
2405 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_find_by_goal()
2410 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) in ext4_mb_find_by_goal()
2415 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_find_by_goal()
2419 ext4_lock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2423 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, in ext4_mb_find_by_goal()
2424 ac->ac_g_ex.fe_len, &ex); in ext4_mb_find_by_goal()
2427 if (max >= ac->ac_g_ex.fe_len && in ext4_mb_find_by_goal()
2428 ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) { in ext4_mb_find_by_goal()
2431 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); in ext4_mb_find_by_goal()
2434 ac->ac_found++; in ext4_mb_find_by_goal()
2435 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2436 ext4_mb_use_best_found(ac, e4b); in ext4_mb_find_by_goal()
2438 } else if (max >= ac->ac_g_ex.fe_len) { in ext4_mb_find_by_goal()
2440 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2441 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2442 ac->ac_found++; in ext4_mb_find_by_goal()
2443 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2444 ext4_mb_use_best_found(ac, e4b); in ext4_mb_find_by_goal()
2445 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { in ext4_mb_find_by_goal()
2449 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2450 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2451 ac->ac_found++; in ext4_mb_find_by_goal()
2452 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2453 ext4_mb_use_best_found(ac, e4b); in ext4_mb_find_by_goal()
2456 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2467 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, in ext4_mb_simple_scan_group() argument
2470 struct super_block *sb = ac->ac_sb; in ext4_mb_simple_scan_group()
2477 BUG_ON(ac->ac_2order <= 0); in ext4_mb_simple_scan_group()
2478 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { in ext4_mb_simple_scan_group()
2489 ext4_mark_group_bitmap_corrupted(ac->ac_sb, in ext4_mb_simple_scan_group()
2492 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, in ext4_mb_simple_scan_group()
2497 ac->ac_found++; in ext4_mb_simple_scan_group()
2498 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_simple_scan_group()
2500 ac->ac_b_ex.fe_len = 1 << i; in ext4_mb_simple_scan_group()
2501 ac->ac_b_ex.fe_start = k << i; in ext4_mb_simple_scan_group()
2502 ac->ac_b_ex.fe_group = e4b->bd_group; in ext4_mb_simple_scan_group()
2504 ext4_mb_use_best_found(ac, e4b); in ext4_mb_simple_scan_group()
2506 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); in ext4_mb_simple_scan_group()
2521 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, in ext4_mb_complex_scan_group() argument
2524 struct super_block *sb = ac->ac_sb; in ext4_mb_complex_scan_group()
2536 while (free && ac->ac_status == AC_STATUS_CONTINUE) { in ext4_mb_complex_scan_group()
2554 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { in ext4_mb_complex_scan_group()
2565 if (freelen < ac->ac_g_ex.fe_len) { in ext4_mb_complex_scan_group()
2572 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); in ext4_mb_complex_scan_group()
2590 ext4_mb_measure_extent(ac, &ex, e4b); in ext4_mb_complex_scan_group()
2596 ext4_mb_check_limits(ac, e4b, 1); in ext4_mb_complex_scan_group()
2604 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, in ext4_mb_scan_aligned() argument
2607 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_aligned()
2621 a = first_group_block + sbi->s_stripe - 1; in ext4_mb_scan_aligned()
2631 ac->ac_found++; in ext4_mb_scan_aligned()
2632 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_scan_aligned()
2634 ac->ac_b_ex = ex; in ext4_mb_scan_aligned()
2635 ext4_mb_use_best_found(ac, e4b); in ext4_mb_scan_aligned()
2643 static void __ext4_mb_scan_group(struct ext4_allocation_context *ac) in __ext4_mb_scan_group() argument
2647 enum criteria cr = ac->ac_criteria; in __ext4_mb_scan_group()
2649 ac->ac_groups_scanned++; in __ext4_mb_scan_group()
2651 return ext4_mb_simple_scan_group(ac, ac->ac_e4b); in __ext4_mb_scan_group()
2653 sbi = EXT4_SB(ac->ac_sb); in __ext4_mb_scan_group()
2656 !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe))) in __ext4_mb_scan_group()
2661 ext4_mb_scan_aligned(ac, ac->ac_e4b); in __ext4_mb_scan_group()
2663 if (ac->ac_status == AC_STATUS_CONTINUE) in __ext4_mb_scan_group()
2664 ext4_mb_complex_scan_group(ac, ac->ac_e4b); in __ext4_mb_scan_group()
2669 * Returns true or false indicating whether the group is suitable
2672 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, in ext4_mb_good_group() argument
2676 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); in ext4_mb_good_group()
2677 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group()
2694 BUG_ON(ac->ac_2order == 0); in ext4_mb_good_group()
2697 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && in ext4_mb_good_group()
2702 if (free < ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2705 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) in ext4_mb_good_group()
2708 if (grp->bb_largest_free_order < ac->ac_2order) in ext4_mb_good_group()
2714 if ((free / fragments) >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2718 if (free >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
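
Lines 2672-2718 are the per-criterion suitability filters run before a group is locked and scanned. A condensed, compilable model of those checks; the structure fields are assumed names, and the flex_bg and inode-extents special cases are omitted:

```c
#include <stdbool.h>
#include <stdio.h>

enum criteria { CR_POWER2_ALIGNED, CR_GOAL_LEN_FAST, CR_BEST_AVAIL_LEN,
		CR_GOAL_LEN_SLOW, CR_ANY_FREE };

struct group_info {
	int free;			/* free clusters */
	int fragments;			/* number of free extents */
	int largest_free_order;		/* order of the largest free extent */
};

static bool good_group(const struct group_info *g, enum criteria cr,
		       int goal_len, int goal_order)
{
	if (g->free == 0 || g->fragments == 0)
		return false;

	switch (cr) {
	case CR_POWER2_ALIGNED:		/* need one extent of exactly 2^order */
		return g->largest_free_order >= goal_order;
	case CR_GOAL_LEN_FAST:
	case CR_BEST_AVAIL_LEN:		/* average extent must cover the goal */
		return g->free / g->fragments >= goal_len;
	case CR_GOAL_LEN_SLOW:		/* enough free space overall */
		return g->free >= goal_len;
	default:			/* CR_ANY_FREE: anything non-empty */
		return true;
	}
}

int main(void)
{
	struct group_info g = { .free = 900, .fragments = 3,
				.largest_free_order = 6 };

	printf("fast pass ok: %d\n", good_group(&g, CR_GOAL_LEN_FAST, 256, 8));
	return 0;
}
```
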
2741 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, in ext4_mb_good_group_nolock() argument
2744 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group_nolock()
2745 struct super_block *sb = ac->ac_sb; in ext4_mb_good_group_nolock()
2747 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; in ext4_mb_good_group_nolock()
2754 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); in ext4_mb_good_group_nolock()
2767 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) in ext4_mb_good_group_nolock()
2793 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && in ext4_mb_good_group_nolock()
2806 ret = ext4_mb_good_group(ac, group, cr); in ext4_mb_good_group_nolock()
2862 static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac, in ext4_mb_might_prefetch() argument
2867 if (ac->ac_prefetch_grp != group) in ext4_mb_might_prefetch()
2870 sbi = EXT4_SB(ac->ac_sb); in ext4_mb_might_prefetch()
2871 if (ext4_mb_cr_expensive(ac->ac_criteria) || in ext4_mb_might_prefetch()
2872 ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) { in ext4_mb_might_prefetch()
2875 if (ext4_has_feature_flex_bg(ac->ac_sb)) { in ext4_mb_might_prefetch()
2876 nr = 1 << sbi->s_log_groups_per_flex; in ext4_mb_might_prefetch()
2877 nr -= group & (nr - 1); in ext4_mb_might_prefetch()
2881 ac->ac_prefetch_nr = nr; in ext4_mb_might_prefetch()
2882 ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr, in ext4_mb_might_prefetch()
2883 &ac->ac_prefetch_ios); in ext4_mb_might_prefetch()
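
Lines 2875-2883 size the readahead so it stops at the current flex-group boundary, letting one I/O batch cover adjacent bitmaps. Worked example:

```c
#include <stdio.h>

int main(void)
{
	int log_groups_per_flex = 4;	/* 16 groups per flex bg, an assumption */
	unsigned int group = 37;

	unsigned int nr = 1u << log_groups_per_flex;	/* 16 */
	nr -= group & (nr - 1);				/* 16 - 5 = 11 */

	printf("prefetch %u groups: %u..%u\n", nr, group, group + nr - 1);
	return 0;	/* groups 37..47, up to the flex-bg boundary at 48 */
}
```
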
2920 static int ext4_mb_scan_group(struct ext4_allocation_context *ac, in ext4_mb_scan_group() argument
2924 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_group()
2925 enum criteria cr = ac->ac_criteria; in ext4_mb_scan_group()
2927 ext4_mb_might_prefetch(ac, group); in ext4_mb_scan_group()
2934 ret = ext4_mb_good_group_nolock(ac, group, cr); in ext4_mb_scan_group()
2936 if (!ac->ac_first_err) in ext4_mb_scan_group()
2937 ac->ac_first_err = ret; in ext4_mb_scan_group()
2941 ret = ext4_mb_load_buddy(sb, group, ac->ac_e4b); in ext4_mb_scan_group()
2952 if (unlikely(!ext4_mb_good_group(ac, group, cr))) in ext4_mb_scan_group()
2955 __ext4_mb_scan_group(ac); in ext4_mb_scan_group()
2960 ext4_mb_unload_buddy(ac->ac_e4b); in ext4_mb_scan_group()
2965 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) in ext4_mb_regular_allocator() argument
2969 struct super_block *sb = ac->ac_sb; in ext4_mb_regular_allocator()
2973 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_regular_allocator()
2976 err = ext4_mb_find_by_goal(ac, &e4b); in ext4_mb_regular_allocator()
2977 if (err || ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2980 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_regular_allocator()
2984 * ac->ac_2order is set only if the fe_len is a power of 2 in ext4_mb_regular_allocator()
2985 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED in ext4_mb_regular_allocator()
2988 i = fls(ac->ac_g_ex.fe_len); in ext4_mb_regular_allocator()
2989 ac->ac_2order = 0; in ext4_mb_regular_allocator()
2998 if (is_power_of_2(ac->ac_g_ex.fe_len)) in ext4_mb_regular_allocator()
2999 ac->ac_2order = array_index_nospec(i - 1, in ext4_mb_regular_allocator()
3004 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_regular_allocator()
3005 int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals; in ext4_mb_regular_allocator()
3007 ac->ac_g_ex.fe_group = READ_ONCE(sbi->s_mb_last_groups[hash]); in ext4_mb_regular_allocator()
3008 ac->ac_g_ex.fe_start = -1; in ext4_mb_regular_allocator()
3009 ac->ac_flags &= ~EXT4_MB_HINT_TRY_GOAL; in ext4_mb_regular_allocator()
3017 ac->ac_criteria = CR_GOAL_LEN_FAST; in ext4_mb_regular_allocator()
3018 if (ac->ac_2order) in ext4_mb_regular_allocator()
3019 ac->ac_criteria = CR_POWER2_ALIGNED; in ext4_mb_regular_allocator()
3021 ac->ac_e4b = &e4b; in ext4_mb_regular_allocator()
3022 ac->ac_prefetch_ios = 0; in ext4_mb_regular_allocator()
3023 ac->ac_first_err = 0; in ext4_mb_regular_allocator()
3025 while (ac->ac_criteria < EXT4_MB_NUM_CRS) { in ext4_mb_regular_allocator()
3026 err = ext4_mb_scan_groups(ac); in ext4_mb_regular_allocator()
3030 if (ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_regular_allocator()
3034 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && in ext4_mb_regular_allocator()
3035 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_regular_allocator()
3040 ext4_mb_try_best_found(ac, &e4b); in ext4_mb_regular_allocator()
3041 if (ac->ac_status != AC_STATUS_FOUND) { in ext4_mb_regular_allocator()
3051 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, in ext4_mb_regular_allocator()
3052 ac->ac_b_ex.fe_len, lost); in ext4_mb_regular_allocator()
3054 ac->ac_b_ex.fe_group = 0; in ext4_mb_regular_allocator()
3055 ac->ac_b_ex.fe_start = 0; in ext4_mb_regular_allocator()
3056 ac->ac_b_ex.fe_len = 0; in ext4_mb_regular_allocator()
3057 ac->ac_status = AC_STATUS_CONTINUE; in ext4_mb_regular_allocator()
3058 ac->ac_flags |= EXT4_MB_HINT_FIRST; in ext4_mb_regular_allocator()
3059 ac->ac_criteria = CR_ANY_FREE; in ext4_mb_regular_allocator()
3064 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) { in ext4_mb_regular_allocator()
3065 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); in ext4_mb_regular_allocator()
3066 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC && in ext4_mb_regular_allocator()
3067 ac->ac_b_ex.fe_group == ac->ac_g_ex.fe_group) in ext4_mb_regular_allocator()
3071 if (!err && ac->ac_status != AC_STATUS_FOUND && ac->ac_first_err) in ext4_mb_regular_allocator()
3072 err = ac->ac_first_err; in ext4_mb_regular_allocator()
3075 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, in ext4_mb_regular_allocator()
3076 ac->ac_flags, ac->ac_criteria, err); in ext4_mb_regular_allocator()
3078 if (ac->ac_prefetch_nr) in ext4_mb_regular_allocator()
3079 ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr); in ext4_mb_regular_allocator()
3091 group = *pos + 1; in ext4_mb_seq_groups_start()
3103 group = *pos + 1; in ext4_mb_seq_groups_next()
3124 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " in ext4_mb_seq_groups_show()
3151 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? in ext4_mb_seq_groups_show()
3181 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); in ext4_seq_mb_stats_show()
3281 position = *pos + 1; in ext4_mb_seq_structs_summary_start()
3293 position = *pos + 1; in ext4_mb_seq_structs_summary_next()
3322 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); in ext4_mb_seq_structs_summary_show()
3364 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> in ext4_mb_alloc_groupinfo()
3421 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); in ext4_mb_add_groupinfo()
3447 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3448 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
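
Lines 3364 and 3421 above show the two-level group-info indexing: a ceiling division sizes the outer array in units of one descriptor block's worth of groups, and the low bits select the slot within it. A worked example; 128 descriptors per block assumes 4k blocks and 32-byte descriptors:

```c
#include <stdio.h>

int main(void)
{
	unsigned int desc_per_block = 128;	/* 4k block / 32-byte descriptor */
	unsigned int ngroups = 1000, group = 999;

	unsigned int nslots = (ngroups + desc_per_block - 1) / desc_per_block;
	unsigned int outer  = group / desc_per_block;
	unsigned int inner  = group & (desc_per_block - 1);

	printf("%u slots; group %u -> [%u][%u]\n", nslots, group, outer, inner);
	return 0;	/* 8 slots; group 999 -> [7][103] */
}
```
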
3513 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, in ext4_mb_init_backend()
3646 fd->efd_start_cluster + fd->efd_count - 1, 1); in ext4_discard_work()
3711 i = 1; in ext4_mb_init()
3713 offset_incr = 1 << (sb->s_blocksize_bits - 1); in ext4_mb_init()
3719 offset_incr = offset_incr >> 1; in ext4_mb_init()
3720 max = max >> 1; in ext4_mb_init()
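
Lines 3711-3720 build the per-order buddy offset/size tables: each order's bitmap is half the previous one and they are packed back to back. A sketch that prints the layout for a 4k block size, with byte offsets relative to the start of the order-1 map as in the source loop:

```c
#include <stdio.h>

int main(void)
{
	int blkbits = 12;			/* 4k blocks, an assumption */
	int offset = 0;				/* byte offset of this order's map */
	int offset_incr = 1 << (blkbits - 1);	/* bytes in the order-1 map */
	int max = 1 << (blkbits + 2);		/* bits in the order-1 map */

	for (int i = 1; i <= blkbits + 1; i++) {
		printf("order %2d: offset %4d bytes, %5d bits\n", i, offset, max);
		offset += offset_incr;
		offset_incr >>= 1;
		max >>= 1;
	}
	return 0;
}
```
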
3747 INIT_LIST_HEAD(&sbi->s_freed_data_list[1]); in ext4_mb_init()
3763 * is 1 megabyte, then group preallocation size becomes half a in ext4_mb_init()
3768 * size is 256k, and 32 megs when the cluster size is 1 meg, in ext4_mb_init()
3774 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc in ext4_mb_init()
3781 if (sbi->s_stripe > 1) { in ext4_mb_init()
3886 EXT4_DESC_PER_BLOCK(sb) - 1) >> in ext4_mb_release()
3982 mb_debug(sb, "freed %d blocks in 1 structure\n", count); in ext4_free_data_in_buddy()
3994 struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1]; in ext4_process_freed_data()
4162 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
4166 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, in ext4_mb_mark_diskspace_used() argument
4177 BUG_ON(ac->ac_status != AC_STATUS_FOUND); in ext4_mb_mark_diskspace_used()
4178 BUG_ON(ac->ac_b_ex.fe_len <= 0); in ext4_mb_mark_diskspace_used()
4180 sb = ac->ac_sb; in ext4_mb_mark_diskspace_used()
4183 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL); in ext4_mb_mark_diskspace_used()
4186 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, in ext4_mb_mark_diskspace_used()
4189 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_mark_diskspace_used()
4190 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4191 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { in ext4_mb_mark_diskspace_used()
4199 ac->ac_b_ex.fe_group, in ext4_mb_mark_diskspace_used()
4200 ac->ac_b_ex.fe_start, in ext4_mb_mark_diskspace_used()
4201 ac->ac_b_ex.fe_len, in ext4_mb_mark_diskspace_used()
4211 err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group, in ext4_mb_mark_diskspace_used()
4212 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, in ext4_mb_mark_diskspace_used()
4219 BUG_ON(changed != ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4221 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); in ext4_mb_mark_diskspace_used()
4225 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) in ext4_mb_mark_diskspace_used()
4291 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) in ext4_mb_normalize_group_request() argument
4293 struct super_block *sb = ac->ac_sb; in ext4_mb_normalize_group_request()
4294 struct ext4_locality_group *lg = ac->ac_lg; in ext4_mb_normalize_group_request()
4297 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; in ext4_mb_normalize_group_request()
4298 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); in ext4_mb_normalize_group_request()
4320 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, in ext4_mb_pa_assert_overlap() argument
4323 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_pa_assert_overlap()
4324 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_pa_assert_overlap()
4347 * Given an allocation context "ac" and a range "start", "end", check
4352 * ac allocation context
4357 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, in ext4_mb_pa_adjust_overlap() argument
4360 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_pa_adjust_overlap()
4361 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_pa_adjust_overlap()
4364 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1; in ext4_mb_pa_adjust_overlap()
4365 loff_t new_end, tmp_pa_end, left_pa_end = -1; in ext4_mb_pa_adjust_overlap()
4377 /* Step 1: find any one immediate neighboring PA of the normalized range */ in ext4_mb_pa_adjust_overlap()
4379 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, in ext4_mb_pa_adjust_overlap()
4389 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || in ext4_mb_pa_adjust_overlap()
4390 ac->ac_o_ex.fe_logical < tmp_pa_start)); in ext4_mb_pa_adjust_overlap()
4399 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { in ext4_mb_pa_adjust_overlap()
4465 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); in ext4_mb_pa_adjust_overlap()
4470 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); in ext4_mb_pa_adjust_overlap()
4486 ext4_mb_pa_assert_overlap(ac, new_start, new_end); in ext4_mb_pa_adjust_overlap()
4497 ext4_mb_normalize_request(struct ext4_allocation_context *ac, in ext4_mb_normalize_request() argument
4500 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_normalize_request()
4509 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) in ext4_mb_normalize_request()
4513 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_normalize_request()
4518 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) in ext4_mb_normalize_request()
4521 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { in ext4_mb_normalize_request()
4522 ext4_mb_normalize_group_request(ac); in ext4_mb_normalize_request()
4526 bsbits = ac->ac_sb->s_blocksize_bits; in ext4_mb_normalize_request()
4530 size = extent_logical_end(sbi, &ac->ac_o_ex); in ext4_mb_normalize_request()
4532 if (size < i_size_read(ac->ac_inode)) in ext4_mb_normalize_request()
4533 size = i_size_read(ac->ac_inode); in ext4_mb_normalize_request()
4560 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> in ext4_mb_normalize_request()
4564 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> in ext4_mb_normalize_request()
4567 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), in ext4_mb_normalize_request()
4569 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> in ext4_mb_normalize_request()
4573 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; in ext4_mb_normalize_request()
4575 ac->ac_o_ex.fe_len) << bsbits; in ext4_mb_normalize_request()
4586 start = max(start, rounddown(ac->ac_o_ex.fe_logical, in ext4_mb_normalize_request()
4587 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); in ext4_mb_normalize_request()
4595 size -= ar->lleft + 1 - start; in ext4_mb_normalize_request()
4596 start = ar->lleft + 1; in ext4_mb_normalize_request()
4598 if (ar->pright && start + size - 1 >= ar->lright) in ext4_mb_normalize_request()
4605 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) in ext4_mb_normalize_request()
4606 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); in ext4_mb_normalize_request()
4610 ext4_mb_pa_adjust_overlap(ac, &start, &end); in ext4_mb_normalize_request()
4618 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and in ext4_mb_normalize_request()
4629 if (start + size <= ac->ac_o_ex.fe_logical || in ext4_mb_normalize_request()
4630 start > ac->ac_o_ex.fe_logical) { in ext4_mb_normalize_request()
4631 ext4_msg(ac->ac_sb, KERN_ERR, in ext4_mb_normalize_request()
4634 (unsigned long) ac->ac_o_ex.fe_logical); in ext4_mb_normalize_request()
4637 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); in ext4_mb_normalize_request()
4643 ac->ac_g_ex.fe_logical = start; in ext4_mb_normalize_request()
4644 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); in ext4_mb_normalize_request()
4645 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; in ext4_mb_normalize_request()
4652 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, in ext4_mb_normalize_request()
4653 &ac->ac_g_ex.fe_group, in ext4_mb_normalize_request()
4654 &ac->ac_g_ex.fe_start); in ext4_mb_normalize_request()
4655 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; in ext4_mb_normalize_request()
4657 if (ar->pleft && (ar->lleft + 1 == start) && in ext4_mb_normalize_request()
4658 ar->pleft + 1 < ext4_blocks_count(es)) { in ext4_mb_normalize_request()
4660 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, in ext4_mb_normalize_request()
4661 &ac->ac_g_ex.fe_group, in ext4_mb_normalize_request()
4662 &ac->ac_g_ex.fe_start); in ext4_mb_normalize_request()
4663 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; in ext4_mb_normalize_request()
4666 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, in ext4_mb_normalize_request()
4670 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) in ext4_mb_collect_stats() argument
4672 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_collect_stats()
4674 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { in ext4_mb_collect_stats()
4676 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); in ext4_mb_collect_stats()
4677 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) in ext4_mb_collect_stats()
4680 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); in ext4_mb_collect_stats()
4682 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); in ext4_mb_collect_stats()
4685 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); in ext4_mb_collect_stats()
4686 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && in ext4_mb_collect_stats()
4687 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) in ext4_mb_collect_stats()
4690 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) in ext4_mb_collect_stats()
4693 if (ac->ac_found > sbi->s_mb_max_to_scan) in ext4_mb_collect_stats()
4697 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) in ext4_mb_collect_stats()
4698 trace_ext4_mballoc_alloc(ac); in ext4_mb_collect_stats()
4700 trace_ext4_mballoc_prealloc(ac); in ext4_mb_collect_stats()
4707 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4709 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) in ext4_discard_allocated_blocks() argument
4711 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks()
4716 if (ac->ac_f_ex.fe_len == 0) in ext4_discard_allocated_blocks()
4718 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); in ext4_discard_allocated_blocks()
4727 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); in ext4_discard_allocated_blocks()
4728 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, in ext4_discard_allocated_blocks()
4729 ac->ac_f_ex.fe_len); in ext4_discard_allocated_blocks()
4730 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); in ext4_discard_allocated_blocks()
4736 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
4744 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, in ext4_mb_use_inode_pa() argument
4747 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_inode_pa()
4753 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
4755 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); in ext4_mb_use_inode_pa()
4757 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, in ext4_mb_use_inode_pa()
4758 &ac->ac_b_ex.fe_start); in ext4_mb_use_inode_pa()
4759 ac->ac_b_ex.fe_len = len; in ext4_mb_use_inode_pa()
4760 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_inode_pa()
4761 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
4766 BUG_ON(ac->ac_b_ex.fe_len <= 0); in ext4_mb_use_inode_pa()
4769 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
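
Lines 4744-4769 carve the request out of an inode preallocation: the physical start is the PA's start plus the logical offset into it, and the length is clipped to what the PA still covers. Worked example, in block units with blocks mapping 1:1 to clusters for simplicity:

```c
#include <stdio.h>

int main(void)
{
	long pa_pstart = 100000, pa_lstart = 2000;	/* a 512-block PA */
	long pa_end = 100512;
	long logical = 2100, want = 600;		/* request overruns the PA */

	long start = pa_pstart + (logical - pa_lstart);		/* 100100 */
	long end = start + want < pa_end ? start + want : pa_end;
	long len = end - start;					/* clipped: 412 */

	printf("use %ld blocks at %ld from the PA\n", len, start);
	return 0;
}
```
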
4775 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, in ext4_mb_use_group_pa() argument
4778 unsigned int len = ac->ac_o_ex.fe_len; in ext4_mb_use_group_pa()
4780 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
4781 &ac->ac_b_ex.fe_group, in ext4_mb_use_group_pa()
4782 &ac->ac_b_ex.fe_start); in ext4_mb_use_group_pa()
4783 ac->ac_b_ex.fe_len = len; in ext4_mb_use_group_pa()
4784 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_group_pa()
4785 ac->ac_pa = pa; in ext4_mb_use_group_pa()
4793 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
4830 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac, in ext4_mb_pa_goal_check() argument
4833 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_pa_goal_check()
4836 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))) in ext4_mb_pa_goal_check()
4846 (ac->ac_g_ex.fe_logical - pa->pa_lstart); in ext4_mb_pa_goal_check()
4847 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) in ext4_mb_pa_goal_check()
4850 if (ac->ac_g_ex.fe_len > pa->pa_len - in ext4_mb_pa_goal_check()
4851 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) in ext4_mb_pa_goal_check()
4861 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) in ext4_mb_use_preallocated() argument
4863 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_preallocated()
4865 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_use_preallocated()
4872 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) in ext4_mb_use_preallocated()
4889 * Step 1: Find a pa with logical start immediately adjacent to the in ext4_mb_use_preallocated()
4895 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, in ext4_mb_use_preallocated()
4906 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) { in ext4_mb_use_preallocated()
4923 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); in ext4_mb_use_preallocated()
4955 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); in ext4_mb_use_preallocated()
4956 BUG_ON(tmp_pa->pa_deleted == 1); in ext4_mb_use_preallocated()
4963 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) { in ext4_mb_use_preallocated()
4969 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && in ext4_mb_use_preallocated()
4980 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { in ext4_mb_use_preallocated()
4982 ext4_mb_use_inode_pa(ac, tmp_pa); in ext4_mb_use_preallocated()
4992 * 1. When a new inode pa is added to rbtree it must have in ext4_mb_use_preallocated()
5021 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) in ext4_mb_use_preallocated()
5025 lg = ac->ac_lg; in ext4_mb_use_preallocated()
5028 order = fls(ac->ac_o_ex.fe_len) - 1; in ext4_mb_use_preallocated()
5029 if (order > PREALLOC_TB_SIZE - 1) in ext4_mb_use_preallocated()
5031 order = PREALLOC_TB_SIZE - 1; in ext4_mb_use_preallocated()
5033 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); in ext4_mb_use_preallocated()
5044 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { in ext4_mb_use_preallocated()
5054 ext4_mb_use_group_pa(ac, cpa); in ext4_mb_use_preallocated()
5116 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
5144 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, in ext4_mb_put_pa() argument
5149 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); in ext4_mb_put_pa()
5158 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
5236 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) in ext4_mb_new_inode_pa() argument
5238 struct super_block *sb = ac->ac_sb; in ext4_mb_new_inode_pa()
5245 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); in ext4_mb_new_inode_pa()
5246 BUG_ON(ac->ac_status != AC_STATUS_FOUND); in ext4_mb_new_inode_pa()
5247 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); in ext4_mb_new_inode_pa()
5248 BUG_ON(ac->ac_pa == NULL); in ext4_mb_new_inode_pa()
5250 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
5252 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { in ext4_mb_new_inode_pa()
5254 .fe_logical = ac->ac_g_ex.fe_logical, in ext4_mb_new_inode_pa()
5255 .fe_len = ac->ac_orig_goal_len, in ext4_mb_new_inode_pa()
5258 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); in ext4_mb_new_inode_pa()
5268 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); in ext4_mb_new_inode_pa()
5269 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); in ext4_mb_new_inode_pa()
5276 * 1. Check if best ex can be kept at end of goal (before in ext4_mb_new_inode_pa()
5282 ex.fe_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
5285 if (ac->ac_o_ex.fe_logical >= ex.fe_logical) in ext4_mb_new_inode_pa()
5288 ex.fe_logical = ac->ac_g_ex.fe_logical; in ext4_mb_new_inode_pa()
5292 ex.fe_logical = ac->ac_o_ex.fe_logical; in ext4_mb_new_inode_pa()
5294 ac->ac_b_ex.fe_logical = ex.fe_logical; in ext4_mb_new_inode_pa()
5296 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); in ext4_mb_new_inode_pa()
5300 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
5301 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
5302 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
5311 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
5314 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
5316 ei = EXT4_I(ac->ac_inode); in ext4_mb_new_inode_pa()
5317 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); in ext4_mb_new_inode_pa()
5322 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
5336 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) in ext4_mb_new_group_pa() argument
5338 struct super_block *sb = ac->ac_sb; in ext4_mb_new_group_pa()
5344 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); in ext4_mb_new_group_pa()
5345 BUG_ON(ac->ac_status != AC_STATUS_FOUND); in ext4_mb_new_group_pa()
5346 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); in ext4_mb_new_group_pa()
5347 BUG_ON(ac->ac_pa == NULL); in ext4_mb_new_group_pa()
5349 pa = ac->ac_pa; in ext4_mb_new_group_pa()
5351 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
5353 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
5363 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
5365 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
5368 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); in ext4_mb_new_group_pa()
5371 lg = ac->ac_lg; in ext4_mb_new_group_pa()
5385 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) in ext4_mb_new_preallocation() argument
5387 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) in ext4_mb_new_preallocation()
5388 ext4_mb_new_group_pa(ac); in ext4_mb_new_preallocation()
5390 ext4_mb_new_inode_pa(ac); in ext4_mb_new_preallocation()
5435 bit = next + 1; in ext4_mb_release_inode_pa()
5479 * 1) ENOSPC
5481 * 1) how many requested
5525 *busy = 1; in ext4_mb_discard_group_preallocations()
5632 WARN_ON(1); in ext4_discard_preallocations()
5700 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) in ext4_mb_pa_alloc() argument
5708 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
5709 ac->ac_pa = pa; in ext4_mb_pa_alloc()
5713 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) in ext4_mb_pa_put_free() argument
5715 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_put_free()
5718 ac->ac_pa = NULL; in ext4_mb_pa_put_free()
5725 pa->pa_deleted = 1; in ext4_mb_pa_put_free()
5764 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) in ext4_mb_show_ac() argument
5766 struct super_block *sb = ac->ac_sb; in ext4_mb_show_ac()
5774 ac->ac_status, ac->ac_flags); in ext4_mb_show_ac()
5778 (unsigned long)ac->ac_o_ex.fe_group, in ext4_mb_show_ac()
5779 (unsigned long)ac->ac_o_ex.fe_start, in ext4_mb_show_ac()
5780 (unsigned long)ac->ac_o_ex.fe_len, in ext4_mb_show_ac()
5781 (unsigned long)ac->ac_o_ex.fe_logical, in ext4_mb_show_ac()
5782 (unsigned long)ac->ac_g_ex.fe_group, in ext4_mb_show_ac()
5783 (unsigned long)ac->ac_g_ex.fe_start, in ext4_mb_show_ac()
5784 (unsigned long)ac->ac_g_ex.fe_len, in ext4_mb_show_ac()
5785 (unsigned long)ac->ac_g_ex.fe_logical, in ext4_mb_show_ac()
5786 (unsigned long)ac->ac_b_ex.fe_group, in ext4_mb_show_ac()
5787 (unsigned long)ac->ac_b_ex.fe_start, in ext4_mb_show_ac()
5788 (unsigned long)ac->ac_b_ex.fe_len, in ext4_mb_show_ac()
5789 (unsigned long)ac->ac_b_ex.fe_logical, in ext4_mb_show_ac()
5790 (int)ac->ac_criteria); in ext4_mb_show_ac()
5791 mb_debug(sb, "%u found", ac->ac_found); in ext4_mb_show_ac()
5792 mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa)); in ext4_mb_show_ac()
5793 if (ac->ac_pa) in ext4_mb_show_ac()
5794 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ? in ext4_mb_show_ac()
5802 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) in ext4_mb_show_ac() argument
5804 ext4_mb_show_pa(ac->ac_sb); in ext4_mb_show_ac()
5815 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) in ext4_mb_group_or_file() argument
5817 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_group_or_file()
5818 int bsbits = ac->ac_sb->s_blocksize_bits; in ext4_mb_group_or_file()
5822 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) in ext4_mb_group_or_file()
5825 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_group_or_file()
5830 size = extent_logical_end(sbi, &ac->ac_o_ex); in ext4_mb_group_or_file()
5831 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) in ext4_mb_group_or_file()
5836 !inode_is_open_for_write(ac->ac_inode)) in ext4_mb_group_or_file()
5846 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; in ext4_mb_group_or_file()
5848 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; in ext4_mb_group_or_file()
5852 BUG_ON(ac->ac_lg != NULL); in ext4_mb_group_or_file()
5858 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); in ext4_mb_group_or_file()
5861 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; in ext4_mb_group_or_file()
5864 mutex_lock(&ac->ac_lg->lg_mutex); in ext4_mb_group_or_file()
5868 ext4_mb_initialize_context(struct ext4_allocation_context *ac, in ext4_mb_initialize_context() argument
5894 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); in ext4_mb_initialize_context()
5895 ac->ac_status = AC_STATUS_CONTINUE; in ext4_mb_initialize_context()
5896 ac->ac_sb = sb; in ext4_mb_initialize_context()
5897 ac->ac_inode = ar->inode; in ext4_mb_initialize_context()
5898 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; in ext4_mb_initialize_context()
5899 ac->ac_o_ex.fe_group = group; in ext4_mb_initialize_context()
5900 ac->ac_o_ex.fe_start = block; in ext4_mb_initialize_context()
5901 ac->ac_o_ex.fe_len = len; in ext4_mb_initialize_context()
5902 ac->ac_g_ex = ac->ac_o_ex; in ext4_mb_initialize_context()
5903 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; in ext4_mb_initialize_context()
5904 ac->ac_flags = ar->flags; in ext4_mb_initialize_context()
5908 ext4_mb_group_or_file(ac); in ext4_mb_initialize_context()
5910 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " in ext4_mb_initialize_context()
5913 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, in ext4_mb_initialize_context()
6003 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) in ext4_mb_add_n_trim() argument
6005 int order, added = 0, lg_prealloc_count = 1; in ext4_mb_add_n_trim()
6006 struct super_block *sb = ac->ac_sb; in ext4_mb_add_n_trim()
6007 struct ext4_locality_group *lg = ac->ac_lg; in ext4_mb_add_n_trim()
6008 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim()
6010 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
6011 if (order > PREALLOC_TB_SIZE - 1) in ext4_mb_add_n_trim()
6013 order = PREALLOC_TB_SIZE - 1; in ext4_mb_add_n_trim()
6028 added = 1; in ext4_mb_add_n_trim()
6051 static void ext4_mb_release_context(struct ext4_allocation_context *ac) in ext4_mb_release_context() argument
6053 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_release_context()
6054 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context()
6059 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
6060 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
6061 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
6062 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
6075 ext4_mb_add_n_trim(ac); in ext4_mb_release_context()
6079 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
6081 if (ac->ac_bitmap_folio) in ext4_mb_release_context()
6082 folio_put(ac->ac_bitmap_folio); in ext4_mb_release_context()
6083 if (ac->ac_buddy_folio) in ext4_mb_release_context()
6084 folio_put(ac->ac_buddy_folio); in ext4_mb_release_context()
6085 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) in ext4_mb_release_context()
6086 mutex_unlock(&ac->ac_lg->lg_mutex); in ext4_mb_release_context()
6087 ext4_mb_collect_stats(ac); in ext4_mb_release_context()
6100 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; in ext4_mb_discard_preallocations()
6118 struct ext4_allocation_context *ac, u64 *seq) in ext4_mb_discard_preallocations_should_retry() argument
6124 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); in ext4_mb_discard_preallocations_should_retry()
6130 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { in ext4_mb_discard_preallocations_should_retry()
6131 ac->ac_flags |= EXT4_MB_STRICT_CHECK; in ext4_mb_discard_preallocations_should_retry()
6174 while (1) { in ext4_mb_new_blocks_simple()
6182 blkoff = i + 1; in ext4_mb_new_blocks_simple()
6202 ext4_mb_mark_bb(sb, block, 1, true); in ext4_mb_new_blocks_simple()
6203 ar->len = 1; in ext4_mb_new_blocks_simple()
6217 struct ext4_allocation_context *ac = NULL; in ext4_mb_new_blocks() local
6248 ar->len = ar->len >> 1; in ext4_mb_new_blocks()
6275 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); in ext4_mb_new_blocks()
6276 if (!ac) { in ext4_mb_new_blocks()
6282 ext4_mb_initialize_context(ac, ar); in ext4_mb_new_blocks()
6284 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; in ext4_mb_new_blocks()
6286 if (!ext4_mb_use_preallocated(ac)) { in ext4_mb_new_blocks()
6287 ac->ac_op = EXT4_MB_HISTORY_ALLOC; in ext4_mb_new_blocks()
6288 ext4_mb_normalize_request(ac, ar); in ext4_mb_new_blocks()
6290 *errp = ext4_mb_pa_alloc(ac); in ext4_mb_new_blocks()
6295 *errp = ext4_mb_regular_allocator(ac); in ext4_mb_new_blocks()
6299 * ac->ac_status == AC_STATUS_FOUND. in ext4_mb_new_blocks()
6300 * And error from above mean ac->ac_status != AC_STATUS_FOUND in ext4_mb_new_blocks()
6304 ext4_mb_pa_put_free(ac); in ext4_mb_new_blocks()
6305 ext4_discard_allocated_blocks(ac); in ext4_mb_new_blocks()
6308 if (ac->ac_status == AC_STATUS_FOUND && in ext4_mb_new_blocks()
6309 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) in ext4_mb_new_blocks()
6310 ext4_mb_pa_put_free(ac); in ext4_mb_new_blocks()
6312 if (likely(ac->ac_status == AC_STATUS_FOUND)) { in ext4_mb_new_blocks()
6313 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); in ext4_mb_new_blocks()
6315 ext4_discard_allocated_blocks(ac); in ext4_mb_new_blocks()
6318 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_blocks()
6319 ar->len = ac->ac_b_ex.fe_len; in ext4_mb_new_blocks()
6323 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) in ext4_mb_new_blocks()
6329 ext4_mb_pa_put_free(ac); in ext4_mb_new_blocks()
6335 ac->ac_b_ex.fe_len = 0; in ext4_mb_new_blocks()
6337 ext4_mb_show_ac(ac); in ext4_mb_new_blocks()
6339 ext4_mb_release_context(ac); in ext4_mb_new_blocks()
6340 kmem_cache_free(ext4_ac_cachep, ac); in ext4_mb_new_blocks()
6491 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]); in ext4_mb_free_metadata()
6715 BUG_ON(count > 1); in ext4_free_blocks()
6791 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); in ext4_group_add_blocks()
6792 unsigned long cluster_count = last_cluster - first_cluster + 1; in ext4_group_add_blocks()
6795 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); in ext4_group_add_blocks()
6891 if (grp < (ext4_get_groups_count(sb) - 1)) in ext4_last_grp_cluster()
6898 return nr_clusters_in_group - 1; in ext4_last_grp_cluster()
6929 start = mb_find_next_zero_bit(bitmap, max + 1, start); in ext4_try_to_trim_range()
6933 next = mb_find_next_bit(bitmap, last + 1, start); in ext4_try_to_trim_range()
6945 start = next + 1; in ext4_try_to_trim_range()
7038 end = start + (range->len >> sb->s_blocksize_bits) - 1; in ext4_trim_fs()
7053 if (end >= max_blks - 1) in ext4_trim_fs()
7054 end = max_blks - 1; in ext4_trim_fs()
7067 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; in ext4_trim_fs()
7084 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to in ext4_trim_fs()
7140 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; in ext4_mballoc_query_range()
7152 start = mb_find_next_zero_bit(bitmap, end + 1, start); in ext4_mballoc_query_range()
7155 next = mb_find_next_bit(bitmap, end + 1, start); in ext4_mballoc_query_range()
7163 start = next + 1; in ext4_mballoc_query_range()
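
Lines 6929-6945 and 7152-7163 both enumerate maximal free runs by alternating next-zero-bit and next-set-bit scans, resuming one past each run. A toy version over a 16-bit map; the helpers stand in for mb_find_next_zero_bit() and mb_find_next_bit():

```c
#include <stdio.h>

#define NBITS 16

static unsigned int bits = 0xB39F;	/* free (zero) bits: 5-6, 10-11, 14 */

static int next_zero(int from)
{
	for (int i = from; i < NBITS; i++)
		if (!(bits >> i & 1))
			return i;
	return NBITS;
}

static int next_set(int from)
{
	for (int i = from; i < NBITS; i++)
		if (bits >> i & 1)
			return i;
	return NBITS;
}

int main(void)
{
	int start = 0;

	while ((start = next_zero(start)) < NBITS) {
		int next = next_set(start);

		printf("free extent [%d..%d], %d clusters\n",
		       start, next - 1, next - start);
		start = next + 1;	/* resume past this run */
	}
	return 0;
}
```
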