// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks near to the
 * specified goal (block) value.
 *
 * During the initialization phase of the allocator we decide whether to use
 * group preallocation or inode preallocation depending on the size of the
 * file. The size of the file could be the resulting file size we would have
 * after allocation, or the current file size, whichever is larger. If the
 * size is less than sbi->s_mb_stream_request we select group preallocation.
 * The default value of s_mb_stream_request is 16 blocks. This can also be
 * tuned via /sys/fs/ext4/<partition>/mb_stream_req. The value is represented
 * in terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that small files are placed closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is selected by looking at the _logical_
 * start block. If the logical file block falls within the range of a
 * prealloc space we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we don't
 * modify any of its values except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and the
 * group allocation flag is set, then we look at the locality group prealloc
 * space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce contention
 * between CPUs. It is possible to get scheduled at this point.
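 *
 * For illustration, picking the CPU-local group boils down to something
 * like the following sketch (a rough sketch, not necessarily the exact code
 * in this file):
 *
 *	struct ext4_locality_group *lg;
 *
 *	lg = raw_cpu_ptr(sbi->s_locality_groups);
 *
 * and since we may get preempted right after this, the rest of the
 * allocation path must not assume it is still running on that CPU; it only
 * needs *a* locality group, not the one of the CPU it ends up running on.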
 *
 * The locality group prealloc space is used by looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offsets get
 * mapped to the buddy and bitmap information of the different groups. The
 * buddy information is attached to the buddy cache inode so that we can
 * access it through the page cache. The information for each group is
 * loaded via ext4_mb_load_buddy and consists of the block bitmap and the
 * buddy information. It is stored in the inode as:
 *
 * { folio }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 * one block each for the bitmap and the buddy information. So for each
 * group we take up 2 blocks. A folio can contain blocks_per_folio
 * (folio_size / blocksize) blocks, so it can hold information for
 * groups_per_folio = blocks_per_folio/2 groups.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we are able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we actually need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order xarrays (sbi->s_mb_largest_free_orders)
 *
 *    Locking: Writers use xa_lock, readers use rcu_read_lock.
 *
 *    This is an array of xarrays where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that xarray. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) xarrays. Group infos are
 *    placed in the appropriate xarrays.
 *
 * 2) Average fragment size xarrays (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: Writers use xa_lock, readers use rcu_read_lock.
 *
 *    This is an array of xarrays where the i-th xarray contains groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
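 *
 *    Worked example (numbers assumed purely for illustration): a group with
 *    bb_free = 300 free clusters spread over bb_fragments = 7 free extents
 *    has an average fragment size of 300 / 7 = 42 clusters, so it is filed
 *    in the xarray covering the [32, 64) range. An allocation request for,
 *    say, 40 clusters would start its lookup in that same range and only
 *    move on to the larger-size xarrays if nothing is found there (see the
 *    criteria described below).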
 *    Note that we don't bother with a special xarray for completely empty
 *    groups, so we only have MB_NUM_ORDERS(sb) xarrays. Group infos are
 *    placed in the appropriate xarrays.
 *
 * In each xarray, the index is the block group number and the value is the
 * block group information; a non-empty value indicates that the block group
 * is present in that xarray.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the
 * above data structures to decide the order in which groups are to be
 * traversed for fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups which have a largest_free_order
 * >= the order of the request. We directly look at the largest free order
 * xarray in data structure (1) above where largest_free_order = order of
 * the request. If that xarray is empty, we look at the remaining xarrays
 * in increasing order of largest_free_order. This allows us to perform the
 * CR_POWER2_ALIGNED lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups whose average fragment size
 * is >= the request size. So, we look up a group which has an average
 * fragment size just above or equal to the request size using our average
 * fragment size xarrays (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be
 * satisfied in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has an average
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal
 * length and then use the same fragment xarrays as CR_GOAL_LEN_FAST to
 * find a BG with a big enough average fragment size. This increases the
 * chances of finding a suitable block group in O(1) time and results in
 * faster allocation at the cost of reduced allocation size.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses
 * groups in linear order, which requires O(N) search time in each of the
 * CR_POWER2_ALIGNED and CR_GOAL_LEN_FAST phases.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_max_linear_groups
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe-sized units. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for best
 * extents. The tunables min_to_scan and max_to_scan control the behaviour
 * here. min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a best
 * extent among the found extents. Searching for blocks starts with the
 * group specified as the goal value in the allocation context via ac_g_ex.
 * Each group is first checked against the criteria to see whether it can be
 * used for allocation; ext4_mb_good_group explains how the groups are
 * checked.
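 *
 * For example, the "power of 2" gate above amounts to roughly the following
 * (a sketch, not the exact code; the allocator derives the order into
 * ac_2order):
 *
 *	use_buddy_scan = is_power_of_2(len) &&
 *			 ilog2(len) >= sbi->s_mb_order2_reqs;
 *
 * so with the default mb_order2_req a 64-block request can be served by
 * looking at the order-6 buddies directly, while e.g. a 48-block request
 * falls back to bitmap scanning inside the candidate groups.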
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations not
 * being close to each other, and so the underlying device may get filled up
 * in a non-linear fashion. While that may not matter on non-rotational
 * devices, on rotational devices it may result in higher seek times.
 * "mb_max_linear_groups" tells mballoc how many groups it should search
 * linearly before consulting the above data structures for more efficient
 * lookups. For non-rotational devices, this value defaults to 0 and for
 * rotational devices it is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space get populated as described above. So for the
 * first request we will hit the buddy cache, which will result in the
 * prealloc space getting filled. The prealloc space is then used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave the group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers of
 * blocks: how many blocks are marked used/free in the on-disk bitmap, buddy
 * and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actually
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all the operations above should be
 * atomic. given that some of them can block, we'd have to use something
 * like semaphores, killing performance on high-end SMP hardware.
 * let's try to relax it using the following knowledge:
 * 1) if buddy is referenced, it's already initialized
 * 2) while block is used in buddy and the buddy is referenced,
 *    nobody can re-allocate that block
 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *    a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *    bit in the on-disk bitmap if the buddy has the same bit set and/or a
 *    PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that the PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA is referenced, and while it is, no discard is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness:
 * no need to modify buddy in this case, but we should care about
 * concurrent init
 *
 */

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr_power2_aligned lists lock	(cr_power2_aligned)
 *  - cr_goal_len_fast lists lock	(cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
			      ext4_group_t group);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes below:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu.
Only when the block allocation fails and when freed blocks 449 * found were 0, that is when we sample percpu seq counter for all cpus using 450 * below function ext4_get_discard_pa_seq_sum(). This happens after making 451 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty. 452 */ 453 static DEFINE_PER_CPU(u64, discard_pa_seq); 454 static inline u64 ext4_get_discard_pa_seq_sum(void) 455 { 456 int __cpu; 457 u64 __seq = 0; 458 459 for_each_possible_cpu(__cpu) 460 __seq += per_cpu(discard_pa_seq, __cpu); 461 return __seq; 462 } 463 464 static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 465 { 466 #if BITS_PER_LONG == 64 467 *bit += ((unsigned long) addr & 7UL) << 3; 468 addr = (void *) ((unsigned long) addr & ~7UL); 469 #elif BITS_PER_LONG == 32 470 *bit += ((unsigned long) addr & 3UL) << 3; 471 addr = (void *) ((unsigned long) addr & ~3UL); 472 #else 473 #error "how many bits you are?!" 474 #endif 475 return addr; 476 } 477 478 static inline int mb_test_bit(int bit, void *addr) 479 { 480 /* 481 * ext4_test_bit on architecture like powerpc 482 * needs unsigned long aligned address 483 */ 484 addr = mb_correct_addr_and_bit(&bit, addr); 485 return ext4_test_bit(bit, addr); 486 } 487 488 static inline void mb_set_bit(int bit, void *addr) 489 { 490 addr = mb_correct_addr_and_bit(&bit, addr); 491 ext4_set_bit(bit, addr); 492 } 493 494 static inline void mb_clear_bit(int bit, void *addr) 495 { 496 addr = mb_correct_addr_and_bit(&bit, addr); 497 ext4_clear_bit(bit, addr); 498 } 499 500 static inline int mb_test_and_clear_bit(int bit, void *addr) 501 { 502 addr = mb_correct_addr_and_bit(&bit, addr); 503 return ext4_test_and_clear_bit(bit, addr); 504 } 505 506 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 507 { 508 int fix = 0, ret, tmpmax; 509 addr = mb_correct_addr_and_bit(&fix, addr); 510 tmpmax = max + fix; 511 start += fix; 512 513 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 514 if (ret > max) 515 return max; 516 return ret; 517 } 518 519 static inline int mb_find_next_bit(void *addr, int max, int start) 520 { 521 int fix = 0, ret, tmpmax; 522 addr = mb_correct_addr_and_bit(&fix, addr); 523 tmpmax = max + fix; 524 start += fix; 525 526 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 527 if (ret > max) 528 return max; 529 return ret; 530 } 531 532 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 533 { 534 char *bb; 535 536 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 537 BUG_ON(max == NULL); 538 539 if (order > e4b->bd_blkbits + 1) { 540 *max = 0; 541 return NULL; 542 } 543 544 /* at order 0 we see each particular block */ 545 if (order == 0) { 546 *max = 1 << (e4b->bd_blkbits + 3); 547 return e4b->bd_bitmap; 548 } 549 550 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 551 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 552 553 return bb; 554 } 555 556 #ifdef DOUBLE_CHECK 557 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 558 int first, int count) 559 { 560 int i; 561 struct super_block *sb = e4b->bd_sb; 562 563 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 564 return; 565 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 566 for (i = 0; i < count; i++) { 567 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 568 ext4_fsblk_t blocknr; 569 570 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 571 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 572 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 573 
EXT4_GROUP_INFO_BBITMAP_CORRUPT); 574 ext4_grp_locked_error(sb, e4b->bd_group, 575 inode ? inode->i_ino : 0, 576 blocknr, 577 "freeing block already freed " 578 "(bit %u)", 579 first + i); 580 } 581 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 582 } 583 } 584 585 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 586 { 587 int i; 588 589 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 590 return; 591 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 592 for (i = 0; i < count; i++) { 593 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 594 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 595 } 596 } 597 598 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 599 { 600 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 601 return; 602 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 603 unsigned char *b1, *b2; 604 int i; 605 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 606 b2 = (unsigned char *) bitmap; 607 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 608 if (b1[i] != b2[i]) { 609 ext4_msg(e4b->bd_sb, KERN_ERR, 610 "corruption in group %u " 611 "at byte %u(%u): %x in copy != %x " 612 "on disk/prealloc", 613 e4b->bd_group, i, i * 8, b1[i], b2[i]); 614 BUG(); 615 } 616 } 617 } 618 } 619 620 static void mb_group_bb_bitmap_alloc(struct super_block *sb, 621 struct ext4_group_info *grp, ext4_group_t group) 622 { 623 struct buffer_head *bh; 624 625 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); 626 if (!grp->bb_bitmap) 627 return; 628 629 bh = ext4_read_block_bitmap(sb, group); 630 if (IS_ERR_OR_NULL(bh)) { 631 kfree(grp->bb_bitmap); 632 grp->bb_bitmap = NULL; 633 return; 634 } 635 636 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); 637 put_bh(bh); 638 } 639 640 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 641 { 642 kfree(grp->bb_bitmap); 643 } 644 645 #else 646 static inline void mb_free_blocks_double(struct inode *inode, 647 struct ext4_buddy *e4b, int first, int count) 648 { 649 return; 650 } 651 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 652 int first, int count) 653 { 654 return; 655 } 656 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 657 { 658 return; 659 } 660 661 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, 662 struct ext4_group_info *grp, ext4_group_t group) 663 { 664 return; 665 } 666 667 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 668 { 669 return; 670 } 671 #endif 672 673 #ifdef AGGRESSIVE_CHECK 674 675 #define MB_CHECK_ASSERT(assert) \ 676 do { \ 677 if (!(assert)) { \ 678 printk(KERN_EMERG \ 679 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 680 function, file, line, # assert); \ 681 BUG(); \ 682 } \ 683 } while (0) 684 685 /* 686 * Perform buddy integrity check with the following steps: 687 * 688 * 1. Top-down validation (from highest order down to order 1, excluding order-0 bitmap): 689 * For each pair of adjacent orders, if a higher-order bit is set (indicating a free block), 690 * at most one of the two corresponding lower-order bits may be clear (free). 691 * 692 * 2. Order-0 (bitmap) validation, performed on bit pairs: 693 * - If either bit in a pair is set (1, allocated), then all corresponding higher-order bits 694 * must not be free (0). 695 * - If both bits in a pair are clear (0, free), then exactly one of the corresponding 696 * higher-order bits must be free (0). 697 * 698 * 3. 
Preallocation (pa) list validation: 699 * For each preallocated block (pa) in the group: 700 * - Verify that pa_pstart falls within the bounds of this block group. 701 * - Ensure the corresponding bit(s) in the order-0 bitmap are marked as allocated (1). 702 */ 703 static void __mb_check_buddy(struct ext4_buddy *e4b, char *file, 704 const char *function, int line) 705 { 706 struct super_block *sb = e4b->bd_sb; 707 int order = e4b->bd_blkbits + 1; 708 int max; 709 int max2; 710 int i; 711 int j; 712 int k; 713 int count; 714 struct ext4_group_info *grp; 715 int fragments = 0; 716 int fstart; 717 struct list_head *cur; 718 void *buddy; 719 void *buddy2; 720 721 if (e4b->bd_info->bb_check_counter++ % 10) 722 return; 723 724 while (order > 1) { 725 buddy = mb_find_buddy(e4b, order, &max); 726 MB_CHECK_ASSERT(buddy); 727 buddy2 = mb_find_buddy(e4b, order - 1, &max2); 728 MB_CHECK_ASSERT(buddy2); 729 MB_CHECK_ASSERT(buddy != buddy2); 730 MB_CHECK_ASSERT(max * 2 == max2); 731 732 count = 0; 733 for (i = 0; i < max; i++) { 734 735 if (mb_test_bit(i, buddy)) { 736 /* only single bit in buddy2 may be 0 */ 737 if (!mb_test_bit(i << 1, buddy2)) { 738 MB_CHECK_ASSERT( 739 mb_test_bit((i<<1)+1, buddy2)); 740 } 741 continue; 742 } 743 744 count++; 745 } 746 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 747 order--; 748 } 749 750 fstart = -1; 751 buddy = mb_find_buddy(e4b, 0, &max); 752 for (i = 0; i < max; i++) { 753 if (!mb_test_bit(i, buddy)) { 754 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 755 if (fstart == -1) { 756 fragments++; 757 fstart = i; 758 } 759 } else { 760 fstart = -1; 761 } 762 if (!(i & 1)) { 763 int in_use, zero_bit_count = 0; 764 765 in_use = mb_test_bit(i, buddy) || mb_test_bit(i + 1, buddy); 766 for (j = 1; j < e4b->bd_blkbits + 2; j++) { 767 buddy2 = mb_find_buddy(e4b, j, &max2); 768 k = i >> j; 769 MB_CHECK_ASSERT(k < max2); 770 if (!mb_test_bit(k, buddy2)) 771 zero_bit_count++; 772 } 773 MB_CHECK_ASSERT(zero_bit_count == !in_use); 774 } 775 } 776 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 777 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 778 779 grp = ext4_get_group_info(sb, e4b->bd_group); 780 if (!grp) 781 return; 782 list_for_each(cur, &grp->bb_prealloc_list) { 783 ext4_group_t groupnr; 784 struct ext4_prealloc_space *pa; 785 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 786 if (!pa->pa_len) 787 continue; 788 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 789 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 790 for (i = 0; i < pa->pa_len; i++) 791 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 792 } 793 } 794 #undef MB_CHECK_ASSERT 795 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 796 __FILE__, __func__, __LINE__) 797 #else 798 #define mb_check_buddy(e4b) 799 #endif 800 801 /* 802 * Divide blocks started from @first with length @len into 803 * smaller chunks with power of 2 blocks. 804 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 805 * then increase bb_counters[] for corresponded chunk size. 
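 *
 * Worked example (values assumed purely for illustration): a free range of
 * 11 clusters starting at bit 5 is carved up as
 *
 *	first = 5,  len = 11:  chunk = 1  (cluster 5,      bb_counters[0]++)
 *	first = 6,  len = 10:  chunk = 2  (clusters 6-7,   bb_counters[1]++)
 *	first = 8,  len = 8:   chunk = 8  (clusters 8-15,  bb_counters[3]++)
 *
 * i.e. at every step the chunk is limited both by the alignment of @first
 * and by the largest power of two not exceeding @len, and the buddy bit
 * covering each multi-block chunk is cleared (marked free).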
806 */ 807 static void ext4_mb_mark_free_simple(struct super_block *sb, 808 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 809 struct ext4_group_info *grp) 810 { 811 struct ext4_sb_info *sbi = EXT4_SB(sb); 812 ext4_grpblk_t min; 813 ext4_grpblk_t max; 814 ext4_grpblk_t chunk; 815 unsigned int border; 816 817 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 818 819 border = 2 << sb->s_blocksize_bits; 820 821 while (len > 0) { 822 /* find how many blocks can be covered since this position */ 823 max = ffs(first | border) - 1; 824 825 /* find how many blocks of power 2 we need to mark */ 826 min = fls(len) - 1; 827 828 if (max < min) 829 min = max; 830 chunk = 1 << min; 831 832 /* mark multiblock chunks only */ 833 grp->bb_counters[min]++; 834 if (min > 0) 835 mb_clear_bit(first >> min, 836 buddy + sbi->s_mb_offsets[min]); 837 838 len -= chunk; 839 first += chunk; 840 } 841 } 842 843 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len) 844 { 845 int order; 846 847 /* 848 * We don't bother with a special lists groups with only 1 block free 849 * extents and for completely empty groups. 850 */ 851 order = fls(len) - 2; 852 if (order < 0) 853 return 0; 854 if (order == MB_NUM_ORDERS(sb)) 855 order--; 856 if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb))) 857 order = MB_NUM_ORDERS(sb) - 1; 858 return order; 859 } 860 861 /* Move group to appropriate avg_fragment_size list */ 862 static void 863 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) 864 { 865 struct ext4_sb_info *sbi = EXT4_SB(sb); 866 int new, old; 867 868 if (!test_opt2(sb, MB_OPTIMIZE_SCAN)) 869 return; 870 871 old = grp->bb_avg_fragment_size_order; 872 new = grp->bb_fragments == 0 ? -1 : 873 mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments); 874 if (new == old) 875 return; 876 877 if (old >= 0) 878 xa_erase(&sbi->s_mb_avg_fragment_size[old], grp->bb_group); 879 880 grp->bb_avg_fragment_size_order = new; 881 if (new >= 0) { 882 /* 883 * Cannot use __GFP_NOFAIL because we hold the group lock. 884 * Although allocation for insertion may fails, it's not fatal 885 * as we have linear traversal to fall back on. 886 */ 887 int err = xa_insert(&sbi->s_mb_avg_fragment_size[new], 888 grp->bb_group, grp, GFP_ATOMIC); 889 if (err) 890 mb_debug(sb, "insert group: %u to s_mb_avg_fragment_size[%d] failed, err %d", 891 grp->bb_group, new, err); 892 } 893 } 894 895 static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac, 896 struct xarray *xa, 897 ext4_group_t start, ext4_group_t end) 898 { 899 struct super_block *sb = ac->ac_sb; 900 struct ext4_sb_info *sbi = EXT4_SB(sb); 901 enum criteria cr = ac->ac_criteria; 902 ext4_group_t ngroups = ext4_get_groups_count(sb); 903 unsigned long group = start; 904 struct ext4_group_info *grp; 905 906 if (WARN_ON_ONCE(end > ngroups || start >= end)) 907 return 0; 908 909 xa_for_each_range(xa, group, grp, start, end - 1) { 910 int err; 911 912 if (sbi->s_mb_stats) 913 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); 914 915 err = ext4_mb_scan_group(ac, grp->bb_group); 916 if (err || ac->ac_status != AC_STATUS_CONTINUE) 917 return err; 918 919 cond_resched(); 920 } 921 922 return 0; 923 } 924 925 /* 926 * Find a suitable group of given order from the largest free orders xarray. 
927 */ 928 static inline int 929 ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac, 930 int order, ext4_group_t start, 931 ext4_group_t end) 932 { 933 struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order]; 934 935 if (xa_empty(xa)) 936 return 0; 937 938 return ext4_mb_scan_groups_xa_range(ac, xa, start, end); 939 } 940 941 /* 942 * Choose next group by traversing largest_free_order lists. Updates *new_cr if 943 * cr level needs an update. 944 */ 945 static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac, 946 ext4_group_t group) 947 { 948 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 949 int i; 950 int ret = 0; 951 ext4_group_t start, end; 952 953 start = group; 954 end = ext4_get_groups_count(ac->ac_sb); 955 wrap_around: 956 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 957 ret = ext4_mb_scan_groups_largest_free_order_range(ac, i, 958 start, end); 959 if (ret || ac->ac_status != AC_STATUS_CONTINUE) 960 return ret; 961 } 962 if (start) { 963 end = start; 964 start = 0; 965 goto wrap_around; 966 } 967 968 if (sbi->s_mb_stats) 969 atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); 970 971 /* Increment cr and search again if no group is found */ 972 ac->ac_criteria = CR_GOAL_LEN_FAST; 973 return ret; 974 } 975 976 /* 977 * Find a suitable group of given order from the average fragments xarray. 978 */ 979 static int 980 ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac, 981 int order, ext4_group_t start, 982 ext4_group_t end) 983 { 984 struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order]; 985 986 if (xa_empty(xa)) 987 return 0; 988 989 return ext4_mb_scan_groups_xa_range(ac, xa, start, end); 990 } 991 992 /* 993 * Choose next group by traversing average fragment size list of suitable 994 * order. Updates *new_cr if cr level needs an update. 995 */ 996 static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac, 997 ext4_group_t group) 998 { 999 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1000 int i, ret = 0; 1001 ext4_group_t start, end; 1002 1003 start = group; 1004 end = ext4_get_groups_count(ac->ac_sb); 1005 wrap_around: 1006 i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); 1007 for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 1008 ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i, 1009 start, end); 1010 if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1011 return ret; 1012 } 1013 if (start) { 1014 end = start; 1015 start = 0; 1016 goto wrap_around; 1017 } 1018 1019 if (sbi->s_mb_stats) 1020 atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); 1021 /* 1022 * CR_BEST_AVAIL_LEN works based on the concept that we have 1023 * a larger normalized goal len request which can be trimmed to 1024 * a smaller goal len such that it can still satisfy original 1025 * request len. However, allocation request for non-regular 1026 * files never gets normalized. 1027 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA). 1028 */ 1029 if (ac->ac_flags & EXT4_MB_HINT_DATA) 1030 ac->ac_criteria = CR_BEST_AVAIL_LEN; 1031 else 1032 ac->ac_criteria = CR_GOAL_LEN_SLOW; 1033 1034 return ret; 1035 } 1036 1037 /* 1038 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment 1039 * order we have and proactively trim the goal request length to that order to 1040 * find a suitable group faster. 1041 * 1042 * This optimizes allocation speed at the cost of slightly reduced 1043 * preallocations. 
However, we make sure that we don't trim the request too 1044 * much and fall to CR_GOAL_LEN_SLOW in that case. 1045 */ 1046 static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac, 1047 ext4_group_t group) 1048 { 1049 int ret = 0; 1050 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1051 int i, order, min_order; 1052 unsigned long num_stripe_clusters = 0; 1053 ext4_group_t start, end; 1054 1055 /* 1056 * mb_avg_fragment_size_order() returns order in a way that makes 1057 * retrieving back the length using (1 << order) inaccurate. Hence, use 1058 * fls() instead since we need to know the actual length while modifying 1059 * goal length. 1060 */ 1061 order = fls(ac->ac_g_ex.fe_len) - 1; 1062 if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb))) 1063 order = MB_NUM_ORDERS(ac->ac_sb); 1064 min_order = order - sbi->s_mb_best_avail_max_trim_order; 1065 if (min_order < 0) 1066 min_order = 0; 1067 1068 if (sbi->s_stripe > 0) { 1069 /* 1070 * We are assuming that stripe size is always a multiple of 1071 * cluster ratio otherwise __ext4_fill_super exists early. 1072 */ 1073 num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe); 1074 if (1 << min_order < num_stripe_clusters) 1075 /* 1076 * We consider 1 order less because later we round 1077 * up the goal len to num_stripe_clusters 1078 */ 1079 min_order = fls(num_stripe_clusters) - 1; 1080 } 1081 1082 if (1 << min_order < ac->ac_o_ex.fe_len) 1083 min_order = fls(ac->ac_o_ex.fe_len); 1084 1085 start = group; 1086 end = ext4_get_groups_count(ac->ac_sb); 1087 wrap_around: 1088 for (i = order; i >= min_order; i--) { 1089 int frag_order; 1090 /* 1091 * Scale down goal len to make sure we find something 1092 * in the free fragments list. Basically, reduce 1093 * preallocations. 1094 */ 1095 ac->ac_g_ex.fe_len = 1 << i; 1096 1097 if (num_stripe_clusters > 0) { 1098 /* 1099 * Try to round up the adjusted goal length to 1100 * stripe size (in cluster units) multiple for 1101 * efficiency. 1102 */ 1103 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, 1104 num_stripe_clusters); 1105 } 1106 1107 frag_order = mb_avg_fragment_size_order(ac->ac_sb, 1108 ac->ac_g_ex.fe_len); 1109 1110 ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order, 1111 start, end); 1112 if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1113 return ret; 1114 } 1115 if (start) { 1116 end = start; 1117 start = 0; 1118 goto wrap_around; 1119 } 1120 1121 /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */ 1122 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 1123 if (sbi->s_mb_stats) 1124 atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); 1125 ac->ac_criteria = CR_GOAL_LEN_SLOW; 1126 1127 return ret; 1128 } 1129 1130 static inline int should_optimize_scan(struct ext4_allocation_context *ac) 1131 { 1132 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) 1133 return 0; 1134 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) 1135 return 0; 1136 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) 1137 return 0; 1138 return 1; 1139 } 1140 1141 /* 1142 * next linear group for allocation. 1143 */ 1144 static void next_linear_group(ext4_group_t *group, ext4_group_t ngroups) 1145 { 1146 /* 1147 * Artificially restricted ngroups for non-extent 1148 * files makes group > ngroups possible on first loop. 1149 */ 1150 *group = *group + 1 >= ngroups ? 
0 : *group + 1; 1151 } 1152 1153 static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac, 1154 ext4_group_t ngroups, ext4_group_t *start, ext4_group_t count) 1155 { 1156 int ret, i; 1157 enum criteria cr = ac->ac_criteria; 1158 struct super_block *sb = ac->ac_sb; 1159 struct ext4_sb_info *sbi = EXT4_SB(sb); 1160 ext4_group_t group = *start; 1161 1162 for (i = 0; i < count; i++, next_linear_group(&group, ngroups)) { 1163 ret = ext4_mb_scan_group(ac, group); 1164 if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1165 return ret; 1166 cond_resched(); 1167 } 1168 1169 *start = group; 1170 if (count == ngroups) 1171 ac->ac_criteria++; 1172 1173 /* Processed all groups and haven't found blocks */ 1174 if (sbi->s_mb_stats && i == ngroups) 1175 atomic64_inc(&sbi->s_bal_cX_failed[cr]); 1176 1177 return 0; 1178 } 1179 1180 static int ext4_mb_scan_groups(struct ext4_allocation_context *ac) 1181 { 1182 int ret = 0; 1183 ext4_group_t start; 1184 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1185 ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb); 1186 1187 /* non-extent files are limited to low blocks/groups */ 1188 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 1189 ngroups = sbi->s_blockfile_groups; 1190 1191 /* searching for the right group start from the goal value specified */ 1192 start = ac->ac_g_ex.fe_group; 1193 ac->ac_prefetch_grp = start; 1194 ac->ac_prefetch_nr = 0; 1195 1196 if (!should_optimize_scan(ac)) 1197 return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups); 1198 1199 /* 1200 * Optimized scanning can return non adjacent groups which can cause 1201 * seek overhead for rotational disks. So try few linear groups before 1202 * trying optimized scan. 1203 */ 1204 if (sbi->s_mb_max_linear_groups) 1205 ret = ext4_mb_scan_groups_linear(ac, ngroups, &start, 1206 sbi->s_mb_max_linear_groups); 1207 if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1208 return ret; 1209 1210 switch (ac->ac_criteria) { 1211 case CR_POWER2_ALIGNED: 1212 return ext4_mb_scan_groups_p2_aligned(ac, start); 1213 case CR_GOAL_LEN_FAST: 1214 return ext4_mb_scan_groups_goal_fast(ac, start); 1215 case CR_BEST_AVAIL_LEN: 1216 return ext4_mb_scan_groups_best_avail(ac, start); 1217 default: 1218 /* 1219 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an 1220 * rb tree sorted by bb_free. But until that happens, we should 1221 * never come here. 1222 */ 1223 WARN_ON(1); 1224 } 1225 1226 return 0; 1227 } 1228 1229 /* 1230 * Cache the order of the largest free extent we have available in this block 1231 * group. 1232 */ 1233 static void 1234 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 1235 { 1236 struct ext4_sb_info *sbi = EXT4_SB(sb); 1237 int new, old = grp->bb_largest_free_order; 1238 1239 for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--) 1240 if (grp->bb_counters[new] > 0) 1241 break; 1242 1243 /* No need to move between order lists? */ 1244 if (new == old) 1245 return; 1246 1247 if (old >= 0) { 1248 struct xarray *xa = &sbi->s_mb_largest_free_orders[old]; 1249 1250 if (!xa_empty(xa) && xa_load(xa, grp->bb_group)) 1251 xa_erase(xa, grp->bb_group); 1252 } 1253 1254 grp->bb_largest_free_order = new; 1255 if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) { 1256 /* 1257 * Cannot use __GFP_NOFAIL because we hold the group lock. 1258 * Although allocation for insertion may fails, it's not fatal 1259 * as we have linear traversal to fall back on. 
1260 */ 1261 int err = xa_insert(&sbi->s_mb_largest_free_orders[new], 1262 grp->bb_group, grp, GFP_ATOMIC); 1263 if (err) 1264 mb_debug(sb, "insert group: %u to s_mb_largest_free_orders[%d] failed, err %d", 1265 grp->bb_group, new, err); 1266 } 1267 } 1268 1269 static noinline_for_stack 1270 void ext4_mb_generate_buddy(struct super_block *sb, 1271 void *buddy, void *bitmap, ext4_group_t group, 1272 struct ext4_group_info *grp) 1273 { 1274 struct ext4_sb_info *sbi = EXT4_SB(sb); 1275 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1276 ext4_grpblk_t i = 0; 1277 ext4_grpblk_t first; 1278 ext4_grpblk_t len; 1279 unsigned free = 0; 1280 unsigned fragments = 0; 1281 unsigned long long period = get_cycles(); 1282 1283 /* initialize buddy from bitmap which is aggregation 1284 * of on-disk bitmap and preallocations */ 1285 i = mb_find_next_zero_bit(bitmap, max, 0); 1286 grp->bb_first_free = i; 1287 while (i < max) { 1288 fragments++; 1289 first = i; 1290 i = mb_find_next_bit(bitmap, max, i); 1291 len = i - first; 1292 free += len; 1293 if (len > 1) 1294 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1295 else 1296 grp->bb_counters[0]++; 1297 if (i < max) 1298 i = mb_find_next_zero_bit(bitmap, max, i); 1299 } 1300 grp->bb_fragments = fragments; 1301 1302 if (free != grp->bb_free) { 1303 ext4_grp_locked_error(sb, group, 0, 0, 1304 "block bitmap and bg descriptor " 1305 "inconsistent: %u vs %u free clusters", 1306 free, grp->bb_free); 1307 /* 1308 * If we intend to continue, we consider group descriptor 1309 * corrupt and update bb_free using bitmap value 1310 */ 1311 grp->bb_free = free; 1312 ext4_mark_group_bitmap_corrupted(sb, group, 1313 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1314 } 1315 mb_set_largest_free_order(sb, grp); 1316 mb_update_avg_fragment_size(sb, grp); 1317 1318 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1319 1320 period = get_cycles() - period; 1321 atomic_inc(&sbi->s_mb_buddies_generated); 1322 atomic64_add(period, &sbi->s_mb_generation_time); 1323 } 1324 1325 static void mb_regenerate_buddy(struct ext4_buddy *e4b) 1326 { 1327 int count; 1328 int order = 1; 1329 void *buddy; 1330 1331 while ((buddy = mb_find_buddy(e4b, order++, &count))) 1332 mb_set_bits(buddy, 0, count); 1333 1334 e4b->bd_info->bb_fragments = 0; 1335 memset(e4b->bd_info->bb_counters, 0, 1336 sizeof(*e4b->bd_info->bb_counters) * 1337 (e4b->bd_sb->s_blocksize_bits + 2)); 1338 1339 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, 1340 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); 1341 } 1342 1343 /* The buddy information is attached the buddy cache inode 1344 * for convenience. The information regarding each group 1345 * is loaded via ext4_mb_load_buddy. The information involve 1346 * block bitmap and buddy information. The information are 1347 * stored in the inode as 1348 * 1349 * { folio } 1350 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1351 * 1352 * 1353 * one block each for bitmap and buddy information. 1354 * So for each group we take up 2 blocks. A folio can 1355 * contain blocks_per_folio (folio_size / blocksize) blocks. 1356 * So it can have information regarding groups_per_folio which 1357 * is blocks_per_folio/2 1358 * 1359 * Locking note: This routine takes the block group lock of all groups 1360 * for this folio; do not hold this lock when calling this routine! 
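 *
 * As a concrete illustration of the layout (numbers assumed for the example
 * only): with a 1 KiB block size and 4 KiB folios, blocks_per_folio = 4 and
 * groups_per_folio = 2, so folio 0 holds
 * [ group 0 bitmap ][ group 0 buddy ][ group 1 bitmap ][ group 1 buddy ]
 * and folio 1 continues with groups 2 and 3.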
1361 */ 1362 static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp) 1363 { 1364 ext4_group_t ngroups; 1365 unsigned int blocksize; 1366 int blocks_per_folio; 1367 int groups_per_folio; 1368 int err = 0; 1369 int i; 1370 ext4_group_t first_group, group; 1371 int first_block; 1372 struct super_block *sb; 1373 struct buffer_head *bhs; 1374 struct buffer_head **bh = NULL; 1375 struct inode *inode; 1376 char *data; 1377 char *bitmap; 1378 struct ext4_group_info *grinfo; 1379 1380 inode = folio->mapping->host; 1381 sb = inode->i_sb; 1382 ngroups = ext4_get_groups_count(sb); 1383 blocksize = i_blocksize(inode); 1384 blocks_per_folio = folio_size(folio) / blocksize; 1385 WARN_ON_ONCE(!blocks_per_folio); 1386 groups_per_folio = DIV_ROUND_UP(blocks_per_folio, 2); 1387 1388 mb_debug(sb, "init folio %lu\n", folio->index); 1389 1390 /* allocate buffer_heads to read bitmaps */ 1391 if (groups_per_folio > 1) { 1392 i = sizeof(struct buffer_head *) * groups_per_folio; 1393 bh = kzalloc(i, gfp); 1394 if (bh == NULL) 1395 return -ENOMEM; 1396 } else 1397 bh = &bhs; 1398 1399 /* read all groups the folio covers into the cache */ 1400 first_group = EXT4_PG_TO_LBLK(inode, folio->index) / 2; 1401 for (i = 0, group = first_group; i < groups_per_folio; i++, group++) { 1402 if (group >= ngroups) 1403 break; 1404 1405 grinfo = ext4_get_group_info(sb, group); 1406 if (!grinfo) 1407 continue; 1408 /* 1409 * If folio is uptodate then we came here after online resize 1410 * which added some new uninitialized group info structs, so 1411 * we must skip all initialized uptodate buddies on the folio, 1412 * which may be currently in use by an allocating task. 1413 */ 1414 if (folio_test_uptodate(folio) && 1415 !EXT4_MB_GRP_NEED_INIT(grinfo)) { 1416 bh[i] = NULL; 1417 continue; 1418 } 1419 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 1420 if (IS_ERR(bh[i])) { 1421 err = PTR_ERR(bh[i]); 1422 bh[i] = NULL; 1423 goto out; 1424 } 1425 mb_debug(sb, "read bitmap for group %u\n", group); 1426 } 1427 1428 /* wait for I/O completion */ 1429 for (i = 0, group = first_group; i < groups_per_folio; i++, group++) { 1430 int err2; 1431 1432 if (!bh[i]) 1433 continue; 1434 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 1435 if (!err) 1436 err = err2; 1437 } 1438 1439 first_block = EXT4_PG_TO_LBLK(inode, folio->index); 1440 for (i = 0; i < blocks_per_folio; i++) { 1441 group = (first_block + i) >> 1; 1442 if (group >= ngroups) 1443 break; 1444 1445 if (!bh[group - first_group]) 1446 /* skip initialized uptodate buddy */ 1447 continue; 1448 1449 if (!buffer_verified(bh[group - first_group])) 1450 /* Skip faulty bitmaps */ 1451 continue; 1452 err = 0; 1453 1454 /* 1455 * data carry information regarding this 1456 * particular group in the format specified 1457 * above 1458 * 1459 */ 1460 data = folio_address(folio) + (i * blocksize); 1461 bitmap = bh[group - first_group]->b_data; 1462 1463 /* 1464 * We place the buddy block and bitmap block 1465 * close together 1466 */ 1467 grinfo = ext4_get_group_info(sb, group); 1468 if (!grinfo) { 1469 err = -EFSCORRUPTED; 1470 goto out; 1471 } 1472 if ((first_block + i) & 1) { 1473 /* this is block of buddy */ 1474 BUG_ON(incore == NULL); 1475 mb_debug(sb, "put buddy for group %u in folio %lu/%x\n", 1476 group, folio->index, i * blocksize); 1477 trace_ext4_mb_buddy_bitmap_load(sb, group); 1478 grinfo->bb_fragments = 0; 1479 memset(grinfo->bb_counters, 0, 1480 sizeof(*grinfo->bb_counters) * 1481 (MB_NUM_ORDERS(sb))); 1482 /* 1483 * incore got set to the group block 
bitmap below 1484 */ 1485 ext4_lock_group(sb, group); 1486 /* init the buddy */ 1487 memset(data, 0xff, blocksize); 1488 ext4_mb_generate_buddy(sb, data, incore, group, grinfo); 1489 ext4_unlock_group(sb, group); 1490 incore = NULL; 1491 } else { 1492 /* this is block of bitmap */ 1493 BUG_ON(incore != NULL); 1494 mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n", 1495 group, folio->index, i * blocksize); 1496 trace_ext4_mb_bitmap_load(sb, group); 1497 1498 /* see comments in ext4_mb_put_pa() */ 1499 ext4_lock_group(sb, group); 1500 memcpy(data, bitmap, blocksize); 1501 1502 /* mark all preallocated blks used in in-core bitmap */ 1503 ext4_mb_generate_from_pa(sb, data, group); 1504 WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root)); 1505 ext4_unlock_group(sb, group); 1506 1507 /* set incore so that the buddy information can be 1508 * generated using this 1509 */ 1510 incore = data; 1511 } 1512 } 1513 folio_mark_uptodate(folio); 1514 1515 out: 1516 if (bh) { 1517 for (i = 0; i < groups_per_folio; i++) 1518 brelse(bh[i]); 1519 if (bh != &bhs) 1520 kfree(bh); 1521 } 1522 return err; 1523 } 1524 1525 /* 1526 * Lock the buddy and bitmap folios. This makes sure other parallel init_group 1527 * on the same buddy folio doesn't happen while holding the buddy folio lock. 1528 * Return locked buddy and bitmap folios on e4b struct. If buddy and bitmap 1529 * are on the same folio e4b->bd_buddy_folio is NULL and return value is 0. 1530 */ 1531 static int ext4_mb_get_buddy_folio_lock(struct super_block *sb, 1532 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1533 { 1534 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 1535 int block, pnum; 1536 struct folio *folio; 1537 1538 e4b->bd_buddy_folio = NULL; 1539 e4b->bd_bitmap_folio = NULL; 1540 1541 /* 1542 * the buddy cache inode stores the block bitmap 1543 * and buddy information in consecutive blocks. 1544 * So for each group we need two blocks. 1545 */ 1546 block = group * 2; 1547 pnum = EXT4_LBLK_TO_PG(inode, block); 1548 folio = __filemap_get_folio(inode->i_mapping, pnum, 1549 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1550 if (IS_ERR(folio)) 1551 return PTR_ERR(folio); 1552 BUG_ON(folio->mapping != inode->i_mapping); 1553 WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize); 1554 e4b->bd_bitmap_folio = folio; 1555 e4b->bd_bitmap = folio_address(folio) + 1556 offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block)); 1557 1558 block++; 1559 pnum = EXT4_LBLK_TO_PG(inode, block); 1560 if (folio_contains(folio, pnum)) { 1561 /* buddy and bitmap are on the same folio */ 1562 return 0; 1563 } 1564 1565 /* we need another folio for the buddy */ 1566 folio = __filemap_get_folio(inode->i_mapping, pnum, 1567 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1568 if (IS_ERR(folio)) 1569 return PTR_ERR(folio); 1570 BUG_ON(folio->mapping != inode->i_mapping); 1571 WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize); 1572 e4b->bd_buddy_folio = folio; 1573 return 0; 1574 } 1575 1576 static void ext4_mb_put_buddy_folio_lock(struct ext4_buddy *e4b) 1577 { 1578 if (e4b->bd_bitmap_folio) { 1579 folio_unlock(e4b->bd_bitmap_folio); 1580 folio_put(e4b->bd_bitmap_folio); 1581 } 1582 if (e4b->bd_buddy_folio) { 1583 folio_unlock(e4b->bd_buddy_folio); 1584 folio_put(e4b->bd_buddy_folio); 1585 } 1586 } 1587 1588 /* 1589 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1590 * block group lock of all groups for this folio; do not hold the BG lock when 1591 * calling this routine! 
1592 */ 1593 static noinline_for_stack 1594 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1595 { 1596 1597 struct ext4_group_info *this_grp; 1598 struct ext4_buddy e4b; 1599 struct folio *folio; 1600 int ret = 0; 1601 1602 might_sleep(); 1603 mb_debug(sb, "init group %u\n", group); 1604 this_grp = ext4_get_group_info(sb, group); 1605 if (!this_grp) 1606 return -EFSCORRUPTED; 1607 1608 /* 1609 * This ensures that we don't reinit the buddy cache 1610 * folio which map to the group from which we are already 1611 * allocating. If we are looking at the buddy cache we would 1612 * have taken a reference using ext4_mb_load_buddy and that 1613 * would have pinned buddy folio to page cache. 1614 * The call to ext4_mb_get_buddy_folio_lock will mark the 1615 * folio accessed. 1616 */ 1617 ret = ext4_mb_get_buddy_folio_lock(sb, group, &e4b, gfp); 1618 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1619 /* 1620 * somebody initialized the group 1621 * return without doing anything 1622 */ 1623 goto err; 1624 } 1625 1626 folio = e4b.bd_bitmap_folio; 1627 ret = ext4_mb_init_cache(folio, NULL, gfp); 1628 if (ret) 1629 goto err; 1630 if (!folio_test_uptodate(folio)) { 1631 ret = -EIO; 1632 goto err; 1633 } 1634 1635 if (e4b.bd_buddy_folio == NULL) { 1636 /* 1637 * If both the bitmap and buddy are in 1638 * the same folio we don't need to force 1639 * init the buddy 1640 */ 1641 ret = 0; 1642 goto err; 1643 } 1644 /* init buddy cache */ 1645 folio = e4b.bd_buddy_folio; 1646 ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp); 1647 if (ret) 1648 goto err; 1649 if (!folio_test_uptodate(folio)) { 1650 ret = -EIO; 1651 goto err; 1652 } 1653 err: 1654 ext4_mb_put_buddy_folio_lock(&e4b); 1655 return ret; 1656 } 1657 1658 /* 1659 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1660 * block group lock of all groups for this folio; do not hold the BG lock when 1661 * calling this routine! 1662 */ 1663 static noinline_for_stack int 1664 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1665 struct ext4_buddy *e4b, gfp_t gfp) 1666 { 1667 int block; 1668 int pnum; 1669 struct folio *folio; 1670 int ret; 1671 struct ext4_group_info *grp; 1672 struct ext4_sb_info *sbi = EXT4_SB(sb); 1673 struct inode *inode = sbi->s_buddy_cache; 1674 1675 might_sleep(); 1676 mb_debug(sb, "load group %u\n", group); 1677 1678 grp = ext4_get_group_info(sb, group); 1679 if (!grp) 1680 return -EFSCORRUPTED; 1681 1682 e4b->bd_blkbits = sb->s_blocksize_bits; 1683 e4b->bd_info = grp; 1684 e4b->bd_sb = sb; 1685 e4b->bd_group = group; 1686 e4b->bd_buddy_folio = NULL; 1687 e4b->bd_bitmap_folio = NULL; 1688 1689 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1690 /* 1691 * we need full data about the group 1692 * to make a good selection 1693 */ 1694 ret = ext4_mb_init_group(sb, group, gfp); 1695 if (ret) 1696 return ret; 1697 } 1698 1699 /* 1700 * the buddy cache inode stores the block bitmap 1701 * and buddy information in consecutive blocks. 1702 * So for each group we need two blocks. 1703 */ 1704 block = group * 2; 1705 pnum = EXT4_LBLK_TO_PG(inode, block); 1706 1707 /* Avoid locking the folio in the fast path ... */ 1708 folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0); 1709 if (IS_ERR(folio) || !folio_test_uptodate(folio)) { 1710 if (!IS_ERR(folio)) 1711 /* 1712 * drop the folio reference and try 1713 * to get the folio with lock. If we 1714 * are not uptodate that implies 1715 * somebody just created the folio but 1716 * is yet to initialize it. 
So 1717 * wait for it to initialize. 1718 */ 1719 folio_put(folio); 1720 folio = __filemap_get_folio(inode->i_mapping, pnum, 1721 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1722 if (!IS_ERR(folio)) { 1723 if (WARN_RATELIMIT(folio->mapping != inode->i_mapping, 1724 "ext4: bitmap's mapping != inode->i_mapping\n")) { 1725 /* should never happen */ 1726 folio_unlock(folio); 1727 ret = -EINVAL; 1728 goto err; 1729 } 1730 if (!folio_test_uptodate(folio)) { 1731 ret = ext4_mb_init_cache(folio, NULL, gfp); 1732 if (ret) { 1733 folio_unlock(folio); 1734 goto err; 1735 } 1736 mb_cmp_bitmaps(e4b, folio_address(folio) + 1737 offset_in_folio(folio, 1738 EXT4_LBLK_TO_B(inode, block))); 1739 } 1740 folio_unlock(folio); 1741 } 1742 } 1743 if (IS_ERR(folio)) { 1744 ret = PTR_ERR(folio); 1745 goto err; 1746 } 1747 if (!folio_test_uptodate(folio)) { 1748 ret = -EIO; 1749 goto err; 1750 } 1751 1752 /* Folios marked accessed already */ 1753 e4b->bd_bitmap_folio = folio; 1754 e4b->bd_bitmap = folio_address(folio) + 1755 offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block)); 1756 1757 block++; 1758 pnum = EXT4_LBLK_TO_PG(inode, block); 1759 /* buddy and bitmap are on the same folio? */ 1760 if (folio_contains(folio, pnum)) { 1761 folio_get(folio); 1762 goto update_buddy; 1763 } 1764 1765 /* we need another folio for the buddy */ 1766 folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0); 1767 if (IS_ERR(folio) || !folio_test_uptodate(folio)) { 1768 if (!IS_ERR(folio)) 1769 folio_put(folio); 1770 folio = __filemap_get_folio(inode->i_mapping, pnum, 1771 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1772 if (!IS_ERR(folio)) { 1773 if (WARN_RATELIMIT(folio->mapping != inode->i_mapping, 1774 "ext4: buddy bitmap's mapping != inode->i_mapping\n")) { 1775 /* should never happen */ 1776 folio_unlock(folio); 1777 ret = -EINVAL; 1778 goto err; 1779 } 1780 if (!folio_test_uptodate(folio)) { 1781 ret = ext4_mb_init_cache(folio, e4b->bd_bitmap, 1782 gfp); 1783 if (ret) { 1784 folio_unlock(folio); 1785 goto err; 1786 } 1787 } 1788 folio_unlock(folio); 1789 } 1790 } 1791 if (IS_ERR(folio)) { 1792 ret = PTR_ERR(folio); 1793 goto err; 1794 } 1795 if (!folio_test_uptodate(folio)) { 1796 ret = -EIO; 1797 goto err; 1798 } 1799 1800 update_buddy: 1801 /* Folios marked accessed already */ 1802 e4b->bd_buddy_folio = folio; 1803 e4b->bd_buddy = folio_address(folio) + 1804 offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block)); 1805 1806 return 0; 1807 1808 err: 1809 if (!IS_ERR_OR_NULL(folio)) 1810 folio_put(folio); 1811 if (e4b->bd_bitmap_folio) 1812 folio_put(e4b->bd_bitmap_folio); 1813 1814 e4b->bd_buddy = NULL; 1815 e4b->bd_bitmap = NULL; 1816 return ret; 1817 } 1818 1819 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1820 struct ext4_buddy *e4b) 1821 { 1822 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1823 } 1824 1825 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1826 { 1827 if (e4b->bd_bitmap_folio) 1828 folio_put(e4b->bd_bitmap_folio); 1829 if (e4b->bd_buddy_folio) 1830 folio_put(e4b->bd_buddy_folio); 1831 } 1832 1833 1834 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1835 { 1836 int order = 1, max; 1837 void *bb; 1838 1839 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1840 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1841 1842 while (order <= e4b->bd_blkbits + 1) { 1843 bb = mb_find_buddy(e4b, order, &max); 1844 if (!mb_test_bit(block >> order, bb)) { 1845 /* this block is part of buddy of order 'order' */ 1846 return order; 1847 } 1848 order++; 1849 
} 1850 return 0; 1851 } 1852 1853 static void mb_clear_bits(void *bm, int cur, int len) 1854 { 1855 __u32 *addr; 1856 1857 len = cur + len; 1858 while (cur < len) { 1859 if ((cur & 31) == 0 && (len - cur) >= 32) { 1860 /* fast path: clear whole word at once */ 1861 addr = bm + (cur >> 3); 1862 *addr = 0; 1863 cur += 32; 1864 continue; 1865 } 1866 mb_clear_bit(cur, bm); 1867 cur++; 1868 } 1869 } 1870 1871 /* clear bits in given range 1872 * will return first found zero bit if any, -1 otherwise 1873 */ 1874 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1875 { 1876 __u32 *addr; 1877 int zero_bit = -1; 1878 1879 len = cur + len; 1880 while (cur < len) { 1881 if ((cur & 31) == 0 && (len - cur) >= 32) { 1882 /* fast path: clear whole word at once */ 1883 addr = bm + (cur >> 3); 1884 if (*addr != (__u32)(-1) && zero_bit == -1) 1885 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1886 *addr = 0; 1887 cur += 32; 1888 continue; 1889 } 1890 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1891 zero_bit = cur; 1892 cur++; 1893 } 1894 1895 return zero_bit; 1896 } 1897 1898 void mb_set_bits(void *bm, int cur, int len) 1899 { 1900 __u32 *addr; 1901 1902 len = cur + len; 1903 while (cur < len) { 1904 if ((cur & 31) == 0 && (len - cur) >= 32) { 1905 /* fast path: set whole word at once */ 1906 addr = bm + (cur >> 3); 1907 *addr = 0xffffffff; 1908 cur += 32; 1909 continue; 1910 } 1911 mb_set_bit(cur, bm); 1912 cur++; 1913 } 1914 } 1915 1916 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1917 { 1918 if (mb_test_bit(*bit + side, bitmap)) { 1919 mb_clear_bit(*bit, bitmap); 1920 (*bit) -= side; 1921 return 1; 1922 } 1923 else { 1924 (*bit) += side; 1925 mb_set_bit(*bit, bitmap); 1926 return -1; 1927 } 1928 } 1929 1930 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1931 { 1932 int max; 1933 int order = 1; 1934 void *buddy = mb_find_buddy(e4b, order, &max); 1935 1936 while (buddy) { 1937 void *buddy2; 1938 1939 /* Bits in range [first; last] are known to be set since 1940 * corresponding blocks were allocated. Bits in range 1941 * (first; last) will stay set because they form buddies on 1942 * upper layer. We just deal with borders if they don't 1943 * align with upper layer and then go up. 1944 * Releasing entire group is all about clearing 1945 * single bit of highest order buddy. 1946 */ 1947 1948 /* Example: 1949 * --------------------------------- 1950 * | 1 | 1 | 1 | 1 | 1951 * --------------------------------- 1952 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1953 * --------------------------------- 1954 * 0 1 2 3 4 5 6 7 1955 * \_____________________/ 1956 * 1957 * Neither [1] nor [6] is aligned to above layer. 1958 * Left neighbour [0] is free, so mark it busy, 1959 * decrease bb_counters and extend range to 1960 * [0; 6] 1961 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1962 * mark [6] free, increase bb_counters and shrink range to 1963 * [0; 5]. 1964 * Then shift range to [0; 2], go up and do the same. 
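 *
 * Continuing the example at order 2, the range is [0; 2]: [0] is
 * already left-aligned, while [2]'s buddy [3] is busy (top row), so
 * [2] is marked free at this order, bb_counters[2] is increased and
 * the range shrinks to [0; 1], which shifts to [0; 0] one order up.
 * The walk stops once the borders cross (first > last) or the highest
 * order is reached, where mb_clear_bits() frees the remaining aligned
 * run in one step.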
1965 */ 1966 1967 1968 if (first & 1) 1969 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1970 if (!(last & 1)) 1971 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1972 if (first > last) 1973 break; 1974 order++; 1975 1976 buddy2 = mb_find_buddy(e4b, order, &max); 1977 if (!buddy2) { 1978 mb_clear_bits(buddy, first, last - first + 1); 1979 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1980 break; 1981 } 1982 first >>= 1; 1983 last >>= 1; 1984 buddy = buddy2; 1985 } 1986 } 1987 1988 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1989 int first, int count) 1990 { 1991 int left_is_free = 0; 1992 int right_is_free = 0; 1993 int block; 1994 int last = first + count - 1; 1995 struct super_block *sb = e4b->bd_sb; 1996 1997 if (WARN_ON(count == 0)) 1998 return; 1999 BUG_ON(last >= (sb->s_blocksize << 3)); 2000 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 2001 /* Don't bother if the block group is corrupt. */ 2002 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 2003 return; 2004 2005 mb_check_buddy(e4b); 2006 mb_free_blocks_double(inode, e4b, first, count); 2007 2008 /* access memory sequentially: check left neighbour, 2009 * clear range and then check right neighbour 2010 */ 2011 if (first != 0) 2012 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 2013 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 2014 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 2015 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 2016 2017 if (unlikely(block != -1)) { 2018 struct ext4_sb_info *sbi = EXT4_SB(sb); 2019 ext4_fsblk_t blocknr; 2020 2021 /* 2022 * Fastcommit replay can free already freed blocks which 2023 * corrupts allocation info. Regenerate it. 2024 */ 2025 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 2026 mb_regenerate_buddy(e4b); 2027 goto check; 2028 } 2029 2030 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 2031 blocknr += EXT4_C2B(sbi, block); 2032 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2033 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2034 ext4_grp_locked_error(sb, e4b->bd_group, 2035 inode ? inode->i_ino : 0, blocknr, 2036 "freeing already freed block (bit %u); block bitmap corrupt.", 2037 block); 2038 return; 2039 } 2040 2041 this_cpu_inc(discard_pa_seq); 2042 e4b->bd_info->bb_free += count; 2043 if (first < e4b->bd_info->bb_first_free) 2044 e4b->bd_info->bb_first_free = first; 2045 2046 /* let's maintain fragments counter */ 2047 if (left_is_free && right_is_free) 2048 e4b->bd_info->bb_fragments--; 2049 else if (!left_is_free && !right_is_free) 2050 e4b->bd_info->bb_fragments++; 2051 2052 /* buddy[0] == bd_bitmap is a special case, so handle 2053 * it right away and let mb_buddy_mark_free stay free of 2054 * zero order checks. 2055 * Check if neighbours are to be coaleasced, 2056 * adjust bitmap bb_counters and borders appropriately. 2057 */ 2058 if (first & 1) { 2059 first += !left_is_free; 2060 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 2061 } 2062 if (!(last & 1)) { 2063 last -= !right_is_free; 2064 e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 2065 } 2066 2067 if (first <= last) 2068 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 2069 2070 mb_set_largest_free_order(sb, e4b->bd_info); 2071 mb_update_avg_fragment_size(sb, e4b->bd_info); 2072 check: 2073 mb_check_buddy(e4b); 2074 } 2075 2076 static int mb_find_extent(struct ext4_buddy *e4b, int block, 2077 int needed, struct ext4_free_extent *ex) 2078 { 2079 int max, order, next; 2080 void *buddy; 2081 2082 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2083 BUG_ON(ex == NULL); 2084 2085 buddy = mb_find_buddy(e4b, 0, &max); 2086 BUG_ON(buddy == NULL); 2087 BUG_ON(block >= max); 2088 if (mb_test_bit(block, buddy)) { 2089 ex->fe_len = 0; 2090 ex->fe_start = 0; 2091 ex->fe_group = 0; 2092 return 0; 2093 } 2094 2095 /* find actual order */ 2096 order = mb_find_order_for_block(e4b, block); 2097 2098 ex->fe_len = (1 << order) - (block & ((1 << order) - 1)); 2099 ex->fe_start = block; 2100 ex->fe_group = e4b->bd_group; 2101 2102 block = block >> order; 2103 2104 while (needed > ex->fe_len && 2105 mb_find_buddy(e4b, order, &max)) { 2106 2107 if (block + 1 >= max) 2108 break; 2109 2110 next = (block + 1) * (1 << order); 2111 if (mb_test_bit(next, e4b->bd_bitmap)) 2112 break; 2113 2114 order = mb_find_order_for_block(e4b, next); 2115 2116 block = next >> order; 2117 ex->fe_len += 1 << order; 2118 } 2119 2120 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 2121 /* Should never happen! (but apparently sometimes does?!?) */ 2122 WARN_ON(1); 2123 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 2124 "corruption or bug in mb_find_extent " 2125 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 2126 block, order, needed, ex->fe_group, ex->fe_start, 2127 ex->fe_len, ex->fe_logical); 2128 ex->fe_len = 0; 2129 ex->fe_start = 0; 2130 ex->fe_group = 0; 2131 } 2132 return ex->fe_len; 2133 } 2134 2135 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 2136 { 2137 int ord; 2138 int mlen = 0; 2139 int max = 0; 2140 int start = ex->fe_start; 2141 int len = ex->fe_len; 2142 unsigned ret = 0; 2143 int len0 = len; 2144 void *buddy; 2145 int ord_start, ord_end; 2146 2147 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 2148 BUG_ON(e4b->bd_group != ex->fe_group); 2149 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2150 mb_check_buddy(e4b); 2151 mb_mark_used_double(e4b, start, len); 2152 2153 this_cpu_inc(discard_pa_seq); 2154 e4b->bd_info->bb_free -= len; 2155 if (e4b->bd_info->bb_first_free == start) 2156 e4b->bd_info->bb_first_free += len; 2157 2158 /* let's maintain fragments counter */ 2159 if (start != 0) 2160 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 2161 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 2162 max = !mb_test_bit(start + len, e4b->bd_bitmap); 2163 if (mlen && max) 2164 e4b->bd_info->bb_fragments++; 2165 else if (!mlen && !max) 2166 e4b->bd_info->bb_fragments--; 2167 2168 /* let's maintain buddy itself */ 2169 while (len) { 2170 ord = mb_find_order_for_block(e4b, start); 2171 2172 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 2173 /* the whole chunk may be allocated at once! 
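 * (for instance, if [64, 160) happens to be free as one order-6 plus
 * one order-5 buddy, a request with start 64 and len 96 is consumed in
 * exactly two of these aligned steps: the order-6 chunk at 64, then
 * the order-5 chunk at 128)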
*/ 2174 mlen = 1 << ord; 2175 buddy = mb_find_buddy(e4b, ord, &max); 2176 BUG_ON((start >> ord) >= max); 2177 mb_set_bit(start >> ord, buddy); 2178 e4b->bd_info->bb_counters[ord]--; 2179 start += mlen; 2180 len -= mlen; 2181 BUG_ON(len < 0); 2182 continue; 2183 } 2184 2185 /* store for history */ 2186 if (ret == 0) 2187 ret = len | (ord << 16); 2188 2189 BUG_ON(ord <= 0); 2190 buddy = mb_find_buddy(e4b, ord, &max); 2191 mb_set_bit(start >> ord, buddy); 2192 e4b->bd_info->bb_counters[ord]--; 2193 2194 ord_start = (start >> ord) << ord; 2195 ord_end = ord_start + (1 << ord); 2196 /* first chunk */ 2197 if (start > ord_start) 2198 ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, 2199 ord_start, start - ord_start, 2200 e4b->bd_info); 2201 2202 /* last chunk */ 2203 if (start + len < ord_end) { 2204 ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, 2205 start + len, 2206 ord_end - (start + len), 2207 e4b->bd_info); 2208 break; 2209 } 2210 len = start + len - ord_end; 2211 start = ord_end; 2212 } 2213 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 2214 2215 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 2216 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 2217 mb_check_buddy(e4b); 2218 2219 return ret; 2220 } 2221 2222 /* 2223 * Must be called under group lock! 2224 */ 2225 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 2226 struct ext4_buddy *e4b) 2227 { 2228 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2229 int ret; 2230 2231 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 2232 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2233 2234 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2235 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2236 ret = mb_mark_used(e4b, &ac->ac_b_ex); 2237 2238 /* preallocation can change ac_b_ex, thus we store actually 2239 * allocated blocks for history */ 2240 ac->ac_f_ex = ac->ac_b_ex; 2241 2242 ac->ac_status = AC_STATUS_FOUND; 2243 ac->ac_tail = ret & 0xffff; 2244 ac->ac_buddy = ret >> 16; 2245 2246 /* 2247 * take the folio reference. We want the folio to be pinned 2248 * so that we don't get a ext4_mb_init_cache_call for this 2249 * group until we update the bitmap. That would mean we 2250 * double allocate blocks. The reference is dropped 2251 * in ext4_mb_release_context 2252 */ 2253 ac->ac_bitmap_folio = e4b->bd_bitmap_folio; 2254 folio_get(ac->ac_bitmap_folio); 2255 ac->ac_buddy_folio = e4b->bd_buddy_folio; 2256 folio_get(ac->ac_buddy_folio); 2257 /* store last allocated for subsequent stream allocation */ 2258 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2259 int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals; 2260 2261 WRITE_ONCE(sbi->s_mb_last_groups[hash], ac->ac_f_ex.fe_group); 2262 } 2263 2264 /* 2265 * As we've just preallocated more space than 2266 * user requested originally, we store allocated 2267 * space in a special descriptor. 
2268 */ 2269 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 2270 ext4_mb_new_preallocation(ac); 2271 2272 } 2273 2274 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2275 struct ext4_buddy *e4b, 2276 int finish_group) 2277 { 2278 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2279 struct ext4_free_extent *bex = &ac->ac_b_ex; 2280 struct ext4_free_extent *gex = &ac->ac_g_ex; 2281 2282 if (ac->ac_status == AC_STATUS_FOUND) 2283 return; 2284 /* 2285 * We don't want to scan for a whole year 2286 */ 2287 if (ac->ac_found > sbi->s_mb_max_to_scan && 2288 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2289 ac->ac_status = AC_STATUS_BREAK; 2290 return; 2291 } 2292 2293 /* 2294 * Haven't found a good chunk so far, let's continue 2295 */ 2296 if (bex->fe_len < gex->fe_len) 2297 return; 2298 2299 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2300 ext4_mb_use_best_found(ac, e4b); 2301 } 2302 2303 /* 2304 * The routine checks whether the found extent is good enough. If it is, 2305 * then the extent gets marked used and a flag is set in the context 2306 * to stop scanning. Otherwise, the extent is compared with the 2307 * previously found extent and, if the new one is better, it is stored 2308 * in the context. Later, the best found extent will be used if 2309 * mballoc can't find a good enough extent. 2310 * 2311 * The algorithm used is roughly as follows: 2312 * 2313 * * If the free extent found is exactly as big as the goal, then 2314 * stop the scan and use it immediately 2315 * 2316 * * If the free extent found is smaller than the goal, then keep retrying 2317 * up to a max of sbi->s_mb_max_to_scan times (default 200). After 2318 * that, stop scanning and use whatever we have. 2319 * 2320 * * If the free extent found is bigger than the goal, then keep retrying 2321 * up to a max of sbi->s_mb_min_to_scan times (default 10) before 2322 * stopping the scan and using the extent. 2323 * 2324 * 2325 * FIXME: the real allocation policy is yet to be designed!
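 *
 * The selection rules above can be condensed into a small userspace
 * style sketch (illustrative only, not mballoc code; 'measure', 'best'
 * and 'goal' are made-up names, and the EXT4_MB_HINT_FIRST special
 * case is omitted):
 */
#if 0
#include <stdbool.h>

struct extent { int len; };

/* Returns true when scanning can stop because 'found' was taken. */
static bool measure(struct extent *best, struct extent found, int goal)
{
	if (found.len == goal) {	/* exact fit: take it immediately */
		*best = found;
		return true;
	}
	if (best->len == 0) {		/* first candidate: remember it */
		*best = found;
		return false;
	}
	if (best->len < goal) {		/* goal not met yet: bigger is better */
		if (found.len > best->len)
			*best = found;
	} else if (found.len > goal) {	/* goal already met: prefer the
					 * smallest extent that still
					 * satisfies it */
		if (found.len < best->len)
			*best = found;
	}
	return false;			/* keep scanning, bounded by
					 * s_mb_min_to_scan/s_mb_max_to_scan */
}

int main(void)
{
	struct extent best = { 0 };

	measure(&best, (struct extent){ 8 }, 16);	/* first: remembered */
	measure(&best, (struct extent){ 12 }, 16);	/* bigger: replaces 8 */
	return measure(&best, (struct extent){ 16 }, 16) ? 0 : 1; /* exact: stop */
}
#endif
/*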
2326 */ 2327 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2328 struct ext4_free_extent *ex, 2329 struct ext4_buddy *e4b) 2330 { 2331 struct ext4_free_extent *bex = &ac->ac_b_ex; 2332 struct ext4_free_extent *gex = &ac->ac_g_ex; 2333 2334 BUG_ON(ex->fe_len <= 0); 2335 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2336 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2337 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2338 2339 ac->ac_found++; 2340 ac->ac_cX_found[ac->ac_criteria]++; 2341 2342 /* 2343 * The special case - take what you catch first 2344 */ 2345 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2346 *bex = *ex; 2347 ext4_mb_use_best_found(ac, e4b); 2348 return; 2349 } 2350 2351 /* 2352 * Let's check whether the chuck is good enough 2353 */ 2354 if (ex->fe_len == gex->fe_len) { 2355 *bex = *ex; 2356 ext4_mb_use_best_found(ac, e4b); 2357 return; 2358 } 2359 2360 /* 2361 * If this is first found extent, just store it in the context 2362 */ 2363 if (bex->fe_len == 0) { 2364 *bex = *ex; 2365 return; 2366 } 2367 2368 /* 2369 * If new found extent is better, store it in the context 2370 */ 2371 if (bex->fe_len < gex->fe_len) { 2372 /* if the request isn't satisfied, any found extent 2373 * larger than previous best one is better */ 2374 if (ex->fe_len > bex->fe_len) 2375 *bex = *ex; 2376 } else if (ex->fe_len > gex->fe_len) { 2377 /* if the request is satisfied, then we try to find 2378 * an extent that still satisfy the request, but is 2379 * smaller than previous one */ 2380 if (ex->fe_len < bex->fe_len) 2381 *bex = *ex; 2382 } 2383 2384 ext4_mb_check_limits(ac, e4b, 0); 2385 } 2386 2387 static noinline_for_stack 2388 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2389 struct ext4_buddy *e4b) 2390 { 2391 struct ext4_free_extent ex = ac->ac_b_ex; 2392 ext4_group_t group = ex.fe_group; 2393 int max; 2394 int err; 2395 2396 BUG_ON(ex.fe_len <= 0); 2397 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2398 if (err) 2399 return; 2400 2401 ext4_lock_group(ac->ac_sb, group); 2402 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 2403 goto out; 2404 2405 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2406 2407 if (max > 0) { 2408 ac->ac_b_ex = ex; 2409 ext4_mb_use_best_found(ac, e4b); 2410 } 2411 2412 out: 2413 ext4_unlock_group(ac->ac_sb, group); 2414 ext4_mb_unload_buddy(e4b); 2415 } 2416 2417 static noinline_for_stack 2418 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2419 struct ext4_buddy *e4b) 2420 { 2421 ext4_group_t group = ac->ac_g_ex.fe_group; 2422 int max; 2423 int err; 2424 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2425 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2426 struct ext4_free_extent ex; 2427 2428 if (!grp) 2429 return -EFSCORRUPTED; 2430 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2431 return 0; 2432 if (grp->bb_free == 0) 2433 return 0; 2434 2435 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2436 if (err) 2437 return err; 2438 2439 ext4_lock_group(ac->ac_sb, group); 2440 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 2441 goto out; 2442 2443 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2444 ac->ac_g_ex.fe_len, &ex); 2445 ex.fe_logical = 0xDEADFA11; /* debug value */ 2446 2447 if (max >= ac->ac_g_ex.fe_len && 2448 ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) { 2449 ext4_fsblk_t start; 2450 2451 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); 2452 /* use do_div to get remainder (would be 
64-bit modulo) */ 2453 if (do_div(start, sbi->s_stripe) == 0) { 2454 ac->ac_found++; 2455 ac->ac_b_ex = ex; 2456 ext4_mb_use_best_found(ac, e4b); 2457 } 2458 } else if (max >= ac->ac_g_ex.fe_len) { 2459 BUG_ON(ex.fe_len <= 0); 2460 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2461 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2462 ac->ac_found++; 2463 ac->ac_b_ex = ex; 2464 ext4_mb_use_best_found(ac, e4b); 2465 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2466 /* Sometimes, caller may want to merge even small 2467 * number of blocks to an existing extent */ 2468 BUG_ON(ex.fe_len <= 0); 2469 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2470 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2471 ac->ac_found++; 2472 ac->ac_b_ex = ex; 2473 ext4_mb_use_best_found(ac, e4b); 2474 } 2475 out: 2476 ext4_unlock_group(ac->ac_sb, group); 2477 ext4_mb_unload_buddy(e4b); 2478 2479 return 0; 2480 } 2481 2482 /* 2483 * The routine scans buddy structures (not bitmap!) from given order 2484 * to max order and tries to find big enough chunk to satisfy the req 2485 */ 2486 static noinline_for_stack 2487 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2488 struct ext4_buddy *e4b) 2489 { 2490 struct super_block *sb = ac->ac_sb; 2491 struct ext4_group_info *grp = e4b->bd_info; 2492 void *buddy; 2493 int i; 2494 int k; 2495 int max; 2496 2497 BUG_ON(ac->ac_2order <= 0); 2498 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2499 if (grp->bb_counters[i] == 0) 2500 continue; 2501 2502 buddy = mb_find_buddy(e4b, i, &max); 2503 if (WARN_RATELIMIT(buddy == NULL, 2504 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 2505 continue; 2506 2507 k = mb_find_next_zero_bit(buddy, max, 0); 2508 if (k >= max) { 2509 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2510 e4b->bd_group, 2511 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2512 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2513 "%d free clusters of order %d. But found 0", 2514 grp->bb_counters[i], i); 2515 break; 2516 } 2517 ac->ac_found++; 2518 ac->ac_cX_found[ac->ac_criteria]++; 2519 2520 ac->ac_b_ex.fe_len = 1 << i; 2521 ac->ac_b_ex.fe_start = k << i; 2522 ac->ac_b_ex.fe_group = e4b->bd_group; 2523 2524 ext4_mb_use_best_found(ac, e4b); 2525 2526 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2527 2528 if (EXT4_SB(sb)->s_mb_stats) 2529 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2530 2531 break; 2532 } 2533 } 2534 2535 /* 2536 * The routine scans the group and measures all found extents. 2537 * In order to optimize scanning, caller must pass number of 2538 * free blocks in the group, so the routine can know upper limit. 
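 *
 * A userspace-style sketch of that scan loop (illustrative only, not
 * mballoc code; next_zero()/next_set() are toy stand-ins for
 * mb_find_next_zero_bit()/mb_find_next_bit()):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int next_zero(uint32_t bm, int max, int start)
{
	while (start < max && (bm & (1u << start)))
		start++;
	return start;
}

static int next_set(uint32_t bm, int max, int start)
{
	while (start < max && !(bm & (1u << start)))
		start++;
	return start;
}

int main(void)
{
	uint32_t bm = 0xff0f00f1;	/* 1 = used, 0 = free; 32 "clusters" */
	int goal = 6, i = 0, j;

	while (i < 32) {
		i = next_zero(bm, 32, i);
		if (i >= 32)
			break;
		j = next_set(bm, 32, i);
		if (j - i < goal)	/* too short: the cheap criteria skip it */
			printf("skip    [%2d, %2d) len %d\n", i, j, j - i);
		else
			printf("measure [%2d, %2d) len %d\n", i, j, j - i);
		i = j;
	}
	return 0;
}
#endif
/*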
2539 */ 2540 static noinline_for_stack 2541 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2542 struct ext4_buddy *e4b) 2543 { 2544 struct super_block *sb = ac->ac_sb; 2545 void *bitmap = e4b->bd_bitmap; 2546 struct ext4_free_extent ex; 2547 int i, j, freelen; 2548 int free; 2549 2550 free = e4b->bd_info->bb_free; 2551 if (WARN_ON(free <= 0)) 2552 return; 2553 2554 i = e4b->bd_info->bb_first_free; 2555 2556 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2557 i = mb_find_next_zero_bit(bitmap, 2558 EXT4_CLUSTERS_PER_GROUP(sb), i); 2559 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 2560 /* 2561 * If we have a corrupt bitmap, we won't find any 2562 * free blocks even though the group info says we 2563 * have free blocks 2564 */ 2565 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2566 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2567 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2568 "%d free clusters as per " 2569 "group info. But bitmap says 0", 2570 free); 2571 break; 2572 } 2573 2574 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { 2575 /* 2576 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are 2577 * sure that this group will have a large enough 2578 * contiguous free extent, so skip over the smaller free 2579 * extents 2580 */ 2581 j = mb_find_next_bit(bitmap, 2582 EXT4_CLUSTERS_PER_GROUP(sb), i); 2583 freelen = j - i; 2584 2585 if (freelen < ac->ac_g_ex.fe_len) { 2586 i = j; 2587 free -= freelen; 2588 continue; 2589 } 2590 } 2591 2592 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2593 if (WARN_ON(ex.fe_len <= 0)) 2594 break; 2595 if (free < ex.fe_len) { 2596 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2597 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2598 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2599 "%d free clusters as per " 2600 "group info. But got %d blocks", 2601 free, ex.fe_len); 2602 /* 2603 * The number of free blocks differs. This mostly 2604 * indicates that the bitmap is corrupt. So exit 2605 * without claiming the space.
2606 */ 2607 break; 2608 } 2609 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2610 ext4_mb_measure_extent(ac, &ex, e4b); 2611 2612 i += ex.fe_len; 2613 free -= ex.fe_len; 2614 } 2615 2616 ext4_mb_check_limits(ac, e4b, 1); 2617 } 2618 2619 /* 2620 * This is a special case for storages like raid5 2621 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2622 */ 2623 static noinline_for_stack 2624 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2625 struct ext4_buddy *e4b) 2626 { 2627 struct super_block *sb = ac->ac_sb; 2628 struct ext4_sb_info *sbi = EXT4_SB(sb); 2629 void *bitmap = e4b->bd_bitmap; 2630 struct ext4_free_extent ex; 2631 ext4_fsblk_t first_group_block; 2632 ext4_fsblk_t a; 2633 ext4_grpblk_t i, stripe; 2634 int max; 2635 2636 BUG_ON(sbi->s_stripe == 0); 2637 2638 /* find first stripe-aligned block in group */ 2639 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2640 2641 a = first_group_block + sbi->s_stripe - 1; 2642 do_div(a, sbi->s_stripe); 2643 i = (a * sbi->s_stripe) - first_group_block; 2644 2645 stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe); 2646 i = EXT4_B2C(sbi, i); 2647 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2648 if (!mb_test_bit(i, bitmap)) { 2649 max = mb_find_extent(e4b, i, stripe, &ex); 2650 if (max >= stripe) { 2651 ac->ac_found++; 2652 ac->ac_cX_found[ac->ac_criteria]++; 2653 ex.fe_logical = 0xDEADF00D; /* debug value */ 2654 ac->ac_b_ex = ex; 2655 ext4_mb_use_best_found(ac, e4b); 2656 break; 2657 } 2658 } 2659 i += stripe; 2660 } 2661 } 2662 2663 static void __ext4_mb_scan_group(struct ext4_allocation_context *ac) 2664 { 2665 bool is_stripe_aligned; 2666 struct ext4_sb_info *sbi; 2667 enum criteria cr = ac->ac_criteria; 2668 2669 ac->ac_groups_scanned++; 2670 if (cr == CR_POWER2_ALIGNED) 2671 return ext4_mb_simple_scan_group(ac, ac->ac_e4b); 2672 2673 sbi = EXT4_SB(ac->ac_sb); 2674 is_stripe_aligned = false; 2675 if ((sbi->s_stripe >= sbi->s_cluster_ratio) && 2676 !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe))) 2677 is_stripe_aligned = true; 2678 2679 if ((cr == CR_GOAL_LEN_FAST || cr == CR_BEST_AVAIL_LEN) && 2680 is_stripe_aligned) 2681 ext4_mb_scan_aligned(ac, ac->ac_e4b); 2682 2683 if (ac->ac_status == AC_STATUS_CONTINUE) 2684 ext4_mb_complex_scan_group(ac, ac->ac_e4b); 2685 } 2686 2687 /* 2688 * This is also called BEFORE we load the buddy bitmap. 2689 * Returns either 1 or 0 indicating that the group is either suitable 2690 * for the allocation or not. 
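 *
 * The cheap criteria below boil down to simple arithmetic on the
 * in-memory group counters. A minimal sketch (illustrative only, not
 * mballoc code; the helper name is made up):
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

/* CR_GOAL_LEN_FAST / CR_BEST_AVAIL_LEN: a group looks promising when
 * its average free fragment (bb_free / bb_fragments) is at least as
 * long as the goal. */
static bool avg_fragment_ok(int bb_free, int bb_fragments, int goal_len)
{
	if (bb_free == 0 || bb_fragments == 0)
		return false;
	return bb_free / bb_fragments >= goal_len;
}

int main(void)
{
	/* 1000 free clusters in 50 fragments: average length 20 */
	printf("%d\n", avg_fragment_ok(1000, 50, 16));	/* 1: worth scanning */
	printf("%d\n", avg_fragment_ok(1000, 200, 16));	/* 0: too fragmented */
	return 0;
}
#endif
/*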
2691 */ 2692 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2693 ext4_group_t group, enum criteria cr) 2694 { 2695 ext4_grpblk_t free, fragments; 2696 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2697 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2698 2699 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS); 2700 2701 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2702 return false; 2703 2704 free = grp->bb_free; 2705 if (free == 0) 2706 return false; 2707 2708 fragments = grp->bb_fragments; 2709 if (fragments == 0) 2710 return false; 2711 2712 switch (cr) { 2713 case CR_POWER2_ALIGNED: 2714 BUG_ON(ac->ac_2order == 0); 2715 2716 /* Avoid using the first bg of a flexgroup for data files */ 2717 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2718 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2719 ((group % flex_size) == 0)) 2720 return false; 2721 2722 if (free < ac->ac_g_ex.fe_len) 2723 return false; 2724 2725 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 2726 return true; 2727 2728 if (grp->bb_largest_free_order < ac->ac_2order) 2729 return false; 2730 2731 return true; 2732 case CR_GOAL_LEN_FAST: 2733 case CR_BEST_AVAIL_LEN: 2734 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2735 return true; 2736 break; 2737 case CR_GOAL_LEN_SLOW: 2738 if (free >= ac->ac_g_ex.fe_len) 2739 return true; 2740 break; 2741 case CR_ANY_FREE: 2742 return true; 2743 default: 2744 BUG(); 2745 } 2746 2747 return false; 2748 } 2749 2750 /* 2751 * This could return negative error code if something goes wrong 2752 * during ext4_mb_init_group(). This should not be called with 2753 * ext4_lock_group() held. 2754 * 2755 * Note: because we are conditionally operating with the group lock in 2756 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2757 * function using __acquire and __release. This means we need to be 2758 * super careful before messing with the error path handling via "goto 2759 * out"! 2760 */ 2761 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2762 ext4_group_t group, enum criteria cr) 2763 { 2764 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2765 struct super_block *sb = ac->ac_sb; 2766 struct ext4_sb_info *sbi = EXT4_SB(sb); 2767 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2768 ext4_grpblk_t free; 2769 int ret = 0; 2770 2771 if (!grp) 2772 return -EFSCORRUPTED; 2773 if (sbi->s_mb_stats) 2774 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2775 if (should_lock) { 2776 ext4_lock_group(sb, group); 2777 __release(ext4_group_lock_ptr(sb, group)); 2778 } 2779 free = grp->bb_free; 2780 if (free == 0) 2781 goto out; 2782 /* 2783 * In all criterias except CR_ANY_FREE we try to avoid groups that 2784 * can't possibly satisfy the full goal request due to insufficient 2785 * free blocks. 2786 */ 2787 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) 2788 goto out; 2789 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2790 goto out; 2791 if (should_lock) { 2792 __acquire(ext4_group_lock_ptr(sb, group)); 2793 ext4_unlock_group(sb, group); 2794 } 2795 2796 /* We only do this if the grp has never been initialized */ 2797 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2798 struct ext4_group_desc *gdp = 2799 ext4_get_group_desc(sb, group, NULL); 2800 int ret; 2801 2802 /* 2803 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic 2804 * search to find large good chunks almost for free. If buddy 2805 * data is not ready, then this optimization makes no sense. 
But 2806 * we never skip the first block group in a flex_bg, since this 2807 * gets used for metadata block allocation, and we want to make 2808 * sure we locate metadata blocks in the first block group in 2809 * the flex_bg if possible. 2810 */ 2811 if (!ext4_mb_cr_expensive(cr) && 2812 (!sbi->s_log_groups_per_flex || 2813 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2814 !(ext4_has_group_desc_csum(sb) && 2815 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2816 return 0; 2817 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 2818 if (ret) 2819 return ret; 2820 } 2821 2822 if (should_lock) { 2823 ext4_lock_group(sb, group); 2824 __release(ext4_group_lock_ptr(sb, group)); 2825 } 2826 ret = ext4_mb_good_group(ac, group, cr); 2827 out: 2828 if (should_lock) { 2829 __acquire(ext4_group_lock_ptr(sb, group)); 2830 ext4_unlock_group(sb, group); 2831 } 2832 return ret; 2833 } 2834 2835 /* 2836 * Start prefetching @nr block bitmaps starting at @group. 2837 * Return the next group which needs to be prefetched. 2838 */ 2839 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2840 unsigned int nr, int *cnt) 2841 { 2842 ext4_group_t ngroups = ext4_get_groups_count(sb); 2843 struct buffer_head *bh; 2844 struct blk_plug plug; 2845 2846 blk_start_plug(&plug); 2847 while (nr-- > 0) { 2848 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2849 NULL); 2850 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2851 2852 /* 2853 * Prefetch block groups with free blocks; but don't 2854 * bother if it is marked uninitialized on disk, since 2855 * it won't require I/O to read. Also only try to 2856 * prefetch once, so we avoid getblk() call, which can 2857 * be expensive. 2858 */ 2859 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2860 EXT4_MB_GRP_NEED_INIT(grp) && 2861 ext4_free_group_clusters(sb, gdp) > 0 ) { 2862 bh = ext4_read_block_bitmap_nowait(sb, group, true); 2863 if (bh && !IS_ERR(bh)) { 2864 if (!buffer_uptodate(bh) && cnt) 2865 (*cnt)++; 2866 brelse(bh); 2867 } 2868 } 2869 if (++group >= ngroups) 2870 group = 0; 2871 } 2872 blk_finish_plug(&plug); 2873 return group; 2874 } 2875 2876 /* 2877 * Batch reads of the block allocation bitmaps to get 2878 * multiple READs in flight; limit prefetching at inexpensive 2879 * CR, otherwise mballoc can spend a lot of time loading 2880 * imperfect groups 2881 */ 2882 static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac, 2883 ext4_group_t group) 2884 { 2885 struct ext4_sb_info *sbi; 2886 2887 if (ac->ac_prefetch_grp != group) 2888 return; 2889 2890 sbi = EXT4_SB(ac->ac_sb); 2891 if (ext4_mb_cr_expensive(ac->ac_criteria) || 2892 ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) { 2893 unsigned int nr = sbi->s_mb_prefetch; 2894 2895 if (ext4_has_feature_flex_bg(ac->ac_sb)) { 2896 nr = 1 << sbi->s_log_groups_per_flex; 2897 nr -= group & (nr - 1); 2898 nr = umin(nr, sbi->s_mb_prefetch); 2899 } 2900 2901 ac->ac_prefetch_nr = nr; 2902 ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr, 2903 &ac->ac_prefetch_ios); 2904 } 2905 } 2906 2907 /* 2908 * Prefetching reads the block bitmap into the buffer cache; but we 2909 * need to make sure that the buddy bitmap in the page cache has been 2910 * initialized. Note that ext4_mb_init_group() will block if the I/O 2911 * is not yet completed, or indeed if it was not initiated by 2912 * ext4_mb_prefetch did not start the I/O. 
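 *
 * The batch size chosen by ext4_mb_might_prefetch() above is simply
 * "the rest of the current flex_bg, clamped to s_mb_prefetch". A tiny
 * sketch of that arithmetic (illustrative only, not mballoc code):
 */
#if 0
#include <stdio.h>

static unsigned int prefetch_nr(unsigned int group,
				unsigned int log_groups_per_flex,
				unsigned int mb_prefetch)
{
	unsigned int nr = 1u << log_groups_per_flex;

	nr -= group & (nr - 1);		/* groups left in this flex_bg */
	return nr < mb_prefetch ? nr : mb_prefetch;
}

int main(void)
{
	/* flex_bg of 16 groups, s_mb_prefetch == 32 */
	printf("%u\n", prefetch_nr(0, 4, 32));	/* 16: a whole flex_bg */
	printf("%u\n", prefetch_nr(13, 4, 32));	/* 3: up to the boundary */
	return 0;
}
#endif
/*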
2913 * 2914 * TODO: We should actually kick off the buddy bitmap setup in a work 2915 * queue when the buffer I/O is completed, so that we don't block 2916 * waiting for the block allocation bitmap read to finish when 2917 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 2918 */ 2919 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2920 unsigned int nr) 2921 { 2922 struct ext4_group_desc *gdp; 2923 struct ext4_group_info *grp; 2924 2925 while (nr-- > 0) { 2926 if (!group) 2927 group = ext4_get_groups_count(sb); 2928 group--; 2929 gdp = ext4_get_group_desc(sb, group, NULL); 2930 grp = ext4_get_group_info(sb, group); 2931 2932 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && 2933 ext4_free_group_clusters(sb, gdp) > 0) { 2934 if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2935 break; 2936 } 2937 } 2938 } 2939 2940 static int ext4_mb_scan_group(struct ext4_allocation_context *ac, 2941 ext4_group_t group) 2942 { 2943 int ret; 2944 struct super_block *sb = ac->ac_sb; 2945 enum criteria cr = ac->ac_criteria; 2946 2947 ext4_mb_might_prefetch(ac, group); 2948 2949 /* prevent unnecessary buddy loading. */ 2950 if (cr < CR_ANY_FREE && spin_is_locked(ext4_group_lock_ptr(sb, group))) 2951 return 0; 2952 2953 /* This now checks without needing the buddy folio */ 2954 ret = ext4_mb_good_group_nolock(ac, group, cr); 2955 if (ret <= 0) { 2956 if (!ac->ac_first_err) 2957 ac->ac_first_err = ret; 2958 return 0; 2959 } 2960 2961 ret = ext4_mb_load_buddy(sb, group, ac->ac_e4b); 2962 if (ret) 2963 return ret; 2964 2965 /* skip busy group */ 2966 if (cr >= CR_ANY_FREE) 2967 ext4_lock_group(sb, group); 2968 else if (!ext4_try_lock_group(sb, group)) 2969 goto out_unload; 2970 2971 /* We need to check again after locking the block group. */ 2972 if (unlikely(!ext4_mb_good_group(ac, group, cr))) 2973 goto out_unlock; 2974 2975 __ext4_mb_scan_group(ac); 2976 2977 out_unlock: 2978 ext4_unlock_group(sb, group); 2979 out_unload: 2980 ext4_mb_unload_buddy(ac->ac_e4b); 2981 return ret; 2982 } 2983 2984 static noinline_for_stack int 2985 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2986 { 2987 ext4_group_t i; 2988 int err = 0; 2989 struct super_block *sb = ac->ac_sb; 2990 struct ext4_sb_info *sbi = EXT4_SB(sb); 2991 struct ext4_buddy e4b; 2992 2993 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2994 2995 /* first, try the goal */ 2996 err = ext4_mb_find_by_goal(ac, &e4b); 2997 if (err || ac->ac_status == AC_STATUS_FOUND) 2998 goto out; 2999 3000 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3001 goto out; 3002 3003 /* 3004 * ac->ac_2order is set only if the fe_len is a power of 2 3005 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED 3006 * so that we try exact allocation using buddy. 3007 */ 3008 i = fls(ac->ac_g_ex.fe_len); 3009 ac->ac_2order = 0; 3010 /* 3011 * We search using buddy data only if the order of the request 3012 * is greater than equal to the sbi_s_mb_order2_reqs 3013 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 3014 * We also support searching for power-of-two requests only for 3015 * requests upto maximum buddy size we have constructed. 
3016 */ 3017 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 3018 if (is_power_of_2(ac->ac_g_ex.fe_len)) 3019 ac->ac_2order = array_index_nospec(i - 1, 3020 MB_NUM_ORDERS(sb)); 3021 } 3022 3023 /* if stream allocation is enabled, use global goal */ 3024 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 3025 int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals; 3026 3027 ac->ac_g_ex.fe_group = READ_ONCE(sbi->s_mb_last_groups[hash]); 3028 ac->ac_g_ex.fe_start = -1; 3029 ac->ac_flags &= ~EXT4_MB_HINT_TRY_GOAL; 3030 } 3031 3032 /* 3033 * Let's just scan groups to find more-less suitable blocks We 3034 * start with CR_GOAL_LEN_FAST, unless it is power of 2 3035 * aligned, in which case let's do that faster approach first. 3036 */ 3037 ac->ac_criteria = CR_GOAL_LEN_FAST; 3038 if (ac->ac_2order) 3039 ac->ac_criteria = CR_POWER2_ALIGNED; 3040 3041 ac->ac_e4b = &e4b; 3042 ac->ac_prefetch_ios = 0; 3043 ac->ac_first_err = 0; 3044 repeat: 3045 while (ac->ac_criteria < EXT4_MB_NUM_CRS) { 3046 err = ext4_mb_scan_groups(ac); 3047 if (err) 3048 goto out; 3049 3050 if (ac->ac_status != AC_STATUS_CONTINUE) 3051 break; 3052 } 3053 3054 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 3055 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 3056 /* 3057 * We've been searching too long. Let's try to allocate 3058 * the best chunk we've found so far 3059 */ 3060 ext4_mb_try_best_found(ac, &e4b); 3061 if (ac->ac_status != AC_STATUS_FOUND) { 3062 int lost; 3063 3064 /* 3065 * Someone more lucky has already allocated it. 3066 * The only thing we can do is just take first 3067 * found block(s) 3068 */ 3069 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 3070 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 3071 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 3072 ac->ac_b_ex.fe_len, lost); 3073 3074 ac->ac_b_ex.fe_group = 0; 3075 ac->ac_b_ex.fe_start = 0; 3076 ac->ac_b_ex.fe_len = 0; 3077 ac->ac_status = AC_STATUS_CONTINUE; 3078 ac->ac_flags |= EXT4_MB_HINT_FIRST; 3079 ac->ac_criteria = CR_ANY_FREE; 3080 goto repeat; 3081 } 3082 } 3083 3084 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) { 3085 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 3086 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC && 3087 ac->ac_b_ex.fe_group == ac->ac_g_ex.fe_group) 3088 atomic_inc(&sbi->s_bal_stream_goals); 3089 } 3090 out: 3091 if (!err && ac->ac_status != AC_STATUS_FOUND && ac->ac_first_err) 3092 err = ac->ac_first_err; 3093 3094 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 3095 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 3096 ac->ac_flags, ac->ac_criteria, err); 3097 3098 if (ac->ac_prefetch_nr) 3099 ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr); 3100 3101 return err; 3102 } 3103 3104 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 3105 { 3106 struct super_block *sb = pde_data(file_inode(seq->file)); 3107 ext4_group_t group; 3108 3109 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3110 return NULL; 3111 group = *pos + 1; 3112 return (void *) ((unsigned long) group); 3113 } 3114 3115 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 3116 { 3117 struct super_block *sb = pde_data(file_inode(seq->file)); 3118 ext4_group_t group; 3119 3120 ++*pos; 3121 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3122 return NULL; 3123 group = *pos + 1; 3124 return (void *) ((unsigned long) group); 3125 } 3126 3127 static int ext4_mb_seq_groups_show(struct seq_file *seq, 
void *v) 3128 { 3129 struct super_block *sb = pde_data(file_inode(seq->file)); 3130 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 3131 int i, err; 3132 char nbuf[16]; 3133 struct ext4_buddy e4b; 3134 struct ext4_group_info *grinfo; 3135 unsigned char blocksize_bits = min_t(unsigned char, 3136 sb->s_blocksize_bits, 3137 EXT4_MAX_BLOCK_LOG_SIZE); 3138 DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters, 3139 EXT4_MAX_BLOCK_LOG_SIZE + 2); 3140 3141 group--; 3142 if (group == 0) 3143 seq_puts(seq, "#group: free frags first [" 3144 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 3145 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 3146 3147 i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) + 3148 sizeof(struct ext4_group_info); 3149 3150 grinfo = ext4_get_group_info(sb, group); 3151 if (!grinfo) 3152 return 0; 3153 /* Load the group info in memory only if not already loaded. */ 3154 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 3155 err = ext4_mb_load_buddy(sb, group, &e4b); 3156 if (err) { 3157 seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf)); 3158 return 0; 3159 } 3160 ext4_mb_unload_buddy(&e4b); 3161 } 3162 3163 /* 3164 * We care only about free space counters in the group info and 3165 * these are safe to access even after the buddy has been unloaded 3166 */ 3167 memcpy(sg, grinfo, i); 3168 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free, 3169 sg->bb_fragments, sg->bb_first_free); 3170 for (i = 0; i <= 13; i++) 3171 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 3172 sg->bb_counters[i] : 0); 3173 seq_puts(seq, " ]"); 3174 if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg)) 3175 seq_puts(seq, " Block bitmap corrupted!"); 3176 seq_putc(seq, '\n'); 3177 return 0; 3178 } 3179 3180 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 3181 { 3182 } 3183 3184 const struct seq_operations ext4_mb_seq_groups_ops = { 3185 .start = ext4_mb_seq_groups_start, 3186 .next = ext4_mb_seq_groups_next, 3187 .stop = ext4_mb_seq_groups_stop, 3188 .show = ext4_mb_seq_groups_show, 3189 }; 3190 3191 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 3192 { 3193 struct super_block *sb = seq->private; 3194 struct ext4_sb_info *sbi = EXT4_SB(sb); 3195 3196 seq_puts(seq, "mballoc:\n"); 3197 if (!sbi->s_mb_stats) { 3198 seq_puts(seq, "\tmb stats collection turned off.\n"); 3199 seq_puts( 3200 seq, 3201 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 3202 return 0; 3203 } 3204 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 3205 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 3206 3207 seq_printf(seq, "\tgroups_scanned: %u\n", 3208 atomic_read(&sbi->s_bal_groups_scanned)); 3209 3210 /* CR_POWER2_ALIGNED stats */ 3211 seq_puts(seq, "\tcr_p2_aligned_stats:\n"); 3212 seq_printf(seq, "\t\thits: %llu\n", 3213 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); 3214 seq_printf( 3215 seq, "\t\tgroups_considered: %llu\n", 3216 atomic64_read( 3217 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); 3218 seq_printf(seq, "\t\textents_scanned: %u\n", 3219 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); 3220 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3221 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); 3222 3223 /* CR_GOAL_LEN_FAST stats */ 3224 seq_puts(seq, "\tcr_goal_fast_stats:\n"); 3225 seq_printf(seq, "\t\thits: %llu\n", 3226 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); 3227 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3228 atomic64_read( 3229 
&sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); 3230 seq_printf(seq, "\t\textents_scanned: %u\n", 3231 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); 3232 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3233 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); 3234 3235 /* CR_BEST_AVAIL_LEN stats */ 3236 seq_puts(seq, "\tcr_best_avail_stats:\n"); 3237 seq_printf(seq, "\t\thits: %llu\n", 3238 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); 3239 seq_printf( 3240 seq, "\t\tgroups_considered: %llu\n", 3241 atomic64_read( 3242 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); 3243 seq_printf(seq, "\t\textents_scanned: %u\n", 3244 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); 3245 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3246 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); 3247 3248 /* CR_GOAL_LEN_SLOW stats */ 3249 seq_puts(seq, "\tcr_goal_slow_stats:\n"); 3250 seq_printf(seq, "\t\thits: %llu\n", 3251 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); 3252 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3253 atomic64_read( 3254 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); 3255 seq_printf(seq, "\t\textents_scanned: %u\n", 3256 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); 3257 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3258 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); 3259 3260 /* CR_ANY_FREE stats */ 3261 seq_puts(seq, "\tcr_any_free_stats:\n"); 3262 seq_printf(seq, "\t\thits: %llu\n", 3263 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); 3264 seq_printf( 3265 seq, "\t\tgroups_considered: %llu\n", 3266 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); 3267 seq_printf(seq, "\t\textents_scanned: %u\n", 3268 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); 3269 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3270 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); 3271 3272 /* Aggregates */ 3273 seq_printf(seq, "\textents_scanned: %u\n", 3274 atomic_read(&sbi->s_bal_ex_scanned)); 3275 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 3276 seq_printf(seq, "\t\tstream_goal_hits: %u\n", 3277 atomic_read(&sbi->s_bal_stream_goals)); 3278 seq_printf(seq, "\t\tlen_goal_hits: %u\n", 3279 atomic_read(&sbi->s_bal_len_goals)); 3280 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 3281 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 3282 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 3283 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 3284 atomic_read(&sbi->s_mb_buddies_generated), 3285 ext4_get_groups_count(sb)); 3286 seq_printf(seq, "\tbuddies_time_used: %llu\n", 3287 atomic64_read(&sbi->s_mb_generation_time)); 3288 seq_printf(seq, "\tpreallocated: %u\n", 3289 atomic_read(&sbi->s_mb_preallocated)); 3290 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); 3291 return 0; 3292 } 3293 3294 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 3295 { 3296 struct super_block *sb = pde_data(file_inode(seq->file)); 3297 unsigned long position; 3298 3299 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3300 return NULL; 3301 position = *pos + 1; 3302 return (void *) ((unsigned long) position); 3303 } 3304 3305 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3306 { 3307 struct super_block *sb = pde_data(file_inode(seq->file)); 3308 unsigned long position; 3309 3310 ++*pos; 3311 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 
3312 return NULL; 3313 position = *pos + 1; 3314 return (void *) ((unsigned long) position); 3315 } 3316 3317 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3318 { 3319 struct super_block *sb = pde_data(file_inode(seq->file)); 3320 struct ext4_sb_info *sbi = EXT4_SB(sb); 3321 unsigned long position = ((unsigned long) v); 3322 struct ext4_group_info *grp; 3323 unsigned int count; 3324 unsigned long idx; 3325 3326 position--; 3327 if (position >= MB_NUM_ORDERS(sb)) { 3328 position -= MB_NUM_ORDERS(sb); 3329 if (position == 0) 3330 seq_puts(seq, "avg_fragment_size_lists:\n"); 3331 3332 count = 0; 3333 xa_for_each(&sbi->s_mb_avg_fragment_size[position], idx, grp) 3334 count++; 3335 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3336 (unsigned int)position, count); 3337 return 0; 3338 } 3339 3340 if (position == 0) { 3341 seq_printf(seq, "optimize_scan: %d\n", 3342 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); 3343 seq_puts(seq, "max_free_order_lists:\n"); 3344 } 3345 count = 0; 3346 xa_for_each(&sbi->s_mb_largest_free_orders[position], idx, grp) 3347 count++; 3348 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3349 (unsigned int)position, count); 3350 3351 return 0; 3352 } 3353 3354 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3355 { 3356 } 3357 3358 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3359 .start = ext4_mb_seq_structs_summary_start, 3360 .next = ext4_mb_seq_structs_summary_next, 3361 .stop = ext4_mb_seq_structs_summary_stop, 3362 .show = ext4_mb_seq_structs_summary_show, 3363 }; 3364 3365 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3366 { 3367 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3368 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3369 3370 BUG_ON(!cachep); 3371 return cachep; 3372 } 3373 3374 /* 3375 * Allocate the top-level s_group_info array for the specified number 3376 * of groups 3377 */ 3378 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3379 { 3380 struct ext4_sb_info *sbi = EXT4_SB(sb); 3381 unsigned size; 3382 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3383 3384 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3385 EXT4_DESC_PER_BLOCK_BITS(sb); 3386 if (size <= sbi->s_group_info_size) 3387 return 0; 3388 3389 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3390 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3391 if (!new_groupinfo) { 3392 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3393 return -ENOMEM; 3394 } 3395 rcu_read_lock(); 3396 old_groupinfo = rcu_dereference(sbi->s_group_info); 3397 if (old_groupinfo) 3398 memcpy(new_groupinfo, old_groupinfo, 3399 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3400 rcu_read_unlock(); 3401 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3402 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3403 if (old_groupinfo) 3404 ext4_kvfree_array_rcu(old_groupinfo); 3405 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3406 sbi->s_group_info_size); 3407 return 0; 3408 } 3409 3410 /* Create and initialize ext4_group_info data for the given group. 
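 *
 * The group info pointers live in a two-level table: the outer index
 * picks a block-sized array of pointers, the inner index picks the
 * group inside it. A minimal sketch of that indexing (illustrative
 * only, not mballoc code; 7 assumes 4k blocks with 32-byte group
 * descriptors, i.e. 128 descriptors per block):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int desc_per_block_bits = 7;
	unsigned int group = 300;
	unsigned int idx = group >> desc_per_block_bits;	    /* outer: 2 */
	unsigned int i = group & ((1u << desc_per_block_bits) - 1); /* inner: 44 */

	printf("group %u -> s_group_info[%u][%u]\n", group, idx, i);
	return 0;
}
#endif
/*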
*/ 3411 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3412 struct ext4_group_desc *desc) 3413 { 3414 int i; 3415 int metalen = 0; 3416 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3417 struct ext4_sb_info *sbi = EXT4_SB(sb); 3418 struct ext4_group_info **meta_group_info; 3419 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3420 3421 /* 3422 * First check if this group is the first of a reserved block. 3423 * If it's true, we have to allocate a new table of pointers 3424 * to ext4_group_info structures 3425 */ 3426 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3427 metalen = sizeof(*meta_group_info) << 3428 EXT4_DESC_PER_BLOCK_BITS(sb); 3429 meta_group_info = kmalloc(metalen, GFP_NOFS); 3430 if (meta_group_info == NULL) { 3431 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3432 "for a buddy group"); 3433 return -ENOMEM; 3434 } 3435 rcu_read_lock(); 3436 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3437 rcu_read_unlock(); 3438 } 3439 3440 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3441 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3442 3443 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3444 if (meta_group_info[i] == NULL) { 3445 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3446 goto exit_group_info; 3447 } 3448 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3449 &(meta_group_info[i]->bb_state)); 3450 3451 /* 3452 * initialize bb_free to be able to skip 3453 * empty groups without initialization 3454 */ 3455 if (ext4_has_group_desc_csum(sb) && 3456 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3457 meta_group_info[i]->bb_free = 3458 ext4_free_clusters_after_init(sb, group, desc); 3459 } else { 3460 meta_group_info[i]->bb_free = 3461 ext4_free_group_clusters(sb, desc); 3462 } 3463 3464 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3465 init_rwsem(&meta_group_info[i]->alloc_sem); 3466 meta_group_info[i]->bb_free_root = RB_ROOT; 3467 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3468 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3469 meta_group_info[i]->bb_group = group; 3470 3471 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3472 return 0; 3473 3474 exit_group_info: 3475 /* If a meta_group_info table has been allocated, release it now */ 3476 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3477 struct ext4_group_info ***group_info; 3478 3479 rcu_read_lock(); 3480 group_info = rcu_dereference(sbi->s_group_info); 3481 kfree(group_info[idx]); 3482 group_info[idx] = NULL; 3483 rcu_read_unlock(); 3484 } 3485 return -ENOMEM; 3486 } /* ext4_mb_add_groupinfo */ 3487 3488 static int ext4_mb_init_backend(struct super_block *sb) 3489 { 3490 ext4_group_t ngroups = ext4_get_groups_count(sb); 3491 ext4_group_t i; 3492 struct ext4_sb_info *sbi = EXT4_SB(sb); 3493 int err; 3494 struct ext4_group_desc *desc; 3495 struct ext4_group_info ***group_info; 3496 struct kmem_cache *cachep; 3497 3498 err = ext4_mb_alloc_groupinfo(sb, ngroups); 3499 if (err) 3500 return err; 3501 3502 sbi->s_buddy_cache = new_inode(sb); 3503 if (sbi->s_buddy_cache == NULL) { 3504 ext4_msg(sb, KERN_ERR, "can't get new inode"); 3505 goto err_freesgi; 3506 } 3507 /* To avoid potentially colliding with an valid on-disk inode number, 3508 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 3509 * not in the inode hash, so it should never be found by iget(), but 3510 * this will avoid confusion if it ever shows up during debugging. 
*/ 3511 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3512 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 3513 ext4_set_inode_mapping_order(sbi->s_buddy_cache); 3514 3515 for (i = 0; i < ngroups; i++) { 3516 cond_resched(); 3517 desc = ext4_get_group_desc(sb, i, NULL); 3518 if (desc == NULL) { 3519 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3520 goto err_freebuddy; 3521 } 3522 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 3523 goto err_freebuddy; 3524 } 3525 3526 if (ext4_has_feature_flex_bg(sb)) { 3527 /* a single flex group is supposed to be read by a single IO. 3528 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3529 * unsigned integer, so the maximum shift is 32. 3530 */ 3531 if (sbi->s_es->s_log_groups_per_flex >= 32) { 3532 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3533 goto err_freebuddy; 3534 } 3535 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 3536 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3537 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3538 } else { 3539 sbi->s_mb_prefetch = 32; 3540 } 3541 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3542 sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3543 /* 3544 * now many real IOs to prefetch within a single allocation at 3545 * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is an CPU-related 3546 * optimization we shouldn't try to load too many groups, at some point 3547 * we should start to use what we've got in memory. 3548 * with an average random access time 5ms, it'd take a second to get 3549 * 200 groups (* N with flex_bg), so let's make this limit 4 3550 */ 3551 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3552 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3553 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3554 3555 return 0; 3556 3557 err_freebuddy: 3558 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3559 while (i-- > 0) { 3560 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 3561 3562 if (grp) 3563 kmem_cache_free(cachep, grp); 3564 } 3565 i = sbi->s_group_info_size; 3566 rcu_read_lock(); 3567 group_info = rcu_dereference(sbi->s_group_info); 3568 while (i-- > 0) 3569 kfree(group_info[i]); 3570 rcu_read_unlock(); 3571 iput(sbi->s_buddy_cache); 3572 err_freesgi: 3573 rcu_read_lock(); 3574 kvfree(rcu_dereference(sbi->s_group_info)); 3575 rcu_read_unlock(); 3576 return -ENOMEM; 3577 } 3578 3579 static void ext4_groupinfo_destroy_slabs(void) 3580 { 3581 int i; 3582 3583 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 3584 kmem_cache_destroy(ext4_groupinfo_caches[i]); 3585 ext4_groupinfo_caches[i] = NULL; 3586 } 3587 } 3588 3589 static int ext4_groupinfo_create_slab(size_t size) 3590 { 3591 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 3592 int slab_size; 3593 int blocksize_bits = order_base_2(size); 3594 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3595 struct kmem_cache *cachep; 3596 3597 if (cache_index >= NR_GRPINFO_CACHES) 3598 return -EINVAL; 3599 3600 if (unlikely(cache_index < 0)) 3601 cache_index = 0; 3602 3603 mutex_lock(&ext4_grpinfo_slab_create_mutex); 3604 if (ext4_groupinfo_caches[cache_index]) { 3605 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3606 return 0; /* Already created */ 3607 } 3608 3609 slab_size = offsetof(struct ext4_group_info, 3610 bb_counters[blocksize_bits + 2]); 3611 3612 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 3613 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 3614 NULL); 3615 3616 ext4_groupinfo_caches[cache_index] = 
cachep; 3617 3618 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3619 if (!cachep) { 3620 printk(KERN_EMERG 3621 "EXT4-fs: no memory for groupinfo slab cache\n"); 3622 return -ENOMEM; 3623 } 3624 3625 return 0; 3626 } 3627 3628 static void ext4_discard_work(struct work_struct *work) 3629 { 3630 struct ext4_sb_info *sbi = container_of(work, 3631 struct ext4_sb_info, s_discard_work); 3632 struct super_block *sb = sbi->s_sb; 3633 struct ext4_free_data *fd, *nfd; 3634 struct ext4_buddy e4b; 3635 LIST_HEAD(discard_list); 3636 ext4_group_t grp, load_grp; 3637 int err = 0; 3638 3639 spin_lock(&sbi->s_md_lock); 3640 list_splice_init(&sbi->s_discard_list, &discard_list); 3641 spin_unlock(&sbi->s_md_lock); 3642 3643 load_grp = UINT_MAX; 3644 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 3645 /* 3646 * If filesystem is umounting or no memory or suffering 3647 * from no space, give up the discard 3648 */ 3649 if ((sb->s_flags & SB_ACTIVE) && !err && 3650 !atomic_read(&sbi->s_retry_alloc_pending)) { 3651 grp = fd->efd_group; 3652 if (grp != load_grp) { 3653 if (load_grp != UINT_MAX) 3654 ext4_mb_unload_buddy(&e4b); 3655 3656 err = ext4_mb_load_buddy(sb, grp, &e4b); 3657 if (err) { 3658 kmem_cache_free(ext4_free_data_cachep, fd); 3659 load_grp = UINT_MAX; 3660 continue; 3661 } else { 3662 load_grp = grp; 3663 } 3664 } 3665 3666 ext4_lock_group(sb, grp); 3667 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 3668 fd->efd_start_cluster + fd->efd_count - 1, 1); 3669 ext4_unlock_group(sb, grp); 3670 } 3671 kmem_cache_free(ext4_free_data_cachep, fd); 3672 } 3673 3674 if (load_grp != UINT_MAX) 3675 ext4_mb_unload_buddy(&e4b); 3676 } 3677 3678 static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi) 3679 { 3680 if (!sbi->s_mb_avg_fragment_size) 3681 return; 3682 3683 for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++) 3684 xa_destroy(&sbi->s_mb_avg_fragment_size[i]); 3685 3686 kfree(sbi->s_mb_avg_fragment_size); 3687 sbi->s_mb_avg_fragment_size = NULL; 3688 } 3689 3690 static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi) 3691 { 3692 if (!sbi->s_mb_largest_free_orders) 3693 return; 3694 3695 for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++) 3696 xa_destroy(&sbi->s_mb_largest_free_orders[i]); 3697 3698 kfree(sbi->s_mb_largest_free_orders); 3699 sbi->s_mb_largest_free_orders = NULL; 3700 } 3701 3702 int ext4_mb_init(struct super_block *sb) 3703 { 3704 struct ext4_sb_info *sbi = EXT4_SB(sb); 3705 unsigned i, j; 3706 unsigned offset, offset_incr; 3707 unsigned max; 3708 int ret; 3709 3710 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3711 3712 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3713 if (sbi->s_mb_offsets == NULL) { 3714 ret = -ENOMEM; 3715 goto out; 3716 } 3717 3718 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3719 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3720 if (sbi->s_mb_maxs == NULL) { 3721 ret = -ENOMEM; 3722 goto out; 3723 } 3724 3725 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 3726 if (ret < 0) 3727 goto out; 3728 3729 /* order 0 is regular bitmap */ 3730 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3731 sbi->s_mb_offsets[0] = 0; 3732 3733 i = 1; 3734 offset = 0; 3735 offset_incr = 1 << (sb->s_blocksize_bits - 1); 3736 max = sb->s_blocksize << 2; 3737 do { 3738 sbi->s_mb_offsets[i] = offset; 3739 sbi->s_mb_maxs[i] = max; 3740 offset += offset_incr; 3741 offset_incr = offset_incr >> 1; 3742 max = max >> 1; 3743 i++; 3744 } while (i < MB_NUM_ORDERS(sb)); 3745 3746 sbi->s_mb_avg_fragment_size = 3747 
kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray), 3748 GFP_KERNEL); 3749 if (!sbi->s_mb_avg_fragment_size) { 3750 ret = -ENOMEM; 3751 goto out; 3752 } 3753 for (i = 0; i < MB_NUM_ORDERS(sb); i++) 3754 xa_init(&sbi->s_mb_avg_fragment_size[i]); 3755 3756 sbi->s_mb_largest_free_orders = 3757 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray), 3758 GFP_KERNEL); 3759 if (!sbi->s_mb_largest_free_orders) { 3760 ret = -ENOMEM; 3761 goto out; 3762 } 3763 for (i = 0; i < MB_NUM_ORDERS(sb); i++) 3764 xa_init(&sbi->s_mb_largest_free_orders[i]); 3765 3766 spin_lock_init(&sbi->s_md_lock); 3767 atomic_set(&sbi->s_mb_free_pending, 0); 3768 INIT_LIST_HEAD(&sbi->s_freed_data_list[0]); 3769 INIT_LIST_HEAD(&sbi->s_freed_data_list[1]); 3770 INIT_LIST_HEAD(&sbi->s_discard_list); 3771 INIT_WORK(&sbi->s_discard_work, ext4_discard_work); 3772 atomic_set(&sbi->s_retry_alloc_pending, 0); 3773 3774 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 3775 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 3776 sbi->s_mb_stats = MB_DEFAULT_STATS; 3777 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 3778 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 3779 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER; 3780 3781 /* 3782 * The default group preallocation is 512, which for 4k block 3783 * sizes translates to 2 megabytes. However, for bigalloc file 3784 * systems, this is probably too big (i.e., if the cluster size 3785 * is 1 megabyte, then group preallocation size becomes half a 3786 * gigabyte!). As a default, we will keep a two megabyte 3787 * group prealloc size for cluster sizes up to 64k, and after 3788 * that, we will force a minimum group preallocation size of 3789 * 32 clusters. This translates to 8 megs when the cluster 3790 * size is 256k, and 32 megs when the cluster size is 1 meg, 3791 * which seems reasonable as a default. 3792 */ 3793 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 3794 sbi->s_cluster_bits, 32); 3795 /* 3796 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 3797 * to the lowest multiple of s_stripe which is bigger than 3798 * the s_mb_group_prealloc as determined above. We want 3799 * the preallocation size to be an exact multiple of the 3800 * RAID stripe size so that preallocations don't fragment 3801 * the stripes.
3802 */ 3803 if (sbi->s_stripe > 1) { 3804 sbi->s_mb_group_prealloc = roundup( 3805 sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe)); 3806 } 3807 3808 sbi->s_mb_nr_global_goals = umin(num_possible_cpus(), 3809 DIV_ROUND_UP(sbi->s_groups_count, 4)); 3810 sbi->s_mb_last_groups = kcalloc(sbi->s_mb_nr_global_goals, 3811 sizeof(ext4_group_t), GFP_KERNEL); 3812 if (sbi->s_mb_last_groups == NULL) { 3813 ret = -ENOMEM; 3814 goto out; 3815 } 3816 3817 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3818 if (sbi->s_locality_groups == NULL) { 3819 ret = -ENOMEM; 3820 goto out_free_last_groups; 3821 } 3822 for_each_possible_cpu(i) { 3823 struct ext4_locality_group *lg; 3824 lg = per_cpu_ptr(sbi->s_locality_groups, i); 3825 mutex_init(&lg->lg_mutex); 3826 for (j = 0; j < PREALLOC_TB_SIZE; j++) 3827 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3828 spin_lock_init(&lg->lg_prealloc_lock); 3829 } 3830 3831 if (bdev_nonrot(sb->s_bdev)) 3832 sbi->s_mb_max_linear_groups = 0; 3833 else 3834 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 3835 /* init file for buddy data */ 3836 ret = ext4_mb_init_backend(sb); 3837 if (ret != 0) 3838 goto out_free_locality_groups; 3839 3840 return 0; 3841 3842 out_free_locality_groups: 3843 free_percpu(sbi->s_locality_groups); 3844 sbi->s_locality_groups = NULL; 3845 out_free_last_groups: 3846 kfree(sbi->s_mb_last_groups); 3847 sbi->s_mb_last_groups = NULL; 3848 out: 3849 ext4_mb_avg_fragment_size_destroy(sbi); 3850 ext4_mb_largest_free_orders_destroy(sbi); 3851 kfree(sbi->s_mb_offsets); 3852 sbi->s_mb_offsets = NULL; 3853 kfree(sbi->s_mb_maxs); 3854 sbi->s_mb_maxs = NULL; 3855 return ret; 3856 } 3857 3858 /* need to called with the ext4 group lock held */ 3859 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3860 { 3861 struct ext4_prealloc_space *pa; 3862 struct list_head *cur, *tmp; 3863 int count = 0; 3864 3865 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3866 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3867 list_del(&pa->pa_group_list); 3868 count++; 3869 kmem_cache_free(ext4_pspace_cachep, pa); 3870 } 3871 return count; 3872 } 3873 3874 void ext4_mb_release(struct super_block *sb) 3875 { 3876 ext4_group_t ngroups = ext4_get_groups_count(sb); 3877 ext4_group_t i; 3878 int num_meta_group_infos; 3879 struct ext4_group_info *grinfo, ***group_info; 3880 struct ext4_sb_info *sbi = EXT4_SB(sb); 3881 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3882 int count; 3883 3884 if (test_opt(sb, DISCARD)) { 3885 /* 3886 * wait the discard work to drain all of ext4_free_data 3887 */ 3888 flush_work(&sbi->s_discard_work); 3889 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 3890 } 3891 3892 if (sbi->s_group_info) { 3893 for (i = 0; i < ngroups; i++) { 3894 cond_resched(); 3895 grinfo = ext4_get_group_info(sb, i); 3896 if (!grinfo) 3897 continue; 3898 mb_group_bb_bitmap_free(grinfo); 3899 ext4_lock_group(sb, i); 3900 count = ext4_mb_cleanup_pa(grinfo); 3901 if (count) 3902 mb_debug(sb, "mballoc: %d PAs left\n", 3903 count); 3904 ext4_unlock_group(sb, i); 3905 kmem_cache_free(cachep, grinfo); 3906 } 3907 num_meta_group_infos = (ngroups + 3908 EXT4_DESC_PER_BLOCK(sb) - 1) >> 3909 EXT4_DESC_PER_BLOCK_BITS(sb); 3910 rcu_read_lock(); 3911 group_info = rcu_dereference(sbi->s_group_info); 3912 for (i = 0; i < num_meta_group_infos; i++) 3913 kfree(group_info[i]); 3914 kvfree(group_info); 3915 rcu_read_unlock(); 3916 } 3917 ext4_mb_avg_fragment_size_destroy(sbi); 3918 
ext4_mb_largest_free_orders_destroy(sbi); 3919 kfree(sbi->s_mb_offsets); 3920 kfree(sbi->s_mb_maxs); 3921 iput(sbi->s_buddy_cache); 3922 if (sbi->s_mb_stats) { 3923 ext4_msg(sb, KERN_INFO, 3924 "mballoc: %u blocks %u reqs (%u success)", 3925 atomic_read(&sbi->s_bal_allocated), 3926 atomic_read(&sbi->s_bal_reqs), 3927 atomic_read(&sbi->s_bal_success)); 3928 ext4_msg(sb, KERN_INFO, 3929 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3930 "%u 2^N hits, %u breaks, %u lost", 3931 atomic_read(&sbi->s_bal_ex_scanned), 3932 atomic_read(&sbi->s_bal_groups_scanned), 3933 atomic_read(&sbi->s_bal_goals), 3934 atomic_read(&sbi->s_bal_2orders), 3935 atomic_read(&sbi->s_bal_breaks), 3936 atomic_read(&sbi->s_mb_lost_chunks)); 3937 ext4_msg(sb, KERN_INFO, 3938 "mballoc: %u generated and it took %llu", 3939 atomic_read(&sbi->s_mb_buddies_generated), 3940 atomic64_read(&sbi->s_mb_generation_time)); 3941 ext4_msg(sb, KERN_INFO, 3942 "mballoc: %u preallocated, %u discarded", 3943 atomic_read(&sbi->s_mb_preallocated), 3944 atomic_read(&sbi->s_mb_discarded)); 3945 } 3946 3947 free_percpu(sbi->s_locality_groups); 3948 kfree(sbi->s_mb_last_groups); 3949 } 3950 3951 static inline int ext4_issue_discard(struct super_block *sb, 3952 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 3953 { 3954 ext4_fsblk_t discard_block; 3955 3956 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3957 ext4_group_first_block_no(sb, block_group)); 3958 count = EXT4_C2B(EXT4_SB(sb), count); 3959 trace_ext4_discard_blocks(sb, 3960 (unsigned long long) discard_block, count); 3961 3962 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3963 } 3964 3965 static void ext4_free_data_in_buddy(struct super_block *sb, 3966 struct ext4_free_data *entry) 3967 { 3968 struct ext4_buddy e4b; 3969 struct ext4_group_info *db; 3970 int err, count = 0; 3971 3972 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3973 entry->efd_count, entry->efd_group, entry); 3974 3975 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3976 /* we expect to find existing buddy because it's pinned */ 3977 BUG_ON(err != 0); 3978 3979 atomic_sub(entry->efd_count, &EXT4_SB(sb)->s_mb_free_pending); 3980 db = e4b.bd_info; 3981 /* there are blocks to put in buddy to make them really free */ 3982 count += entry->efd_count; 3983 ext4_lock_group(sb, entry->efd_group); 3984 /* Take it out of per group rb tree */ 3985 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3986 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3987 3988 /* 3989 * Clear the trimmed flag for the group so that the next 3990 * ext4_trim_fs can trim it. 3991 */ 3992 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3993 3994 if (!db->bb_free_root.rb_node) { 3995 /* No more items in the per group rb tree 3996 * balance refcounts from ext4_mb_free_metadata() 3997 */ 3998 folio_put(e4b.bd_buddy_folio); 3999 folio_put(e4b.bd_bitmap_folio); 4000 } 4001 ext4_unlock_group(sb, entry->efd_group); 4002 ext4_mb_unload_buddy(&e4b); 4003 4004 mb_debug(sb, "freed %d blocks in 1 structures\n", count); 4005 } 4006 4007 /* 4008 * This function is called by the jbd2 layer once the commit has finished, 4009 * so we know we can free the blocks that were released with that commit. 
4010 */ 4011 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 4012 { 4013 struct ext4_sb_info *sbi = EXT4_SB(sb); 4014 struct ext4_free_data *entry, *tmp; 4015 LIST_HEAD(freed_data_list); 4016 struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1]; 4017 bool wake; 4018 4019 list_replace_init(s_freed_head, &freed_data_list); 4020 4021 list_for_each_entry(entry, &freed_data_list, efd_list) 4022 ext4_free_data_in_buddy(sb, entry); 4023 4024 if (test_opt(sb, DISCARD)) { 4025 spin_lock(&sbi->s_md_lock); 4026 wake = list_empty(&sbi->s_discard_list); 4027 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 4028 spin_unlock(&sbi->s_md_lock); 4029 if (wake) 4030 queue_work(system_dfl_wq, &sbi->s_discard_work); 4031 } else { 4032 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 4033 kmem_cache_free(ext4_free_data_cachep, entry); 4034 } 4035 } 4036 4037 int __init ext4_init_mballoc(void) 4038 { 4039 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 4040 SLAB_RECLAIM_ACCOUNT); 4041 if (ext4_pspace_cachep == NULL) 4042 goto out; 4043 4044 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 4045 SLAB_RECLAIM_ACCOUNT); 4046 if (ext4_ac_cachep == NULL) 4047 goto out_pa_free; 4048 4049 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 4050 SLAB_RECLAIM_ACCOUNT); 4051 if (ext4_free_data_cachep == NULL) 4052 goto out_ac_free; 4053 4054 return 0; 4055 4056 out_ac_free: 4057 kmem_cache_destroy(ext4_ac_cachep); 4058 out_pa_free: 4059 kmem_cache_destroy(ext4_pspace_cachep); 4060 out: 4061 return -ENOMEM; 4062 } 4063 4064 void ext4_exit_mballoc(void) 4065 { 4066 /* 4067 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 4068 * before destroying the slab cache. 4069 */ 4070 rcu_barrier(); 4071 kmem_cache_destroy(ext4_pspace_cachep); 4072 kmem_cache_destroy(ext4_ac_cachep); 4073 kmem_cache_destroy(ext4_free_data_cachep); 4074 ext4_groupinfo_destroy_slabs(); 4075 } 4076 4077 #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001 4078 #define EXT4_MB_SYNC_UPDATE 0x0002 4079 static int 4080 ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state, 4081 ext4_group_t group, ext4_grpblk_t blkoff, 4082 ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed) 4083 { 4084 struct ext4_sb_info *sbi = EXT4_SB(sb); 4085 struct buffer_head *bitmap_bh = NULL; 4086 struct ext4_group_desc *gdp; 4087 struct buffer_head *gdp_bh; 4088 int err; 4089 unsigned int i, already, changed = len; 4090 4091 KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context, 4092 handle, sb, state, group, blkoff, len, 4093 flags, ret_changed); 4094 4095 if (ret_changed) 4096 *ret_changed = 0; 4097 bitmap_bh = ext4_read_block_bitmap(sb, group); 4098 if (IS_ERR(bitmap_bh)) 4099 return PTR_ERR(bitmap_bh); 4100 4101 if (handle) { 4102 BUFFER_TRACE(bitmap_bh, "getting write access"); 4103 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 4104 EXT4_JTR_NONE); 4105 if (err) 4106 goto out_err; 4107 } 4108 4109 err = -EIO; 4110 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 4111 if (!gdp) 4112 goto out_err; 4113 4114 if (handle) { 4115 BUFFER_TRACE(gdp_bh, "get_write_access"); 4116 err = ext4_journal_get_write_access(handle, sb, gdp_bh, 4117 EXT4_JTR_NONE); 4118 if (err) 4119 goto out_err; 4120 } 4121 4122 ext4_lock_group(sb, group); 4123 if (ext4_has_group_desc_csum(sb) && 4124 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4125 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4126 ext4_free_group_clusters_set(sb, gdp, 4127 ext4_free_clusters_after_init(sb, group, gdp)); 
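/*
 * Editor's aside: an illustrative, userspace-only sketch (guarded with #if 0
 * so it has no effect on this file) of the "changed" accounting performed a
 * few lines below when EXT4_MB_BITMAP_MARKED_CHECK is set: count how many
 * bits are already in the requested state and only charge the difference
 * against the group's free cluster count. The toy_* helpers are hypothetical
 * stand-ins for mb_test_bit()/mb_set_bits(); nothing here is kernel code.
 */
#if 0
#include <stdio.h>
#include <string.h>

static int toy_test_bit(const unsigned char *map, unsigned int bit)
{
	return (map[bit >> 3] >> (bit & 7)) & 1;
}

static void toy_set_bits(unsigned char *map, unsigned int start, unsigned int len)
{
	for (unsigned int i = 0; i < len; i++)
		map[(start + i) >> 3] |= 1u << ((start + i) & 7);
}

int main(void)
{
	unsigned char bitmap[8];
	unsigned int blkoff = 10, len = 6, already = 0, changed;

	memset(bitmap, 0, sizeof(bitmap));
	toy_set_bits(bitmap, 12, 2);		/* two clusters already allocated */

	for (unsigned int i = 0; i < len; i++)	/* the MARKED_CHECK scan */
		already += toy_test_bit(bitmap, blkoff + i);
	changed = len - already;		/* only these move the free count */

	toy_set_bits(bitmap, blkoff, len);
	printf("requested %u, already set %u, delta %u\n", len, already, changed);
	return 0;	/* prints: requested 6, already set 2, delta 4 */
}
#endif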
4128 } 4129 4130 if (flags & EXT4_MB_BITMAP_MARKED_CHECK) { 4131 already = 0; 4132 for (i = 0; i < len; i++) 4133 if (mb_test_bit(blkoff + i, bitmap_bh->b_data) == 4134 state) 4135 already++; 4136 changed = len - already; 4137 } 4138 4139 if (state) { 4140 mb_set_bits(bitmap_bh->b_data, blkoff, len); 4141 ext4_free_group_clusters_set(sb, gdp, 4142 ext4_free_group_clusters(sb, gdp) - changed); 4143 } else { 4144 mb_clear_bits(bitmap_bh->b_data, blkoff, len); 4145 ext4_free_group_clusters_set(sb, gdp, 4146 ext4_free_group_clusters(sb, gdp) + changed); 4147 } 4148 4149 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4150 ext4_group_desc_csum_set(sb, group, gdp); 4151 ext4_unlock_group(sb, group); 4152 if (ret_changed) 4153 *ret_changed = changed; 4154 4155 if (sbi->s_log_groups_per_flex) { 4156 ext4_group_t flex_group = ext4_flex_group(sbi, group); 4157 struct flex_groups *fg = sbi_array_rcu_deref(sbi, 4158 s_flex_groups, flex_group); 4159 4160 if (state) 4161 atomic64_sub(changed, &fg->free_clusters); 4162 else 4163 atomic64_add(changed, &fg->free_clusters); 4164 } 4165 4166 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4167 if (err) 4168 goto out_err; 4169 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 4170 if (err) 4171 goto out_err; 4172 4173 if (flags & EXT4_MB_SYNC_UPDATE) { 4174 sync_dirty_buffer(bitmap_bh); 4175 sync_dirty_buffer(gdp_bh); 4176 } 4177 4178 out_err: 4179 brelse(bitmap_bh); 4180 return err; 4181 } 4182 4183 /* 4184 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 4185 * Returns 0 if success or error code 4186 */ 4187 static noinline_for_stack int 4188 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 4189 handle_t *handle, unsigned int reserv_clstrs) 4190 { 4191 struct ext4_group_desc *gdp; 4192 struct ext4_sb_info *sbi; 4193 struct super_block *sb; 4194 ext4_fsblk_t block; 4195 int err, len; 4196 int flags = 0; 4197 ext4_grpblk_t changed; 4198 4199 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4200 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4201 4202 sb = ac->ac_sb; 4203 sbi = EXT4_SB(sb); 4204 4205 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL); 4206 if (!gdp) 4207 return -EIO; 4208 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 4209 ext4_free_group_clusters(sb, gdp)); 4210 4211 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4212 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4213 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 4214 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 4215 "fs metadata", block, block+len); 4216 /* File system mounted not to panic on error 4217 * Fix the bitmap and return EFSCORRUPTED 4218 * We leak some of the blocks here. 4219 */ 4220 err = ext4_mb_mark_context(handle, sb, true, 4221 ac->ac_b_ex.fe_group, 4222 ac->ac_b_ex.fe_start, 4223 ac->ac_b_ex.fe_len, 4224 0, NULL); 4225 if (!err) 4226 err = -EFSCORRUPTED; 4227 return err; 4228 } 4229 4230 #ifdef AGGRESSIVE_CHECK 4231 flags |= EXT4_MB_BITMAP_MARKED_CHECK; 4232 #endif 4233 err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group, 4234 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, 4235 flags, &changed); 4236 4237 if (err && changed == 0) 4238 return err; 4239 4240 #ifdef AGGRESSIVE_CHECK 4241 BUG_ON(changed != ac->ac_b_ex.fe_len); 4242 #endif 4243 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 4244 /* 4245 * Now reduce the dirty block count also. 
Should not go negative 4246 */ 4247 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 4248 /* release all the reserved blocks if non delalloc */ 4249 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4250 reserv_clstrs); 4251 4252 return err; 4253 } 4254 4255 /* 4256 * Idempotent helper for Ext4 fast commit replay path to set the state of 4257 * blocks in bitmaps and update counters. 4258 */ 4259 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 4260 int len, bool state) 4261 { 4262 struct ext4_sb_info *sbi = EXT4_SB(sb); 4263 ext4_group_t group; 4264 ext4_grpblk_t blkoff; 4265 int err = 0; 4266 unsigned int clen, thisgrp_len; 4267 4268 while (len > 0) { 4269 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 4270 4271 /* 4272 * Check to see if we are freeing blocks across a group 4273 * boundary. 4274 * In case of flex_bg, it can happen that (block, len) may 4275 * span across more than one group. In that case we need to 4276 * get the corresponding group metadata to work with. 4277 * For this we loop below, one group at a time. 4278 */ 4279 thisgrp_len = min_t(unsigned int, (unsigned int)len, 4280 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 4281 clen = EXT4_NUM_B2C(sbi, thisgrp_len); 4282 4283 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 4284 ext4_error(sb, "Marking blocks in system zone - " 4285 "Block = %llu, len = %u", 4286 block, thisgrp_len); 4287 break; 4288 } 4289 4290 err = ext4_mb_mark_context(NULL, sb, state, 4291 group, blkoff, clen, 4292 EXT4_MB_BITMAP_MARKED_CHECK | 4293 EXT4_MB_SYNC_UPDATE, 4294 NULL); 4295 if (err) 4296 break; 4297 4298 block += thisgrp_len; 4299 len -= thisgrp_len; 4300 BUG_ON(len < 0); 4301 } 4302 } 4303 4304 /* 4305 * here we normalize the request for a locality group 4306 * Group requests are normalized to s_mb_group_prealloc, which goes to 4307 * s_stripe if we set the same via mount option. 4308 * s_mb_group_prealloc can be configured via 4309 * /sys/fs/ext4/<partition>/mb_group_prealloc 4310 * 4311 * XXX: should we try to preallocate more than the group has now? 4312 */ 4313 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 4314 { 4315 struct super_block *sb = ac->ac_sb; 4316 struct ext4_locality_group *lg = ac->ac_lg; 4317 4318 BUG_ON(lg == NULL); 4319 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 4320 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 4321 } 4322 4323 /* 4324 * This function returns the next element to look at during inode 4325 * PA rbtree walk.
We assume that we have held the inode PA rbtree lock 4326 * (ei->i_prealloc_lock) 4327 * 4328 * new_start The start of the range we want to compare 4329 * cur_start The existing start that we are comparing against 4330 * node The node of the rb_tree 4331 */ 4332 static inline struct rb_node* 4333 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node) 4334 { 4335 if (new_start < cur_start) 4336 return node->rb_left; 4337 else 4338 return node->rb_right; 4339 } 4340 4341 static inline void 4342 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, 4343 ext4_lblk_t start, loff_t end) 4344 { 4345 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4346 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4347 struct ext4_prealloc_space *tmp_pa; 4348 ext4_lblk_t tmp_pa_start; 4349 loff_t tmp_pa_end; 4350 struct rb_node *iter; 4351 4352 read_lock(&ei->i_prealloc_lock); 4353 for (iter = ei->i_prealloc_node.rb_node; iter; 4354 iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) { 4355 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4356 pa_node.inode_node); 4357 tmp_pa_start = tmp_pa->pa_lstart; 4358 tmp_pa_end = pa_logical_end(sbi, tmp_pa); 4359 4360 spin_lock(&tmp_pa->pa_lock); 4361 if (tmp_pa->pa_deleted == 0) 4362 BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start)); 4363 spin_unlock(&tmp_pa->pa_lock); 4364 } 4365 read_unlock(&ei->i_prealloc_lock); 4366 } 4367 4368 /* 4369 * Given an allocation context "ac" and a range "start", "end", check 4370 * and adjust boundaries if the range overlaps with any of the existing 4371 * preallocations stored in the corresponding inode of the allocation context. 4372 * 4373 * Parameters: 4374 * ac allocation context 4375 * start start of the new range 4376 * end end of the new range 4377 */ 4378 static inline void 4379 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, 4380 ext4_lblk_t *start, loff_t *end) 4381 { 4382 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4383 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4384 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL; 4385 struct rb_node *iter; 4386 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1; 4387 loff_t new_end, tmp_pa_end, left_pa_end = -1; 4388 4389 new_start = *start; 4390 new_end = *end; 4391 4392 /* 4393 * Adjust the normalized range so that it doesn't overlap with any 4394 * existing preallocated blocks (PAs). Make sure to hold the rbtree lock 4395 * so it doesn't change underneath us.
4396 */ 4397 read_lock(&ei->i_prealloc_lock); 4398 4399 /* Step 1: find any one immediate neighboring PA of the normalized range */ 4400 for (iter = ei->i_prealloc_node.rb_node; iter; 4401 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4402 tmp_pa_start, iter)) { 4403 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4404 pa_node.inode_node); 4405 tmp_pa_start = tmp_pa->pa_lstart; 4406 tmp_pa_end = pa_logical_end(sbi, tmp_pa); 4407 4408 /* PA must not overlap original request */ 4409 spin_lock(&tmp_pa->pa_lock); 4410 if (tmp_pa->pa_deleted == 0) 4411 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 4412 ac->ac_o_ex.fe_logical < tmp_pa_start)); 4413 spin_unlock(&tmp_pa->pa_lock); 4414 } 4415 4416 /* 4417 * Step 2: check if the found PA is left or right neighbor and 4418 * get the other neighbor 4419 */ 4420 if (tmp_pa) { 4421 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 4422 struct rb_node *tmp; 4423 4424 left_pa = tmp_pa; 4425 tmp = rb_next(&left_pa->pa_node.inode_node); 4426 if (tmp) { 4427 right_pa = rb_entry(tmp, 4428 struct ext4_prealloc_space, 4429 pa_node.inode_node); 4430 } 4431 } else { 4432 struct rb_node *tmp; 4433 4434 right_pa = tmp_pa; 4435 tmp = rb_prev(&right_pa->pa_node.inode_node); 4436 if (tmp) { 4437 left_pa = rb_entry(tmp, 4438 struct ext4_prealloc_space, 4439 pa_node.inode_node); 4440 } 4441 } 4442 } 4443 4444 /* Step 3: get the non deleted neighbors */ 4445 if (left_pa) { 4446 for (iter = &left_pa->pa_node.inode_node;; 4447 iter = rb_prev(iter)) { 4448 if (!iter) { 4449 left_pa = NULL; 4450 break; 4451 } 4452 4453 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4454 pa_node.inode_node); 4455 left_pa = tmp_pa; 4456 spin_lock(&tmp_pa->pa_lock); 4457 if (tmp_pa->pa_deleted == 0) { 4458 spin_unlock(&tmp_pa->pa_lock); 4459 break; 4460 } 4461 spin_unlock(&tmp_pa->pa_lock); 4462 } 4463 } 4464 4465 if (right_pa) { 4466 for (iter = &right_pa->pa_node.inode_node;; 4467 iter = rb_next(iter)) { 4468 if (!iter) { 4469 right_pa = NULL; 4470 break; 4471 } 4472 4473 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4474 pa_node.inode_node); 4475 right_pa = tmp_pa; 4476 spin_lock(&tmp_pa->pa_lock); 4477 if (tmp_pa->pa_deleted == 0) { 4478 spin_unlock(&tmp_pa->pa_lock); 4479 break; 4480 } 4481 spin_unlock(&tmp_pa->pa_lock); 4482 } 4483 } 4484 4485 if (left_pa) { 4486 left_pa_end = pa_logical_end(sbi, left_pa); 4487 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 4488 } 4489 4490 if (right_pa) { 4491 right_pa_start = right_pa->pa_lstart; 4492 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 4493 } 4494 4495 /* Step 4: trim our normalized range to not overlap with the neighbors */ 4496 if (left_pa) { 4497 if (left_pa_end > new_start) 4498 new_start = left_pa_end; 4499 } 4500 4501 if (right_pa) { 4502 if (right_pa_start < new_end) 4503 new_end = right_pa_start; 4504 } 4505 read_unlock(&ei->i_prealloc_lock); 4506 4507 /* XXX: extra loop to check we really don't overlap preallocations */ 4508 ext4_mb_pa_assert_overlap(ac, new_start, new_end); 4509 4510 *start = new_start; 4511 *end = new_end; 4512 } 4513 4514 /* 4515 * Normalization means making request better in terms of 4516 * size and alignment 4517 */ 4518 static noinline_for_stack void 4519 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4520 struct ext4_allocation_request *ar) 4521 { 4522 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4523 struct ext4_super_block *es = sbi->s_es; 4524 int bsbits, max; 4525 loff_t size, start_off, end; 4526 loff_t orig_size __maybe_unused; 4527 ext4_lblk_t start; 4528 
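/*
 * Editor's aside: a minimal userspace sketch (guarded with #if 0, not part of
 * mballoc.c) of the file-size prediction table that this function applies a
 * little further down: the predicted size is rounded up to the next
 * preallocation bucket in the 16k..1M doubling series. The real code works in
 * filesystem blocks rather than bytes and additionally aligns start_off for
 * the 2M/4M/8M cases; predict_prealloc_size() is a hypothetical helper name.
 */
#if 0
#include <stdio.h>

static long long predict_prealloc_size(long long size)
{
	static const long long table[] = {
		16 * 1024, 32 * 1024, 64 * 1024, 128 * 1024,
		256 * 1024, 512 * 1024, 1024 * 1024,
	};

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (size <= table[i])
			return table[i];
	return size;	/* larger requests fall through to the 2M/4M/8M rules */
}

int main(void)
{
	printf("%lld\n", predict_prealloc_size(20 * 1024));	/* 32768 */
	printf("%lld\n", predict_prealloc_size(700 * 1024));	/* 1048576 */
	return 0;
}
#endif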
4529 /* do normalize only data requests, metadata requests 4530 do not need preallocation */ 4531 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4532 return; 4533 4534 /* sometime caller may want exact blocks */ 4535 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4536 return; 4537 4538 /* caller may indicate that preallocation isn't 4539 * required (it's a tail, for example) */ 4540 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4541 return; 4542 4543 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4544 ext4_mb_normalize_group_request(ac); 4545 return ; 4546 } 4547 4548 bsbits = ac->ac_sb->s_blocksize_bits; 4549 4550 /* first, let's learn actual file size 4551 * given current request is allocated */ 4552 size = extent_logical_end(sbi, &ac->ac_o_ex); 4553 size = size << bsbits; 4554 if (size < i_size_read(ac->ac_inode)) 4555 size = i_size_read(ac->ac_inode); 4556 orig_size = size; 4557 4558 /* max size of free chunks */ 4559 max = 2 << bsbits; 4560 4561 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 4562 (req <= (size) || max <= (chunk_size)) 4563 4564 /* first, try to predict filesize */ 4565 /* XXX: should this table be tunable? */ 4566 start_off = 0; 4567 if (size <= 16 * 1024) { 4568 size = 16 * 1024; 4569 } else if (size <= 32 * 1024) { 4570 size = 32 * 1024; 4571 } else if (size <= 64 * 1024) { 4572 size = 64 * 1024; 4573 } else if (size <= 128 * 1024) { 4574 size = 128 * 1024; 4575 } else if (size <= 256 * 1024) { 4576 size = 256 * 1024; 4577 } else if (size <= 512 * 1024) { 4578 size = 512 * 1024; 4579 } else if (size <= 1024 * 1024) { 4580 size = 1024 * 1024; 4581 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4582 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4583 (21 - bsbits)) << 21; 4584 size = 2 * 1024 * 1024; 4585 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4586 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4587 (22 - bsbits)) << 22; 4588 size = 4 * 1024 * 1024; 4589 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), 4590 (8<<20)>>bsbits, max, 8 * 1024)) { 4591 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4592 (23 - bsbits)) << 23; 4593 size = 8 * 1024 * 1024; 4594 } else { 4595 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4596 size = (loff_t) EXT4_C2B(sbi, 4597 ac->ac_o_ex.fe_len) << bsbits; 4598 } 4599 size = size >> bsbits; 4600 start = start_off >> bsbits; 4601 4602 /* 4603 * For tiny groups (smaller than 8MB) the chosen allocation 4604 * alignment may be larger than group size. Make sure the 4605 * alignment does not move allocation to a different group which 4606 * makes mballoc fail assertions later. 4607 */ 4608 start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4609 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4610 4611 /* avoid unnecessary preallocation that may trigger assertions */ 4612 if (start + size > EXT_MAX_BLOCKS) 4613 size = EXT_MAX_BLOCKS - start; 4614 4615 /* don't cover already allocated blocks in selected range */ 4616 if (ar->pleft && start <= ar->lleft) { 4617 size -= ar->lleft + 1 - start; 4618 start = ar->lleft + 1; 4619 } 4620 if (ar->pright && start + size - 1 >= ar->lright) 4621 size -= start + size - ar->lright; 4622 4623 /* 4624 * Trim allocation request for filesystems with artificially small 4625 * groups. 
4626 */ 4627 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4628 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4629 4630 end = start + size; 4631 4632 ext4_mb_pa_adjust_overlap(ac, &start, &end); 4633 4634 size = end - start; 4635 4636 /* 4637 * In this function "start" and "size" are normalized for better 4638 * alignment and length such that we could preallocate more blocks. 4639 * This normalization is done such that original request of 4640 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and 4641 * "size" boundaries. 4642 * (Note fe_len can be relaxed since FS block allocation API does not 4643 * provide gurantee on number of contiguous blocks allocation since that 4644 * depends upon free space left, etc). 4645 * In case of inode pa, later we use the allocated blocks 4646 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated 4647 * range of goal/best blocks [start, size] to put it at the 4648 * ac_o_ex.fe_logical extent of this inode. 4649 * (See ext4_mb_use_inode_pa() for more details) 4650 */ 4651 if (start + size <= ac->ac_o_ex.fe_logical || 4652 start > ac->ac_o_ex.fe_logical) { 4653 ext4_msg(ac->ac_sb, KERN_ERR, 4654 "start %lu, size %lu, fe_logical %lu", 4655 (unsigned long) start, (unsigned long) size, 4656 (unsigned long) ac->ac_o_ex.fe_logical); 4657 BUG(); 4658 } 4659 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4660 4661 /* now prepare goal request */ 4662 4663 /* XXX: is it better to align blocks WRT to logical 4664 * placement or satisfy big request as is */ 4665 ac->ac_g_ex.fe_logical = start; 4666 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4667 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; 4668 4669 /* define goal start in order to merge */ 4670 if (ar->pright && (ar->lright == (start + size)) && 4671 ar->pright >= size && 4672 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { 4673 /* merge to the right */ 4674 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4675 &ac->ac_g_ex.fe_group, 4676 &ac->ac_g_ex.fe_start); 4677 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4678 } 4679 if (ar->pleft && (ar->lleft + 1 == start) && 4680 ar->pleft + 1 < ext4_blocks_count(es)) { 4681 /* merge to the left */ 4682 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4683 &ac->ac_g_ex.fe_group, 4684 &ac->ac_g_ex.fe_start); 4685 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4686 } 4687 4688 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 4689 orig_size, start); 4690 } 4691 4692 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4693 { 4694 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4695 4696 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4697 atomic_inc(&sbi->s_bal_reqs); 4698 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4699 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4700 atomic_inc(&sbi->s_bal_success); 4701 4702 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4703 for (int i=0; i<EXT4_MB_NUM_CRS; i++) { 4704 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); 4705 } 4706 4707 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4708 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4709 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4710 atomic_inc(&sbi->s_bal_goals); 4711 /* did we allocate as much as normalizer originally wanted? 
*/ 4712 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) 4713 atomic_inc(&sbi->s_bal_len_goals); 4714 4715 if (ac->ac_found > sbi->s_mb_max_to_scan) 4716 atomic_inc(&sbi->s_bal_breaks); 4717 } 4718 4719 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4720 trace_ext4_mballoc_alloc(ac); 4721 else 4722 trace_ext4_mballoc_prealloc(ac); 4723 } 4724 4725 /* 4726 * Called on failure; free up any blocks from the inode PA for this 4727 * context. We don't need this for MB_GROUP_PA because we only change 4728 * pa_free in ext4_mb_release_context(), but on failure, we've already 4729 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4730 */ 4731 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4732 { 4733 struct ext4_prealloc_space *pa = ac->ac_pa; 4734 struct ext4_buddy e4b; 4735 int err; 4736 4737 if (pa == NULL) { 4738 if (ac->ac_f_ex.fe_len == 0) 4739 return; 4740 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4741 if (WARN_RATELIMIT(err, 4742 "ext4: mb_load_buddy failed (%d)", err)) 4743 /* 4744 * This should never happen since we pin the 4745 * folios in the ext4_allocation_context so 4746 * ext4_mb_load_buddy() should never fail. 4747 */ 4748 return; 4749 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4750 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4751 ac->ac_f_ex.fe_len); 4752 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4753 ext4_mb_unload_buddy(&e4b); 4754 return; 4755 } 4756 if (pa->pa_type == MB_INODE_PA) { 4757 spin_lock(&pa->pa_lock); 4758 pa->pa_free += ac->ac_b_ex.fe_len; 4759 spin_unlock(&pa->pa_lock); 4760 } 4761 } 4762 4763 /* 4764 * use blocks preallocated to inode 4765 */ 4766 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4767 struct ext4_prealloc_space *pa) 4768 { 4769 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4770 ext4_fsblk_t start; 4771 ext4_fsblk_t end; 4772 int len; 4773 4774 /* found preallocated blocks, use them */ 4775 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4776 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4777 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4778 len = EXT4_NUM_B2C(sbi, end - start); 4779 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4780 &ac->ac_b_ex.fe_start); 4781 ac->ac_b_ex.fe_len = len; 4782 ac->ac_status = AC_STATUS_FOUND; 4783 ac->ac_pa = pa; 4784 4785 BUG_ON(start < pa->pa_pstart); 4786 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4787 BUG_ON(pa->pa_free < len); 4788 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4789 pa->pa_free -= len; 4790 4791 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4792 } 4793 4794 /* 4795 * use blocks preallocated to locality group 4796 */ 4797 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4798 struct ext4_prealloc_space *pa) 4799 { 4800 unsigned int len = ac->ac_o_ex.fe_len; 4801 4802 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4803 &ac->ac_b_ex.fe_group, 4804 &ac->ac_b_ex.fe_start); 4805 ac->ac_b_ex.fe_len = len; 4806 ac->ac_status = AC_STATUS_FOUND; 4807 ac->ac_pa = pa; 4808 4809 /* we don't correct pa_pstart or pa_len here to avoid 4810 * possible race when the group is being loaded concurrently 4811 * instead we correct pa later, after blocks are marked 4812 * in on-disk bitmap -- see ext4_mb_release_context() 4813 * Other CPUs are prevented from allocating from this pa by lg_mutex 4814 */ 4815 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 4816 pa->pa_lstart, len, pa); 4817 } 4818 4819 
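/*
 * Editor's aside: a self-contained sketch (guarded with #if 0, not kernel
 * code) of the offset arithmetic that ext4_mb_use_inode_pa() above performs:
 * the physical start is the PA's physical start plus the logical offset into
 * the PA, and the length is clipped to whatever of the PA remains. The sketch
 * assumes a cluster ratio of 1 (no bigalloc) and all values are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long pa_pstart = 100000, pa_len = 64, pa_lstart = 2048;
	unsigned long long logical = 2060, want = 16;

	unsigned long long start = pa_pstart + (logical - pa_lstart);
	unsigned long long pa_end = pa_pstart + pa_len;
	unsigned long long end = (start + want < pa_end) ? start + want : pa_end;

	printf("physical start %llu, len %llu\n", start, end - start);
	return 0;	/* prints: physical start 100012, len 16 */
}
#endif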
/* 4820 * Return the prealloc space that have minimal distance 4821 * from the goal block. @cpa is the prealloc 4822 * space that is having currently known minimal distance 4823 * from the goal block. 4824 */ 4825 static struct ext4_prealloc_space * 4826 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 4827 struct ext4_prealloc_space *pa, 4828 struct ext4_prealloc_space *cpa) 4829 { 4830 ext4_fsblk_t cur_distance, new_distance; 4831 4832 if (cpa == NULL) { 4833 atomic_inc(&pa->pa_count); 4834 return pa; 4835 } 4836 cur_distance = abs(goal_block - cpa->pa_pstart); 4837 new_distance = abs(goal_block - pa->pa_pstart); 4838 4839 if (cur_distance <= new_distance) 4840 return cpa; 4841 4842 /* drop the previous reference */ 4843 atomic_dec(&cpa->pa_count); 4844 atomic_inc(&pa->pa_count); 4845 return pa; 4846 } 4847 4848 /* 4849 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY 4850 */ 4851 static bool 4852 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac, 4853 struct ext4_prealloc_space *pa) 4854 { 4855 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4856 ext4_fsblk_t start; 4857 4858 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))) 4859 return true; 4860 4861 /* 4862 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted 4863 * in ext4_mb_normalize_request and will keep same with ac_o_ex 4864 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep 4865 * consistent with ext4_mb_find_by_goal. 4866 */ 4867 start = pa->pa_pstart + 4868 (ac->ac_g_ex.fe_logical - pa->pa_lstart); 4869 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) 4870 return false; 4871 4872 if (ac->ac_g_ex.fe_len > pa->pa_len - 4873 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) 4874 return false; 4875 4876 return true; 4877 } 4878 4879 /* 4880 * search goal blocks in preallocated space 4881 */ 4882 static noinline_for_stack bool 4883 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4884 { 4885 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4886 int order, i; 4887 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4888 struct ext4_locality_group *lg; 4889 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL; 4890 struct rb_node *iter; 4891 ext4_fsblk_t goal_block; 4892 4893 /* only data can be preallocated */ 4894 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4895 return false; 4896 4897 /* 4898 * first, try per-file preallocation by searching the inode pa rbtree. 4899 * 4900 * Here, we can't do a direct traversal of the tree because 4901 * ext4_mb_discard_group_preallocation() can paralelly mark the pa 4902 * deleted and that can cause direct traversal to skip some entries. 4903 */ 4904 read_lock(&ei->i_prealloc_lock); 4905 4906 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) { 4907 goto try_group_pa; 4908 } 4909 4910 /* 4911 * Step 1: Find a pa with logical start immediately adjacent to the 4912 * original logical start. This could be on the left or right. 4913 * 4914 * (tmp_pa->pa_lstart never changes so we can skip locking for it). 4915 */ 4916 for (iter = ei->i_prealloc_node.rb_node; iter; 4917 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4918 tmp_pa->pa_lstart, iter)) { 4919 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4920 pa_node.inode_node); 4921 } 4922 4923 /* 4924 * Step 2: The adjacent pa might be to the right of logical start, find 4925 * the left adjacent pa. 
After this step we'd have a valid tmp_pa whose 4926 * logical start is towards the left of original request's logical start 4927 */ 4928 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) { 4929 struct rb_node *tmp; 4930 tmp = rb_prev(&tmp_pa->pa_node.inode_node); 4931 4932 if (tmp) { 4933 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space, 4934 pa_node.inode_node); 4935 } else { 4936 /* 4937 * If there is no adjacent pa to the left then finding 4938 * an overlapping pa is not possible hence stop searching 4939 * inode pa tree 4940 */ 4941 goto try_group_pa; 4942 } 4943 } 4944 4945 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); 4946 4947 /* 4948 * Step 3: If the left adjacent pa is deleted, keep moving left to find 4949 * the first non deleted adjacent pa. After this step we should have a 4950 * valid tmp_pa which is guaranteed to be non deleted. 4951 */ 4952 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) { 4953 if (!iter) { 4954 /* 4955 * no non deleted left adjacent pa, so stop searching 4956 * inode pa tree 4957 */ 4958 goto try_group_pa; 4959 } 4960 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4961 pa_node.inode_node); 4962 spin_lock(&tmp_pa->pa_lock); 4963 if (tmp_pa->pa_deleted == 0) { 4964 /* 4965 * We will keep holding the pa_lock from 4966 * this point on because we don't want group discard 4967 * to delete this pa underneath us. Since group 4968 * discard is anyways an ENOSPC operation it 4969 * should be okay for it to wait a few more cycles. 4970 */ 4971 break; 4972 } else { 4973 spin_unlock(&tmp_pa->pa_lock); 4974 } 4975 } 4976 4977 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); 4978 BUG_ON(tmp_pa->pa_deleted == 1); 4979 4980 /* 4981 * Step 4: We now have the non deleted left adjacent pa. Only this 4982 * pa can possibly satisfy the request hence check if it overlaps 4983 * original logical start and stop searching if it doesn't. 4984 */ 4985 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) { 4986 spin_unlock(&tmp_pa->pa_lock); 4987 goto try_group_pa; 4988 } 4989 4990 /* non-extent files can't have physical blocks past 2^32 */ 4991 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4992 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4993 EXT4_MAX_BLOCK_FILE_PHYS)) { 4994 /* 4995 * Since PAs don't overlap, we won't find any other PA to 4996 * satisfy this. 4997 */ 4998 spin_unlock(&tmp_pa->pa_lock); 4999 goto try_group_pa; 5000 } 5001 5002 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { 5003 atomic_inc(&tmp_pa->pa_count); 5004 ext4_mb_use_inode_pa(ac, tmp_pa); 5005 spin_unlock(&tmp_pa->pa_lock); 5006 read_unlock(&ei->i_prealloc_lock); 5007 return true; 5008 } else { 5009 /* 5010 * We found a valid overlapping pa but couldn't use it because 5011 * it had no free blocks. This should ideally never happen 5012 * because: 5013 * 5014 * 1. When a new inode pa is added to rbtree it must have 5015 * pa_free > 0 since otherwise we won't actually need 5016 * preallocation. 5017 * 5018 * 2. An inode pa that is in the rbtree can only have it's 5019 * pa_free become zero when another thread calls: 5020 * ext4_mb_new_blocks 5021 * ext4_mb_use_preallocated 5022 * ext4_mb_use_inode_pa 5023 * 5024 * 3. Further, after the above calls make pa_free == 0, we will 5025 * immediately remove it from the rbtree in: 5026 * ext4_mb_new_blocks 5027 * ext4_mb_release_context 5028 * ext4_mb_put_pa 5029 * 5030 * 4. 
Since the pa_free becoming 0 and pa_free getting removed 5031 * from tree both happen in ext4_mb_new_blocks, which is always 5032 * called with i_data_sem held for data allocations, we can be 5033 * sure that another process will never see a pa in rbtree with 5034 * pa_free == 0. 5035 */ 5036 WARN_ON_ONCE(tmp_pa->pa_free == 0); 5037 } 5038 spin_unlock(&tmp_pa->pa_lock); 5039 try_group_pa: 5040 read_unlock(&ei->i_prealloc_lock); 5041 5042 /* can we use group allocation? */ 5043 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 5044 return false; 5045 5046 /* inode may have no locality group for some reason */ 5047 lg = ac->ac_lg; 5048 if (lg == NULL) 5049 return false; 5050 order = fls(ac->ac_o_ex.fe_len) - 1; 5051 if (order > PREALLOC_TB_SIZE - 1) 5052 /* The max size of hash table is PREALLOC_TB_SIZE */ 5053 order = PREALLOC_TB_SIZE - 1; 5054 5055 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 5056 /* 5057 * search for the prealloc space that is having 5058 * minimal distance from the goal block. 5059 */ 5060 for (i = order; i < PREALLOC_TB_SIZE; i++) { 5061 rcu_read_lock(); 5062 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], 5063 pa_node.lg_list) { 5064 spin_lock(&tmp_pa->pa_lock); 5065 if (tmp_pa->pa_deleted == 0 && 5066 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { 5067 5068 cpa = ext4_mb_check_group_pa(goal_block, 5069 tmp_pa, cpa); 5070 } 5071 spin_unlock(&tmp_pa->pa_lock); 5072 } 5073 rcu_read_unlock(); 5074 } 5075 if (cpa) { 5076 ext4_mb_use_group_pa(ac, cpa); 5077 return true; 5078 } 5079 return false; 5080 } 5081 5082 /* 5083 * the function goes through all preallocation in this group and marks them 5084 * used in in-core bitmap. buddy must be generated from this bitmap 5085 * Need to be called with ext4 group lock held 5086 */ 5087 static noinline_for_stack 5088 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 5089 ext4_group_t group) 5090 { 5091 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5092 struct ext4_prealloc_space *pa; 5093 struct list_head *cur; 5094 ext4_group_t groupnr; 5095 ext4_grpblk_t start; 5096 int preallocated = 0; 5097 int len; 5098 5099 if (!grp) 5100 return; 5101 5102 /* all form of preallocation discards first load group, 5103 * so the only competing code is preallocation use. 
5104 * we don't need any locking here 5105 * notice we do NOT ignore preallocations with pa_deleted 5106 * otherwise we could leave used blocks available for 5107 * allocation in buddy when concurrent ext4_mb_put_pa() 5108 * is dropping preallocation 5109 */ 5110 list_for_each(cur, &grp->bb_prealloc_list) { 5111 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 5112 spin_lock(&pa->pa_lock); 5113 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5114 &groupnr, &start); 5115 len = pa->pa_len; 5116 spin_unlock(&pa->pa_lock); 5117 if (unlikely(len == 0)) 5118 continue; 5119 BUG_ON(groupnr != group); 5120 mb_set_bits(bitmap, start, len); 5121 preallocated += len; 5122 } 5123 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 5124 } 5125 5126 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 5127 struct ext4_prealloc_space *pa) 5128 { 5129 struct ext4_inode_info *ei; 5130 5131 if (pa->pa_deleted) { 5132 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 5133 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 5134 pa->pa_len); 5135 return; 5136 } 5137 5138 pa->pa_deleted = 1; 5139 5140 if (pa->pa_type == MB_INODE_PA) { 5141 ei = EXT4_I(pa->pa_inode); 5142 atomic_dec(&ei->i_prealloc_active); 5143 } 5144 } 5145 5146 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 5147 { 5148 BUG_ON(!pa); 5149 BUG_ON(atomic_read(&pa->pa_count)); 5150 BUG_ON(pa->pa_deleted == 0); 5151 kmem_cache_free(ext4_pspace_cachep, pa); 5152 } 5153 5154 static void ext4_mb_pa_callback(struct rcu_head *head) 5155 { 5156 struct ext4_prealloc_space *pa; 5157 5158 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 5159 ext4_mb_pa_free(pa); 5160 } 5161 5162 /* 5163 * drops a reference to preallocated space descriptor 5164 * if this was the last reference and the space is consumed 5165 */ 5166 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 5167 struct super_block *sb, struct ext4_prealloc_space *pa) 5168 { 5169 ext4_group_t grp; 5170 ext4_fsblk_t grp_blk; 5171 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 5172 5173 /* in this short window concurrent discard can set pa_deleted */ 5174 spin_lock(&pa->pa_lock); 5175 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 5176 spin_unlock(&pa->pa_lock); 5177 return; 5178 } 5179 5180 if (pa->pa_deleted == 1) { 5181 spin_unlock(&pa->pa_lock); 5182 return; 5183 } 5184 5185 ext4_mb_mark_pa_deleted(sb, pa); 5186 spin_unlock(&pa->pa_lock); 5187 5188 grp_blk = pa->pa_pstart; 5189 /* 5190 * If doing group-based preallocation, pa_pstart may be in the 5191 * next group when pa is used up 5192 */ 5193 if (pa->pa_type == MB_GROUP_PA) 5194 grp_blk--; 5195 5196 grp = ext4_get_group_number(sb, grp_blk); 5197 5198 /* 5199 * possible race: 5200 * 5201 * P1 (buddy init) P2 (regular allocation) 5202 * find block B in PA 5203 * copy on-disk bitmap to buddy 5204 * mark B in on-disk bitmap 5205 * drop PA from group 5206 * mark all PAs in buddy 5207 * 5208 * thus, P1 initializes buddy with B available. 
to prevent this 5209 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 5210 * against that pair 5211 */ 5212 ext4_lock_group(sb, grp); 5213 list_del(&pa->pa_group_list); 5214 ext4_unlock_group(sb, grp); 5215 5216 if (pa->pa_type == MB_INODE_PA) { 5217 write_lock(pa->pa_node_lock.inode_lock); 5218 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5219 write_unlock(pa->pa_node_lock.inode_lock); 5220 ext4_mb_pa_free(pa); 5221 } else { 5222 spin_lock(pa->pa_node_lock.lg_lock); 5223 list_del_rcu(&pa->pa_node.lg_list); 5224 spin_unlock(pa->pa_node_lock.lg_lock); 5225 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5226 } 5227 } 5228 5229 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new) 5230 { 5231 struct rb_node **iter = &root->rb_node, *parent = NULL; 5232 struct ext4_prealloc_space *iter_pa, *new_pa; 5233 ext4_lblk_t iter_start, new_start; 5234 5235 while (*iter) { 5236 iter_pa = rb_entry(*iter, struct ext4_prealloc_space, 5237 pa_node.inode_node); 5238 new_pa = rb_entry(new, struct ext4_prealloc_space, 5239 pa_node.inode_node); 5240 iter_start = iter_pa->pa_lstart; 5241 new_start = new_pa->pa_lstart; 5242 5243 parent = *iter; 5244 if (new_start < iter_start) 5245 iter = &((*iter)->rb_left); 5246 else 5247 iter = &((*iter)->rb_right); 5248 } 5249 5250 rb_link_node(new, parent, iter); 5251 rb_insert_color(new, root); 5252 } 5253 5254 /* 5255 * creates new preallocated space for given inode 5256 */ 5257 static noinline_for_stack void 5258 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 5259 { 5260 struct super_block *sb = ac->ac_sb; 5261 struct ext4_sb_info *sbi = EXT4_SB(sb); 5262 struct ext4_prealloc_space *pa; 5263 struct ext4_group_info *grp; 5264 struct ext4_inode_info *ei; 5265 5266 /* preallocate only when found space is larger then requested */ 5267 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5268 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5269 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5270 BUG_ON(ac->ac_pa == NULL); 5271 5272 pa = ac->ac_pa; 5273 5274 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { 5275 struct ext4_free_extent ex = { 5276 .fe_logical = ac->ac_g_ex.fe_logical, 5277 .fe_len = ac->ac_orig_goal_len, 5278 }; 5279 loff_t orig_goal_end = extent_logical_end(sbi, &ex); 5280 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); 5281 5282 /* 5283 * We can't allocate as much as normalizer wants, so we try 5284 * to get proper lstart to cover the original request, except 5285 * when the goal doesn't cover the original request as below: 5286 * 5287 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048 5288 * best_ex:0/200(200) -> adjusted: 1848/2048(200) 5289 */ 5290 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 5291 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 5292 5293 /* 5294 * Use the below logic for adjusting best extent as it keeps 5295 * fragmentation in check while ensuring logical range of best 5296 * extent doesn't overflow out of goal extent: 5297 * 5298 * 1. Check if best ex can be kept at end of goal (before 5299 * cr_best_avail trimmed it) and still cover original start 5300 * 2. Else, check if best ex can be kept at start of goal and 5301 * still cover original end 5302 * 3. Else, keep the best ex at start of original request. 
5303 */ 5304 ex.fe_len = ac->ac_b_ex.fe_len; 5305 5306 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len); 5307 if (ac->ac_o_ex.fe_logical >= ex.fe_logical) 5308 goto adjust_bex; 5309 5310 ex.fe_logical = ac->ac_g_ex.fe_logical; 5311 if (o_ex_end <= extent_logical_end(sbi, &ex)) 5312 goto adjust_bex; 5313 5314 ex.fe_logical = ac->ac_o_ex.fe_logical; 5315 adjust_bex: 5316 ac->ac_b_ex.fe_logical = ex.fe_logical; 5317 5318 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 5319 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end); 5320 } 5321 5322 pa->pa_lstart = ac->ac_b_ex.fe_logical; 5323 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5324 pa->pa_len = ac->ac_b_ex.fe_len; 5325 pa->pa_free = pa->pa_len; 5326 spin_lock_init(&pa->pa_lock); 5327 INIT_LIST_HEAD(&pa->pa_group_list); 5328 pa->pa_deleted = 0; 5329 pa->pa_type = MB_INODE_PA; 5330 5331 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5332 pa->pa_len, pa->pa_lstart); 5333 trace_ext4_mb_new_inode_pa(ac, pa); 5334 5335 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 5336 ext4_mb_use_inode_pa(ac, pa); 5337 5338 ei = EXT4_I(ac->ac_inode); 5339 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5340 if (!grp) 5341 return; 5342 5343 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; 5344 pa->pa_inode = ac->ac_inode; 5345 5346 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5347 5348 write_lock(pa->pa_node_lock.inode_lock); 5349 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); 5350 write_unlock(pa->pa_node_lock.inode_lock); 5351 atomic_inc(&ei->i_prealloc_active); 5352 } 5353 5354 /* 5355 * creates new preallocated space for locality group inodes belongs to 5356 */ 5357 static noinline_for_stack void 5358 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 5359 { 5360 struct super_block *sb = ac->ac_sb; 5361 struct ext4_locality_group *lg; 5362 struct ext4_prealloc_space *pa; 5363 struct ext4_group_info *grp; 5364 5365 /* preallocate only when found space is larger then requested */ 5366 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5367 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5368 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5369 BUG_ON(ac->ac_pa == NULL); 5370 5371 pa = ac->ac_pa; 5372 5373 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5374 pa->pa_lstart = pa->pa_pstart; 5375 pa->pa_len = ac->ac_b_ex.fe_len; 5376 pa->pa_free = pa->pa_len; 5377 spin_lock_init(&pa->pa_lock); 5378 INIT_LIST_HEAD(&pa->pa_node.lg_list); 5379 INIT_LIST_HEAD(&pa->pa_group_list); 5380 pa->pa_deleted = 0; 5381 pa->pa_type = MB_GROUP_PA; 5382 5383 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5384 pa->pa_len, pa->pa_lstart); 5385 trace_ext4_mb_new_group_pa(ac, pa); 5386 5387 ext4_mb_use_group_pa(ac, pa); 5388 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 5389 5390 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5391 if (!grp) 5392 return; 5393 lg = ac->ac_lg; 5394 BUG_ON(lg == NULL); 5395 5396 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; 5397 pa->pa_inode = NULL; 5398 5399 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5400 5401 /* 5402 * We will later add the new pa to the right bucket 5403 * after updating the pa_free in ext4_mb_release_context 5404 */ 5405 } 5406 5407 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 5408 { 5409 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5410 ext4_mb_new_group_pa(ac); 5411 else 5412 ext4_mb_new_inode_pa(ac); 5413 } 5414 5415 /* 5416 * finds all unused blocks in 
on-disk bitmap, frees them in 5417 * in-core bitmap and buddy. 5418 * @pa must be unlinked from inode and group lists, so that 5419 * nobody else can find/use it. 5420 * the caller MUST hold group/inode locks. 5421 * TODO: optimize the case when there are no in-core structures yet 5422 */ 5423 static noinline_for_stack void 5424 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 5425 struct ext4_prealloc_space *pa) 5426 { 5427 struct super_block *sb = e4b->bd_sb; 5428 struct ext4_sb_info *sbi = EXT4_SB(sb); 5429 unsigned int end; 5430 unsigned int next; 5431 ext4_group_t group; 5432 ext4_grpblk_t bit; 5433 unsigned long long grp_blk_start; 5434 int free = 0; 5435 5436 BUG_ON(pa->pa_deleted == 0); 5437 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5438 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5439 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5440 end = bit + pa->pa_len; 5441 5442 while (bit < end) { 5443 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5444 if (bit >= end) 5445 break; 5446 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5447 mb_debug(sb, "free preallocated %u/%u in group %u\n", 5448 (unsigned) ext4_group_first_block_no(sb, group) + bit, 5449 (unsigned) next - bit, (unsigned) group); 5450 free += next - bit; 5451 5452 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 5453 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 5454 EXT4_C2B(sbi, bit)), 5455 next - bit); 5456 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5457 bit = next + 1; 5458 } 5459 if (free != pa->pa_free) { 5460 ext4_msg(e4b->bd_sb, KERN_CRIT, 5461 "pa %p: logic %lu, phys. %lu, len %d", 5462 pa, (unsigned long) pa->pa_lstart, 5463 (unsigned long) pa->pa_pstart, 5464 pa->pa_len); 5465 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 5466 free, pa->pa_free); 5467 /* 5468 * pa is already deleted so we use the value obtained 5469 * from the bitmap and continue. 
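 *
 * Illustrative numbers (not from the original source): if pa_free
 * still claims 8 clusters but only 6 bits in [bit, end) are clear in
 * the on-disk bitmap, the messages above are logged and 6, the
 * bitmap-derived count, is what gets added to s_mb_discarded below.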
5470 */ 5471 } 5472 atomic_add(free, &sbi->s_mb_discarded); 5473 } 5474 5475 static noinline_for_stack void 5476 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 5477 struct ext4_prealloc_space *pa) 5478 { 5479 struct super_block *sb = e4b->bd_sb; 5480 ext4_group_t group; 5481 ext4_grpblk_t bit; 5482 5483 trace_ext4_mb_release_group_pa(sb, pa); 5484 BUG_ON(pa->pa_deleted == 0); 5485 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5486 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5487 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5488 e4b->bd_group, group, pa->pa_pstart); 5489 return; 5490 } 5491 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5492 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 5493 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5494 } 5495 5496 /* 5497 * releases all preallocations in given group 5498 * 5499 * first, we need to decide discard policy: 5500 * - when do we discard 5501 * 1) ENOSPC 5502 * - how many do we discard 5503 * 1) how many requested 5504 */ 5505 static noinline_for_stack int 5506 ext4_mb_discard_group_preallocations(struct super_block *sb, 5507 ext4_group_t group, int *busy) 5508 { 5509 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5510 struct buffer_head *bitmap_bh = NULL; 5511 struct ext4_prealloc_space *pa, *tmp; 5512 LIST_HEAD(list); 5513 struct ext4_buddy e4b; 5514 struct ext4_inode_info *ei; 5515 int err; 5516 int free = 0; 5517 5518 if (!grp) 5519 return 0; 5520 mb_debug(sb, "discard preallocation for group %u\n", group); 5521 if (list_empty(&grp->bb_prealloc_list)) 5522 goto out_dbg; 5523 5524 bitmap_bh = ext4_read_block_bitmap(sb, group); 5525 if (IS_ERR(bitmap_bh)) { 5526 err = PTR_ERR(bitmap_bh); 5527 ext4_error_err(sb, -err, 5528 "Error %d reading block bitmap for %u", 5529 err, group); 5530 goto out_dbg; 5531 } 5532 5533 err = ext4_mb_load_buddy(sb, group, &e4b); 5534 if (err) { 5535 ext4_warning(sb, "Error %d loading buddy information for %u", 5536 err, group); 5537 put_bh(bitmap_bh); 5538 goto out_dbg; 5539 } 5540 5541 ext4_lock_group(sb, group); 5542 list_for_each_entry_safe(pa, tmp, 5543 &grp->bb_prealloc_list, pa_group_list) { 5544 spin_lock(&pa->pa_lock); 5545 if (atomic_read(&pa->pa_count)) { 5546 spin_unlock(&pa->pa_lock); 5547 *busy = 1; 5548 continue; 5549 } 5550 if (pa->pa_deleted) { 5551 spin_unlock(&pa->pa_lock); 5552 continue; 5553 } 5554 5555 /* seems this one can be freed ... */ 5556 ext4_mb_mark_pa_deleted(sb, pa); 5557 5558 if (!free) 5559 this_cpu_inc(discard_pa_seq); 5560 5561 /* we can trust pa_free ... 
*/ 5562 free += pa->pa_free; 5563 5564 spin_unlock(&pa->pa_lock); 5565 5566 list_del(&pa->pa_group_list); 5567 list_add(&pa->u.pa_tmp_list, &list); 5568 } 5569 5570 /* now free all selected PAs */ 5571 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5572 5573 /* remove from object (inode or locality group) */ 5574 if (pa->pa_type == MB_GROUP_PA) { 5575 spin_lock(pa->pa_node_lock.lg_lock); 5576 list_del_rcu(&pa->pa_node.lg_list); 5577 spin_unlock(pa->pa_node_lock.lg_lock); 5578 } else { 5579 write_lock(pa->pa_node_lock.inode_lock); 5580 ei = EXT4_I(pa->pa_inode); 5581 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5582 write_unlock(pa->pa_node_lock.inode_lock); 5583 } 5584 5585 list_del(&pa->u.pa_tmp_list); 5586 5587 if (pa->pa_type == MB_GROUP_PA) { 5588 ext4_mb_release_group_pa(&e4b, pa); 5589 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5590 } else { 5591 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5592 ext4_mb_pa_free(pa); 5593 } 5594 } 5595 5596 ext4_unlock_group(sb, group); 5597 ext4_mb_unload_buddy(&e4b); 5598 put_bh(bitmap_bh); 5599 out_dbg: 5600 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 5601 free, group, grp->bb_free); 5602 return free; 5603 } 5604 5605 /* 5606 * releases all non-used preallocated blocks for given inode 5607 * 5608 * It's important to discard preallocations under i_data_sem 5609 * We don't want another block to be served from the prealloc 5610 * space when we are discarding the inode prealloc space. 5611 * 5612 * FIXME!! Make sure it is valid at all the call sites 5613 */ 5614 void ext4_discard_preallocations(struct inode *inode) 5615 { 5616 struct ext4_inode_info *ei = EXT4_I(inode); 5617 struct super_block *sb = inode->i_sb; 5618 struct buffer_head *bitmap_bh = NULL; 5619 struct ext4_prealloc_space *pa, *tmp; 5620 ext4_group_t group = 0; 5621 LIST_HEAD(list); 5622 struct ext4_buddy e4b; 5623 struct rb_node *iter; 5624 int err; 5625 5626 if (!S_ISREG(inode->i_mode)) 5627 return; 5628 5629 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 5630 return; 5631 5632 mb_debug(sb, "discard preallocation for inode %lu\n", 5633 inode->i_ino); 5634 trace_ext4_discard_preallocations(inode, 5635 atomic_read(&ei->i_prealloc_active)); 5636 5637 repeat: 5638 /* first, collect all pa's in the inode */ 5639 write_lock(&ei->i_prealloc_lock); 5640 for (iter = rb_first(&ei->i_prealloc_node); iter; 5641 iter = rb_next(iter)) { 5642 pa = rb_entry(iter, struct ext4_prealloc_space, 5643 pa_node.inode_node); 5644 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); 5645 5646 spin_lock(&pa->pa_lock); 5647 if (atomic_read(&pa->pa_count)) { 5648 /* this shouldn't happen often - nobody should 5649 * use preallocation while we're discarding it */ 5650 spin_unlock(&pa->pa_lock); 5651 write_unlock(&ei->i_prealloc_lock); 5652 ext4_msg(sb, KERN_ERR, 5653 "uh-oh! used pa while discarding"); 5654 WARN_ON(1); 5655 schedule_timeout_uninterruptible(HZ); 5656 goto repeat; 5657 5658 } 5659 if (pa->pa_deleted == 0) { 5660 ext4_mb_mark_pa_deleted(sb, pa); 5661 spin_unlock(&pa->pa_lock); 5662 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5663 list_add(&pa->u.pa_tmp_list, &list); 5664 continue; 5665 } 5666 5667 /* someone is deleting pa right now */ 5668 spin_unlock(&pa->pa_lock); 5669 write_unlock(&ei->i_prealloc_lock); 5670 5671 /* we have to wait here because pa_deleted 5672 * doesn't mean pa is already unlinked from 5673 * the list. 
as we might be called from 5674 * ->clear_inode() the inode will get freed 5675 * and concurrent thread which is unlinking 5676 * pa from inode's list may access already 5677 * freed memory, bad-bad-bad */ 5678 5679 /* XXX: if this happens too often, we can 5680 * add a flag to force wait only in case 5681 * of ->clear_inode(), but not in case of 5682 * regular truncate */ 5683 schedule_timeout_uninterruptible(HZ); 5684 goto repeat; 5685 } 5686 write_unlock(&ei->i_prealloc_lock); 5687 5688 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5689 BUG_ON(pa->pa_type != MB_INODE_PA); 5690 group = ext4_get_group_number(sb, pa->pa_pstart); 5691 5692 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5693 GFP_NOFS|__GFP_NOFAIL); 5694 if (err) { 5695 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 5696 err, group); 5697 continue; 5698 } 5699 5700 bitmap_bh = ext4_read_block_bitmap(sb, group); 5701 if (IS_ERR(bitmap_bh)) { 5702 err = PTR_ERR(bitmap_bh); 5703 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 5704 err, group); 5705 ext4_mb_unload_buddy(&e4b); 5706 continue; 5707 } 5708 5709 ext4_lock_group(sb, group); 5710 list_del(&pa->pa_group_list); 5711 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5712 ext4_unlock_group(sb, group); 5713 5714 ext4_mb_unload_buddy(&e4b); 5715 put_bh(bitmap_bh); 5716 5717 list_del(&pa->u.pa_tmp_list); 5718 ext4_mb_pa_free(pa); 5719 } 5720 } 5721 5722 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 5723 { 5724 struct ext4_prealloc_space *pa; 5725 5726 BUG_ON(ext4_pspace_cachep == NULL); 5727 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 5728 if (!pa) 5729 return -ENOMEM; 5730 atomic_set(&pa->pa_count, 1); 5731 ac->ac_pa = pa; 5732 return 0; 5733 } 5734 5735 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 5736 { 5737 struct ext4_prealloc_space *pa = ac->ac_pa; 5738 5739 BUG_ON(!pa); 5740 ac->ac_pa = NULL; 5741 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 5742 /* 5743 * current function is only called due to an error or due to 5744 * len of found blocks < len of requested blocks hence the PA has not 5745 * been added to grp->bb_prealloc_list. 
So we don't need to lock it 5746 */ 5747 pa->pa_deleted = 1; 5748 ext4_mb_pa_free(pa); 5749 } 5750 5751 #ifdef CONFIG_EXT4_DEBUG 5752 static inline void ext4_mb_show_pa(struct super_block *sb) 5753 { 5754 ext4_group_t i, ngroups; 5755 5756 if (ext4_emergency_state(sb)) 5757 return; 5758 5759 ngroups = ext4_get_groups_count(sb); 5760 mb_debug(sb, "groups: "); 5761 for (i = 0; i < ngroups; i++) { 5762 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5763 struct ext4_prealloc_space *pa; 5764 ext4_grpblk_t start; 5765 struct list_head *cur; 5766 5767 if (!grp) 5768 continue; 5769 ext4_lock_group(sb, i); 5770 list_for_each(cur, &grp->bb_prealloc_list) { 5771 pa = list_entry(cur, struct ext4_prealloc_space, 5772 pa_group_list); 5773 spin_lock(&pa->pa_lock); 5774 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5775 NULL, &start); 5776 spin_unlock(&pa->pa_lock); 5777 mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5778 pa->pa_len); 5779 } 5780 ext4_unlock_group(sb, i); 5781 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5782 grp->bb_fragments); 5783 } 5784 } 5785 5786 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5787 { 5788 struct super_block *sb = ac->ac_sb; 5789 5790 if (ext4_emergency_state(sb)) 5791 return; 5792 5793 mb_debug(sb, "Can't allocate:" 5794 " Allocation context details:"); 5795 mb_debug(sb, "status %u flags 0x%x", 5796 ac->ac_status, ac->ac_flags); 5797 mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5798 "goal %lu/%lu/%lu@%lu, " 5799 "best %lu/%lu/%lu@%lu cr %d", 5800 (unsigned long)ac->ac_o_ex.fe_group, 5801 (unsigned long)ac->ac_o_ex.fe_start, 5802 (unsigned long)ac->ac_o_ex.fe_len, 5803 (unsigned long)ac->ac_o_ex.fe_logical, 5804 (unsigned long)ac->ac_g_ex.fe_group, 5805 (unsigned long)ac->ac_g_ex.fe_start, 5806 (unsigned long)ac->ac_g_ex.fe_len, 5807 (unsigned long)ac->ac_g_ex.fe_logical, 5808 (unsigned long)ac->ac_b_ex.fe_group, 5809 (unsigned long)ac->ac_b_ex.fe_start, 5810 (unsigned long)ac->ac_b_ex.fe_len, 5811 (unsigned long)ac->ac_b_ex.fe_logical, 5812 (int)ac->ac_criteria); 5813 mb_debug(sb, "%u found", ac->ac_found); 5814 mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa)); 5815 if (ac->ac_pa) 5816 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ? 5817 "group pa" : "inode pa"); 5818 ext4_mb_show_pa(sb); 5819 } 5820 #else 5821 static inline void ext4_mb_show_pa(struct super_block *sb) 5822 { 5823 } 5824 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5825 { 5826 ext4_mb_show_pa(ac->ac_sb); 5827 } 5828 #endif 5829 5830 /* 5831 * We use locality group preallocation for small size file. 
The size of the 5832 * file is determined by the current size or the resulting size after 5833 * allocation which ever is larger 5834 * 5835 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5836 */ 5837 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5838 { 5839 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5840 int bsbits = ac->ac_sb->s_blocksize_bits; 5841 loff_t size, isize; 5842 bool inode_pa_eligible, group_pa_eligible; 5843 5844 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5845 return; 5846 5847 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 5848 return; 5849 5850 group_pa_eligible = sbi->s_mb_group_prealloc > 0; 5851 inode_pa_eligible = true; 5852 size = extent_logical_end(sbi, &ac->ac_o_ex); 5853 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 5854 >> bsbits; 5855 5856 /* No point in using inode preallocation for closed files */ 5857 if ((size == isize) && !ext4_fs_is_busy(sbi) && 5858 !inode_is_open_for_write(ac->ac_inode)) 5859 inode_pa_eligible = false; 5860 5861 size = max(size, isize); 5862 /* Don't use group allocation for large files */ 5863 if (size > sbi->s_mb_stream_request) 5864 group_pa_eligible = false; 5865 5866 if (!group_pa_eligible) { 5867 if (inode_pa_eligible) 5868 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5869 else 5870 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 5871 return; 5872 } 5873 5874 BUG_ON(ac->ac_lg != NULL); 5875 /* 5876 * locality group prealloc space are per cpu. The reason for having 5877 * per cpu locality group is to reduce the contention between block 5878 * request from multiple CPUs. 5879 */ 5880 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5881 5882 /* we're going to use group allocation */ 5883 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5884 5885 /* serialize all allocations in the group */ 5886 mutex_lock(&ac->ac_lg->lg_mutex); 5887 } 5888 5889 static noinline_for_stack void 5890 ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5891 struct ext4_allocation_request *ar) 5892 { 5893 struct super_block *sb = ar->inode->i_sb; 5894 struct ext4_sb_info *sbi = EXT4_SB(sb); 5895 struct ext4_super_block *es = sbi->s_es; 5896 ext4_group_t group; 5897 unsigned int len; 5898 ext4_fsblk_t goal; 5899 ext4_grpblk_t block; 5900 5901 /* we can't allocate > group size */ 5902 len = ar->len; 5903 5904 /* just a dirty hack to filter too big requests */ 5905 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 5906 len = EXT4_CLUSTERS_PER_GROUP(sb); 5907 5908 /* start searching from the goal */ 5909 goal = ar->goal; 5910 if (goal < le32_to_cpu(es->s_first_data_block) || 5911 goal >= ext4_blocks_count(es)) 5912 goal = le32_to_cpu(es->s_first_data_block); 5913 ext4_get_group_no_and_offset(sb, goal, &group, &block); 5914 5915 /* set up allocation goals */ 5916 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5917 ac->ac_status = AC_STATUS_CONTINUE; 5918 ac->ac_sb = sb; 5919 ac->ac_inode = ar->inode; 5920 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5921 ac->ac_o_ex.fe_group = group; 5922 ac->ac_o_ex.fe_start = block; 5923 ac->ac_o_ex.fe_len = len; 5924 ac->ac_g_ex = ac->ac_o_ex; 5925 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; 5926 ac->ac_flags = ar->flags; 5927 5928 /* we have to define context: we'll work with a file or 5929 * locality group. 
this is a policy, actually */ 5930 ext4_mb_group_or_file(ac); 5931 5932 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5933 "left: %u/%u, right %u/%u to %swritable\n", 5934 (unsigned) ar->len, (unsigned) ar->logical, 5935 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5936 (unsigned) ar->lleft, (unsigned) ar->pleft, 5937 (unsigned) ar->lright, (unsigned) ar->pright, 5938 inode_is_open_for_write(ar->inode) ? "" : "non-"); 5939 } 5940 5941 static noinline_for_stack void 5942 ext4_mb_discard_lg_preallocations(struct super_block *sb, 5943 struct ext4_locality_group *lg, 5944 int order, int total_entries) 5945 { 5946 ext4_group_t group = 0; 5947 struct ext4_buddy e4b; 5948 LIST_HEAD(discard_list); 5949 struct ext4_prealloc_space *pa, *tmp; 5950 5951 mb_debug(sb, "discard locality group preallocation\n"); 5952 5953 spin_lock(&lg->lg_prealloc_lock); 5954 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 5955 pa_node.lg_list, 5956 lockdep_is_held(&lg->lg_prealloc_lock)) { 5957 spin_lock(&pa->pa_lock); 5958 if (atomic_read(&pa->pa_count)) { 5959 /* 5960 * This is the pa that we just used 5961 * for block allocation. So don't 5962 * free that 5963 */ 5964 spin_unlock(&pa->pa_lock); 5965 continue; 5966 } 5967 if (pa->pa_deleted) { 5968 spin_unlock(&pa->pa_lock); 5969 continue; 5970 } 5971 /* only lg prealloc space */ 5972 BUG_ON(pa->pa_type != MB_GROUP_PA); 5973 5974 /* seems this one can be freed ... */ 5975 ext4_mb_mark_pa_deleted(sb, pa); 5976 spin_unlock(&pa->pa_lock); 5977 5978 list_del_rcu(&pa->pa_node.lg_list); 5979 list_add(&pa->u.pa_tmp_list, &discard_list); 5980 5981 total_entries--; 5982 if (total_entries <= 5) { 5983 /* 5984 * we want to keep only 5 entries 5985 * allowing it to grow to 8. This 5986 * mak sure we don't call discard 5987 * soon for this list. 5988 */ 5989 break; 5990 } 5991 } 5992 spin_unlock(&lg->lg_prealloc_lock); 5993 5994 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 5995 int err; 5996 5997 group = ext4_get_group_number(sb, pa->pa_pstart); 5998 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5999 GFP_NOFS|__GFP_NOFAIL); 6000 if (err) { 6001 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 6002 err, group); 6003 continue; 6004 } 6005 ext4_lock_group(sb, group); 6006 list_del(&pa->pa_group_list); 6007 ext4_mb_release_group_pa(&e4b, pa); 6008 ext4_unlock_group(sb, group); 6009 6010 ext4_mb_unload_buddy(&e4b); 6011 list_del(&pa->u.pa_tmp_list); 6012 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 6013 } 6014 } 6015 6016 /* 6017 * We have incremented pa_count. So it cannot be freed at this 6018 * point. Also we hold lg_mutex. So no parallel allocation is 6019 * possible from this lg. That means pa_free cannot be updated. 6020 * 6021 * A parallel ext4_mb_discard_group_preallocations is possible. 6022 * which can cause the lg_prealloc_list to be updated. 
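 *
 * Hedged example of the bucket selection done below (numbers invented
 * for illustration): a pa with pa_free == 200 clusters gives
 * fls(200) - 1 == 7, so it is filed under lg_prealloc_list[7]
 * (clamped to PREALLOC_TB_SIZE - 1).  Entries in a bucket are kept in
 * ascending pa_free order, and once a bucket grows past 8 entries it
 * is scanned from the front and trimmed back toward 5 entries by
 * ext4_mb_discard_lg_preallocations().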
6023 */ 6024 6025 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 6026 { 6027 int order, added = 0, lg_prealloc_count = 1; 6028 struct super_block *sb = ac->ac_sb; 6029 struct ext4_locality_group *lg = ac->ac_lg; 6030 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 6031 6032 order = fls(pa->pa_free) - 1; 6033 if (order > PREALLOC_TB_SIZE - 1) 6034 /* The max size of hash table is PREALLOC_TB_SIZE */ 6035 order = PREALLOC_TB_SIZE - 1; 6036 /* Add the prealloc space to lg */ 6037 spin_lock(&lg->lg_prealloc_lock); 6038 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 6039 pa_node.lg_list, 6040 lockdep_is_held(&lg->lg_prealloc_lock)) { 6041 spin_lock(&tmp_pa->pa_lock); 6042 if (tmp_pa->pa_deleted) { 6043 spin_unlock(&tmp_pa->pa_lock); 6044 continue; 6045 } 6046 if (!added && pa->pa_free < tmp_pa->pa_free) { 6047 /* Add to the tail of the previous entry */ 6048 list_add_tail_rcu(&pa->pa_node.lg_list, 6049 &tmp_pa->pa_node.lg_list); 6050 added = 1; 6051 /* 6052 * we want to count the total 6053 * number of entries in the list 6054 */ 6055 } 6056 spin_unlock(&tmp_pa->pa_lock); 6057 lg_prealloc_count++; 6058 } 6059 if (!added) 6060 list_add_tail_rcu(&pa->pa_node.lg_list, 6061 &lg->lg_prealloc_list[order]); 6062 spin_unlock(&lg->lg_prealloc_lock); 6063 6064 /* Now trim the list to be not more than 8 elements */ 6065 if (lg_prealloc_count > 8) 6066 ext4_mb_discard_lg_preallocations(sb, lg, 6067 order, lg_prealloc_count); 6068 } 6069 6070 /* 6071 * release all resource we used in allocation 6072 */ 6073 static void ext4_mb_release_context(struct ext4_allocation_context *ac) 6074 { 6075 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 6076 struct ext4_prealloc_space *pa = ac->ac_pa; 6077 if (pa) { 6078 if (pa->pa_type == MB_GROUP_PA) { 6079 /* see comment in ext4_mb_use_group_pa() */ 6080 spin_lock(&pa->pa_lock); 6081 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 6082 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 6083 pa->pa_free -= ac->ac_b_ex.fe_len; 6084 pa->pa_len -= ac->ac_b_ex.fe_len; 6085 spin_unlock(&pa->pa_lock); 6086 6087 /* 6088 * We want to add the pa to the right bucket. 6089 * Remove it from the list and while adding 6090 * make sure the list to which we are adding 6091 * doesn't grow big. 
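 *
 * Hypothetical example: a 512-cluster group pa from which this
 * allocation consumed 16 clusters leaves pa_pstart/pa_lstart advanced
 * by those 16 clusters (converted to blocks above) and
 * pa_len == pa_free == 496; since pa_free is non-zero it is unlinked
 * below and re-filed into the matching lg bucket by
 * ext4_mb_add_n_trim().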
6092 */ 6093 if (likely(pa->pa_free)) { 6094 spin_lock(pa->pa_node_lock.lg_lock); 6095 list_del_rcu(&pa->pa_node.lg_list); 6096 spin_unlock(pa->pa_node_lock.lg_lock); 6097 ext4_mb_add_n_trim(ac); 6098 } 6099 } 6100 6101 ext4_mb_put_pa(ac, ac->ac_sb, pa); 6102 } 6103 if (ac->ac_bitmap_folio) 6104 folio_put(ac->ac_bitmap_folio); 6105 if (ac->ac_buddy_folio) 6106 folio_put(ac->ac_buddy_folio); 6107 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 6108 mutex_unlock(&ac->ac_lg->lg_mutex); 6109 ext4_mb_collect_stats(ac); 6110 } 6111 6112 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 6113 { 6114 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 6115 int ret; 6116 int freed = 0, busy = 0; 6117 int retry = 0; 6118 6119 trace_ext4_mb_discard_preallocations(sb, needed); 6120 6121 if (needed == 0) 6122 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 6123 repeat: 6124 for (i = 0; i < ngroups && needed > 0; i++) { 6125 ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 6126 freed += ret; 6127 needed -= ret; 6128 cond_resched(); 6129 } 6130 6131 if (needed > 0 && busy && ++retry < 3) { 6132 busy = 0; 6133 goto repeat; 6134 } 6135 6136 return freed; 6137 } 6138 6139 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 6140 struct ext4_allocation_context *ac, u64 *seq) 6141 { 6142 int freed; 6143 u64 seq_retry = 0; 6144 bool ret = false; 6145 6146 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 6147 if (freed) { 6148 ret = true; 6149 goto out_dbg; 6150 } 6151 seq_retry = ext4_get_discard_pa_seq_sum(); 6152 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 6153 ac->ac_flags |= EXT4_MB_STRICT_CHECK; 6154 *seq = seq_retry; 6155 ret = true; 6156 } 6157 6158 out_dbg: 6159 mb_debug(sb, "freed %d, retry ? %s\n", freed, str_yes_no(ret)); 6160 return ret; 6161 } 6162 6163 /* 6164 * Simple allocator for Ext4 fast commit replay path. It searches for blocks 6165 * linearly starting at the goal block and also excludes the blocks which 6166 * are going to be in use after fast commit replay. 
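 *
 * Rough shape of the scan below, one cluster per call:
 *
 *	for each group, starting at the goal's group and wrapping:
 *		find the first zero bit (starting at the goal offset in
 *		the first group, at 0 afterwards), skipping clusters
 *		rejected by ext4_fc_replay_check_excluded();
 *		if found, mark it used and return that single block
 *	otherwise fail with -ENOSPC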
6167 */ 6168 static ext4_fsblk_t 6169 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp) 6170 { 6171 struct buffer_head *bitmap_bh; 6172 struct super_block *sb = ar->inode->i_sb; 6173 struct ext4_sb_info *sbi = EXT4_SB(sb); 6174 ext4_group_t group, nr; 6175 ext4_grpblk_t blkoff; 6176 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 6177 ext4_grpblk_t i = 0; 6178 ext4_fsblk_t goal, block; 6179 struct ext4_super_block *es = sbi->s_es; 6180 6181 goal = ar->goal; 6182 if (goal < le32_to_cpu(es->s_first_data_block) || 6183 goal >= ext4_blocks_count(es)) 6184 goal = le32_to_cpu(es->s_first_data_block); 6185 6186 ar->len = 0; 6187 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 6188 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) { 6189 bitmap_bh = ext4_read_block_bitmap(sb, group); 6190 if (IS_ERR(bitmap_bh)) { 6191 *errp = PTR_ERR(bitmap_bh); 6192 pr_warn("Failed to read block bitmap\n"); 6193 return 0; 6194 } 6195 6196 while (1) { 6197 i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 6198 blkoff); 6199 if (i >= max) 6200 break; 6201 if (ext4_fc_replay_check_excluded(sb, 6202 ext4_group_first_block_no(sb, group) + 6203 EXT4_C2B(sbi, i))) { 6204 blkoff = i + 1; 6205 } else 6206 break; 6207 } 6208 brelse(bitmap_bh); 6209 if (i < max) 6210 break; 6211 6212 if (++group >= ext4_get_groups_count(sb)) 6213 group = 0; 6214 6215 blkoff = 0; 6216 } 6217 6218 if (i >= max) { 6219 *errp = -ENOSPC; 6220 return 0; 6221 } 6222 6223 block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i); 6224 ext4_mb_mark_bb(sb, block, 1, true); 6225 ar->len = 1; 6226 6227 *errp = 0; 6228 return block; 6229 } 6230 6231 /* 6232 * Main entry point into mballoc to allocate blocks 6233 * it tries to use preallocation first, then falls back 6234 * to usual allocation 6235 */ 6236 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 6237 struct ext4_allocation_request *ar, int *errp) 6238 { 6239 struct ext4_allocation_context *ac = NULL; 6240 struct ext4_sb_info *sbi; 6241 struct super_block *sb; 6242 ext4_fsblk_t block = 0; 6243 unsigned int inquota = 0; 6244 unsigned int reserv_clstrs = 0; 6245 int retries = 0; 6246 u64 seq; 6247 6248 might_sleep(); 6249 sb = ar->inode->i_sb; 6250 sbi = EXT4_SB(sb); 6251 6252 trace_ext4_request_blocks(ar); 6253 if (sbi->s_mount_state & EXT4_FC_REPLAY) 6254 return ext4_mb_new_blocks_simple(ar, errp); 6255 6256 /* Allow to use superuser reservation for quota file */ 6257 if (ext4_is_quota_file(ar->inode)) 6258 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 6259 6260 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 6261 /* Without delayed allocation we need to verify 6262 * there is enough free blocks to do block allocation 6263 * and verify allocation doesn't exceed the quota limits. 
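 *
 * Worked example with made-up numbers: a 64-cluster request on a
 * nearly full filesystem keeps failing ext4_claim_free_clusters(), so
 * ar->len is halved to 32, 16, ... until a claim succeeds; if it hits
 * zero the allocation fails with -ENOSPC.  The quota loop further down
 * instead trims the request one cluster at a time (setting
 * EXT4_MB_HINT_NOPREALLOC) and returns -EDQUOT once nothing can be
 * charged.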
6264 */ 6265 while (ar->len && 6266 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 6267 6268 /* let others to free the space */ 6269 cond_resched(); 6270 ar->len = ar->len >> 1; 6271 } 6272 if (!ar->len) { 6273 ext4_mb_show_pa(sb); 6274 *errp = -ENOSPC; 6275 return 0; 6276 } 6277 reserv_clstrs = ar->len; 6278 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 6279 dquot_alloc_block_nofail(ar->inode, 6280 EXT4_C2B(sbi, ar->len)); 6281 } else { 6282 while (ar->len && 6283 dquot_alloc_block(ar->inode, 6284 EXT4_C2B(sbi, ar->len))) { 6285 6286 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 6287 ar->len--; 6288 } 6289 } 6290 inquota = ar->len; 6291 if (ar->len == 0) { 6292 *errp = -EDQUOT; 6293 goto out; 6294 } 6295 } 6296 6297 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 6298 if (!ac) { 6299 ar->len = 0; 6300 *errp = -ENOMEM; 6301 goto out; 6302 } 6303 6304 ext4_mb_initialize_context(ac, ar); 6305 6306 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 6307 seq = this_cpu_read(discard_pa_seq); 6308 if (!ext4_mb_use_preallocated(ac)) { 6309 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 6310 ext4_mb_normalize_request(ac, ar); 6311 6312 *errp = ext4_mb_pa_alloc(ac); 6313 if (*errp) 6314 goto errout; 6315 repeat: 6316 /* allocate space in core */ 6317 *errp = ext4_mb_regular_allocator(ac); 6318 /* 6319 * pa allocated above is added to grp->bb_prealloc_list only 6320 * when we were able to allocate some block i.e. when 6321 * ac->ac_status == AC_STATUS_FOUND. 6322 * And error from above mean ac->ac_status != AC_STATUS_FOUND 6323 * So we have to free this pa here itself. 6324 */ 6325 if (*errp) { 6326 ext4_mb_pa_put_free(ac); 6327 ext4_discard_allocated_blocks(ac); 6328 goto errout; 6329 } 6330 if (ac->ac_status == AC_STATUS_FOUND && 6331 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 6332 ext4_mb_pa_put_free(ac); 6333 } 6334 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 6335 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 6336 if (*errp) { 6337 ext4_discard_allocated_blocks(ac); 6338 goto errout; 6339 } else { 6340 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 6341 ar->len = ac->ac_b_ex.fe_len; 6342 } 6343 } else { 6344 if (++retries < 3 && 6345 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 6346 goto repeat; 6347 /* 6348 * If block allocation fails then the pa allocated above 6349 * needs to be freed here itself. 6350 */ 6351 ext4_mb_pa_put_free(ac); 6352 *errp = -ENOSPC; 6353 } 6354 6355 if (*errp) { 6356 errout: 6357 ac->ac_b_ex.fe_len = 0; 6358 ar->len = 0; 6359 ext4_mb_show_ac(ac); 6360 } 6361 ext4_mb_release_context(ac); 6362 kmem_cache_free(ext4_ac_cachep, ac); 6363 out: 6364 if (inquota && ar->len < inquota) 6365 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 6366 if (!ar->len) { 6367 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 6368 /* release all the reserved blocks if non delalloc */ 6369 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 6370 reserv_clstrs); 6371 } 6372 6373 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 6374 6375 return block; 6376 } 6377 6378 /* 6379 * We can merge two free data extents only if the physical blocks 6380 * are contiguous, AND the extents were freed by the same transaction, 6381 * AND the blocks are associated with the same group. 
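 *
 * Hypothetical example: entry1 = { group 7, start_cluster 100,
 * count 20, tid 42 } and entry2 = { group 7, start_cluster 120,
 * count 4, tid 42 } collapse into one 24-cluster entry, while the
 * same pair freed under tids 42 and 43 stays separate so each piece
 * is released with its own committing transaction.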
6382 */ 6383 static inline bool 6384 ext4_freed_extents_can_be_merged(struct ext4_free_data *entry1, 6385 struct ext4_free_data *entry2) 6386 { 6387 if (entry1->efd_tid != entry2->efd_tid) 6388 return false; 6389 if (entry1->efd_start_cluster + entry1->efd_count != 6390 entry2->efd_start_cluster) 6391 return false; 6392 if (WARN_ON_ONCE(entry1->efd_group != entry2->efd_group)) 6393 return false; 6394 return true; 6395 } 6396 6397 static inline void 6398 ext4_merge_freed_extents(struct ext4_sb_info *sbi, struct rb_root *root, 6399 struct ext4_free_data *entry1, 6400 struct ext4_free_data *entry2) 6401 { 6402 entry1->efd_count += entry2->efd_count; 6403 spin_lock(&sbi->s_md_lock); 6404 list_del(&entry2->efd_list); 6405 spin_unlock(&sbi->s_md_lock); 6406 rb_erase(&entry2->efd_node, root); 6407 kmem_cache_free(ext4_free_data_cachep, entry2); 6408 } 6409 6410 static inline void 6411 ext4_try_merge_freed_extent_prev(struct ext4_sb_info *sbi, struct rb_root *root, 6412 struct ext4_free_data *entry) 6413 { 6414 struct ext4_free_data *prev; 6415 struct rb_node *node; 6416 6417 node = rb_prev(&entry->efd_node); 6418 if (!node) 6419 return; 6420 6421 prev = rb_entry(node, struct ext4_free_data, efd_node); 6422 if (ext4_freed_extents_can_be_merged(prev, entry)) 6423 ext4_merge_freed_extents(sbi, root, prev, entry); 6424 } 6425 6426 static inline void 6427 ext4_try_merge_freed_extent_next(struct ext4_sb_info *sbi, struct rb_root *root, 6428 struct ext4_free_data *entry) 6429 { 6430 struct ext4_free_data *next; 6431 struct rb_node *node; 6432 6433 node = rb_next(&entry->efd_node); 6434 if (!node) 6435 return; 6436 6437 next = rb_entry(node, struct ext4_free_data, efd_node); 6438 if (ext4_freed_extents_can_be_merged(entry, next)) 6439 ext4_merge_freed_extents(sbi, root, entry, next); 6440 } 6441 6442 static noinline_for_stack void 6443 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 6444 struct ext4_free_data *new_entry) 6445 { 6446 ext4_group_t group = e4b->bd_group; 6447 ext4_grpblk_t cluster; 6448 ext4_grpblk_t clusters = new_entry->efd_count; 6449 struct ext4_free_data *entry = NULL; 6450 struct ext4_group_info *db = e4b->bd_info; 6451 struct super_block *sb = e4b->bd_sb; 6452 struct ext4_sb_info *sbi = EXT4_SB(sb); 6453 struct rb_root *root = &db->bb_free_root; 6454 struct rb_node **n = &root->rb_node; 6455 struct rb_node *parent = NULL, *new_node; 6456 6457 BUG_ON(!ext4_handle_valid(handle)); 6458 BUG_ON(e4b->bd_bitmap_folio == NULL); 6459 BUG_ON(e4b->bd_buddy_folio == NULL); 6460 6461 new_node = &new_entry->efd_node; 6462 cluster = new_entry->efd_start_cluster; 6463 6464 if (!*n) { 6465 /* first free block exent. 
We need to 6466 protect buddy cache from being freed, 6467 * otherwise we'll refresh it from 6468 * on-disk bitmap and lose not-yet-available 6469 * blocks */ 6470 folio_get(e4b->bd_buddy_folio); 6471 folio_get(e4b->bd_bitmap_folio); 6472 } 6473 while (*n) { 6474 parent = *n; 6475 entry = rb_entry(parent, struct ext4_free_data, efd_node); 6476 if (cluster < entry->efd_start_cluster) 6477 n = &(*n)->rb_left; 6478 else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 6479 n = &(*n)->rb_right; 6480 else { 6481 ext4_grp_locked_error(sb, group, 0, 6482 ext4_group_first_block_no(sb, group) + 6483 EXT4_C2B(sbi, cluster), 6484 "Block already on to-be-freed list"); 6485 kmem_cache_free(ext4_free_data_cachep, new_entry); 6486 return; 6487 } 6488 } 6489 6490 atomic_add(clusters, &sbi->s_mb_free_pending); 6491 if (!entry) 6492 goto insert; 6493 6494 /* Now try to see the extent can be merged to prev and next */ 6495 if (ext4_freed_extents_can_be_merged(new_entry, entry)) { 6496 entry->efd_start_cluster = cluster; 6497 entry->efd_count += new_entry->efd_count; 6498 kmem_cache_free(ext4_free_data_cachep, new_entry); 6499 ext4_try_merge_freed_extent_prev(sbi, root, entry); 6500 return; 6501 } 6502 if (ext4_freed_extents_can_be_merged(entry, new_entry)) { 6503 entry->efd_count += new_entry->efd_count; 6504 kmem_cache_free(ext4_free_data_cachep, new_entry); 6505 ext4_try_merge_freed_extent_next(sbi, root, entry); 6506 return; 6507 } 6508 insert: 6509 rb_link_node(new_node, parent, n); 6510 rb_insert_color(new_node, root); 6511 6512 spin_lock(&sbi->s_md_lock); 6513 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]); 6514 spin_unlock(&sbi->s_md_lock); 6515 } 6516 6517 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 6518 unsigned long count) 6519 { 6520 struct super_block *sb = inode->i_sb; 6521 ext4_group_t group; 6522 ext4_grpblk_t blkoff; 6523 6524 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 6525 ext4_mb_mark_context(NULL, sb, false, group, blkoff, count, 6526 EXT4_MB_BITMAP_MARKED_CHECK | 6527 EXT4_MB_SYNC_UPDATE, 6528 NULL); 6529 } 6530 6531 /** 6532 * ext4_mb_clear_bb() -- helper function for freeing blocks. 6533 * Used by ext4_free_blocks() 6534 * @handle: handle for this transaction 6535 * @inode: inode 6536 * @block: starting physical block to be freed 6537 * @count: number of blocks to be freed 6538 * @flags: flags used by ext4_free_blocks 6539 */ 6540 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 6541 ext4_fsblk_t block, unsigned long count, 6542 int flags) 6543 { 6544 struct super_block *sb = inode->i_sb; 6545 struct ext4_group_info *grp; 6546 unsigned int overflow; 6547 ext4_grpblk_t bit; 6548 ext4_group_t block_group; 6549 struct ext4_sb_info *sbi; 6550 struct ext4_buddy e4b; 6551 unsigned int count_clusters; 6552 int err = 0; 6553 int mark_flags = 0; 6554 ext4_grpblk_t changed; 6555 6556 sbi = EXT4_SB(sb); 6557 6558 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6559 !ext4_inode_block_valid(inode, block, count)) { 6560 ext4_error(sb, "Freeing blocks in system zone - " 6561 "Block = %llu, count = %lu", block, count); 6562 /* err = 0. 
ext4_std_error should be a no op */ 6563 goto error_out; 6564 } 6565 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6566 6567 do_more: 6568 overflow = 0; 6569 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6570 6571 grp = ext4_get_group_info(sb, block_group); 6572 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 6573 return; 6574 6575 /* 6576 * Check to see if we are freeing blocks across a group 6577 * boundary. 6578 */ 6579 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 6580 overflow = EXT4_C2B(sbi, bit) + count - 6581 EXT4_BLOCKS_PER_GROUP(sb); 6582 count -= overflow; 6583 /* The range changed so it's no longer validated */ 6584 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6585 } 6586 count_clusters = EXT4_NUM_B2C(sbi, count); 6587 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6588 6589 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6590 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6591 GFP_NOFS|__GFP_NOFAIL); 6592 if (err) 6593 goto error_out; 6594 6595 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6596 !ext4_inode_block_valid(inode, block, count)) { 6597 ext4_error(sb, "Freeing blocks in system zone - " 6598 "Block = %llu, count = %lu", block, count); 6599 /* err = 0. ext4_std_error should be a no op */ 6600 goto error_clean; 6601 } 6602 6603 #ifdef AGGRESSIVE_CHECK 6604 mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK; 6605 #endif 6606 err = ext4_mb_mark_context(handle, sb, false, block_group, bit, 6607 count_clusters, mark_flags, &changed); 6608 6609 6610 if (err && changed == 0) 6611 goto error_clean; 6612 6613 #ifdef AGGRESSIVE_CHECK 6614 BUG_ON(changed != count_clusters); 6615 #endif 6616 6617 /* 6618 * We need to make sure we don't reuse the freed block until after the 6619 * transaction is committed. We make an exception if the inode is to be 6620 * written in writeback mode since writeback mode has weak data 6621 * consistency guarantees. 6622 */ 6623 if (ext4_handle_valid(handle) && 6624 ((flags & EXT4_FREE_BLOCKS_METADATA) || 6625 !ext4_should_writeback_data(inode))) { 6626 struct ext4_free_data *new_entry; 6627 /* 6628 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 6629 * to fail. 6630 */ 6631 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 6632 GFP_NOFS|__GFP_NOFAIL); 6633 new_entry->efd_start_cluster = bit; 6634 new_entry->efd_group = block_group; 6635 new_entry->efd_count = count_clusters; 6636 new_entry->efd_tid = handle->h_transaction->t_tid; 6637 6638 ext4_lock_group(sb, block_group); 6639 ext4_mb_free_metadata(handle, &e4b, new_entry); 6640 } else { 6641 if (test_opt(sb, DISCARD)) { 6642 err = ext4_issue_discard(sb, block_group, bit, 6643 count_clusters); 6644 /* 6645 * Ignore EOPNOTSUPP error. This is consistent with 6646 * what happens when using journal. 
6647 */ 6648 if (err == -EOPNOTSUPP) 6649 err = 0; 6650 if (err) 6651 ext4_msg(sb, KERN_WARNING, "discard request in" 6652 " group:%u block:%d count:%lu failed" 6653 " with %d", block_group, bit, count, 6654 err); 6655 } 6656 6657 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6658 6659 ext4_lock_group(sb, block_group); 6660 mb_free_blocks(inode, &e4b, bit, count_clusters); 6661 } 6662 6663 ext4_unlock_group(sb, block_group); 6664 6665 /* 6666 * on a bigalloc file system, defer the s_freeclusters_counter 6667 * update to the caller (ext4_remove_space and friends) so they 6668 * can determine if a cluster freed here should be rereserved 6669 */ 6670 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 6671 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 6672 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 6673 percpu_counter_add(&sbi->s_freeclusters_counter, 6674 count_clusters); 6675 } 6676 6677 if (overflow && !err) { 6678 block += count; 6679 count = overflow; 6680 ext4_mb_unload_buddy(&e4b); 6681 /* The range changed so it's no longer validated */ 6682 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6683 goto do_more; 6684 } 6685 6686 error_clean: 6687 ext4_mb_unload_buddy(&e4b); 6688 error_out: 6689 ext4_std_error(sb, err); 6690 } 6691 6692 /** 6693 * ext4_free_blocks() -- Free given blocks and update quota 6694 * @handle: handle for this transaction 6695 * @inode: inode 6696 * @bh: optional buffer of the block to be freed 6697 * @block: starting physical block to be freed 6698 * @count: number of blocks to be freed 6699 * @flags: flags used by ext4_free_blocks 6700 */ 6701 void ext4_free_blocks(handle_t *handle, struct inode *inode, 6702 struct buffer_head *bh, ext4_fsblk_t block, 6703 unsigned long count, int flags) 6704 { 6705 struct super_block *sb = inode->i_sb; 6706 unsigned int overflow; 6707 struct ext4_sb_info *sbi; 6708 6709 sbi = EXT4_SB(sb); 6710 6711 if (bh) { 6712 if (block) 6713 BUG_ON(block != bh->b_blocknr); 6714 else 6715 block = bh->b_blocknr; 6716 } 6717 6718 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 6719 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count)); 6720 return; 6721 } 6722 6723 might_sleep(); 6724 6725 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6726 !ext4_inode_block_valid(inode, block, count)) { 6727 ext4_error(sb, "Freeing blocks not in datazone - " 6728 "block = %llu, count = %lu", block, count); 6729 return; 6730 } 6731 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6732 6733 ext4_debug("freeing block %llu\n", block); 6734 trace_ext4_free_blocks(inode, block, count, flags); 6735 6736 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6737 BUG_ON(count > 1); 6738 6739 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 6740 inode, bh, block); 6741 } 6742 6743 /* 6744 * If the extent to be freed does not begin on a cluster 6745 * boundary, we need to deal with partial clusters at the 6746 * beginning and end of the extent. Normally we will free 6747 * blocks at the beginning or the end unless we are explicitly 6748 * requested to avoid doing so. 
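 *
 * Hypothetical bigalloc example (cluster ratio 16): freeing blocks
 * 100..131 (count 32) first rounds the start down to block 96
 * (count 36) so the whole first cluster goes, then rounds the tail up
 * to count 48 so the last cluster goes too.  With the
 * EXT4_FREE_BLOCKS_NOFREE_{FIRST,LAST}_CLUSTER flags set the partial
 * clusters are instead dropped from the range.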
6749 */ 6750 overflow = EXT4_PBLK_COFF(sbi, block); 6751 if (overflow) { 6752 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 6753 overflow = sbi->s_cluster_ratio - overflow; 6754 block += overflow; 6755 if (count > overflow) 6756 count -= overflow; 6757 else 6758 return; 6759 } else { 6760 block -= overflow; 6761 count += overflow; 6762 } 6763 /* The range changed so it's no longer validated */ 6764 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6765 } 6766 overflow = EXT4_LBLK_COFF(sbi, count); 6767 if (overflow) { 6768 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 6769 if (count > overflow) 6770 count -= overflow; 6771 else 6772 return; 6773 } else 6774 count += sbi->s_cluster_ratio - overflow; 6775 /* The range changed so it's no longer validated */ 6776 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6777 } 6778 6779 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6780 int i; 6781 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 6782 6783 for (i = 0; i < count; i++) { 6784 cond_resched(); 6785 if (is_metadata) 6786 bh = sb_find_get_block_nonatomic(inode->i_sb, 6787 block + i); 6788 ext4_forget(handle, is_metadata, inode, bh, block + i); 6789 } 6790 } 6791 6792 ext4_mb_clear_bb(handle, inode, block, count, flags); 6793 } 6794 6795 /** 6796 * ext4_group_add_blocks() -- Add given blocks to an existing group 6797 * @handle: handle to this transaction 6798 * @sb: super block 6799 * @block: start physical block to add to the block group 6800 * @count: number of blocks to free 6801 * 6802 * This marks the blocks as free in the bitmap and buddy. 6803 */ 6804 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 6805 ext4_fsblk_t block, unsigned long count) 6806 { 6807 ext4_group_t block_group; 6808 ext4_grpblk_t bit; 6809 struct ext4_sb_info *sbi = EXT4_SB(sb); 6810 struct ext4_buddy e4b; 6811 int err = 0; 6812 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6813 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6814 unsigned long cluster_count = last_cluster - first_cluster + 1; 6815 ext4_grpblk_t changed; 6816 6817 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 6818 6819 if (cluster_count == 0) 6820 return 0; 6821 6822 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6823 /* 6824 * Check to see if we are freeing blocks across a group 6825 * boundary. 
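 *
 * Unlike ext4_mb_clear_bb(), which splits a range that spills into the
 * next group and loops, a spill here is rejected with -EINVAL: e.g.
 * (hypothetical geometry) with 32768 clusters per group, bit 32760
 * plus cluster_count 16 fails the check below.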
6826 */ 6827 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6828 ext4_warning(sb, "too many blocks added to group %u", 6829 block_group); 6830 err = -EINVAL; 6831 goto error_out; 6832 } 6833 6834 err = ext4_mb_load_buddy(sb, block_group, &e4b); 6835 if (err) 6836 goto error_out; 6837 6838 if (!ext4_sb_block_valid(sb, NULL, block, count)) { 6839 ext4_error(sb, "Adding blocks in system zones - " 6840 "Block = %llu, count = %lu", 6841 block, count); 6842 err = -EINVAL; 6843 goto error_clean; 6844 } 6845 6846 err = ext4_mb_mark_context(handle, sb, false, block_group, bit, 6847 cluster_count, EXT4_MB_BITMAP_MARKED_CHECK, 6848 &changed); 6849 if (err && changed == 0) 6850 goto error_clean; 6851 6852 if (changed != cluster_count) 6853 ext4_error(sb, "bit already cleared in group %u", block_group); 6854 6855 ext4_lock_group(sb, block_group); 6856 mb_free_blocks(NULL, &e4b, bit, cluster_count); 6857 ext4_unlock_group(sb, block_group); 6858 percpu_counter_add(&sbi->s_freeclusters_counter, 6859 changed); 6860 6861 error_clean: 6862 ext4_mb_unload_buddy(&e4b); 6863 error_out: 6864 ext4_std_error(sb, err); 6865 return err; 6866 } 6867 6868 /** 6869 * ext4_trim_extent -- function to TRIM one single free extent in the group 6870 * @sb: super block for the file system 6871 * @start: starting block of the free extent in the alloc. group 6872 * @count: number of blocks to TRIM 6873 * @e4b: ext4 buddy for the group 6874 * 6875 * Trim "count" blocks starting at "start" in the "group". To assure that no 6876 * one will allocate those blocks, mark it as used in buddy bitmap. This must 6877 * be called with under the group lock. 6878 */ 6879 static int ext4_trim_extent(struct super_block *sb, 6880 int start, int count, struct ext4_buddy *e4b) 6881 __releases(bitlock) 6882 __acquires(bitlock) 6883 { 6884 struct ext4_free_extent ex; 6885 ext4_group_t group = e4b->bd_group; 6886 int ret = 0; 6887 6888 trace_ext4_trim_extent(sb, group, start, count); 6889 6890 assert_spin_locked(ext4_group_lock_ptr(sb, group)); 6891 6892 ex.fe_start = start; 6893 ex.fe_group = group; 6894 ex.fe_len = count; 6895 6896 /* 6897 * Mark blocks used, so no one can reuse them while 6898 * being trimmed. 
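 *
 * mb_mark_used() below temporarily takes the extent out of the free
 * pool, the group lock is dropped while ext4_issue_discard() performs
 * the potentially slow discard I/O, and mb_free_blocks() hands the
 * clusters back to the buddy once the lock has been retaken.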
6899 */ 6900 mb_mark_used(e4b, &ex); 6901 ext4_unlock_group(sb, group); 6902 ret = ext4_issue_discard(sb, group, start, count); 6903 ext4_lock_group(sb, group); 6904 mb_free_blocks(NULL, e4b, start, ex.fe_len); 6905 return ret; 6906 } 6907 6908 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb, 6909 ext4_group_t grp) 6910 { 6911 unsigned long nr_clusters_in_group; 6912 6913 if (grp < (ext4_get_groups_count(sb) - 1)) 6914 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb); 6915 else 6916 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) - 6917 ext4_group_first_block_no(sb, grp)) 6918 >> EXT4_CLUSTER_BITS(sb); 6919 6920 return nr_clusters_in_group - 1; 6921 } 6922 6923 static bool ext4_trim_interrupted(void) 6924 { 6925 return fatal_signal_pending(current) || freezing(current); 6926 } 6927 6928 static int ext4_try_to_trim_range(struct super_block *sb, 6929 struct ext4_buddy *e4b, ext4_grpblk_t start, 6930 ext4_grpblk_t max, ext4_grpblk_t minblocks) 6931 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6932 __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 6933 { 6934 ext4_grpblk_t next, count, free_count, last, origin_start; 6935 bool set_trimmed = false; 6936 void *bitmap; 6937 6938 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 6939 return 0; 6940 6941 last = ext4_last_grp_cluster(sb, e4b->bd_group); 6942 bitmap = e4b->bd_bitmap; 6943 if (start == 0 && max >= last) 6944 set_trimmed = true; 6945 origin_start = start; 6946 start = max(e4b->bd_info->bb_first_free, start); 6947 count = 0; 6948 free_count = 0; 6949 6950 while (start <= max) { 6951 start = mb_find_next_zero_bit(bitmap, max + 1, start); 6952 if (start > max) 6953 break; 6954 6955 next = mb_find_next_bit(bitmap, last + 1, start); 6956 if (origin_start == 0 && next >= last) 6957 set_trimmed = true; 6958 6959 if ((next - start) >= minblocks) { 6960 int ret = ext4_trim_extent(sb, start, next - start, e4b); 6961 6962 if (ret && ret != -EOPNOTSUPP) 6963 return count; 6964 count += next - start; 6965 } 6966 free_count += next - start; 6967 start = next + 1; 6968 6969 if (ext4_trim_interrupted()) 6970 return count; 6971 6972 if (need_resched()) { 6973 ext4_unlock_group(sb, e4b->bd_group); 6974 cond_resched(); 6975 ext4_lock_group(sb, e4b->bd_group); 6976 } 6977 6978 if ((e4b->bd_info->bb_free - free_count) < minblocks) 6979 break; 6980 } 6981 6982 if (set_trimmed) 6983 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info); 6984 6985 return count; 6986 } 6987 6988 /** 6989 * ext4_trim_all_free -- function to trim all free space in alloc. group 6990 * @sb: super block for file system 6991 * @group: group to be trimmed 6992 * @start: first group block to examine 6993 * @max: last group block to examine 6994 * @minblocks: minimum extent block count 6995 * 6996 * ext4_trim_all_free walks through group's block bitmap searching for free 6997 * extents. When the free extent is found, mark it as used in group buddy 6998 * bitmap. Then issue a TRIM command on this extent and free the extent in 6999 * the group buddy bitmap. 
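 *
 * Groups that were fully trimmed before are skipped (the function
 * returns 0 for them) unless this run asks for a smaller minimum
 * extent than the one recorded in s_last_trim_minblks.  Illustrative
 * numbers: after a full trim with minblocks 256, a later run with
 * minblocks 512 skips such groups, while a run with minblocks 64
 * re-scans them.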
7000 */ 7001 static ext4_grpblk_t 7002 ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 7003 ext4_grpblk_t start, ext4_grpblk_t max, 7004 ext4_grpblk_t minblocks) 7005 { 7006 struct ext4_buddy e4b; 7007 int ret; 7008 7009 trace_ext4_trim_all_free(sb, group, start, max); 7010 7011 ret = ext4_mb_load_buddy(sb, group, &e4b); 7012 if (ret) { 7013 ext4_warning(sb, "Error %d loading buddy information for %u", 7014 ret, group); 7015 return ret; 7016 } 7017 7018 ext4_lock_group(sb, group); 7019 7020 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || 7021 minblocks < EXT4_SB(sb)->s_last_trim_minblks) 7022 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); 7023 else 7024 ret = 0; 7025 7026 ext4_unlock_group(sb, group); 7027 ext4_mb_unload_buddy(&e4b); 7028 7029 ext4_debug("trimmed %d blocks in the group %d\n", 7030 ret, group); 7031 7032 return ret; 7033 } 7034 7035 /** 7036 * ext4_trim_fs() -- trim ioctl handle function 7037 * @sb: superblock for filesystem 7038 * @range: fstrim_range structure 7039 * 7040 * start: First Byte to trim 7041 * len: number of Bytes to trim from start 7042 * minlen: minimum extent length in Bytes 7043 * ext4_trim_fs goes through all allocation groups containing Bytes from 7044 * start to start+len. For each such a group ext4_trim_all_free function 7045 * is invoked to trim all free space. 7046 */ 7047 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 7048 { 7049 unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev); 7050 struct ext4_group_info *grp; 7051 ext4_group_t group, first_group, last_group; 7052 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 7053 uint64_t start, end, minlen, trimmed = 0; 7054 ext4_fsblk_t first_data_blk = 7055 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 7056 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 7057 int ret = 0; 7058 7059 start = range->start >> sb->s_blocksize_bits; 7060 end = start + (range->len >> sb->s_blocksize_bits) - 1; 7061 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 7062 range->minlen >> sb->s_blocksize_bits); 7063 7064 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 7065 start >= max_blks || 7066 range->len < sb->s_blocksize) 7067 return -EINVAL; 7068 /* No point to try to trim less than discard granularity */ 7069 if (range->minlen < discard_granularity) { 7070 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 7071 discard_granularity >> sb->s_blocksize_bits); 7072 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) 7073 goto out; 7074 } 7075 if (end >= max_blks - 1) 7076 end = max_blks - 1; 7077 if (end <= first_data_blk) 7078 goto out; 7079 if (start < first_data_blk) 7080 start = first_data_blk; 7081 7082 /* Determine first and last group to examine based on start and end */ 7083 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 7084 &first_group, &first_cluster); 7085 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 7086 &last_group, &last_cluster); 7087 7088 /* end now represents the last cluster to discard in this group */ 7089 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7090 7091 for (group = first_group; group <= last_group; group++) { 7092 if (ext4_trim_interrupted()) 7093 break; 7094 grp = ext4_get_group_info(sb, group); 7095 if (!grp) 7096 continue; 7097 /* We only do this if the grp has never been initialized */ 7098 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 7099 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 7100 if (ret) 7101 break; 7102 } 7103 7104 /* 7105 * For all the groups except the last one, last cluster will 7106 * always be 
EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 7107 * change it for the last group, note that last_cluster is 7108 * already computed earlier by ext4_get_group_no_and_offset() 7109 */ 7110 if (group == last_group) 7111 end = last_cluster; 7112 if (grp->bb_free >= minlen) { 7113 cnt = ext4_trim_all_free(sb, group, first_cluster, 7114 end, minlen); 7115 if (cnt < 0) { 7116 ret = cnt; 7117 break; 7118 } 7119 trimmed += cnt; 7120 } 7121 7122 /* 7123 * For every group except the first one, we are sure 7124 * that the first cluster to discard will be cluster #0. 7125 */ 7126 first_cluster = 0; 7127 } 7128 7129 if (!ret) 7130 EXT4_SB(sb)->s_last_trim_minblks = minlen; 7131 7132 out: 7133 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 7134 return ret; 7135 } 7136 7137 /* Iterate all the free extents in the group. */ 7138 int 7139 ext4_mballoc_query_range( 7140 struct super_block *sb, 7141 ext4_group_t group, 7142 ext4_grpblk_t first, 7143 ext4_grpblk_t end, 7144 ext4_mballoc_query_range_fn meta_formatter, 7145 ext4_mballoc_query_range_fn formatter, 7146 void *priv) 7147 { 7148 void *bitmap; 7149 ext4_grpblk_t start, next; 7150 struct ext4_buddy e4b; 7151 int error; 7152 7153 error = ext4_mb_load_buddy(sb, group, &e4b); 7154 if (error) 7155 return error; 7156 bitmap = e4b.bd_bitmap; 7157 7158 ext4_lock_group(sb, group); 7159 7160 start = max(e4b.bd_info->bb_first_free, first); 7161 if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 7162 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7163 if (meta_formatter && start != first) { 7164 if (start > end) 7165 start = end; 7166 ext4_unlock_group(sb, group); 7167 error = meta_formatter(sb, group, first, start - first, 7168 priv); 7169 if (error) 7170 goto out_unload; 7171 ext4_lock_group(sb, group); 7172 } 7173 while (start <= end) { 7174 start = mb_find_next_zero_bit(bitmap, end + 1, start); 7175 if (start > end) 7176 break; 7177 next = mb_find_next_bit(bitmap, end + 1, start); 7178 7179 ext4_unlock_group(sb, group); 7180 error = formatter(sb, group, start, next - start, priv); 7181 if (error) 7182 goto out_unload; 7183 ext4_lock_group(sb, group); 7184 7185 start = next + 1; 7186 } 7187 7188 ext4_unlock_group(sb, group); 7189 out_unload: 7190 ext4_mb_unload_buddy(&e4b); 7191 7192 return error; 7193 } 7194 7195 #ifdef CONFIG_EXT4_KUNIT_TESTS 7196 #include "mballoc-test.c" 7197 #endif 7198
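
/*
 * Illustrative sketch only (not part of mballoc, names invented for the
 * example): a minimal formatter callback for ext4_mballoc_query_range(),
 * which is called once per free extent with the group lock dropped and
 * aborts the walk on a non-zero return value.
 *
 *	static int count_free_extents(struct super_block *sb,
 *				      ext4_group_t group, ext4_grpblk_t start,
 *				      ext4_grpblk_t len, void *priv)
 *	{
 *		unsigned long *nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *	int err = ext4_mballoc_query_range(sb, group, 0,
 *					   EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *					   NULL, count_free_extents, &nr);
 */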