// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near to the
 * goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use group
 * preallocation or inode preallocation depending on the size of the file.
 * The size of the file could be the resulting file size we would have after
 * allocation, or the current file size, whichever is larger. If the size is
 * less than sbi->s_mb_stream_request we select group preallocation. The
 * default value of s_mb_stream_request is 16 blocks. This can also be tuned
 * via /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that small files are placed closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we don't
 * modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
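 *
 * For example (illustrative numbers): with the default s_mb_stream_request
 * of 16 blocks, an allocation that would leave a file at 12 blocks is
 * served from the per-CPU locality group preallocation, while one that
 * would grow the file to 64 blocks uses inode preallocation instead.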
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy and comprises the block
 * bitmap and the buddy information, stored in the inode as:
 *
 * {                          folio                          }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A folio can contain blocks_per_folio (folio_size /
 * blocksize) blocks. So it can have information regarding groups_per_folio
 * groups, which is blocks_per_folio/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order xarrays (sbi->s_mb_largest_free_orders)
 *
 *    Locking: Writers use xa_lock, readers use rcu_read_lock.
 *
 *    This is an array of xarrays where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that xarray. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) xarrays. Group-infos are
 *    placed in the appropriate xarrays.
 *
 * 2) Average fragment size xarrays (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: Writers use xa_lock, readers use rcu_read_lock.
 *
 *    This is an array of xarrays where in the i-th xarray there are groups
 *    with average fragment size >= 2^i and < 2^(i+1). The average fragment
 *    size is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
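 *
 *    For example, a group with bb_free = 1200 free clusters spread over
 *    bb_fragments = 10 free extents has an average fragment size of 120
 *    clusters and is tracked in the xarray whose range contains 120 (see
 *    mb_avg_fragment_size_order() for the exact index computation).
 *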
 * Note that we don't bother with a special xarray for completely empty
 * groups so we only have MB_NUM_ORDERS(sb) xarrays. Group-infos are placed
 * in the appropriate xarrays.
 *
 * In each xarray, the index is the block group number and the value is the
 * block group information; a non-empty value indicates that the block group
 * is present in that xarray.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups whose largest_free_order is
 * >= the order of the request. We directly look at the largest free order
 * xarray in data structure (1) above where largest_free_order = order of the
 * request. If that xarray is empty, we look at the remaining xarrays in
 * increasing order of largest_free_order. This allows us to perform the
 * CR_POWER2_ALIGNED lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the
 * average fragment size > request size. So, we look up a group whose average
 * fragment size is just above or equal to the request size using our average
 * fragment size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has an avg
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal length
 * and then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with
 * a big enough average fragment size. This increases the chances of finding a
 * suitable block group in O(1) time and results in faster allocation at the
 * cost of reduced size of allocation.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR_POWER2_ALIGNED
 * and CR_GOAL_LEN_FAST phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_max_linear_groups
 *
 * The regular allocator uses a buddy scan only if the request len is a power
 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe-size chunks. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria of whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
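 *
 * For example (illustrative): a normalized goal of 1024 blocks that cannot
 * be matched at CR_GOAL_LEN_FAST may be trimmed at CR_BEST_AVAIL_LEN to 512
 * or 256 blocks, so the same average-fragment-size lists can still produce
 * a group in O(1) time before we resort to CR_GOAL_LEN_SLOW.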
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times.
 * "mb_max_linear_groups" tells mballoc how many groups it should search
 * linearly before consulting the above data structures for more efficient
 * lookups. For non-rotational devices, this value defaults to 0 and for
 * rotational devices this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as described above. So the first
 * request will hit the buddy cache, which results in the prealloc space
 * getting filled. The prealloc space is then used for subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from a descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within a descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers of
 * blocks: how many blocks are marked used/free in the on-disk bitmap, buddy
 * and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
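 *
 * for example: a new 16-block inode PA does buddy += 16, PA = 16; using
 * 4 blocks from it does on-disk += 4, PA -= 4 (12 blocks left unused);
 * discarding it then clears the 12 still-unused blocks in the buddy
 * (buddy -= 12) and sets PA = 0.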
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set and/or the
 *     PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA is referenced, and while it is, no discard is possible
 *  - a PA is referenced until the blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness: no need to modify
 * the buddy in this case, but we should still care about concurrent init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group			(group)
 *  - object (inode/locality)			(object)
 *  - per-pa lock				(pa)
 *  - cr_power2_aligned lists lock		(cr_power2_aligned)
 *  - cr_goal_len_fast lists lock		(cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size. There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
			      ext4_group_t group);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter is as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed with discarding
 *    those PAs inside ext4_mb_discard_group_preallocations().
 *
 * To make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks were
 * found do we sample the percpu seq counter for all cpus using the function
 * ext4_get_discard_pa_seq_sum() below. This happens after making sure that
 * all the PAs on grp->bb_prealloc_list got freed, or that the list is empty.
 */
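/*
 * Illustrative sketch (not the exact code): the caller in
 * ext4_mb_new_blocks() conceptually does
 *
 *	seq = this_cpu_read(discard_pa_seq);
 *	err = try to allocate;
 *	if (err == -ENOSPC && freed == 0 &&
 *	    seq != ext4_get_discard_pa_seq_sum())
 *		retry the allocation;
 *
 * i.e. a changed counter sum means blocks were freed or PAs were discarded
 * concurrently, so it is worth retrying before giving up with ENOSPC.
 */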
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc needs an unsigned
	 * long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

/*
 * Perform buddy integrity check with the following steps:
 *
 * 1. Top-down validation (from the highest order down to order 1, excluding
 *    the order-0 bitmap): for each pair of adjacent orders, if a higher-order
 *    bit is set (indicating a free block), at most one of the two
 *    corresponding lower-order bits may be clear (free).
 *
 * 2. Order-0 (bitmap) validation, performed on bit pairs:
 *    - If either bit in a pair is set (1, allocated), then all corresponding
 *      higher-order bits must not be free (0).
 *    - If both bits in a pair are clear (0, free), then exactly one of the
 *      corresponding higher-order bits must be free (0).
 *
 * 3. Preallocation (pa) list validation:
 *    For each preallocated block (pa) in the group:
 *    - Verify that pa_pstart falls within the bounds of this block group.
 *    - Ensure the corresponding bit(s) in the order-0 bitmap are marked as
 *      allocated (1).
 */
static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
		} else {
			fstart = -1;
		}
		if (!(i & 1)) {
			int in_use, zero_bit_count = 0;

			in_use = mb_test_bit(i, buddy) || mb_test_bit(i + 1, buddy);
			for (j = 1; j < e4b->bd_blkbits + 2; j++) {
				buddy2 = mb_find_buddy(e4b, j, &max2);
				k = i >> j;
				MB_CHECK_ASSERT(k < max2);
				if (!mb_test_bit(k, buddy2))
					zero_bit_count++;
			}
			MB_CHECK_ASSERT(zero_bit_count == !in_use);
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	if (!grp)
		return;
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		if (!pa->pa_len)
			continue;
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power-of-2 blocks.
 * Clear the bits in the bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
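 *
 * For example, first = 5 and len = 13 is split into the power-of-2 chunks
 * 5 (size 1), 6-7 (size 2), 8-15 (size 8) and 16-17 (size 2), bumping
 * bb_counters[0], bb_counters[1], bb_counters[3] and bb_counters[1] again.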
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups whose free extents
	 * are all single blocks or for completely empty groups.
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
		order = MB_NUM_ORDERS(sb) - 1;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new, old;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
		return;

	old = grp->bb_avg_fragment_size_order;
	new = grp->bb_fragments == 0 ? -1 :
		mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
	if (new == old)
		return;

	if (old >= 0)
		xa_erase(&sbi->s_mb_avg_fragment_size[old], grp->bb_group);

	grp->bb_avg_fragment_size_order = new;
	if (new >= 0) {
		/*
		 * Cannot use __GFP_NOFAIL because we hold the group lock.
		 * Although the allocation for insertion may fail, it's not
		 * fatal as we have linear traversal to fall back on.
		 */
		int err = xa_insert(&sbi->s_mb_avg_fragment_size[new],
				    grp->bb_group, grp, GFP_ATOMIC);
		if (err)
			mb_debug(sb, "insert group: %u to s_mb_avg_fragment_size[%d] failed, err %d",
				 grp->bb_group, new, err);
	}
}

static ext4_group_t ext4_get_allocation_groups_count(
				struct ext4_allocation_context *ac)
{
	ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb);

	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = EXT4_SB(ac->ac_sb)->s_blockfile_groups;

	/* Pairs with smp_wmb() in ext4_update_super() */
	smp_rmb();

	return ngroups;
}

static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
					struct xarray *xa,
					ext4_group_t start, ext4_group_t end)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	enum criteria cr = ac->ac_criteria;
	ext4_group_t ngroups = ext4_get_allocation_groups_count(ac);
	unsigned long group = start;
	struct ext4_group_info *grp;

	if (WARN_ON_ONCE(end > ngroups || start >= end))
		return 0;

	xa_for_each_range(xa, group, grp, start, end - 1) {
		int err;

		if (sbi->s_mb_stats)
			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);

		err = ext4_mb_scan_group(ac, grp->bb_group);
		if (err || ac->ac_status != AC_STATUS_CONTINUE)
			return err;

		cond_resched();
	}

	return 0;
}

/*
 * Find a suitable group of a given order from the largest free orders xarray.
 */
static inline int
ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac,
					     int order, ext4_group_t start,
					     ext4_group_t end)
{
	struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];

	if (xa_empty(xa))
		return 0;

	return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
}

/*
 * Choose the next group by traversing the largest_free_order lists.
 * Updates ac->ac_criteria if the cr level needs an update.
 */
static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
					  ext4_group_t group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int i;
	int ret = 0;
	ext4_group_t start, end;

	start = group;
	end = ext4_get_allocation_groups_count(ac);
wrap_around:
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		ret = ext4_mb_scan_groups_largest_free_order_range(ac, i,
								   start, end);
		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
			return ret;
	}
	if (start) {
		end = start;
		start = 0;
		goto wrap_around;
	}

	if (sbi->s_mb_stats)
		atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);

	/* Increment cr and search again if no group is found */
	ac->ac_criteria = CR_GOAL_LEN_FAST;
	return ret;
}

/*
 * Find a suitable group of a given order from the average fragments xarray.
 */
static int
ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac,
					 int order, ext4_group_t start,
					 ext4_group_t end)
{
	struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];

	if (xa_empty(xa))
		return 0;

	return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
}

/*
 * Choose the next group by traversing the average fragment size list of
 * suitable order. Updates ac->ac_criteria if the cr level needs an update.
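 *
 * For example, for a goal of 100 clusters the scan starts at the xarray
 * whose range covers an average fragment size of 100 and, failing that,
 * proceeds through the xarrays of larger average fragment sizes.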
 */
static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
					 ext4_group_t group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int i, ret = 0;
	ext4_group_t start, end;

	start = group;
	end = ext4_get_allocation_groups_count(ac);
wrap_around:
	i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i,
							       start, end);
		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
			return ret;
	}
	if (start) {
		end = start;
		start = 0;
		goto wrap_around;
	}

	if (sbi->s_mb_stats)
		atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
	/*
	 * CR_BEST_AVAIL_LEN works based on the concept that we have
	 * a larger normalized goal len request which can be trimmed to
	 * a smaller goal len such that it can still satisfy the original
	 * request len. However, allocation requests for non-regular
	 * files are never normalized.
	 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
	 */
	if (ac->ac_flags & EXT4_MB_HINT_DATA)
		ac->ac_criteria = CR_BEST_AVAIL_LEN;
	else
		ac->ac_criteria = CR_GOAL_LEN_SLOW;

	return ret;
}

/*
 * We couldn't find a group in CR_GOAL_LEN_FAST, so try to find the highest
 * free fragment order we have and proactively trim the goal request length
 * to that order to find a suitable group faster.
 *
 * This optimizes allocation speed at the cost of slightly reduced
 * preallocations. However, we make sure that we don't trim the request too
 * much and fall back to CR_GOAL_LEN_SLOW in that case.
 */
static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
					  ext4_group_t group)
{
	int ret = 0;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int i, order, min_order;
	unsigned long num_stripe_clusters = 0;
	ext4_group_t start, end;

	/*
	 * mb_avg_fragment_size_order() returns order in a way that makes
	 * retrieving back the length using (1 << order) inaccurate. Hence, use
	 * fls() instead since we need to know the actual length while modifying
	 * goal length.
	 */
	order = fls(ac->ac_g_ex.fe_len) - 1;
	if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
		order = MB_NUM_ORDERS(ac->ac_sb);
	min_order = order - sbi->s_mb_best_avail_max_trim_order;
	if (min_order < 0)
		min_order = 0;

	if (sbi->s_stripe > 0) {
		/*
		 * We are assuming that stripe size is always a multiple of
		 * cluster ratio, otherwise __ext4_fill_super exits early.
		 */
		num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
		if (1 << min_order < num_stripe_clusters)
			/*
			 * We consider 1 order less because later we round
			 * up the goal len to num_stripe_clusters
			 */
			min_order = fls(num_stripe_clusters) - 1;
	}

	if (1 << min_order < ac->ac_o_ex.fe_len)
		min_order = fls(ac->ac_o_ex.fe_len);

	start = group;
	end = ext4_get_allocation_groups_count(ac);
wrap_around:
	for (i = order; i >= min_order; i--) {
		int frag_order;
		/*
		 * Scale down the goal len to make sure we find something
		 * in the free fragments list. Basically, reduce
		 * preallocations.
		 */
		ac->ac_g_ex.fe_len = 1 << i;

		if (num_stripe_clusters > 0) {
			/*
			 * Try to round up the adjusted goal length to a
			 * stripe size (in cluster units) multiple for
			 * efficiency.
			 */
			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
						     num_stripe_clusters);
		}

		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
							ac->ac_g_ex.fe_len);

		ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order,
							       start, end);
		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
			return ret;
	}
	if (start) {
		end = start;
		start = 0;
		goto wrap_around;
	}

	/* Reset goal length to the original goal length before falling into CR_GOAL_LEN_SLOW */
	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
	if (sbi->s_mb_stats)
		atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
	ac->ac_criteria = CR_GOAL_LEN_SLOW;

	return ret;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
		return 0;
	return 1;
}

/*
 * Advance @group to the next linear group for allocation.
 */
static void next_linear_group(ext4_group_t *group, ext4_group_t ngroups)
{
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	*group = *group + 1 >= ngroups ? 0 : *group + 1;
}

static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac,
		ext4_group_t ngroups, ext4_group_t *start, ext4_group_t count)
{
	int ret, i;
	enum criteria cr = ac->ac_criteria;
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t group = *start;

	for (i = 0; i < count; i++, next_linear_group(&group, ngroups)) {
		ret = ext4_mb_scan_group(ac, group);
		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
			return ret;
		cond_resched();
	}

	*start = group;
	if (count == ngroups)
		ac->ac_criteria++;

	/* Processed all groups and haven't found blocks */
	if (sbi->s_mb_stats && i == ngroups)
		atomic64_inc(&sbi->s_bal_cX_failed[cr]);

	return 0;
}

static int ext4_mb_scan_groups(struct ext4_allocation_context *ac)
{
	int ret = 0;
	ext4_group_t start;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_group_t ngroups = ext4_get_allocation_groups_count(ac);

	/* the search for the right group starts from the goal value specified */
	start = ac->ac_g_ex.fe_group;
	ac->ac_prefetch_grp = start;
	ac->ac_prefetch_nr = 0;

	if (!should_optimize_scan(ac))
		return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups);

	/*
	 * Optimized scanning can return non-adjacent groups which can cause
	 * seek overhead for rotational disks. So try a few linear groups
	 * before trying the optimized scan.
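	 *
	 * For example (illustrative): with sbi->s_mb_max_linear_groups set
	 * to 4, groups goal..goal+3 are scanned linearly first, and only
	 * then does the criteria-specific xarray scan below take over.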
	 */
	if (sbi->s_mb_max_linear_groups)
		ret = ext4_mb_scan_groups_linear(ac, ngroups, &start,
						 sbi->s_mb_max_linear_groups);
	if (ret || ac->ac_status != AC_STATUS_CONTINUE)
		return ret;

	switch (ac->ac_criteria) {
	case CR_POWER2_ALIGNED:
		return ext4_mb_scan_groups_p2_aligned(ac, start);
	case CR_GOAL_LEN_FAST:
		return ext4_mb_scan_groups_goal_fast(ac, start);
	case CR_BEST_AVAIL_LEN:
		return ext4_mb_scan_groups_best_avail(ac, start);
	default:
		/*
		 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
		 * rb tree sorted by bb_free. But until that happens, we should
		 * never come here.
		 */
		WARN_ON(1);
	}

	return 0;
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new, old = grp->bb_largest_free_order;

	for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
		if (grp->bb_counters[new] > 0)
			break;

	/* No need to move between order lists? */
	if (new == old)
		return;

	if (old >= 0) {
		struct xarray *xa = &sbi->s_mb_largest_free_orders[old];

		if (!xa_empty(xa) && xa_load(xa, grp->bb_group))
			xa_erase(xa, grp->bb_group);
	}

	grp->bb_largest_free_order = new;
	if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
		/*
		 * Cannot use __GFP_NOFAIL because we hold the group lock.
		 * Although the allocation for insertion may fail, it's not
		 * fatal as we have linear traversal to fall back on.
		 */
		int err = xa_insert(&sbi->s_mb_largest_free_orders[new],
				    grp->bb_group, grp, GFP_ATOMIC);
		if (err)
			mb_debug(sb, "insert group: %u to s_mb_largest_free_orders[%d] failed, err %d",
				 grp->bb_group, new, err);
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
			    void *buddy, void *bitmap, ext4_group_t group,
			    struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
	}
	mb_set_largest_free_order(sb, grp);
	mb_update_avg_fragment_size(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	atomic_inc(&sbi->s_mb_buddies_generated);
	atomic64_add(period, &sbi->s_mb_generation_time);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count)))
		mb_set_bits(buddy, 0, count);

	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
	       sizeof(*e4b->bd_info->bb_counters) *
	       (e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
			       e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
}

/* The buddy information is attached to the buddy cache inode for
 * convenience. The information regarding each group is loaded via
 * ext4_mb_load_buddy and comprises the block bitmap and the buddy
 * information, stored in the inode as
 *
 * {                          folio                          }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A folio can
 * contain blocks_per_folio (folio_size / blocksize) blocks.
 * So it can have information regarding groups_per_folio, which
 * is blocks_per_folio/2
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this folio; do not hold this lock when calling this routine!
 */
static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	unsigned int blocksize;
	int blocks_per_folio;
	int groups_per_folio;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	inode = folio->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_folio = folio_size(folio) / blocksize;
	WARN_ON_ONCE(!blocks_per_folio);
	groups_per_folio = DIV_ROUND_UP(blocks_per_folio, 2);

	mb_debug(sb, "init folio %lu\n", folio->index);

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_folio > 1) {
		i = sizeof(struct buffer_head *) * groups_per_folio;
		bh = kzalloc(i, gfp);
		if (bh == NULL)
			return -ENOMEM;
	} else
		bh = &bhs;

	/* read all groups the folio covers into the cache */
	first_group = EXT4_PG_TO_LBLK(inode, folio->index) / 2;
	for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo)
			continue;
		/*
		 * If the folio is uptodate then we came here after online
		 * resize which added some new uninitialized group info
		 * structs, so we must skip all initialized uptodate buddies
		 * on the folio, which may be currently in use by an
		 * allocating task.
		 */
		if (folio_test_uptodate(folio) &&
		    !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(sb, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_folio; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = EXT4_PG_TO_LBLK(inode, folio->index);
	for (i = 0; i < blocks_per_folio; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = folio_address(folio) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo) {
			err = -EFSCORRUPTED;
			goto out;
		}
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
				group, folio->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
			       (MB_NUM_ORDERS(sb)));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
				group, folio->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	folio_mark_uptodate(folio);

out:
	if (bh) {
		for (i = 0; i < groups_per_folio; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap folios. This makes sure other parallel init_group
 * on the same buddy folio doesn't happen while holding the buddy folio lock.
 * Return locked buddy and bitmap folios on the e4b struct. If buddy and bitmap
 * are on the same folio, e4b->bd_buddy_folio is NULL and the return value is 0.
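 *
 * For example (illustrative numbers): with 4K blocks and 16K folios, each
 * folio holds 4 blocks, i.e. 2 groups. Group 3's bitmap is logical block 6
 * and its buddy is block 7, so both land in folio index 1 and
 * bd_buddy_folio stays NULL.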
 */
static int ext4_mb_get_buddy_folio_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum;
	struct folio *folio;

	e4b->bd_buddy_folio = NULL;
	e4b->bd_bitmap_folio = NULL;

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = EXT4_LBLK_TO_PG(inode, block);
	folio = __filemap_get_folio(inode->i_mapping, pnum,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	BUG_ON(folio->mapping != inode->i_mapping);
	WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize);
	e4b->bd_bitmap_folio = folio;
	e4b->bd_bitmap = folio_address(folio) +
		offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block));

	block++;
	pnum = EXT4_LBLK_TO_PG(inode, block);
	if (folio_contains(folio, pnum)) {
		/* buddy and bitmap are on the same folio */
		return 0;
	}

	/* we need another folio for the buddy */
	folio = __filemap_get_folio(inode->i_mapping, pnum,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	BUG_ON(folio->mapping != inode->i_mapping);
	WARN_ON_ONCE(folio_size(folio) < sb->s_blocksize);
	e4b->bd_buddy_folio = folio;
	return 0;
}

static void ext4_mb_put_buddy_folio_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_folio) {
		folio_unlock(e4b->bd_bitmap_folio);
		folio_put(e4b->bd_bitmap_folio);
	}
	if (e4b->bd_buddy_folio) {
		folio_unlock(e4b->bd_buddy_folio);
		folio_put(e4b->bd_buddy_folio);
	}
}

/*
 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this folio; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct folio *folio;
	int ret = 0;

	might_sleep();
	mb_debug(sb, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	if (!this_grp)
		return -EFSCORRUPTED;

	/*
	 * This ensures that we don't reinit the buddy cache
	 * folio which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy folio to the page cache.
	 * The call to ext4_mb_get_buddy_folio_lock will mark the
	 * folio accessed.
	 */
	ret = ext4_mb_get_buddy_folio_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	folio = e4b.bd_bitmap_folio;
	ret = ext4_mb_init_cache(folio, NULL, gfp);
	if (ret)
		goto err;
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_folio == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same folio we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	folio = e4b.bd_buddy_folio;
	ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_folio_lock(&e4b);
	return ret;
}

/*
 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this folio; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int block;
	int pnum;
	struct folio *folio;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(sb, "load group %u\n", group);

	grp = ext4_get_group_info(sb, group);
	if (!grp)
		return -EFSCORRUPTED;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_folio = NULL;
	e4b->bd_bitmap_folio = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = EXT4_LBLK_TO_PG(inode, block);

	/* Avoid locking the folio in the fast path ... */
	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
	if (IS_ERR(folio) || !folio_test_uptodate(folio) || folio_test_locked(folio)) {
		/*
		 * folio_test_locked is employed to detect ongoing folio
		 * migrations, since concurrent migrations can lead to
		 * bitmap inconsistency. And if we are not uptodate that
		 * implies somebody just created the folio but is yet to
		 * initialize it. We can drop the folio reference and
		 * try to get the folio with lock in both cases to avoid
		 * concurrency.
1727 */ 1728 if (!IS_ERR(folio)) 1729 folio_put(folio); 1730 folio = __filemap_get_folio(inode->i_mapping, pnum, 1731 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1732 if (!IS_ERR(folio)) { 1733 if (WARN_RATELIMIT(folio->mapping != inode->i_mapping, 1734 "ext4: bitmap's mapping != inode->i_mapping\n")) { 1735 /* should never happen */ 1736 folio_unlock(folio); 1737 ret = -EINVAL; 1738 goto err; 1739 } 1740 if (!folio_test_uptodate(folio)) { 1741 ret = ext4_mb_init_cache(folio, NULL, gfp); 1742 if (ret) { 1743 folio_unlock(folio); 1744 goto err; 1745 } 1746 mb_cmp_bitmaps(e4b, folio_address(folio) + 1747 offset_in_folio(folio, 1748 EXT4_LBLK_TO_B(inode, block))); 1749 } 1750 folio_unlock(folio); 1751 } 1752 } 1753 if (IS_ERR(folio)) { 1754 ret = PTR_ERR(folio); 1755 goto err; 1756 } 1757 if (!folio_test_uptodate(folio)) { 1758 ret = -EIO; 1759 goto err; 1760 } 1761 1762 /* Folios marked accessed already */ 1763 e4b->bd_bitmap_folio = folio; 1764 e4b->bd_bitmap = folio_address(folio) + 1765 offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block)); 1766 1767 block++; 1768 pnum = EXT4_LBLK_TO_PG(inode, block); 1769 /* buddy and bitmap are on the same folio? */ 1770 if (folio_contains(folio, pnum)) { 1771 folio_get(folio); 1772 goto update_buddy; 1773 } 1774 1775 /* we need another folio for the buddy */ 1776 folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0); 1777 if (IS_ERR(folio) || !folio_test_uptodate(folio) || folio_test_locked(folio)) { 1778 if (!IS_ERR(folio)) 1779 folio_put(folio); 1780 folio = __filemap_get_folio(inode->i_mapping, pnum, 1781 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1782 if (!IS_ERR(folio)) { 1783 if (WARN_RATELIMIT(folio->mapping != inode->i_mapping, 1784 "ext4: buddy bitmap's mapping != inode->i_mapping\n")) { 1785 /* should never happen */ 1786 folio_unlock(folio); 1787 ret = -EINVAL; 1788 goto err; 1789 } 1790 if (!folio_test_uptodate(folio)) { 1791 ret = ext4_mb_init_cache(folio, e4b->bd_bitmap, 1792 gfp); 1793 if (ret) { 1794 folio_unlock(folio); 1795 goto err; 1796 } 1797 } 1798 folio_unlock(folio); 1799 } 1800 } 1801 if (IS_ERR(folio)) { 1802 ret = PTR_ERR(folio); 1803 goto err; 1804 } 1805 if (!folio_test_uptodate(folio)) { 1806 ret = -EIO; 1807 goto err; 1808 } 1809 1810 update_buddy: 1811 /* Folios marked accessed already */ 1812 e4b->bd_buddy_folio = folio; 1813 e4b->bd_buddy = folio_address(folio) + 1814 offset_in_folio(folio, EXT4_LBLK_TO_B(inode, block)); 1815 1816 return 0; 1817 1818 err: 1819 if (!IS_ERR_OR_NULL(folio)) 1820 folio_put(folio); 1821 if (e4b->bd_bitmap_folio) 1822 folio_put(e4b->bd_bitmap_folio); 1823 1824 e4b->bd_buddy = NULL; 1825 e4b->bd_bitmap = NULL; 1826 return ret; 1827 } 1828 1829 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1830 struct ext4_buddy *e4b) 1831 { 1832 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1833 } 1834 1835 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1836 { 1837 if (e4b->bd_bitmap_folio) 1838 folio_put(e4b->bd_bitmap_folio); 1839 if (e4b->bd_buddy_folio) 1840 folio_put(e4b->bd_buddy_folio); 1841 } 1842 1843 1844 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1845 { 1846 int order = 1, max; 1847 void *bb; 1848 1849 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1850 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1851 1852 while (order <= e4b->bd_blkbits + 1) { 1853 bb = mb_find_buddy(e4b, order, &max); 1854 if (!mb_test_bit(block >> order, bb)) { 1855 /* this block is part of buddy of order 'order' */ 1856 return order; 1857 } 
1858 order++; 1859 } 1860 return 0; 1861 } 1862 1863 static void mb_clear_bits(void *bm, int cur, int len) 1864 { 1865 __u32 *addr; 1866 1867 len = cur + len; 1868 while (cur < len) { 1869 if ((cur & 31) == 0 && (len - cur) >= 32) { 1870 /* fast path: clear whole word at once */ 1871 addr = bm + (cur >> 3); 1872 *addr = 0; 1873 cur += 32; 1874 continue; 1875 } 1876 mb_clear_bit(cur, bm); 1877 cur++; 1878 } 1879 } 1880 1881 /* clear bits in given range 1882 * will return first found zero bit if any, -1 otherwise 1883 */ 1884 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1885 { 1886 __u32 *addr; 1887 int zero_bit = -1; 1888 1889 len = cur + len; 1890 while (cur < len) { 1891 if ((cur & 31) == 0 && (len - cur) >= 32) { 1892 /* fast path: clear whole word at once */ 1893 addr = bm + (cur >> 3); 1894 if (*addr != (__u32)(-1) && zero_bit == -1) 1895 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1896 *addr = 0; 1897 cur += 32; 1898 continue; 1899 } 1900 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1901 zero_bit = cur; 1902 cur++; 1903 } 1904 1905 return zero_bit; 1906 } 1907 1908 void mb_set_bits(void *bm, int cur, int len) 1909 { 1910 __u32 *addr; 1911 1912 len = cur + len; 1913 while (cur < len) { 1914 if ((cur & 31) == 0 && (len - cur) >= 32) { 1915 /* fast path: set whole word at once */ 1916 addr = bm + (cur >> 3); 1917 *addr = 0xffffffff; 1918 cur += 32; 1919 continue; 1920 } 1921 mb_set_bit(cur, bm); 1922 cur++; 1923 } 1924 } 1925 1926 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1927 { 1928 if (mb_test_bit(*bit + side, bitmap)) { 1929 mb_clear_bit(*bit, bitmap); 1930 (*bit) -= side; 1931 return 1; 1932 } 1933 else { 1934 (*bit) += side; 1935 mb_set_bit(*bit, bitmap); 1936 return -1; 1937 } 1938 } 1939 1940 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1941 { 1942 int max; 1943 int order = 1; 1944 void *buddy = mb_find_buddy(e4b, order, &max); 1945 1946 while (buddy) { 1947 void *buddy2; 1948 1949 /* Bits in range [first; last] are known to be set since 1950 * corresponding blocks were allocated. Bits in range 1951 * (first; last) will stay set because they form buddies on 1952 * upper layer. We just deal with borders if they don't 1953 * align with upper layer and then go up. 1954 * Releasing entire group is all about clearing 1955 * single bit of highest order buddy. 1956 */ 1957 1958 /* Example: 1959 * --------------------------------- 1960 * | 1 | 1 | 1 | 1 | 1961 * --------------------------------- 1962 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1963 * --------------------------------- 1964 * 0 1 2 3 4 5 6 7 1965 * \_____________________/ 1966 * 1967 * Neither [1] nor [6] is aligned to above layer. 1968 * Left neighbour [0] is free, so mark it busy, 1969 * decrease bb_counters and extend range to 1970 * [0; 6] 1971 * Right neighbour [7] is busy. It can't be coalesced with [6], so 1972 * mark [6] free, increase bb_counters and shrink range to 1973 * [0; 5]. 1974 * Then shift range to [0; 2], go up and do the same.
1975 */ 1976 1977 1978 if (first & 1) 1979 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1980 if (!(last & 1)) 1981 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1982 if (first > last) 1983 break; 1984 order++; 1985 1986 buddy2 = mb_find_buddy(e4b, order, &max); 1987 if (!buddy2) { 1988 mb_clear_bits(buddy, first, last - first + 1); 1989 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1990 break; 1991 } 1992 first >>= 1; 1993 last >>= 1; 1994 buddy = buddy2; 1995 } 1996 } 1997 1998 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1999 int first, int count) 2000 { 2001 int left_is_free = 0; 2002 int right_is_free = 0; 2003 int block; 2004 int last = first + count - 1; 2005 struct super_block *sb = e4b->bd_sb; 2006 2007 if (WARN_ON(count == 0)) 2008 return; 2009 BUG_ON(last >= (sb->s_blocksize << 3)); 2010 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 2011 /* Don't bother if the block group is corrupt. */ 2012 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 2013 return; 2014 2015 mb_check_buddy(e4b); 2016 mb_free_blocks_double(inode, e4b, first, count); 2017 2018 /* access memory sequentially: check left neighbour, 2019 * clear range and then check right neighbour 2020 */ 2021 if (first != 0) 2022 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 2023 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 2024 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 2025 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 2026 2027 if (unlikely(block != -1)) { 2028 struct ext4_sb_info *sbi = EXT4_SB(sb); 2029 ext4_fsblk_t blocknr; 2030 2031 /* 2032 * Fastcommit replay can free already freed blocks which 2033 * corrupts allocation info. Regenerate it. 2034 */ 2035 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 2036 mb_regenerate_buddy(e4b); 2037 goto check; 2038 } 2039 2040 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 2041 blocknr += EXT4_C2B(sbi, block); 2042 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2043 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2044 ext4_grp_locked_error(sb, e4b->bd_group, 2045 inode ? inode->i_ino : 0, blocknr, 2046 "freeing already freed block (bit %u); block bitmap corrupt.", 2047 block); 2048 return; 2049 } 2050 2051 this_cpu_inc(discard_pa_seq); 2052 e4b->bd_info->bb_free += count; 2053 if (first < e4b->bd_info->bb_first_free) 2054 e4b->bd_info->bb_first_free = first; 2055 2056 /* let's maintain fragments counter */ 2057 if (left_is_free && right_is_free) 2058 e4b->bd_info->bb_fragments--; 2059 else if (!left_is_free && !right_is_free) 2060 e4b->bd_info->bb_fragments++; 2061 2062 /* buddy[0] == bd_bitmap is a special case, so handle 2063 * it right away and let mb_buddy_mark_free stay free of 2064 * zero order checks. 2065 * Check if neighbours are to be coalesced, 2066 * adjust bitmap bb_counters and borders appropriately. 2067 */ 2068 if (first & 1) { 2069 first += !left_is_free; 2070 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 2071 } 2072 if (!(last & 1)) { 2073 last -= !right_is_free; 2074 e4b->bd_info->bb_counters[0] += right_is_free ?
-1 : 1; 2075 } 2076 2077 if (first <= last) 2078 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 2079 2080 mb_set_largest_free_order(sb, e4b->bd_info); 2081 mb_update_avg_fragment_size(sb, e4b->bd_info); 2082 check: 2083 mb_check_buddy(e4b); 2084 } 2085 2086 static int mb_find_extent(struct ext4_buddy *e4b, int block, 2087 int needed, struct ext4_free_extent *ex) 2088 { 2089 int max, order, next; 2090 void *buddy; 2091 2092 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2093 BUG_ON(ex == NULL); 2094 2095 buddy = mb_find_buddy(e4b, 0, &max); 2096 BUG_ON(buddy == NULL); 2097 BUG_ON(block >= max); 2098 if (mb_test_bit(block, buddy)) { 2099 ex->fe_len = 0; 2100 ex->fe_start = 0; 2101 ex->fe_group = 0; 2102 return 0; 2103 } 2104 2105 /* find actual order */ 2106 order = mb_find_order_for_block(e4b, block); 2107 2108 ex->fe_len = (1 << order) - (block & ((1 << order) - 1)); 2109 ex->fe_start = block; 2110 ex->fe_group = e4b->bd_group; 2111 2112 block = block >> order; 2113 2114 while (needed > ex->fe_len && 2115 mb_find_buddy(e4b, order, &max)) { 2116 2117 if (block + 1 >= max) 2118 break; 2119 2120 next = (block + 1) * (1 << order); 2121 if (mb_test_bit(next, e4b->bd_bitmap)) 2122 break; 2123 2124 order = mb_find_order_for_block(e4b, next); 2125 2126 block = next >> order; 2127 ex->fe_len += 1 << order; 2128 } 2129 2130 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 2131 /* Should never happen! (but apparently sometimes does?!?) */ 2132 WARN_ON(1); 2133 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 2134 "corruption or bug in mb_find_extent " 2135 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 2136 block, order, needed, ex->fe_group, ex->fe_start, 2137 ex->fe_len, ex->fe_logical); 2138 ex->fe_len = 0; 2139 ex->fe_start = 0; 2140 ex->fe_group = 0; 2141 } 2142 return ex->fe_len; 2143 } 2144 2145 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 2146 { 2147 int ord; 2148 int mlen = 0; 2149 int max = 0; 2150 int start = ex->fe_start; 2151 int len = ex->fe_len; 2152 unsigned ret = 0; 2153 int len0 = len; 2154 void *buddy; 2155 int ord_start, ord_end; 2156 2157 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 2158 BUG_ON(e4b->bd_group != ex->fe_group); 2159 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2160 mb_check_buddy(e4b); 2161 mb_mark_used_double(e4b, start, len); 2162 2163 this_cpu_inc(discard_pa_seq); 2164 e4b->bd_info->bb_free -= len; 2165 if (e4b->bd_info->bb_first_free == start) 2166 e4b->bd_info->bb_first_free += len; 2167 2168 /* let's maintain fragments counter */ 2169 if (start != 0) 2170 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 2171 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 2172 max = !mb_test_bit(start + len, e4b->bd_bitmap); 2173 if (mlen && max) 2174 e4b->bd_info->bb_fragments++; 2175 else if (!mlen && !max) 2176 e4b->bd_info->bb_fragments--; 2177 2178 /* let's maintain buddy itself */ 2179 while (len) { 2180 ord = mb_find_order_for_block(e4b, start); 2181 2182 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 2183 /* the whole chunk may be allocated at once! 
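 * (For illustration, with hypothetical numbers: start == 48, len == 20,
 * and the free buddy containing block 48 has order 4. 48 is 16-aligned
 * and len >= 16, so the single bit 48 >> 4 == 3 is set in the order-4
 * buddy, bb_counters[4] is decremented, and the loop continues with
 * start == 64, len == 4.)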
*/ 2184 mlen = 1 << ord; 2185 buddy = mb_find_buddy(e4b, ord, &max); 2186 BUG_ON((start >> ord) >= max); 2187 mb_set_bit(start >> ord, buddy); 2188 e4b->bd_info->bb_counters[ord]--; 2189 start += mlen; 2190 len -= mlen; 2191 BUG_ON(len < 0); 2192 continue; 2193 } 2194 2195 /* store for history */ 2196 if (ret == 0) 2197 ret = len | (ord << 16); 2198 2199 BUG_ON(ord <= 0); 2200 buddy = mb_find_buddy(e4b, ord, &max); 2201 mb_set_bit(start >> ord, buddy); 2202 e4b->bd_info->bb_counters[ord]--; 2203 2204 ord_start = (start >> ord) << ord; 2205 ord_end = ord_start + (1 << ord); 2206 /* first chunk */ 2207 if (start > ord_start) 2208 ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, 2209 ord_start, start - ord_start, 2210 e4b->bd_info); 2211 2212 /* last chunk */ 2213 if (start + len < ord_end) { 2214 ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, 2215 start + len, 2216 ord_end - (start + len), 2217 e4b->bd_info); 2218 break; 2219 } 2220 len = start + len - ord_end; 2221 start = ord_end; 2222 } 2223 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 2224 2225 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 2226 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 2227 mb_check_buddy(e4b); 2228 2229 return ret; 2230 } 2231 2232 /* 2233 * Must be called under group lock! 2234 */ 2235 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 2236 struct ext4_buddy *e4b) 2237 { 2238 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2239 int ret; 2240 2241 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 2242 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2243 2244 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2245 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2246 ret = mb_mark_used(e4b, &ac->ac_b_ex); 2247 2248 /* preallocation can change ac_b_ex, thus we store actually 2249 * allocated blocks for history */ 2250 ac->ac_f_ex = ac->ac_b_ex; 2251 2252 ac->ac_status = AC_STATUS_FOUND; 2253 ac->ac_tail = ret & 0xffff; 2254 ac->ac_buddy = ret >> 16; 2255 2256 /* 2257 * take the folio reference. We want the folio to be pinned 2258 * so that we don't get an ext4_mb_init_cache() call for this 2259 * group until we update the bitmap. That would mean we 2260 * double allocate blocks. The reference is dropped 2261 * in ext4_mb_release_context 2262 */ 2263 ac->ac_bitmap_folio = e4b->bd_bitmap_folio; 2264 folio_get(ac->ac_bitmap_folio); 2265 ac->ac_buddy_folio = e4b->bd_buddy_folio; 2266 folio_get(ac->ac_buddy_folio); 2267 /* store last allocated for subsequent stream allocation */ 2268 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2269 int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals; 2270 2271 WRITE_ONCE(sbi->s_mb_last_groups[hash], ac->ac_f_ex.fe_group); 2272 } 2273 2274 /* 2275 * As we've just preallocated more space than 2276 * the user originally requested, we store the allocated 2277 * space in a special descriptor.
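 *
 * E.g. (illustrative numbers): if the caller asked for 8 clusters
 * (ac_o_ex.fe_len == 8) but normalization made us take 64
 * (ac_b_ex.fe_len == 64), the 56 unused clusters are parked in a new
 * preallocation and served to later requests.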
2278 */ 2279 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 2280 ext4_mb_new_preallocation(ac); 2281 2282 } 2283 2284 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2285 struct ext4_buddy *e4b, 2286 int finish_group) 2287 { 2288 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2289 struct ext4_free_extent *bex = &ac->ac_b_ex; 2290 struct ext4_free_extent *gex = &ac->ac_g_ex; 2291 2292 if (ac->ac_status == AC_STATUS_FOUND) 2293 return; 2294 /* 2295 * We don't want to scan for a whole year 2296 */ 2297 if (ac->ac_found > sbi->s_mb_max_to_scan && 2298 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2299 ac->ac_status = AC_STATUS_BREAK; 2300 return; 2301 } 2302 2303 /* 2304 * Haven't found good chunk so far, let's continue 2305 */ 2306 if (bex->fe_len < gex->fe_len) 2307 return; 2308 2309 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2310 ext4_mb_use_best_found(ac, e4b); 2311 } 2312 2313 /* 2314 * The routine checks whether the found extent is good enough. If it is, 2315 * then the extent gets marked used and the flag is set in the context 2316 * to stop scanning. Otherwise, the extent is compared with the 2317 * previously found extent and if the new one is better, then it's stored 2318 * in the context. Later, the best found extent will be used, if 2319 * mballoc can't find a good enough extent. 2320 * 2321 * The algorithm used is roughly as follows: 2322 * 2323 * * If the free extent found is exactly as big as the goal, then 2324 * stop the scan and use it immediately 2325 * 2326 * * If the free extent found is smaller than the goal, then keep retrying 2327 * up to a max of sbi->s_mb_max_to_scan times (default 200). After 2328 * that stop scanning and use whatever we have. 2329 * 2330 * * If the free extent found is bigger than the goal, then keep retrying 2331 * up to a max of sbi->s_mb_min_to_scan times (default 10) before 2332 * stopping the scan and using the extent. 2333 * 2334 * 2335 * FIXME: real allocation policy is to be designed yet!
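 *
 * A worked example of these rules (illustrative numbers, goal of 64
 * clusters): a found extent of exactly 64 is taken immediately; free
 * extents of 32 and then 48 keep 48 as the running best, since neither
 * satisfies the goal; extents of 100 and then 80 keep 80, since any
 * extent >= the goal that is closer to it wastes less space.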
2336 */ 2337 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2338 struct ext4_free_extent *ex, 2339 struct ext4_buddy *e4b) 2340 { 2341 struct ext4_free_extent *bex = &ac->ac_b_ex; 2342 struct ext4_free_extent *gex = &ac->ac_g_ex; 2343 2344 BUG_ON(ex->fe_len <= 0); 2345 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2346 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2347 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2348 2349 ac->ac_found++; 2350 ac->ac_cX_found[ac->ac_criteria]++; 2351 2352 /* 2353 * The special case - take what you catch first 2354 */ 2355 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2356 *bex = *ex; 2357 ext4_mb_use_best_found(ac, e4b); 2358 return; 2359 } 2360 2361 /* 2362 * Let's check whether the chunk is good enough 2363 */ 2364 if (ex->fe_len == gex->fe_len) { 2365 *bex = *ex; 2366 ext4_mb_use_best_found(ac, e4b); 2367 return; 2368 } 2369 2370 /* 2371 * If this is the first found extent, just store it in the context 2372 */ 2373 if (bex->fe_len == 0) { 2374 *bex = *ex; 2375 return; 2376 } 2377 2378 /* 2379 * If the new found extent is better, store it in the context 2380 */ 2381 if (bex->fe_len < gex->fe_len) { 2382 /* if the request isn't satisfied, any found extent 2383 * larger than the previous best one is better */ 2384 if (ex->fe_len > bex->fe_len) 2385 *bex = *ex; 2386 } else if (ex->fe_len > gex->fe_len) { 2387 /* if the request is satisfied, then we try to find 2388 * an extent that still satisfies the request, but is 2389 * smaller than the previous one */ 2390 if (ex->fe_len < bex->fe_len) 2391 *bex = *ex; 2392 } 2393 2394 ext4_mb_check_limits(ac, e4b, 0); 2395 } 2396 2397 static noinline_for_stack 2398 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2399 struct ext4_buddy *e4b) 2400 { 2401 struct ext4_free_extent ex = ac->ac_b_ex; 2402 ext4_group_t group = ex.fe_group; 2403 int max; 2404 int err; 2405 2406 BUG_ON(ex.fe_len <= 0); 2407 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2408 if (err) 2409 return; 2410 2411 ext4_lock_group(ac->ac_sb, group); 2412 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 2413 goto out; 2414 2415 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2416 2417 if (max > 0) { 2418 ac->ac_b_ex = ex; 2419 ext4_mb_use_best_found(ac, e4b); 2420 } 2421 2422 out: 2423 ext4_unlock_group(ac->ac_sb, group); 2424 ext4_mb_unload_buddy(e4b); 2425 } 2426 2427 static noinline_for_stack 2428 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2429 struct ext4_buddy *e4b) 2430 { 2431 ext4_group_t group = ac->ac_g_ex.fe_group; 2432 int max; 2433 int err; 2434 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2435 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2436 struct ext4_free_extent ex; 2437 2438 if (!grp) 2439 return -EFSCORRUPTED; 2440 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2441 return 0; 2442 if (grp->bb_free == 0) 2443 return 0; 2444 2445 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2446 if (err) 2447 return err; 2448 2449 ext4_lock_group(ac->ac_sb, group); 2450 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 2451 goto out; 2452 2453 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2454 ac->ac_g_ex.fe_len, &ex); 2455 ex.fe_logical = 0xDEADFA11; /* debug value */ 2456 2457 if (max >= ac->ac_g_ex.fe_len && 2458 ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) { 2459 ext4_fsblk_t start; 2460 2461 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); 2462 /* use do_div to get remainder (would be
64-bit modulo) */ 2463 if (do_div(start, sbi->s_stripe) == 0) { 2464 ac->ac_found++; 2465 ac->ac_b_ex = ex; 2466 ext4_mb_use_best_found(ac, e4b); 2467 } 2468 } else if (max >= ac->ac_g_ex.fe_len) { 2469 BUG_ON(ex.fe_len <= 0); 2470 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2471 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2472 ac->ac_found++; 2473 ac->ac_b_ex = ex; 2474 ext4_mb_use_best_found(ac, e4b); 2475 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2476 /* Sometimes, caller may want to merge even small 2477 * number of blocks to an existing extent */ 2478 BUG_ON(ex.fe_len <= 0); 2479 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2480 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2481 ac->ac_found++; 2482 ac->ac_b_ex = ex; 2483 ext4_mb_use_best_found(ac, e4b); 2484 } 2485 out: 2486 ext4_unlock_group(ac->ac_sb, group); 2487 ext4_mb_unload_buddy(e4b); 2488 2489 return 0; 2490 } 2491 2492 /* 2493 * The routine scans buddy structures (not bitmap!) from given order 2494 * to max order and tries to find big enough chunk to satisfy the req 2495 */ 2496 static noinline_for_stack 2497 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2498 struct ext4_buddy *e4b) 2499 { 2500 struct super_block *sb = ac->ac_sb; 2501 struct ext4_group_info *grp = e4b->bd_info; 2502 void *buddy; 2503 int i; 2504 int k; 2505 int max; 2506 2507 BUG_ON(ac->ac_2order <= 0); 2508 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2509 if (grp->bb_counters[i] == 0) 2510 continue; 2511 2512 buddy = mb_find_buddy(e4b, i, &max); 2513 if (WARN_RATELIMIT(buddy == NULL, 2514 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 2515 continue; 2516 2517 k = mb_find_next_zero_bit(buddy, max, 0); 2518 if (k >= max) { 2519 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2520 e4b->bd_group, 2521 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2522 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2523 "%d free clusters of order %d. But found 0", 2524 grp->bb_counters[i], i); 2525 break; 2526 } 2527 ac->ac_found++; 2528 ac->ac_cX_found[ac->ac_criteria]++; 2529 2530 ac->ac_b_ex.fe_len = 1 << i; 2531 ac->ac_b_ex.fe_start = k << i; 2532 ac->ac_b_ex.fe_group = e4b->bd_group; 2533 2534 ext4_mb_use_best_found(ac, e4b); 2535 2536 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2537 2538 if (EXT4_SB(sb)->s_mb_stats) 2539 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2540 2541 break; 2542 } 2543 } 2544 2545 /* 2546 * The routine scans the group and measures all found extents. 2547 * In order to optimize scanning, caller must pass number of 2548 * free blocks in the group, so the routine can know upper limit. 
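 *
 * For the inexpensive criteria the scan also skips free runs that are
 * already too short. E.g. (illustrative): with a goal of 16 clusters, a
 * free run of 4 bits found at position i is skipped by advancing i to
 * the next set bit instead of calling mb_find_extent() on it.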
2549 */ 2550 static noinline_for_stack 2551 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2552 struct ext4_buddy *e4b) 2553 { 2554 struct super_block *sb = ac->ac_sb; 2555 void *bitmap = e4b->bd_bitmap; 2556 struct ext4_free_extent ex; 2557 int i, j, freelen; 2558 int free; 2559 2560 free = e4b->bd_info->bb_free; 2561 if (WARN_ON(free <= 0)) 2562 return; 2563 2564 i = e4b->bd_info->bb_first_free; 2565 2566 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2567 i = mb_find_next_zero_bit(bitmap, 2568 EXT4_CLUSTERS_PER_GROUP(sb), i); 2569 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 2570 /* 2571 * If we have a corrupt bitmap, we won't find any 2572 * free blocks even though group info says we 2573 * have free blocks 2574 */ 2575 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2576 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2577 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2578 "%d free clusters as per " 2579 "group info. But bitmap says 0", 2580 free); 2581 break; 2582 } 2583 2584 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { 2585 /* 2586 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are 2587 * sure that this group will have a large enough 2588 * contiguous free extent, so skip over the smaller free 2589 * extents 2590 */ 2591 j = mb_find_next_bit(bitmap, 2592 EXT4_CLUSTERS_PER_GROUP(sb), i); 2593 freelen = j - i; 2594 2595 if (freelen < ac->ac_g_ex.fe_len) { 2596 i = j; 2597 free -= freelen; 2598 continue; 2599 } 2600 } 2601 2602 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2603 if (WARN_ON(ex.fe_len <= 0)) 2604 break; 2605 if (free < ex.fe_len) { 2606 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2607 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2608 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2609 "%d free clusters as per " 2610 "group info. But got %d blocks", 2611 free, ex.fe_len); 2612 /* 2613 * The number of free blocks differs. This mostly 2614 * indicates that the bitmap is corrupt. So exit 2615 * without claiming the space.
2616 */ 2617 break; 2618 } 2619 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2620 ext4_mb_measure_extent(ac, &ex, e4b); 2621 2622 i += ex.fe_len; 2623 free -= ex.fe_len; 2624 } 2625 2626 ext4_mb_check_limits(ac, e4b, 1); 2627 } 2628 2629 /* 2630 * This is a special case for storage devices like RAID5; 2631 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2632 */ 2633 static noinline_for_stack 2634 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2635 struct ext4_buddy *e4b) 2636 { 2637 struct super_block *sb = ac->ac_sb; 2638 struct ext4_sb_info *sbi = EXT4_SB(sb); 2639 void *bitmap = e4b->bd_bitmap; 2640 struct ext4_free_extent ex; 2641 ext4_fsblk_t first_group_block; 2642 ext4_fsblk_t a; 2643 ext4_grpblk_t i, stripe; 2644 int max; 2645 2646 BUG_ON(sbi->s_stripe == 0); 2647 2648 /* find first stripe-aligned block in group */ 2649 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2650 2651 a = first_group_block + sbi->s_stripe - 1; 2652 do_div(a, sbi->s_stripe); 2653 i = (a * sbi->s_stripe) - first_group_block; 2654 2655 stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe); 2656 i = EXT4_B2C(sbi, i); 2657 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2658 if (!mb_test_bit(i, bitmap)) { 2659 max = mb_find_extent(e4b, i, stripe, &ex); 2660 if (max >= stripe) { 2661 ac->ac_found++; 2662 ac->ac_cX_found[ac->ac_criteria]++; 2663 ex.fe_logical = 0xDEADF00D; /* debug value */ 2664 ac->ac_b_ex = ex; 2665 ext4_mb_use_best_found(ac, e4b); 2666 break; 2667 } 2668 } 2669 i += stripe; 2670 } 2671 } 2672 2673 static void __ext4_mb_scan_group(struct ext4_allocation_context *ac) 2674 { 2675 bool is_stripe_aligned; 2676 struct ext4_sb_info *sbi; 2677 enum criteria cr = ac->ac_criteria; 2678 2679 ac->ac_groups_scanned++; 2680 if (cr == CR_POWER2_ALIGNED) 2681 return ext4_mb_simple_scan_group(ac, ac->ac_e4b); 2682 2683 sbi = EXT4_SB(ac->ac_sb); 2684 is_stripe_aligned = false; 2685 if ((sbi->s_stripe >= sbi->s_cluster_ratio) && 2686 !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe))) 2687 is_stripe_aligned = true; 2688 2689 if ((cr == CR_GOAL_LEN_FAST || cr == CR_BEST_AVAIL_LEN) && 2690 is_stripe_aligned) 2691 ext4_mb_scan_aligned(ac, ac->ac_e4b); 2692 2693 if (ac->ac_status == AC_STATUS_CONTINUE) 2694 ext4_mb_complex_scan_group(ac, ac->ac_e4b); 2695 } 2696 2697 /* 2698 * This is also called BEFORE we load the buddy bitmap. 2699 * Returns either 1 or 0, indicating whether the group is suitable 2700 * for the allocation or not.
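 *
 * E.g. (illustrative numbers): under CR_GOAL_LEN_FAST a group with
 * bb_free == 1024 and bb_fragments == 8 has an average fragment of 128
 * clusters and so passes for any goal up to 128 clusters; under
 * CR_GOAL_LEN_SLOW only bb_free >= goal is required.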
2701 */ 2702 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2703 ext4_group_t group, enum criteria cr) 2704 { 2705 ext4_grpblk_t free, fragments; 2706 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2707 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2708 2709 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS); 2710 2711 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2712 return false; 2713 2714 free = grp->bb_free; 2715 if (free == 0) 2716 return false; 2717 2718 fragments = grp->bb_fragments; 2719 if (fragments == 0) 2720 return false; 2721 2722 switch (cr) { 2723 case CR_POWER2_ALIGNED: 2724 BUG_ON(ac->ac_2order == 0); 2725 2726 /* Avoid using the first bg of a flexgroup for data files */ 2727 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2728 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2729 ((group % flex_size) == 0)) 2730 return false; 2731 2732 if (free < ac->ac_g_ex.fe_len) 2733 return false; 2734 2735 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 2736 return true; 2737 2738 if (grp->bb_largest_free_order < ac->ac_2order) 2739 return false; 2740 2741 return true; 2742 case CR_GOAL_LEN_FAST: 2743 case CR_BEST_AVAIL_LEN: 2744 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2745 return true; 2746 break; 2747 case CR_GOAL_LEN_SLOW: 2748 if (free >= ac->ac_g_ex.fe_len) 2749 return true; 2750 break; 2751 case CR_ANY_FREE: 2752 return true; 2753 default: 2754 BUG(); 2755 } 2756 2757 return false; 2758 } 2759 2760 /* 2761 * This could return negative error code if something goes wrong 2762 * during ext4_mb_init_group(). This should not be called with 2763 * ext4_lock_group() held. 2764 * 2765 * Note: because we are conditionally operating with the group lock in 2766 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2767 * function using __acquire and __release. This means we need to be 2768 * super careful before messing with the error path handling via "goto 2769 * out"! 2770 */ 2771 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2772 ext4_group_t group, enum criteria cr) 2773 { 2774 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2775 struct super_block *sb = ac->ac_sb; 2776 struct ext4_sb_info *sbi = EXT4_SB(sb); 2777 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2778 ext4_grpblk_t free; 2779 int ret = 0; 2780 2781 if (!grp) 2782 return -EFSCORRUPTED; 2783 if (sbi->s_mb_stats) 2784 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2785 if (should_lock) { 2786 ext4_lock_group(sb, group); 2787 __release(ext4_group_lock_ptr(sb, group)); 2788 } 2789 free = grp->bb_free; 2790 if (free == 0) 2791 goto out; 2792 /* 2793 * In all criteria except CR_ANY_FREE we try to avoid groups that 2794 * can't possibly satisfy the full goal request due to insufficient 2795 * free blocks. 2796 */ 2797 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) 2798 goto out; 2799 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2800 goto out; 2801 if (should_lock) { 2802 __acquire(ext4_group_lock_ptr(sb, group)); 2803 ext4_unlock_group(sb, group); 2804 } 2805 2806 /* We only do this if the grp has never been initialized */ 2807 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2808 struct ext4_group_desc *gdp = 2809 ext4_get_group_desc(sb, group, NULL); 2810 int ret; 2811 2812 /* 2813 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic 2814 * search to find large good chunks almost for free. If buddy 2815 * data is not ready, then this optimization makes no sense.
But 2816 * we never skip the first block group in a flex_bg, since this 2817 * gets used for metadata block allocation, and we want to make 2818 * sure we locate metadata blocks in the first block group in 2819 * the flex_bg if possible. 2820 */ 2821 if (!ext4_mb_cr_expensive(cr) && 2822 (!sbi->s_log_groups_per_flex || 2823 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2824 !(ext4_has_group_desc_csum(sb) && 2825 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2826 return 0; 2827 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 2828 if (ret) 2829 return ret; 2830 } 2831 2832 if (should_lock) { 2833 ext4_lock_group(sb, group); 2834 __release(ext4_group_lock_ptr(sb, group)); 2835 } 2836 ret = ext4_mb_good_group(ac, group, cr); 2837 out: 2838 if (should_lock) { 2839 __acquire(ext4_group_lock_ptr(sb, group)); 2840 ext4_unlock_group(sb, group); 2841 } 2842 return ret; 2843 } 2844 2845 /* 2846 * Start prefetching @nr block bitmaps starting at @group. 2847 * Return the next group which needs to be prefetched. 2848 */ 2849 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2850 unsigned int nr, int *cnt) 2851 { 2852 ext4_group_t ngroups = ext4_get_groups_count(sb); 2853 struct buffer_head *bh; 2854 struct blk_plug plug; 2855 2856 blk_start_plug(&plug); 2857 while (nr-- > 0) { 2858 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2859 NULL); 2860 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2861 2862 /* 2863 * Prefetch block groups with free blocks; but don't 2864 * bother if it is marked uninitialized on disk, since 2865 * it won't require I/O to read. Also only try to 2866 * prefetch once, so we avoid the getblk() call, which can 2867 * be expensive. 2868 */ 2869 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2870 EXT4_MB_GRP_NEED_INIT(grp) && 2871 ext4_free_group_clusters(sb, gdp) > 0 ) { 2872 bh = ext4_read_block_bitmap_nowait(sb, group, true); 2873 if (bh && !IS_ERR(bh)) { 2874 if (!buffer_uptodate(bh) && cnt) 2875 (*cnt)++; 2876 brelse(bh); 2877 } 2878 } 2879 if (++group >= ngroups) 2880 group = 0; 2881 } 2882 blk_finish_plug(&plug); 2883 return group; 2884 } 2885 2886 /* 2887 * Batch reads of the block allocation bitmaps to get 2888 * multiple READs in flight; limit prefetching at inexpensive 2889 * CR, otherwise mballoc can spend a lot of time loading 2890 * imperfect groups 2891 */ 2892 static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac, 2893 ext4_group_t group) 2894 { 2895 struct ext4_sb_info *sbi; 2896 2897 if (ac->ac_prefetch_grp != group) 2898 return; 2899 2900 sbi = EXT4_SB(ac->ac_sb); 2901 if (ext4_mb_cr_expensive(ac->ac_criteria) || 2902 ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) { 2903 unsigned int nr = sbi->s_mb_prefetch; 2904 2905 if (ext4_has_feature_flex_bg(ac->ac_sb)) { 2906 nr = 1 << sbi->s_log_groups_per_flex; 2907 nr -= group & (nr - 1); 2908 nr = umin(nr, sbi->s_mb_prefetch); 2909 } 2910 2911 ac->ac_prefetch_nr = nr; 2912 ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr, 2913 &ac->ac_prefetch_ios); 2914 } 2915 } 2916 2917 /* 2918 * Prefetching reads the block bitmap into the buffer cache; but we 2919 * need to make sure that the buddy bitmap in the page cache has been 2920 * initialized. Note that ext4_mb_init_group() will block if the I/O 2921 * is not yet completed, or indeed if ext4_mb_prefetch did not 2922 * initiate the I/O at all.
2923 * 2924 * TODO: We should actually kick off the buddy bitmap setup in a work 2925 * queue when the buffer I/O is completed, so that we don't block 2926 * waiting for the block allocation bitmap read to finish when 2927 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 2928 */ 2929 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2930 unsigned int nr) 2931 { 2932 struct ext4_group_desc *gdp; 2933 struct ext4_group_info *grp; 2934 2935 while (nr-- > 0) { 2936 if (!group) 2937 group = ext4_get_groups_count(sb); 2938 group--; 2939 gdp = ext4_get_group_desc(sb, group, NULL); 2940 grp = ext4_get_group_info(sb, group); 2941 2942 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && 2943 ext4_free_group_clusters(sb, gdp) > 0) { 2944 if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2945 break; 2946 } 2947 } 2948 } 2949 2950 static int ext4_mb_scan_group(struct ext4_allocation_context *ac, 2951 ext4_group_t group) 2952 { 2953 int ret; 2954 struct super_block *sb = ac->ac_sb; 2955 enum criteria cr = ac->ac_criteria; 2956 2957 ext4_mb_might_prefetch(ac, group); 2958 2959 /* prevent unnecessary buddy loading. */ 2960 if (cr < CR_ANY_FREE && spin_is_locked(ext4_group_lock_ptr(sb, group))) 2961 return 0; 2962 2963 /* This now checks without needing the buddy folio */ 2964 ret = ext4_mb_good_group_nolock(ac, group, cr); 2965 if (ret <= 0) { 2966 if (!ac->ac_first_err) 2967 ac->ac_first_err = ret; 2968 return 0; 2969 } 2970 2971 ret = ext4_mb_load_buddy(sb, group, ac->ac_e4b); 2972 if (ret) 2973 return ret; 2974 2975 /* skip busy group */ 2976 if (cr >= CR_ANY_FREE) 2977 ext4_lock_group(sb, group); 2978 else if (!ext4_try_lock_group(sb, group)) 2979 goto out_unload; 2980 2981 /* We need to check again after locking the block group. */ 2982 if (unlikely(!ext4_mb_good_group(ac, group, cr))) 2983 goto out_unlock; 2984 2985 __ext4_mb_scan_group(ac); 2986 2987 out_unlock: 2988 ext4_unlock_group(sb, group); 2989 out_unload: 2990 ext4_mb_unload_buddy(ac->ac_e4b); 2991 return ret; 2992 } 2993 2994 static noinline_for_stack int 2995 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2996 { 2997 ext4_group_t i; 2998 int err = 0; 2999 struct super_block *sb = ac->ac_sb; 3000 struct ext4_sb_info *sbi = EXT4_SB(sb); 3001 struct ext4_buddy e4b; 3002 3003 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 3004 3005 /* first, try the goal */ 3006 err = ext4_mb_find_by_goal(ac, &e4b); 3007 if (err || ac->ac_status == AC_STATUS_FOUND) 3008 goto out; 3009 3010 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3011 goto out; 3012 3013 /* 3014 * ac->ac_2order is set only if the fe_len is a power of 2; 3015 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED 3016 * so that we try exact allocation using the buddy. 3017 */ 3018 i = fls(ac->ac_g_ex.fe_len); 3019 ac->ac_2order = 0; 3020 /* 3021 * We search using buddy data only if the order of the request 3022 * is greater than or equal to sbi->s_mb_order2_reqs. 3023 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 3024 * We also support searching for power-of-two requests only for 3025 * requests up to the maximum buddy size we have constructed.
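 *
 * E.g. (illustrative): for a 16-cluster request, fls(16) == 5 and 16 is
 * a power of two, so with the default mb_order2_req of 2 we get
 * ac_2order == 4, and a single order-4 buddy satisfies the request
 * exactly.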
3026 */ 3027 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 3028 if (is_power_of_2(ac->ac_g_ex.fe_len)) 3029 ac->ac_2order = array_index_nospec(i - 1, 3030 MB_NUM_ORDERS(sb)); 3031 } 3032 3033 /* if stream allocation is enabled, use global goal */ 3034 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 3035 int hash = ac->ac_inode->i_ino % sbi->s_mb_nr_global_goals; 3036 3037 ac->ac_g_ex.fe_group = READ_ONCE(sbi->s_mb_last_groups[hash]); 3038 ac->ac_g_ex.fe_start = -1; 3039 ac->ac_flags &= ~EXT4_MB_HINT_TRY_GOAL; 3040 } 3041 3042 /* 3043 * Let's just scan groups to find more or less suitable blocks. We 3044 * start with CR_GOAL_LEN_FAST, unless the request is power-of-2 3045 * aligned, in which case we try that faster approach first. 3046 */ 3047 ac->ac_criteria = CR_GOAL_LEN_FAST; 3048 if (ac->ac_2order) 3049 ac->ac_criteria = CR_POWER2_ALIGNED; 3050 3051 ac->ac_e4b = &e4b; 3052 ac->ac_prefetch_ios = 0; 3053 ac->ac_first_err = 0; 3054 repeat: 3055 while (ac->ac_criteria < EXT4_MB_NUM_CRS) { 3056 err = ext4_mb_scan_groups(ac); 3057 if (err) 3058 goto out; 3059 3060 if (ac->ac_status != AC_STATUS_CONTINUE) 3061 break; 3062 } 3063 3064 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 3065 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 3066 /* 3067 * We've been searching too long. Let's try to allocate 3068 * the best chunk we've found so far 3069 */ 3070 ext4_mb_try_best_found(ac, &e4b); 3071 if (ac->ac_status != AC_STATUS_FOUND) { 3072 int lost; 3073 3074 /* 3075 * Someone luckier has already allocated it. 3076 * The only thing we can do is just take the first 3077 * found block(s) 3078 */ 3079 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 3080 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 3081 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 3082 ac->ac_b_ex.fe_len, lost); 3083 3084 ac->ac_b_ex.fe_group = 0; 3085 ac->ac_b_ex.fe_start = 0; 3086 ac->ac_b_ex.fe_len = 0; 3087 ac->ac_status = AC_STATUS_CONTINUE; 3088 ac->ac_flags |= EXT4_MB_HINT_FIRST; 3089 ac->ac_criteria = CR_ANY_FREE; 3090 goto repeat; 3091 } 3092 } 3093 3094 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) { 3095 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 3096 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC && 3097 ac->ac_b_ex.fe_group == ac->ac_g_ex.fe_group) 3098 atomic_inc(&sbi->s_bal_stream_goals); 3099 } 3100 out: 3101 if (!err && ac->ac_status != AC_STATUS_FOUND && ac->ac_first_err) 3102 err = ac->ac_first_err; 3103 3104 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 3105 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 3106 ac->ac_flags, ac->ac_criteria, err); 3107 3108 if (ac->ac_prefetch_nr) 3109 ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr); 3110 3111 return err; 3112 } 3113 3114 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 3115 { 3116 struct super_block *sb = pde_data(file_inode(seq->file)); 3117 ext4_group_t group; 3118 3119 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3120 return NULL; 3121 group = *pos + 1; 3122 return (void *) ((unsigned long) group); 3123 } 3124 3125 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 3126 { 3127 struct super_block *sb = pde_data(file_inode(seq->file)); 3128 ext4_group_t group; 3129 3130 ++*pos; 3131 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3132 return NULL; 3133 group = *pos + 1; 3134 return (void *) ((unsigned long) group); 3135 } 3136 3137 static int ext4_mb_seq_groups_show(struct seq_file *seq,
void *v) 3138 { 3139 struct super_block *sb = pde_data(file_inode(seq->file)); 3140 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 3141 int i, err; 3142 char nbuf[16]; 3143 struct ext4_buddy e4b; 3144 struct ext4_group_info *grinfo; 3145 unsigned char blocksize_bits = min_t(unsigned char, 3146 sb->s_blocksize_bits, 3147 EXT4_MAX_BLOCK_LOG_SIZE); 3148 DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters, 3149 EXT4_MAX_BLOCK_LOG_SIZE + 2); 3150 3151 group--; 3152 if (group == 0) 3153 seq_puts(seq, "#group: free frags first [" 3154 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 3155 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 3156 3157 i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) + 3158 sizeof(struct ext4_group_info); 3159 3160 grinfo = ext4_get_group_info(sb, group); 3161 if (!grinfo) 3162 return 0; 3163 /* Load the group info in memory only if not already loaded. */ 3164 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 3165 err = ext4_mb_load_buddy(sb, group, &e4b); 3166 if (err) { 3167 seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf)); 3168 return 0; 3169 } 3170 ext4_mb_unload_buddy(&e4b); 3171 } 3172 3173 /* 3174 * We care only about free space counters in the group info and 3175 * these are safe to access even after the buddy has been unloaded 3176 */ 3177 memcpy(sg, grinfo, i); 3178 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free, 3179 sg->bb_fragments, sg->bb_first_free); 3180 for (i = 0; i <= 13; i++) 3181 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 3182 sg->bb_counters[i] : 0); 3183 seq_puts(seq, " ]"); 3184 if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg)) 3185 seq_puts(seq, " Block bitmap corrupted!"); 3186 seq_putc(seq, '\n'); 3187 return 0; 3188 } 3189 3190 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 3191 { 3192 } 3193 3194 const struct seq_operations ext4_mb_seq_groups_ops = { 3195 .start = ext4_mb_seq_groups_start, 3196 .next = ext4_mb_seq_groups_next, 3197 .stop = ext4_mb_seq_groups_stop, 3198 .show = ext4_mb_seq_groups_show, 3199 }; 3200 3201 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 3202 { 3203 struct super_block *sb = seq->private; 3204 struct ext4_sb_info *sbi = EXT4_SB(sb); 3205 3206 seq_puts(seq, "mballoc:\n"); 3207 if (!sbi->s_mb_stats) { 3208 seq_puts(seq, "\tmb stats collection turned off.\n"); 3209 seq_puts( 3210 seq, 3211 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 3212 return 0; 3213 } 3214 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 3215 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 3216 3217 seq_printf(seq, "\tgroups_scanned: %u\n", 3218 atomic_read(&sbi->s_bal_groups_scanned)); 3219 3220 /* CR_POWER2_ALIGNED stats */ 3221 seq_puts(seq, "\tcr_p2_aligned_stats:\n"); 3222 seq_printf(seq, "\t\thits: %llu\n", 3223 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); 3224 seq_printf( 3225 seq, "\t\tgroups_considered: %llu\n", 3226 atomic64_read( 3227 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); 3228 seq_printf(seq, "\t\textents_scanned: %u\n", 3229 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); 3230 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3231 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); 3232 3233 /* CR_GOAL_LEN_FAST stats */ 3234 seq_puts(seq, "\tcr_goal_fast_stats:\n"); 3235 seq_printf(seq, "\t\thits: %llu\n", 3236 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); 3237 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3238 atomic64_read( 3239 
&sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); 3240 seq_printf(seq, "\t\textents_scanned: %u\n", 3241 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); 3242 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3243 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); 3244 3245 /* CR_BEST_AVAIL_LEN stats */ 3246 seq_puts(seq, "\tcr_best_avail_stats:\n"); 3247 seq_printf(seq, "\t\thits: %llu\n", 3248 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); 3249 seq_printf( 3250 seq, "\t\tgroups_considered: %llu\n", 3251 atomic64_read( 3252 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); 3253 seq_printf(seq, "\t\textents_scanned: %u\n", 3254 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); 3255 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3256 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); 3257 3258 /* CR_GOAL_LEN_SLOW stats */ 3259 seq_puts(seq, "\tcr_goal_slow_stats:\n"); 3260 seq_printf(seq, "\t\thits: %llu\n", 3261 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); 3262 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3263 atomic64_read( 3264 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); 3265 seq_printf(seq, "\t\textents_scanned: %u\n", 3266 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); 3267 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3268 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); 3269 3270 /* CR_ANY_FREE stats */ 3271 seq_puts(seq, "\tcr_any_free_stats:\n"); 3272 seq_printf(seq, "\t\thits: %llu\n", 3273 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); 3274 seq_printf( 3275 seq, "\t\tgroups_considered: %llu\n", 3276 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); 3277 seq_printf(seq, "\t\textents_scanned: %u\n", 3278 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); 3279 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3280 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); 3281 3282 /* Aggregates */ 3283 seq_printf(seq, "\textents_scanned: %u\n", 3284 atomic_read(&sbi->s_bal_ex_scanned)); 3285 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 3286 seq_printf(seq, "\t\tstream_goal_hits: %u\n", 3287 atomic_read(&sbi->s_bal_stream_goals)); 3288 seq_printf(seq, "\t\tlen_goal_hits: %u\n", 3289 atomic_read(&sbi->s_bal_len_goals)); 3290 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 3291 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 3292 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 3293 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 3294 atomic_read(&sbi->s_mb_buddies_generated), 3295 ext4_get_groups_count(sb)); 3296 seq_printf(seq, "\tbuddies_time_used: %llu\n", 3297 atomic64_read(&sbi->s_mb_generation_time)); 3298 seq_printf(seq, "\tpreallocated: %u\n", 3299 atomic_read(&sbi->s_mb_preallocated)); 3300 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); 3301 return 0; 3302 } 3303 3304 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 3305 { 3306 struct super_block *sb = pde_data(file_inode(seq->file)); 3307 unsigned long position; 3308 3309 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3310 return NULL; 3311 position = *pos + 1; 3312 return (void *) ((unsigned long) position); 3313 } 3314 3315 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3316 { 3317 struct super_block *sb = pde_data(file_inode(seq->file)); 3318 unsigned long position; 3319 3320 ++*pos; 3321 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 
3322 return NULL; 3323 position = *pos + 1; 3324 return (void *) ((unsigned long) position); 3325 } 3326 3327 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3328 { 3329 struct super_block *sb = pde_data(file_inode(seq->file)); 3330 struct ext4_sb_info *sbi = EXT4_SB(sb); 3331 unsigned long position = ((unsigned long) v); 3332 struct ext4_group_info *grp; 3333 unsigned int count; 3334 unsigned long idx; 3335 3336 position--; 3337 if (position >= MB_NUM_ORDERS(sb)) { 3338 position -= MB_NUM_ORDERS(sb); 3339 if (position == 0) 3340 seq_puts(seq, "avg_fragment_size_lists:\n"); 3341 3342 count = 0; 3343 xa_for_each(&sbi->s_mb_avg_fragment_size[position], idx, grp) 3344 count++; 3345 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3346 (unsigned int)position, count); 3347 return 0; 3348 } 3349 3350 if (position == 0) { 3351 seq_printf(seq, "optimize_scan: %d\n", 3352 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); 3353 seq_puts(seq, "max_free_order_lists:\n"); 3354 } 3355 count = 0; 3356 xa_for_each(&sbi->s_mb_largest_free_orders[position], idx, grp) 3357 count++; 3358 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3359 (unsigned int)position, count); 3360 3361 return 0; 3362 } 3363 3364 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3365 { 3366 } 3367 3368 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3369 .start = ext4_mb_seq_structs_summary_start, 3370 .next = ext4_mb_seq_structs_summary_next, 3371 .stop = ext4_mb_seq_structs_summary_stop, 3372 .show = ext4_mb_seq_structs_summary_show, 3373 }; 3374 3375 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3376 { 3377 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3378 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3379 3380 BUG_ON(!cachep); 3381 return cachep; 3382 } 3383 3384 /* 3385 * Allocate the top-level s_group_info array for the specified number 3386 * of groups 3387 */ 3388 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3389 { 3390 struct ext4_sb_info *sbi = EXT4_SB(sb); 3391 unsigned size; 3392 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3393 3394 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3395 EXT4_DESC_PER_BLOCK_BITS(sb); 3396 if (size <= sbi->s_group_info_size) 3397 return 0; 3398 3399 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3400 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3401 if (!new_groupinfo) { 3402 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3403 return -ENOMEM; 3404 } 3405 rcu_read_lock(); 3406 old_groupinfo = rcu_dereference(sbi->s_group_info); 3407 if (old_groupinfo) 3408 memcpy(new_groupinfo, old_groupinfo, 3409 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3410 rcu_read_unlock(); 3411 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3412 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3413 if (old_groupinfo) 3414 ext4_kvfree_array_rcu(old_groupinfo); 3415 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3416 sbi->s_group_info_size); 3417 return 0; 3418 } 3419 3420 /* Create and initialize ext4_group_info data for the given group. 
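 * For illustration of the two-level lookup below (hypothetical numbers,
 * assuming 4k blocks and 32-byte descriptors, so EXT4_DESC_PER_BLOCK(sb)
 * == 128): group 300 lives at s_group_info[300 >> 7][300 & 127], i.e.
 * table 2, slot 44.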
*/ 3421 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3422 struct ext4_group_desc *desc) 3423 { 3424 int i; 3425 int metalen = 0; 3426 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3427 struct ext4_sb_info *sbi = EXT4_SB(sb); 3428 struct ext4_group_info **meta_group_info; 3429 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3430 3431 /* 3432 * First check if this group is the first of a reserved block. 3433 * If so, we have to allocate a new table of pointers 3434 * to ext4_group_info structures 3435 */ 3436 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3437 metalen = sizeof(*meta_group_info) << 3438 EXT4_DESC_PER_BLOCK_BITS(sb); 3439 meta_group_info = kmalloc(metalen, GFP_NOFS); 3440 if (meta_group_info == NULL) { 3441 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3442 "for a buddy group"); 3443 return -ENOMEM; 3444 } 3445 rcu_read_lock(); 3446 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3447 rcu_read_unlock(); 3448 } 3449 3450 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3451 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3452 3453 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3454 if (meta_group_info[i] == NULL) { 3455 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3456 goto exit_group_info; 3457 } 3458 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3459 &(meta_group_info[i]->bb_state)); 3460 3461 /* 3462 * initialize bb_free to be able to skip 3463 * empty groups without initialization 3464 */ 3465 if (ext4_has_group_desc_csum(sb) && 3466 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3467 meta_group_info[i]->bb_free = 3468 ext4_free_clusters_after_init(sb, group, desc); 3469 } else { 3470 meta_group_info[i]->bb_free = 3471 ext4_free_group_clusters(sb, desc); 3472 } 3473 3474 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3475 init_rwsem(&meta_group_info[i]->alloc_sem); 3476 meta_group_info[i]->bb_free_root = RB_ROOT; 3477 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3478 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3479 meta_group_info[i]->bb_group = group; 3480 3481 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3482 return 0; 3483 3484 exit_group_info: 3485 /* If a meta_group_info table has been allocated, release it now */ 3486 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3487 struct ext4_group_info ***group_info; 3488 3489 rcu_read_lock(); 3490 group_info = rcu_dereference(sbi->s_group_info); 3491 kfree(group_info[idx]); 3492 group_info[idx] = NULL; 3493 rcu_read_unlock(); 3494 } 3495 return -ENOMEM; 3496 } /* ext4_mb_add_groupinfo */ 3497 3498 static int ext4_mb_init_backend(struct super_block *sb) 3499 { 3500 ext4_group_t ngroups = ext4_get_groups_count(sb); 3501 ext4_group_t i; 3502 struct ext4_sb_info *sbi = EXT4_SB(sb); 3503 int err; 3504 struct ext4_group_desc *desc; 3505 struct ext4_group_info ***group_info; 3506 struct kmem_cache *cachep; 3507 3508 err = ext4_mb_alloc_groupinfo(sb, ngroups); 3509 if (err) 3510 return err; 3511 3512 sbi->s_buddy_cache = new_inode(sb); 3513 if (sbi->s_buddy_cache == NULL) { 3514 ext4_msg(sb, KERN_ERR, "can't get new inode"); 3515 goto err_freesgi; 3516 } 3517 /* To avoid potentially colliding with a valid on-disk inode number, 3518 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 3519 * not in the inode hash, so it should never be found by iget(), but 3520 * this will avoid confusion if it ever shows up during debugging.
*/ 3521 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3522 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 3523 ext4_set_inode_mapping_order(sbi->s_buddy_cache); 3524 3525 for (i = 0; i < ngroups; i++) { 3526 cond_resched(); 3527 desc = ext4_get_group_desc(sb, i, NULL); 3528 if (desc == NULL) { 3529 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3530 goto err_freebuddy; 3531 } 3532 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 3533 goto err_freebuddy; 3534 } 3535 3536 if (ext4_has_feature_flex_bg(sb)) { 3537 /* a single flex group is supposed to be read by a single IO. 3538 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3539 * an unsigned integer, so the maximum shift is 32. 3540 */ 3541 if (sbi->s_es->s_log_groups_per_flex >= 32) { 3542 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3543 goto err_freebuddy; 3544 } 3545 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 3546 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3547 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3548 } else { 3549 sbi->s_mb_prefetch = 32; 3550 } 3551 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3552 sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3553 /* 3554 * how many real IOs to prefetch within a single allocation at 3555 * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is a CPU-related 3556 * optimization we shouldn't try to load too many groups, at some point 3557 * we should start to use what we've got in memory. 3558 * with an average random access time of 5ms, it'd take a second to get 3559 * 200 groups (* N with flex_bg), so let's make this limit 4 3560 */ 3561 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3562 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3563 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3564 3565 return 0; 3566 3567 err_freebuddy: 3568 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3569 while (i-- > 0) { 3570 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 3571 3572 if (grp) 3573 kmem_cache_free(cachep, grp); 3574 } 3575 i = sbi->s_group_info_size; 3576 rcu_read_lock(); 3577 group_info = rcu_dereference(sbi->s_group_info); 3578 while (i-- > 0) 3579 kfree(group_info[i]); 3580 rcu_read_unlock(); 3581 iput(sbi->s_buddy_cache); 3582 err_freesgi: 3583 rcu_read_lock(); 3584 kvfree(rcu_dereference(sbi->s_group_info)); 3585 rcu_read_unlock(); 3586 return -ENOMEM; 3587 } 3588 3589 static void ext4_groupinfo_destroy_slabs(void) 3590 { 3591 int i; 3592 3593 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 3594 kmem_cache_destroy(ext4_groupinfo_caches[i]); 3595 ext4_groupinfo_caches[i] = NULL; 3596 } 3597 } 3598 3599 static int ext4_groupinfo_create_slab(size_t size) 3600 { 3601 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 3602 int slab_size; 3603 int blocksize_bits = order_base_2(size); 3604 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3605 struct kmem_cache *cachep; 3606 3607 if (cache_index >= NR_GRPINFO_CACHES) 3608 return -EINVAL; 3609 3610 if (unlikely(cache_index < 0)) 3611 cache_index = 0; 3612 3613 mutex_lock(&ext4_grpinfo_slab_create_mutex); 3614 if (ext4_groupinfo_caches[cache_index]) { 3615 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3616 return 0; /* Already created */ 3617 } 3618 3619 slab_size = offsetof(struct ext4_group_info, 3620 bb_counters[blocksize_bits + 2]); 3621 3622 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 3623 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 3624 NULL); 3625 3626 ext4_groupinfo_caches[cache_index] =
3589 static void ext4_groupinfo_destroy_slabs(void)
3590 {
3591 int i;
3592
3593 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3594 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3595 ext4_groupinfo_caches[i] = NULL;
3596 }
3597 }
3598
3599 static int ext4_groupinfo_create_slab(size_t size)
3600 {
3601 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3602 int slab_size;
3603 int blocksize_bits = order_base_2(size);
3604 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3605 struct kmem_cache *cachep;
3606
3607 if (cache_index >= NR_GRPINFO_CACHES)
3608 return -EINVAL;
3609
3610 if (unlikely(cache_index < 0))
3611 cache_index = 0;
3612
3613 mutex_lock(&ext4_grpinfo_slab_create_mutex);
3614 if (ext4_groupinfo_caches[cache_index]) {
3615 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3616 return 0; /* Already created */
3617 }
3618
3619 slab_size = offsetof(struct ext4_group_info,
3620 bb_counters[blocksize_bits + 2]);
3621
3622 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3623 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3624 NULL);
3625
3626 ext4_groupinfo_caches[cache_index] = cachep;
3627
3628 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3629 if (!cachep) {
3630 printk(KERN_EMERG
3631 "EXT4-fs: no memory for groupinfo slab cache\n");
3632 return -ENOMEM;
3633 }
3634
3635 return 0;
3636 }
3637
3638 static void ext4_discard_work(struct work_struct *work)
3639 {
3640 struct ext4_sb_info *sbi = container_of(work,
3641 struct ext4_sb_info, s_discard_work);
3642 struct super_block *sb = sbi->s_sb;
3643 struct ext4_free_data *fd, *nfd;
3644 struct ext4_buddy e4b;
3645 LIST_HEAD(discard_list);
3646 ext4_group_t grp, load_grp;
3647 int err = 0;
3648
3649 spin_lock(&sbi->s_md_lock);
3650 list_splice_init(&sbi->s_discard_list, &discard_list);
3651 spin_unlock(&sbi->s_md_lock);
3652
3653 load_grp = UINT_MAX;
3654 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3655 /*
3656 * If the filesystem is unmounting, out of memory or
3657 * suffering from ENOSPC, give up on the discard
3658 */
3659 if ((sb->s_flags & SB_ACTIVE) && !err &&
3660 !atomic_read(&sbi->s_retry_alloc_pending)) {
3661 grp = fd->efd_group;
3662 if (grp != load_grp) {
3663 if (load_grp != UINT_MAX)
3664 ext4_mb_unload_buddy(&e4b);
3665
3666 err = ext4_mb_load_buddy(sb, grp, &e4b);
3667 if (err) {
3668 kmem_cache_free(ext4_free_data_cachep, fd);
3669 load_grp = UINT_MAX;
3670 continue;
3671 } else {
3672 load_grp = grp;
3673 }
3674 }
3675
3676 ext4_lock_group(sb, grp);
3677 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3678 fd->efd_start_cluster + fd->efd_count - 1, 1);
3679 ext4_unlock_group(sb, grp);
3680 }
3681 kmem_cache_free(ext4_free_data_cachep, fd);
3682 }
3683
3684 if (load_grp != UINT_MAX)
3685 ext4_mb_unload_buddy(&e4b);
3686 }
3687
3688 static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
3689 {
3690 if (!sbi->s_mb_avg_fragment_size)
3691 return;
3692
3693 for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
3694 xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
3695
3696 kfree(sbi->s_mb_avg_fragment_size);
3697 sbi->s_mb_avg_fragment_size = NULL;
3698 }
3699
3700 static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
3701 {
3702 if (!sbi->s_mb_largest_free_orders)
3703 return;
3704
3705 for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
3706 xa_destroy(&sbi->s_mb_largest_free_orders[i]);
3707
3708 kfree(sbi->s_mb_largest_free_orders);
3709 sbi->s_mb_largest_free_orders = NULL;
3710 }
3711
3712 int ext4_mb_init(struct super_block *sb)
3713 {
3714 struct ext4_sb_info *sbi = EXT4_SB(sb);
3715 unsigned i, j;
3716 unsigned offset, offset_incr;
3717 unsigned max;
3718 int ret;
3719
3720 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3721
3722 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3723 if (sbi->s_mb_offsets == NULL) {
3724 ret = -ENOMEM;
3725 goto out;
3726 }
3727
3728 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3729 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3730 if (sbi->s_mb_maxs == NULL) {
3731 ret = -ENOMEM;
3732 goto out;
3733 }
3734
3735 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3736 if (ret < 0)
3737 goto out;
3738
3739 /* order 0 is regular bitmap */
3740 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3741 sbi->s_mb_offsets[0] = 0;
3742
3743 i = 1;
3744 offset = 0;
3745 offset_incr = 1 << (sb->s_blocksize_bits - 1);
3746 max = sb->s_blocksize << 2;
3747 do {
3748 sbi->s_mb_offsets[i] = offset;
3749 sbi->s_mb_maxs[i] = max;
3750 offset += offset_incr;
3751 offset_incr = offset_incr >> 1;
3752 max = max >> 1;
3753 i++;
3754 } while (i < MB_NUM_ORDERS(sb));
3755
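/*
 * Resulting layout, for example, with a 4k block size
 * (s_blocksize_bits = 12; offsets in bytes, maxs in bits):
 *
 *   order 1: offset 0,    max 16384
 *   order 2: offset 2048, max 8192
 *   order 3: offset 3072, max 4096
 *   order 4: offset 3584, max 2048
 *   ...
 *
 * Each order needs half the bits of the previous one, so the whole
 * buddy hierarchy (orders 1 and up) converges within a single block;
 * order 0 is the regular bitmap and occupies its own block.
 */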
3756 sbi->s_mb_avg_fragment_size =
3757 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
3758 GFP_KERNEL);
3759 if (!sbi->s_mb_avg_fragment_size) {
3760 ret = -ENOMEM;
3761 goto out;
3762 }
3763 for (i = 0; i < MB_NUM_ORDERS(sb); i++)
3764 xa_init(&sbi->s_mb_avg_fragment_size[i]);
3765
3766 sbi->s_mb_largest_free_orders =
3767 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
3768 GFP_KERNEL);
3769 if (!sbi->s_mb_largest_free_orders) {
3770 ret = -ENOMEM;
3771 goto out;
3772 }
3773 for (i = 0; i < MB_NUM_ORDERS(sb); i++)
3774 xa_init(&sbi->s_mb_largest_free_orders[i]);
3775
3776 spin_lock_init(&sbi->s_md_lock);
3777 atomic_set(&sbi->s_mb_free_pending, 0);
3778 INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3779 INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3780 INIT_LIST_HEAD(&sbi->s_discard_list);
3781 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3782 atomic_set(&sbi->s_retry_alloc_pending, 0);
3783
3784 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3785 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3786 sbi->s_mb_stats = MB_DEFAULT_STATS;
3787 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3788 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3789 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3790
3791 /*
3792 * The default group preallocation is 512, which for 4k block
3793 * sizes translates to 2 megabytes. However for bigalloc file
3794 * systems, this is probably too big (i.e., if the cluster size
3795 * is 1 megabyte, then group preallocation size becomes half a
3796 * gigabyte!). As a default, we will keep a two megabyte
3797 * group prealloc size for cluster sizes up to 64k, and after
3798 * that, we will force a minimum group preallocation size of
3799 * 32 clusters. This translates to 8 megs when the cluster
3800 * size is 256k, and 32 megs when the cluster size is 1 meg,
3801 * which seems reasonable as a default.
3802 */
3803 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3804 sbi->s_cluster_bits, 32);
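/*
 * For instance: with 4k blocks and 64k clusters (s_cluster_bits = 4),
 * 512 >> 4 = 32, so s_mb_group_prealloc stays at 32 clusters = 2M;
 * with 1M clusters (s_cluster_bits = 8), 512 >> 8 = 2 and the max()
 * above enforces the 32-cluster = 32M floor.
 */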
3805 /*
3806 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3807 * to the lowest multiple of s_stripe which is bigger than
3808 * the s_mb_group_prealloc as determined above. We want
3809 * the preallocation size to be an exact multiple of the
3810 * RAID stripe size so that preallocations don't fragment
3811 * the stripes.
3812 */
3813 if (sbi->s_stripe > 1) {
3814 sbi->s_mb_group_prealloc = roundup(
3815 sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
3816 }
3817
3818 sbi->s_mb_nr_global_goals = umin(num_possible_cpus(),
3819 DIV_ROUND_UP(sbi->s_groups_count, 4));
3820 sbi->s_mb_last_groups = kcalloc(sbi->s_mb_nr_global_goals,
3821 sizeof(ext4_group_t), GFP_KERNEL);
3822 if (sbi->s_mb_last_groups == NULL) {
3823 ret = -ENOMEM;
3824 goto out;
3825 }
3826
3827 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3828 if (sbi->s_locality_groups == NULL) {
3829 ret = -ENOMEM;
3830 goto out_free_last_groups;
3831 }
3832 for_each_possible_cpu(i) {
3833 struct ext4_locality_group *lg;
3834 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3835 mutex_init(&lg->lg_mutex);
3836 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3837 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3838 spin_lock_init(&lg->lg_prealloc_lock);
3839 }
3840
3841 if (bdev_nonrot(sb->s_bdev))
3842 sbi->s_mb_max_linear_groups = 0;
3843 else
3844 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3845 /* init file for buddy data */
3846 ret = ext4_mb_init_backend(sb);
3847 if (ret != 0)
3848 goto out_free_locality_groups;
3849
3850 return 0;
3851
3852 out_free_locality_groups:
3853 free_percpu(sbi->s_locality_groups);
3854 sbi->s_locality_groups = NULL;
3855 out_free_last_groups:
3856 kfree(sbi->s_mb_last_groups);
3857 sbi->s_mb_last_groups = NULL;
3858 out:
3859 ext4_mb_avg_fragment_size_destroy(sbi);
3860 ext4_mb_largest_free_orders_destroy(sbi);
3861 kfree(sbi->s_mb_offsets);
3862 sbi->s_mb_offsets = NULL;
3863 kfree(sbi->s_mb_maxs);
3864 sbi->s_mb_maxs = NULL;
3865 return ret;
3866 }
3867
3868 /* needs to be called with the ext4 group lock held */
3869 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3870 {
3871 struct ext4_prealloc_space *pa;
3872 struct list_head *cur, *tmp;
3873 int count = 0;
3874
3875 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3876 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3877 list_del(&pa->pa_group_list);
3878 count++;
3879 kmem_cache_free(ext4_pspace_cachep, pa);
3880 }
3881 return count;
3882 }
3883
3884 void ext4_mb_release(struct super_block *sb)
3885 {
3886 ext4_group_t ngroups = ext4_get_groups_count(sb);
3887 ext4_group_t i;
3888 int num_meta_group_infos;
3889 struct ext4_group_info *grinfo, ***group_info;
3890 struct ext4_sb_info *sbi = EXT4_SB(sb);
3891 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3892 int count;
3893
3894 if (test_opt(sb, DISCARD)) {
3895 /*
3896 * wait for the discard work to drain all of ext4_free_data
3897 */
3898 flush_work(&sbi->s_discard_work);
3899 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3900 }
3901
3902 if (sbi->s_group_info) {
3903 for (i = 0; i < ngroups; i++) {
3904 cond_resched();
3905 grinfo = ext4_get_group_info(sb, i);
3906 if (!grinfo)
3907 continue;
3908 mb_group_bb_bitmap_free(grinfo);
3909 ext4_lock_group(sb, i);
3910 count = ext4_mb_cleanup_pa(grinfo);
3911 if (count)
3912 mb_debug(sb, "mballoc: %d PAs left\n",
3913 count);
3914 ext4_unlock_group(sb, i);
3915 kmem_cache_free(cachep, grinfo);
3916 }
3917 num_meta_group_infos = (ngroups +
3918 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3919 EXT4_DESC_PER_BLOCK_BITS(sb);
3920 rcu_read_lock();
3921 group_info = rcu_dereference(sbi->s_group_info);
3922 for (i = 0; i < num_meta_group_infos; i++)
3923 kfree(group_info[i]);
3924 kvfree(group_info);
3925 rcu_read_unlock();
3926 }
3927 ext4_mb_avg_fragment_size_destroy(sbi);
3928
ext4_mb_largest_free_orders_destroy(sbi); 3929 kfree(sbi->s_mb_offsets); 3930 kfree(sbi->s_mb_maxs); 3931 iput(sbi->s_buddy_cache); 3932 if (sbi->s_mb_stats) { 3933 ext4_msg(sb, KERN_INFO, 3934 "mballoc: %u blocks %u reqs (%u success)", 3935 atomic_read(&sbi->s_bal_allocated), 3936 atomic_read(&sbi->s_bal_reqs), 3937 atomic_read(&sbi->s_bal_success)); 3938 ext4_msg(sb, KERN_INFO, 3939 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3940 "%u 2^N hits, %u breaks, %u lost", 3941 atomic_read(&sbi->s_bal_ex_scanned), 3942 atomic_read(&sbi->s_bal_groups_scanned), 3943 atomic_read(&sbi->s_bal_goals), 3944 atomic_read(&sbi->s_bal_2orders), 3945 atomic_read(&sbi->s_bal_breaks), 3946 atomic_read(&sbi->s_mb_lost_chunks)); 3947 ext4_msg(sb, KERN_INFO, 3948 "mballoc: %u generated and it took %llu", 3949 atomic_read(&sbi->s_mb_buddies_generated), 3950 atomic64_read(&sbi->s_mb_generation_time)); 3951 ext4_msg(sb, KERN_INFO, 3952 "mballoc: %u preallocated, %u discarded", 3953 atomic_read(&sbi->s_mb_preallocated), 3954 atomic_read(&sbi->s_mb_discarded)); 3955 } 3956 3957 free_percpu(sbi->s_locality_groups); 3958 kfree(sbi->s_mb_last_groups); 3959 } 3960 3961 static inline int ext4_issue_discard(struct super_block *sb, 3962 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 3963 { 3964 ext4_fsblk_t discard_block; 3965 3966 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3967 ext4_group_first_block_no(sb, block_group)); 3968 count = EXT4_C2B(EXT4_SB(sb), count); 3969 trace_ext4_discard_blocks(sb, 3970 (unsigned long long) discard_block, count); 3971 3972 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3973 } 3974 3975 static void ext4_free_data_in_buddy(struct super_block *sb, 3976 struct ext4_free_data *entry) 3977 { 3978 struct ext4_buddy e4b; 3979 struct ext4_group_info *db; 3980 int err, count = 0; 3981 3982 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3983 entry->efd_count, entry->efd_group, entry); 3984 3985 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3986 /* we expect to find existing buddy because it's pinned */ 3987 BUG_ON(err != 0); 3988 3989 atomic_sub(entry->efd_count, &EXT4_SB(sb)->s_mb_free_pending); 3990 db = e4b.bd_info; 3991 /* there are blocks to put in buddy to make them really free */ 3992 count += entry->efd_count; 3993 ext4_lock_group(sb, entry->efd_group); 3994 /* Take it out of per group rb tree */ 3995 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3996 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3997 3998 /* 3999 * Clear the trimmed flag for the group so that the next 4000 * ext4_trim_fs can trim it. 4001 */ 4002 EXT4_MB_GRP_CLEAR_TRIMMED(db); 4003 4004 if (!db->bb_free_root.rb_node) { 4005 /* No more items in the per group rb tree 4006 * balance refcounts from ext4_mb_free_metadata() 4007 */ 4008 folio_put(e4b.bd_buddy_folio); 4009 folio_put(e4b.bd_bitmap_folio); 4010 } 4011 ext4_unlock_group(sb, entry->efd_group); 4012 ext4_mb_unload_buddy(&e4b); 4013 4014 mb_debug(sb, "freed %d blocks in 1 structures\n", count); 4015 } 4016 4017 /* 4018 * This function is called by the jbd2 layer once the commit has finished, 4019 * so we know we can free the blocks that were released with that commit. 
4020 */ 4021 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 4022 { 4023 struct ext4_sb_info *sbi = EXT4_SB(sb); 4024 struct ext4_free_data *entry, *tmp; 4025 LIST_HEAD(freed_data_list); 4026 struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1]; 4027 bool wake; 4028 4029 list_replace_init(s_freed_head, &freed_data_list); 4030 4031 list_for_each_entry(entry, &freed_data_list, efd_list) 4032 ext4_free_data_in_buddy(sb, entry); 4033 4034 if (test_opt(sb, DISCARD)) { 4035 spin_lock(&sbi->s_md_lock); 4036 wake = list_empty(&sbi->s_discard_list); 4037 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 4038 spin_unlock(&sbi->s_md_lock); 4039 if (wake) 4040 queue_work(system_dfl_wq, &sbi->s_discard_work); 4041 } else { 4042 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 4043 kmem_cache_free(ext4_free_data_cachep, entry); 4044 } 4045 } 4046 4047 int __init ext4_init_mballoc(void) 4048 { 4049 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 4050 SLAB_RECLAIM_ACCOUNT); 4051 if (ext4_pspace_cachep == NULL) 4052 goto out; 4053 4054 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 4055 SLAB_RECLAIM_ACCOUNT); 4056 if (ext4_ac_cachep == NULL) 4057 goto out_pa_free; 4058 4059 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 4060 SLAB_RECLAIM_ACCOUNT); 4061 if (ext4_free_data_cachep == NULL) 4062 goto out_ac_free; 4063 4064 return 0; 4065 4066 out_ac_free: 4067 kmem_cache_destroy(ext4_ac_cachep); 4068 out_pa_free: 4069 kmem_cache_destroy(ext4_pspace_cachep); 4070 out: 4071 return -ENOMEM; 4072 } 4073 4074 void ext4_exit_mballoc(void) 4075 { 4076 /* 4077 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 4078 * before destroying the slab cache. 4079 */ 4080 rcu_barrier(); 4081 kmem_cache_destroy(ext4_pspace_cachep); 4082 kmem_cache_destroy(ext4_ac_cachep); 4083 kmem_cache_destroy(ext4_free_data_cachep); 4084 ext4_groupinfo_destroy_slabs(); 4085 } 4086 4087 #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001 4088 #define EXT4_MB_SYNC_UPDATE 0x0002 4089 static int 4090 ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state, 4091 ext4_group_t group, ext4_grpblk_t blkoff, 4092 ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed) 4093 { 4094 struct ext4_sb_info *sbi = EXT4_SB(sb); 4095 struct buffer_head *bitmap_bh = NULL; 4096 struct ext4_group_desc *gdp; 4097 struct buffer_head *gdp_bh; 4098 int err; 4099 unsigned int i, already, changed = len; 4100 4101 KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context, 4102 handle, sb, state, group, blkoff, len, 4103 flags, ret_changed); 4104 4105 if (ret_changed) 4106 *ret_changed = 0; 4107 bitmap_bh = ext4_read_block_bitmap(sb, group); 4108 if (IS_ERR(bitmap_bh)) 4109 return PTR_ERR(bitmap_bh); 4110 4111 if (handle) { 4112 BUFFER_TRACE(bitmap_bh, "getting write access"); 4113 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 4114 EXT4_JTR_NONE); 4115 if (err) 4116 goto out_err; 4117 } 4118 4119 err = -EIO; 4120 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 4121 if (!gdp) 4122 goto out_err; 4123 4124 if (handle) { 4125 BUFFER_TRACE(gdp_bh, "get_write_access"); 4126 err = ext4_journal_get_write_access(handle, sb, gdp_bh, 4127 EXT4_JTR_NONE); 4128 if (err) 4129 goto out_err; 4130 } 4131 4132 ext4_lock_group(sb, group); 4133 if (ext4_has_group_desc_csum(sb) && 4134 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4135 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4136 ext4_free_group_clusters_set(sb, gdp, 4137 ext4_free_clusters_after_init(sb, group, gdp)); 
4138 } 4139 4140 if (flags & EXT4_MB_BITMAP_MARKED_CHECK) { 4141 already = 0; 4142 for (i = 0; i < len; i++) 4143 if (mb_test_bit(blkoff + i, bitmap_bh->b_data) == 4144 state) 4145 already++; 4146 changed = len - already; 4147 } 4148 4149 if (state) { 4150 mb_set_bits(bitmap_bh->b_data, blkoff, len); 4151 ext4_free_group_clusters_set(sb, gdp, 4152 ext4_free_group_clusters(sb, gdp) - changed); 4153 } else { 4154 mb_clear_bits(bitmap_bh->b_data, blkoff, len); 4155 ext4_free_group_clusters_set(sb, gdp, 4156 ext4_free_group_clusters(sb, gdp) + changed); 4157 } 4158 4159 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4160 ext4_group_desc_csum_set(sb, group, gdp); 4161 ext4_unlock_group(sb, group); 4162 if (ret_changed) 4163 *ret_changed = changed; 4164 4165 if (sbi->s_log_groups_per_flex) { 4166 ext4_group_t flex_group = ext4_flex_group(sbi, group); 4167 struct flex_groups *fg = sbi_array_rcu_deref(sbi, 4168 s_flex_groups, flex_group); 4169 4170 if (state) 4171 atomic64_sub(changed, &fg->free_clusters); 4172 else 4173 atomic64_add(changed, &fg->free_clusters); 4174 } 4175 4176 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4177 if (err) 4178 goto out_err; 4179 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 4180 if (err) 4181 goto out_err; 4182 4183 if (flags & EXT4_MB_SYNC_UPDATE) { 4184 sync_dirty_buffer(bitmap_bh); 4185 sync_dirty_buffer(gdp_bh); 4186 } 4187 4188 out_err: 4189 brelse(bitmap_bh); 4190 return err; 4191 } 4192 4193 /* 4194 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 4195 * Returns 0 if success or error code 4196 */ 4197 static noinline_for_stack int 4198 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, handle_t *handle) 4199 { 4200 struct ext4_group_desc *gdp; 4201 struct ext4_sb_info *sbi; 4202 struct super_block *sb; 4203 ext4_fsblk_t block; 4204 int err, len; 4205 int flags = 0; 4206 ext4_grpblk_t changed; 4207 4208 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4209 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4210 4211 sb = ac->ac_sb; 4212 sbi = EXT4_SB(sb); 4213 4214 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL); 4215 if (!gdp) 4216 return -EIO; 4217 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 4218 ext4_free_group_clusters(sb, gdp)); 4219 4220 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4221 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4222 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 4223 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 4224 "fs metadata", block, block+len); 4225 /* File system mounted not to panic on error 4226 * Fix the bitmap and return EFSCORRUPTED 4227 * We leak some of the blocks here. 4228 */ 4229 err = ext4_mb_mark_context(handle, sb, true, 4230 ac->ac_b_ex.fe_group, 4231 ac->ac_b_ex.fe_start, 4232 ac->ac_b_ex.fe_len, 4233 0, NULL); 4234 if (!err) 4235 err = -EFSCORRUPTED; 4236 return err; 4237 } 4238 4239 #ifdef AGGRESSIVE_CHECK 4240 flags |= EXT4_MB_BITMAP_MARKED_CHECK; 4241 #endif 4242 err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group, 4243 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, 4244 flags, &changed); 4245 4246 if (err && changed == 0) 4247 return err; 4248 4249 #ifdef AGGRESSIVE_CHECK 4250 BUG_ON(changed != ac->ac_b_ex.fe_len); 4251 #endif 4252 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 4253 4254 return err; 4255 } 4256 4257 /* 4258 * Idempotent helper for Ext4 fast commit replay path to set the state of 4259 * blocks in bitmaps and update counters. 
*/
4261 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4262 int len, bool state)
4263 {
4264 struct ext4_sb_info *sbi = EXT4_SB(sb);
4265 ext4_group_t group;
4266 ext4_grpblk_t blkoff;
4267 int err = 0;
4268 unsigned int clen, thisgrp_len;
4269
4270 while (len > 0) {
4271 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4272
4273 /*
4274 * Check to see if we are freeing blocks across a group
4275 * boundary.
4276 * In case of flex_bg, (block, len) may span across more than
4277 * one group. In that case we need to get the corresponding
4278 * group metadata to work with, which is why we loop over the
4279 * affected groups one at a time here.
4280 */
4281 thisgrp_len = min(len, EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4282 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4283
4284 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4285 ext4_error(sb, "Marking blocks in system zone - "
4286 "Block = %llu, len = %u",
4287 block, thisgrp_len);
4288 break;
4289 }
4290
4291 err = ext4_mb_mark_context(NULL, sb, state,
4292 group, blkoff, clen,
4293 EXT4_MB_BITMAP_MARKED_CHECK |
4294 EXT4_MB_SYNC_UPDATE,
4295 NULL);
4296 if (err)
4297 break;
4298
4299 block += thisgrp_len;
4300 len -= thisgrp_len;
4301 BUG_ON(len < 0);
4302 }
4303 }
4304
4305 /*
4306 * here we normalize the request for a locality group.
4307 * Group requests are normalized to s_mb_group_prealloc, which goes to
4308 * s_stripe if we set the same via mount option.
4309 * s_mb_group_prealloc can be configured via
4310 * /sys/fs/ext4/<partition>/mb_group_prealloc
4311 *
4312 * XXX: should we try to preallocate more than the group has now?
4313 */
4314 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4315 {
4316 struct super_block *sb = ac->ac_sb;
4317 struct ext4_locality_group *lg = ac->ac_lg;
4318
4319 BUG_ON(lg == NULL);
4320 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4321 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4322 }
4323
4324 /*
4325 * This function returns the next element to look at during inode
4326 * PA rbtree walk.
We assume that the caller holds the inode PA rbtree lock
4327 * (ei->i_prealloc_lock)
4328 *
4329 * new_start The start of the range we want to compare
4330 * cur_start The existing start that we are comparing against
4331 * node The node of the rb_tree
4332 */
4333 static inline struct rb_node*
4334 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4335 {
4336 if (new_start < cur_start)
4337 return node->rb_left;
4338 else
4339 return node->rb_right;
4340 }
4341
4342 static inline void
4343 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4344 ext4_lblk_t start, loff_t end)
4345 {
4346 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4347 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4348 struct ext4_prealloc_space *tmp_pa;
4349 ext4_lblk_t tmp_pa_start;
4350 loff_t tmp_pa_end;
4351 struct rb_node *iter;
4352
4353 read_lock(&ei->i_prealloc_lock);
4354 for (iter = ei->i_prealloc_node.rb_node; iter;
4355 iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4356 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4357 pa_node.inode_node);
4358 tmp_pa_start = tmp_pa->pa_lstart;
4359 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4360
4361 spin_lock(&tmp_pa->pa_lock);
4362 if (tmp_pa->pa_deleted == 0)
4363 BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4364 spin_unlock(&tmp_pa->pa_lock);
4365 }
4366 read_unlock(&ei->i_prealloc_lock);
4367 }
4368
4369 /*
4370 * Given an allocation context "ac" and a range "start", "end", check
4371 * and adjust boundaries if the range overlaps with any of the existing
4372 * preallocations stored in the corresponding inode of the allocation context.
4373 *
4374 * Parameters:
4375 * ac allocation context
4376 * start start of the new range
4377 * end end of the new range
4378 */
4379 static inline void
4380 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4381 ext4_lblk_t *start, loff_t *end)
4382 {
4383 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4384 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4385 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4386 struct rb_node *iter;
4387 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4388 loff_t new_end, tmp_pa_end, left_pa_end = -1;
4389
4390 new_start = *start;
4391 new_end = *end;
4392
4393 /*
4394 * Adjust the normalized range so that it doesn't overlap with any
4395 * existing preallocated blocks (PAs). Make sure to hold the rbtree lock
4396 * so it doesn't change underneath us.
4397 */ 4398 read_lock(&ei->i_prealloc_lock); 4399 4400 /* Step 1: find any one immediate neighboring PA of the normalized range */ 4401 for (iter = ei->i_prealloc_node.rb_node; iter; 4402 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4403 tmp_pa_start, iter)) { 4404 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4405 pa_node.inode_node); 4406 tmp_pa_start = tmp_pa->pa_lstart; 4407 tmp_pa_end = pa_logical_end(sbi, tmp_pa); 4408 4409 /* PA must not overlap original request */ 4410 spin_lock(&tmp_pa->pa_lock); 4411 if (tmp_pa->pa_deleted == 0) 4412 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 4413 ac->ac_o_ex.fe_logical < tmp_pa_start)); 4414 spin_unlock(&tmp_pa->pa_lock); 4415 } 4416 4417 /* 4418 * Step 2: check if the found PA is left or right neighbor and 4419 * get the other neighbor 4420 */ 4421 if (tmp_pa) { 4422 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 4423 struct rb_node *tmp; 4424 4425 left_pa = tmp_pa; 4426 tmp = rb_next(&left_pa->pa_node.inode_node); 4427 if (tmp) { 4428 right_pa = rb_entry(tmp, 4429 struct ext4_prealloc_space, 4430 pa_node.inode_node); 4431 } 4432 } else { 4433 struct rb_node *tmp; 4434 4435 right_pa = tmp_pa; 4436 tmp = rb_prev(&right_pa->pa_node.inode_node); 4437 if (tmp) { 4438 left_pa = rb_entry(tmp, 4439 struct ext4_prealloc_space, 4440 pa_node.inode_node); 4441 } 4442 } 4443 } 4444 4445 /* Step 3: get the non deleted neighbors */ 4446 if (left_pa) { 4447 for (iter = &left_pa->pa_node.inode_node;; 4448 iter = rb_prev(iter)) { 4449 if (!iter) { 4450 left_pa = NULL; 4451 break; 4452 } 4453 4454 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4455 pa_node.inode_node); 4456 left_pa = tmp_pa; 4457 spin_lock(&tmp_pa->pa_lock); 4458 if (tmp_pa->pa_deleted == 0) { 4459 spin_unlock(&tmp_pa->pa_lock); 4460 break; 4461 } 4462 spin_unlock(&tmp_pa->pa_lock); 4463 } 4464 } 4465 4466 if (right_pa) { 4467 for (iter = &right_pa->pa_node.inode_node;; 4468 iter = rb_next(iter)) { 4469 if (!iter) { 4470 right_pa = NULL; 4471 break; 4472 } 4473 4474 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4475 pa_node.inode_node); 4476 right_pa = tmp_pa; 4477 spin_lock(&tmp_pa->pa_lock); 4478 if (tmp_pa->pa_deleted == 0) { 4479 spin_unlock(&tmp_pa->pa_lock); 4480 break; 4481 } 4482 spin_unlock(&tmp_pa->pa_lock); 4483 } 4484 } 4485 4486 if (left_pa) { 4487 left_pa_end = pa_logical_end(sbi, left_pa); 4488 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 4489 } 4490 4491 if (right_pa) { 4492 right_pa_start = right_pa->pa_lstart; 4493 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 4494 } 4495 4496 /* Step 4: trim our normalized range to not overlap with the neighbors */ 4497 if (left_pa) { 4498 if (left_pa_end > new_start) 4499 new_start = left_pa_end; 4500 } 4501 4502 if (right_pa) { 4503 if (right_pa_start < new_end) 4504 new_end = right_pa_start; 4505 } 4506 read_unlock(&ei->i_prealloc_lock); 4507 4508 /* XXX: extra loop to check we really don't overlap preallocations */ 4509 ext4_mb_pa_assert_overlap(ac, new_start, new_end); 4510 4511 *start = new_start; 4512 *end = new_end; 4513 } 4514 4515 /* 4516 * Normalization means making request better in terms of 4517 * size and alignment 4518 */ 4519 static noinline_for_stack void 4520 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4521 struct ext4_allocation_request *ar) 4522 { 4523 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4524 struct ext4_super_block *es = sbi->s_es; 4525 int bsbits, max; 4526 loff_t size, start_off, end; 4527 loff_t orig_size __maybe_unused; 4528 ext4_lblk_t start; 4529 
4530 /* only normalize data requests; metadata requests
4531 do not need preallocation */
4532 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4533 return;
4534
4535 /* sometimes the caller may want exact blocks */
4536 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4537 return;
4538
4539 /* the caller may indicate that preallocation isn't
4540 * required (it's a tail, for example) */
4541 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4542 return;
4543
4544 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4545 ext4_mb_normalize_group_request(ac);
4546 return ;
4547 }
4548
4549 bsbits = ac->ac_sb->s_blocksize_bits;
4550
4551 /* first, let's learn the actual file size
4552 * we'd have once the current request is allocated */
4553 size = extent_logical_end(sbi, &ac->ac_o_ex);
4554 size = size << bsbits;
4555 if (size < i_size_read(ac->ac_inode))
4556 size = i_size_read(ac->ac_inode);
4557 orig_size = size;
4558
4559 /* max size of free chunks */
4560 max = 2 << bsbits;
4561
4562 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
4563 (req <= (size) || max <= (chunk_size))
4564
4565 /* first, try to predict filesize */
4566 /* XXX: should this table be tunable? */
4567 start_off = 0;
4568 if (size <= 16 * 1024) {
4569 size = 16 * 1024;
4570 } else if (size <= 32 * 1024) {
4571 size = 32 * 1024;
4572 } else if (size <= 64 * 1024) {
4573 size = 64 * 1024;
4574 } else if (size <= 128 * 1024) {
4575 size = 128 * 1024;
4576 } else if (size <= 256 * 1024) {
4577 size = 256 * 1024;
4578 } else if (size <= 512 * 1024) {
4579 size = 512 * 1024;
4580 } else if (size <= 1024 * 1024) {
4581 size = 1024 * 1024;
4582 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4583 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4584 (21 - bsbits)) << 21;
4585 size = 2 * 1024 * 1024;
4586 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4587 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4588 (22 - bsbits)) << 22;
4589 size = 4 * 1024 * 1024;
4590 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4591 (8<<20)>>bsbits, max, 8 * 1024)) {
4592 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4593 (23 - bsbits)) << 23;
4594 size = 8 * 1024 * 1024;
4595 } else {
4596 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4597 size = (loff_t) EXT4_C2B(sbi,
4598 ac->ac_o_ex.fe_len) << bsbits;
4599 }
4600 size = size >> bsbits;
4601 start = start_off >> bsbits;
4602
4603 /*
4604 * For tiny groups (smaller than 8MB) the chosen allocation
4605 * alignment may be larger than group size. Make sure the
4606 * alignment does not move allocation to a different group which
4607 * makes mballoc fail assertions later.
4608 */
4609 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4610 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4611
4612 /* avoid unnecessary preallocation that may trigger assertions */
4613 if (start + size > EXT_MAX_BLOCKS)
4614 size = EXT_MAX_BLOCKS - start;
4615
4616 /* don't cover already allocated blocks in selected range */
4617 if (ar->pleft && start <= ar->lleft) {
4618 size -= ar->lleft + 1 - start;
4619 start = ar->lleft + 1;
4620 }
4621 if (ar->pright && start + size - 1 >= ar->lright)
4622 size -= start + size - ar->lright;
4623
4624 /*
4625 * Trim allocation request for filesystems with artificially small
4626 * groups.
*/
4628 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4629 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4630
4631 end = start + size;
4632
4633 ext4_mb_pa_adjust_overlap(ac, &start, &end);
4634
4635 size = end - start;
4636
4637 /*
4638 * In this function "start" and "size" are normalized for better
4639 * alignment and length such that we could preallocate more blocks.
4640 * This normalization is done such that the original request of
4641 * ac->ac_o_ex.fe_logical & fe_len should always lie within the "start"
4642 * and "size" boundaries.
4643 * (Note fe_len can be relaxed since the FS block allocation API does
4644 * not guarantee the number of contiguous blocks allocated, since that
4645 * depends upon the free space left, etc).
4646 * In case of inode pa, later we use the allocated blocks
4647 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4648 * range of goal/best blocks [start, size] to put it at the
4649 * ac_o_ex.fe_logical extent of this inode.
4650 * (See ext4_mb_use_inode_pa() for more details)
4651 */
4652 if (start + size <= ac->ac_o_ex.fe_logical ||
4653 start > ac->ac_o_ex.fe_logical) {
4654 ext4_msg(ac->ac_sb, KERN_ERR,
4655 "start %lu, size %lu, fe_logical %lu",
4656 (unsigned long) start, (unsigned long) size,
4657 (unsigned long) ac->ac_o_ex.fe_logical);
4658 BUG();
4659 }
4660 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4661
4662 /* now prepare goal request */
4663
4664 /* XXX: is it better to align blocks WRT logical
4665 * placement or satisfy a big request as is */
4666 ac->ac_g_ex.fe_logical = start;
4667 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4668 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4669
4670 /* define goal start in order to merge */
4671 if (ar->pright && (ar->lright == (start + size)) &&
4672 ar->pright >= size &&
4673 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4674 /* merge to the right */
4675 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4676 &ac->ac_g_ex.fe_group,
4677 &ac->ac_g_ex.fe_start);
4678 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4679 }
4680 if (ar->pleft && (ar->lleft + 1 == start) &&
4681 ar->pleft + 1 < ext4_blocks_count(es)) {
4682 /* merge to the left */
4683 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4684 &ac->ac_g_ex.fe_group,
4685 &ac->ac_g_ex.fe_start);
4686 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4687 }
4688
4689 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4690 orig_size, start);
4691 }
4692
4693 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4694 {
4695 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4696
4697 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4698 atomic_inc(&sbi->s_bal_reqs);
4699 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4700 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4701 atomic_inc(&sbi->s_bal_success);
4702
4703 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4704 for (int i = 0; i < EXT4_MB_NUM_CRS; i++) {
4705 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4706 }
4707
4708 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4709 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4710 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4711 atomic_inc(&sbi->s_bal_goals);
4712 /* did we allocate as much as normalizer originally wanted?
*/ 4713 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) 4714 atomic_inc(&sbi->s_bal_len_goals); 4715 4716 if (ac->ac_found > sbi->s_mb_max_to_scan) 4717 atomic_inc(&sbi->s_bal_breaks); 4718 } 4719 4720 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4721 trace_ext4_mballoc_alloc(ac); 4722 else 4723 trace_ext4_mballoc_prealloc(ac); 4724 } 4725 4726 /* 4727 * Called on failure; free up any blocks from the inode PA for this 4728 * context. We don't need this for MB_GROUP_PA because we only change 4729 * pa_free in ext4_mb_release_context(), but on failure, we've already 4730 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4731 */ 4732 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4733 { 4734 struct ext4_prealloc_space *pa = ac->ac_pa; 4735 struct ext4_buddy e4b; 4736 int err; 4737 4738 if (pa == NULL) { 4739 if (ac->ac_f_ex.fe_len == 0) 4740 return; 4741 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4742 if (WARN_RATELIMIT(err, 4743 "ext4: mb_load_buddy failed (%d)", err)) 4744 /* 4745 * This should never happen since we pin the 4746 * folios in the ext4_allocation_context so 4747 * ext4_mb_load_buddy() should never fail. 4748 */ 4749 return; 4750 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4751 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4752 ac->ac_f_ex.fe_len); 4753 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4754 ext4_mb_unload_buddy(&e4b); 4755 return; 4756 } 4757 if (pa->pa_type == MB_INODE_PA) { 4758 spin_lock(&pa->pa_lock); 4759 pa->pa_free += ac->ac_b_ex.fe_len; 4760 spin_unlock(&pa->pa_lock); 4761 } 4762 } 4763 4764 /* 4765 * use blocks preallocated to inode 4766 */ 4767 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4768 struct ext4_prealloc_space *pa) 4769 { 4770 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4771 ext4_fsblk_t start; 4772 ext4_fsblk_t end; 4773 int len; 4774 4775 /* found preallocated blocks, use them */ 4776 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4777 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4778 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4779 len = EXT4_NUM_B2C(sbi, end - start); 4780 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4781 &ac->ac_b_ex.fe_start); 4782 ac->ac_b_ex.fe_len = len; 4783 ac->ac_status = AC_STATUS_FOUND; 4784 ac->ac_pa = pa; 4785 4786 BUG_ON(start < pa->pa_pstart); 4787 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4788 BUG_ON(pa->pa_free < len); 4789 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4790 pa->pa_free -= len; 4791 4792 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4793 } 4794 4795 /* 4796 * use blocks preallocated to locality group 4797 */ 4798 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4799 struct ext4_prealloc_space *pa) 4800 { 4801 unsigned int len = ac->ac_o_ex.fe_len; 4802 4803 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4804 &ac->ac_b_ex.fe_group, 4805 &ac->ac_b_ex.fe_start); 4806 ac->ac_b_ex.fe_len = len; 4807 ac->ac_status = AC_STATUS_FOUND; 4808 ac->ac_pa = pa; 4809 4810 /* we don't correct pa_pstart or pa_len here to avoid 4811 * possible race when the group is being loaded concurrently 4812 * instead we correct pa later, after blocks are marked 4813 * in on-disk bitmap -- see ext4_mb_release_context() 4814 * Other CPUs are prevented from allocating from this pa by lg_mutex 4815 */ 4816 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 4817 pa->pa_lstart, len, pa); 4818 } 4819 4820 
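/*
 * Example of the inode PA window math in ext4_mb_use_inode_pa() above:
 * for a PA with pa_lstart = 100, pa_pstart = 5000, pa_len = 16 (one
 * block per cluster assumed) and an original request of 4 blocks at
 * logical block 110, start = 5000 + (110 - 100) = 5010 and
 * end = min(5016, 5014) = 5014, so the best extent becomes physical
 * blocks [5010, 5014) with fe_len = 4, and pa_free drops by 4.
 */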
/*
4821 * Return the prealloc space that has the minimal distance
4822 * from the goal block. @cpa is the prealloc
4823 * space with the currently known minimal distance
4824 * from the goal block.
4825 */
4826 static struct ext4_prealloc_space *
4827 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4828 struct ext4_prealloc_space *pa,
4829 struct ext4_prealloc_space *cpa)
4830 {
4831 ext4_fsblk_t cur_distance, new_distance;
4832
4833 if (cpa == NULL) {
4834 atomic_inc(&pa->pa_count);
4835 return pa;
4836 }
4837 cur_distance = abs(goal_block - cpa->pa_pstart);
4838 new_distance = abs(goal_block - pa->pa_pstart);
4839
4840 if (cur_distance <= new_distance)
4841 return cpa;
4842
4843 /* drop the previous reference */
4844 atomic_dec(&cpa->pa_count);
4845 atomic_inc(&pa->pa_count);
4846 return pa;
4847 }
4848
4849 /*
4850 * check if the found pa meets EXT4_MB_HINT_GOAL_ONLY
4851 */
4852 static bool
4853 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4854 struct ext4_prealloc_space *pa)
4855 {
4856 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4857 ext4_fsblk_t start;
4858
4859 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4860 return true;
4861
4862 /*
4863 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4864 * in ext4_mb_normalize_request and will keep the same as ac_o_ex
4865 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4866 * consistent with ext4_mb_find_by_goal.
4867 */
4868 start = pa->pa_pstart +
4869 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4870 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4871 return false;
4872
4873 if (ac->ac_g_ex.fe_len > pa->pa_len -
4874 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4875 return false;
4876
4877 return true;
4878 }
4879
4880 /*
4881 * search goal blocks in preallocated space
4882 */
4883 static noinline_for_stack bool
4884 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4885 {
4886 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4887 int order, i;
4888 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4889 struct ext4_locality_group *lg;
4890 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4891 struct rb_node *iter;
4892 ext4_fsblk_t goal_block;
4893
4894 /* only data can be preallocated */
4895 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4896 return false;
4897
4898 /*
4899 * first, try per-file preallocation by searching the inode pa rbtree.
4900 *
4901 * Here, we can't do a direct traversal of the tree because
4902 * ext4_mb_discard_group_preallocation() can mark the pa deleted in
4903 * parallel, and that can cause direct traversal to skip some entries.
4904 */
4905 read_lock(&ei->i_prealloc_lock);
4906
4907 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4908 goto try_group_pa;
4909 }
4910
4911 /*
4912 * Step 1: Find a pa with logical start immediately adjacent to the
4913 * original logical start. This could be on the left or right.
4914 *
4915 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4916 */
4917 for (iter = ei->i_prealloc_node.rb_node; iter;
4918 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4919 tmp_pa->pa_lstart, iter)) {
4920 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4921 pa_node.inode_node);
4922 }
4923
4924 /*
4925 * Step 2: The adjacent pa might be to the right of logical start, find
4926 * the left adjacent pa.
After this step we'd have a valid tmp_pa whose
4927 * logical start is towards the left of the original request's logical start
4928 */
4929 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4930 struct rb_node *tmp;
4931 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4932
4933 if (tmp) {
4934 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4935 pa_node.inode_node);
4936 } else {
4937 /*
4938 * If there is no adjacent pa to the left then finding
4939 * an overlapping pa is not possible, hence stop searching
4940 * the inode pa tree
4941 */
4942 goto try_group_pa;
4943 }
4944 }
4945
4946 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4947
4948 /*
4949 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4950 * the first non-deleted adjacent pa. After this step we should have a
4951 * valid tmp_pa which is guaranteed to be non-deleted.
4952 */
4953 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4954 if (!iter) {
4955 /*
4956 * no non-deleted left adjacent pa, so stop searching
4957 * the inode pa tree
4958 */
4959 goto try_group_pa;
4960 }
4961 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4962 pa_node.inode_node);
4963 spin_lock(&tmp_pa->pa_lock);
4964 if (tmp_pa->pa_deleted == 0) {
4965 /*
4966 * We will keep holding the pa_lock from
4967 * this point on because we don't want group discard
4968 * to delete this pa underneath us. Since group
4969 * discard is anyway an ENOSPC operation it
4970 * should be okay for it to wait a few more cycles.
4971 */
4972 break;
4973 } else {
4974 spin_unlock(&tmp_pa->pa_lock);
4975 }
4976 }
4977
4978 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4979 BUG_ON(tmp_pa->pa_deleted == 1);
4980
4981 /*
4982 * Step 4: We now have the non-deleted left adjacent pa. Only this
4983 * pa can possibly satisfy the request hence check if it overlaps
4984 * original logical start and stop searching if it doesn't.
4985 */
4986 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4987 spin_unlock(&tmp_pa->pa_lock);
4988 goto try_group_pa;
4989 }
4990
4991 /* non-extent files can't have physical blocks past 2^32 */
4992 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4993 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4994 EXT4_MAX_BLOCK_FILE_PHYS)) {
4995 /*
4996 * Since PAs don't overlap, we won't find any other PA to
4997 * satisfy this.
4998 */
4999 spin_unlock(&tmp_pa->pa_lock);
5000 goto try_group_pa;
5001 }
5002
5003 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
5004 atomic_inc(&tmp_pa->pa_count);
5005 ext4_mb_use_inode_pa(ac, tmp_pa);
5006 spin_unlock(&tmp_pa->pa_lock);
5007 read_unlock(&ei->i_prealloc_lock);
5008 return true;
5009 } else {
5010 /*
5011 * We found a valid overlapping pa but couldn't use it because
5012 * it had no free blocks. This should ideally never happen
5013 * because:
5014 *
5015 * 1. When a new inode pa is added to rbtree it must have
5016 * pa_free > 0 since otherwise we won't actually need
5017 * preallocation.
5018 *
5019 * 2. An inode pa that is in the rbtree can only have its
5020 * pa_free become zero when another thread calls:
5021 * ext4_mb_new_blocks
5022 * ext4_mb_use_preallocated
5023 * ext4_mb_use_inode_pa
5024 *
5025 * 3. Further, after the above calls make pa_free == 0, we will
5026 * immediately remove it from the rbtree in:
5027 * ext4_mb_new_blocks
5028 * ext4_mb_release_context
5029 * ext4_mb_put_pa
5030 *
5031 * 4.
Since pa_free becoming 0 and the pa getting removed
5032 * from the tree both happen in ext4_mb_new_blocks, which is always
5033 * called with i_data_sem held for data allocations, we can be
5034 * sure that another process will never see a pa in the rbtree with
5035 * pa_free == 0.
5036 */
5037 WARN_ON_ONCE(tmp_pa->pa_free == 0);
5038 }
5039 spin_unlock(&tmp_pa->pa_lock);
5040 try_group_pa:
5041 read_unlock(&ei->i_prealloc_lock);
5042
5043 /* can we use group allocation? */
5044 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
5045 return false;
5046
5047 /* inode may have no locality group for some reason */
5048 lg = ac->ac_lg;
5049 if (lg == NULL)
5050 return false;
5051 order = fls(ac->ac_o_ex.fe_len) - 1;
5052 if (order > PREALLOC_TB_SIZE - 1)
5053 /* The max size of hash table is PREALLOC_TB_SIZE */
5054 order = PREALLOC_TB_SIZE - 1;
5055
5056 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
5057 /*
5058 * search for the prealloc space with the
5059 * minimal distance from the goal block.
5060 */
5061 for (i = order; i < PREALLOC_TB_SIZE; i++) {
5062 rcu_read_lock();
5063 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
5064 pa_node.lg_list) {
5065 spin_lock(&tmp_pa->pa_lock);
5066 if (tmp_pa->pa_deleted == 0 &&
5067 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
5068
5069 cpa = ext4_mb_check_group_pa(goal_block,
5070 tmp_pa, cpa);
5071 }
5072 spin_unlock(&tmp_pa->pa_lock);
5073 }
5074 rcu_read_unlock();
5075 }
5076 if (cpa) {
5077 ext4_mb_use_group_pa(ac, cpa);
5078 return true;
5079 }
5080 return false;
5081 }
5082
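/*
 * Bucket example for the search above: a request for 13 blocks has
 * fls(13) - 1 = 3, so the loop starts at lg_prealloc_list[3] and walks
 * every higher bucket, keeping (via ext4_mb_check_group_pa()) the PA
 * with pa_free >= 13 whose pa_pstart lies closest to the goal block.
 */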
5083 /*
5084 * the function goes through all preallocations in this group and marks
5085 * them used in the in-core bitmap. The buddy must be generated from this
5086 * bitmap. Needs to be called with the ext4 group lock held.
5087 */
5088 static noinline_for_stack
5089 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
5090 ext4_group_t group)
5091 {
5092 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5093 struct ext4_prealloc_space *pa;
5094 struct list_head *cur;
5095 ext4_group_t groupnr;
5096 ext4_grpblk_t start;
5097 int preallocated = 0;
5098 int len;
5099
5100 if (!grp)
5101 return;
5102
5103 /* all forms of preallocation discard first load the group,
5104 * so the only competing code is preallocation use.
5105 * We don't need any locking here.
5106 * Note that we do NOT ignore preallocations with pa_deleted set;
5107 * otherwise we could leave used blocks available for
5108 * allocation in the buddy when a concurrent ext4_mb_put_pa()
5109 * is dropping a preallocation
5110 */
5111 list_for_each(cur, &grp->bb_prealloc_list) {
5112 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5113 spin_lock(&pa->pa_lock);
5114 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5115 &groupnr, &start);
5116 len = pa->pa_len;
5117 spin_unlock(&pa->pa_lock);
5118 if (unlikely(len == 0))
5119 continue;
5120 BUG_ON(groupnr != group);
5121 mb_set_bits(bitmap, start, len);
5122 preallocated += len;
5123 }
5124 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
5125 }
5126
5127 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
5128 struct ext4_prealloc_space *pa)
5129 {
5130 struct ext4_inode_info *ei;
5131
5132 if (pa->pa_deleted) {
5133 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5134 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5135 pa->pa_len);
5136 return;
5137 }
5138
5139 pa->pa_deleted = 1;
5140
5141 if (pa->pa_type == MB_INODE_PA) {
5142 ei = EXT4_I(pa->pa_inode);
5143 atomic_dec(&ei->i_prealloc_active);
5144 }
5145 }
5146
5147 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5148 {
5149 BUG_ON(!pa);
5150 BUG_ON(atomic_read(&pa->pa_count));
5151 BUG_ON(pa->pa_deleted == 0);
5152 kmem_cache_free(ext4_pspace_cachep, pa);
5153 }
5154
5155 static void ext4_mb_pa_callback(struct rcu_head *head)
5156 {
5157 struct ext4_prealloc_space *pa;
5158
5159 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5160 ext4_mb_pa_free(pa);
5161 }
5162
5163 /*
5164 * drops a reference to the preallocated space descriptor
5165 * if this was the last reference and the space is consumed
5166 */
5167 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5168 struct super_block *sb, struct ext4_prealloc_space *pa)
5169 {
5170 ext4_group_t grp;
5171 ext4_fsblk_t grp_blk;
5172 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5173
5174 /* in this short window concurrent discard can set pa_deleted */
5175 spin_lock(&pa->pa_lock);
5176 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5177 spin_unlock(&pa->pa_lock);
5178 return;
5179 }
5180
5181 if (pa->pa_deleted == 1) {
5182 spin_unlock(&pa->pa_lock);
5183 return;
5184 }
5185
5186 ext4_mb_mark_pa_deleted(sb, pa);
5187 spin_unlock(&pa->pa_lock);
5188
5189 grp_blk = pa->pa_pstart;
5190 /*
5191 * If doing group-based preallocation, pa_pstart may be in the
5192 * next group when pa is used up
5193 */
5194 if (pa->pa_type == MB_GROUP_PA)
5195 grp_blk--;
5196
5197 grp = ext4_get_group_number(sb, grp_blk);
5198
5199 /*
5200 * possible race:
5201 *
5202 * P1 (buddy init) P2 (regular allocation)
5203 * find block B in PA
5204 * copy on-disk bitmap to buddy
5205 * mark B in on-disk bitmap
5206 * drop PA from group
5207 * mark all PAs in buddy
5208 *
5209 * thus, P1 initializes buddy with B available.
to prevent this
5210 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5211 * against that pair
5212 */
5213 ext4_lock_group(sb, grp);
5214 list_del(&pa->pa_group_list);
5215 ext4_unlock_group(sb, grp);
5216
5217 if (pa->pa_type == MB_INODE_PA) {
5218 write_lock(pa->pa_node_lock.inode_lock);
5219 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5220 write_unlock(pa->pa_node_lock.inode_lock);
5221 ext4_mb_pa_free(pa);
5222 } else {
5223 spin_lock(pa->pa_node_lock.lg_lock);
5224 list_del_rcu(&pa->pa_node.lg_list);
5225 spin_unlock(pa->pa_node_lock.lg_lock);
5226 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5227 }
5228 }
5229
5230 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5231 {
5232 struct rb_node **iter = &root->rb_node, *parent = NULL;
5233 struct ext4_prealloc_space *iter_pa, *new_pa;
5234 ext4_lblk_t iter_start, new_start;
5235
5236 while (*iter) {
5237 iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5238 pa_node.inode_node);
5239 new_pa = rb_entry(new, struct ext4_prealloc_space,
5240 pa_node.inode_node);
5241 iter_start = iter_pa->pa_lstart;
5242 new_start = new_pa->pa_lstart;
5243
5244 parent = *iter;
5245 if (new_start < iter_start)
5246 iter = &((*iter)->rb_left);
5247 else
5248 iter = &((*iter)->rb_right);
5249 }
5250
5251 rb_link_node(new, parent, iter);
5252 rb_insert_color(new, root);
5253 }
5254
5255 /*
5256 * creates new preallocated space for the given inode
5257 */
5258 static noinline_for_stack void
5259 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5260 {
5261 struct super_block *sb = ac->ac_sb;
5262 struct ext4_sb_info *sbi = EXT4_SB(sb);
5263 struct ext4_prealloc_space *pa;
5264 struct ext4_group_info *grp;
5265 struct ext4_inode_info *ei;
5266
5267 /* preallocate only when the found space is larger than requested */
5268 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5269 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5270 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5271 BUG_ON(ac->ac_pa == NULL);
5272
5273 pa = ac->ac_pa;
5274
5275 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5276 struct ext4_free_extent ex = {
5277 .fe_logical = ac->ac_g_ex.fe_logical,
5278 .fe_len = ac->ac_orig_goal_len,
5279 };
5280 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5281 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
5282
5283 /*
5284 * We can't allocate as much as the normalizer wants, so we try
5285 * to get a proper lstart to cover the original request, except
5286 * when the goal doesn't cover the original request as below:
5287 *
5288 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
5289 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
5290 */
5291 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5292 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5293
5294 /*
5295 * Use the below logic for adjusting the best extent as it keeps
5296 * fragmentation in check while ensuring the logical range of the
5297 * best extent doesn't overflow out of the goal extent:
5298 *
5299 * 1. Check if best ex can be kept at end of goal (before
5300 * cr_best_avail trimmed it) and still cover original start
5301 * 2. Else, check if best ex can be kept at start of goal and
5302 * still cover original end
5303 * 3. Else, keep the best ex at start of original request.
*/
5305 ex.fe_len = ac->ac_b_ex.fe_len;
5306
5307 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5308 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5309 goto adjust_bex;
5310
5311 ex.fe_logical = ac->ac_g_ex.fe_logical;
5312 if (o_ex_end <= extent_logical_end(sbi, &ex))
5313 goto adjust_bex;
5314
5315 ex.fe_logical = ac->ac_o_ex.fe_logical;
5316 adjust_bex:
5317 ac->ac_b_ex.fe_logical = ex.fe_logical;
5318
5319 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5320 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5321 }
5322
5323 pa->pa_lstart = ac->ac_b_ex.fe_logical;
5324 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5325 pa->pa_len = ac->ac_b_ex.fe_len;
5326 pa->pa_free = pa->pa_len;
5327 spin_lock_init(&pa->pa_lock);
5328 INIT_LIST_HEAD(&pa->pa_group_list);
5329 pa->pa_deleted = 0;
5330 pa->pa_type = MB_INODE_PA;
5331
5332 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5333 pa->pa_len, pa->pa_lstart);
5334 trace_ext4_mb_new_inode_pa(ac, pa);
5335
5336 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5337 ext4_mb_use_inode_pa(ac, pa);
5338
5339 ei = EXT4_I(ac->ac_inode);
5340 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5341 if (!grp)
5342 return;
5343
5344 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5345 pa->pa_inode = ac->ac_inode;
5346
5347 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5348
5349 write_lock(pa->pa_node_lock.inode_lock);
5350 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5351 write_unlock(pa->pa_node_lock.inode_lock);
5352 atomic_inc(&ei->i_prealloc_active);
5353 }
5354
5355 /*
5356 * creates new preallocated space for the locality group the inode belongs to
5357 */
5358 static noinline_for_stack void
5359 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5360 {
5361 struct super_block *sb = ac->ac_sb;
5362 struct ext4_locality_group *lg;
5363 struct ext4_prealloc_space *pa;
5364 struct ext4_group_info *grp;
5365
5366 /* preallocate only when the found space is larger than requested */
5367 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5368 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5369 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5370 BUG_ON(ac->ac_pa == NULL);
5371
5372 pa = ac->ac_pa;
5373
5374 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5375 pa->pa_lstart = pa->pa_pstart;
5376 pa->pa_len = ac->ac_b_ex.fe_len;
5377 pa->pa_free = pa->pa_len;
5378 spin_lock_init(&pa->pa_lock);
5379 INIT_LIST_HEAD(&pa->pa_node.lg_list);
5380 INIT_LIST_HEAD(&pa->pa_group_list);
5381 pa->pa_deleted = 0;
5382 pa->pa_type = MB_GROUP_PA;
5383
5384 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5385 pa->pa_len, pa->pa_lstart);
5386 trace_ext4_mb_new_group_pa(ac, pa);
5387
5388 ext4_mb_use_group_pa(ac, pa);
5389 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5390
5391 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5392 if (!grp)
5393 return;
5394 lg = ac->ac_lg;
5395 BUG_ON(lg == NULL);
5396
5397 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5398 pa->pa_inode = NULL;
5399
5400 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5401
5402 /*
5403 * We will later add the new pa to the right bucket
5404 * after updating the pa_free in ext4_mb_release_context
5405 */
5406 }
5407
5408 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5409 {
5410 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5411 ext4_mb_new_group_pa(ac);
5412 else
5413 ext4_mb_new_inode_pa(ac);
5414 }
5415
5416 /*
5417 * finds all unused blocks in
/*
 * finds all unused blocks in on-disk bitmap, frees them in
 * in-core bitmap and buddy.
 * @pa must be unlinked from inode and group lists, so that
 * nobody else can find/use it.
 * the caller MUST hold group/inode locks.
 * TODO: optimize the case when there are no in-core structures yet
 */
static noinline_for_stack void
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
			 struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned int end;
	unsigned int next;
	ext4_group_t group;
	ext4_grpblk_t bit;
	unsigned long long grp_blk_start;
	int free = 0;

	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	end = bit + pa->pa_len;

	while (bit < end) {
		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
		if (bit >= end)
			break;
		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
		mb_debug(sb, "free preallocated %u/%u in group %u\n",
			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
			 (unsigned) next - bit, (unsigned) group);
		free += next - bit;

		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
						    EXT4_C2B(sbi, bit)),
					       next - bit);
		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
		bit = next + 1;
	}
	if (free != pa->pa_free) {
		ext4_msg(e4b->bd_sb, KERN_CRIT,
			 "pa %p: logic %lu, phys. %lu, len %d",
			 pa, (unsigned long) pa->pa_lstart,
			 (unsigned long) pa->pa_pstart,
			 pa->pa_len);
		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
				      free, pa->pa_free);
		/*
		 * pa is already deleted so we use the value obtained
		 * from the bitmap and continue.
		 */
	}
	atomic_add(free, &sbi->s_mb_discarded);
}
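/*
 * Illustrative sketch, compiled out: the bitmap-walk idiom used above.
 * Free runs inside [bit, end) are located by alternating
 * mb_find_next_zero_bit() (start of a free run) and mb_find_next_bit()
 * (first used bit after the run).
 */
#if 0
static unsigned int example_count_free(void *bitmap, unsigned int bit,
				       unsigned int end)
{
	unsigned int next, free = 0;

	while (bit < end) {
		bit = mb_find_next_zero_bit(bitmap, end, bit); /* run start */
		if (bit >= end)
			break;
		next = mb_find_next_bit(bitmap, end, bit);     /* run end */
		free += next - bit;
		bit = next + 1;
	}
	return free;
}
#endif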
static noinline_for_stack void
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
			 struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	ext4_group_t group;
	ext4_grpblk_t bit;

	trace_ext4_mb_release_group_pa(sb, pa);
	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
			     e4b->bd_group, group, pa->pa_pstart);
		return;
	}
	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
}

/*
 * releases all preallocations in given group
 *
 * first, we need to decide discard policy:
 * - when do we discard
 *	1) ENOSPC
 * - how many do we discard
 *	1) how many requested
 */
static noinline_for_stack int
ext4_mb_discard_group_preallocations(struct super_block *sb,
				     ext4_group_t group, int *busy)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	LIST_HEAD(list);
	struct ext4_buddy e4b;
	struct ext4_inode_info *ei;
	int err;
	int free = 0;

	if (!grp)
		return 0;
	mb_debug(sb, "discard preallocation for group %u\n", group);
	if (list_empty(&grp->bb_prealloc_list))
		goto out_dbg;

	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		ext4_error_err(sb, -err,
			       "Error %d reading block bitmap for %u",
			       err, group);
		goto out_dbg;
	}

	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     err, group);
		put_bh(bitmap_bh);
		goto out_dbg;
	}

	ext4_lock_group(sb, group);
	list_for_each_entry_safe(pa, tmp,
				 &grp->bb_prealloc_list, pa_group_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			spin_unlock(&pa->pa_lock);
			*busy = 1;
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		/* seems this one can be freed ... */
		ext4_mb_mark_pa_deleted(sb, pa);

		if (!free)
			this_cpu_inc(discard_pa_seq);

		/* we can trust pa_free ... */
		free += pa->pa_free;

		spin_unlock(&pa->pa_lock);

		list_del(&pa->pa_group_list);
		list_add(&pa->u.pa_tmp_list, &list);
	}
	/* now free all selected PAs */
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {

		/* remove from object (inode or locality group) */
		if (pa->pa_type == MB_GROUP_PA) {
			spin_lock(pa->pa_node_lock.lg_lock);
			list_del_rcu(&pa->pa_node.lg_list);
			spin_unlock(pa->pa_node_lock.lg_lock);
		} else {
			write_lock(pa->pa_node_lock.inode_lock);
			ei = EXT4_I(pa->pa_inode);
			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
			write_unlock(pa->pa_node_lock.inode_lock);
		}

		list_del(&pa->u.pa_tmp_list);

		if (pa->pa_type == MB_GROUP_PA) {
			ext4_mb_release_group_pa(&e4b, pa);
			call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
		} else {
			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
			ext4_mb_pa_free(pa);
		}
	}

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);
	put_bh(bitmap_bh);
out_dbg:
	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
		 free, group, grp->bb_free);
	return free;
}
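/*
 * Minimal sketch of the two-pass structure above, with the locking elided:
 * pass one takes only pa->pa_lock (skip busy pas, mark the rest deleted
 * and move them to a private list); pass two takes the owning object's
 * lock (lg list or inode rb-tree) and releases the blocks, so the two
 * lock classes are never nested. Compiled out, illustration only.
 */
#if 0
static int example_two_pass_discard(struct list_head *group_list)
{
	LIST_HEAD(list);
	struct ext4_prealloc_space *pa, *tmp;
	int free = 0;

	list_for_each_entry_safe(pa, tmp, group_list, pa_group_list) {
		if (atomic_read(&pa->pa_count) || pa->pa_deleted)
			continue;	/* busy or already going away */
		pa->pa_deleted = 1;
		free += pa->pa_free;
		list_del(&pa->pa_group_list);
		list_add(&pa->u.pa_tmp_list, &list);
	}
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
		list_del(&pa->u.pa_tmp_list);
		/* unlink from the owner and free the blocks here */
	}
	return free;
}
#endif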
/*
 * releases all non-used preallocated blocks for given inode
 *
 * It's important to discard preallocations under i_data_sem.
 * We don't want another block to be served from the prealloc
 * space when we are discarding the inode prealloc space.
 *
 * FIXME!! Make sure it is valid at all the call sites
 */
void ext4_discard_preallocations(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	ext4_group_t group = 0;
	LIST_HEAD(list);
	struct ext4_buddy e4b;
	struct rb_node *iter;
	int err;

	if (!S_ISREG(inode->i_mode))
		return;

	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	mb_debug(sb, "discard preallocation for inode %lu\n",
		 inode->i_ino);
	trace_ext4_discard_preallocations(inode,
			atomic_read(&ei->i_prealloc_active));

repeat:
	/* first, collect all pa's in the inode */
	write_lock(&ei->i_prealloc_lock);
	for (iter = rb_first(&ei->i_prealloc_node); iter;
	     iter = rb_next(iter)) {
		pa = rb_entry(iter, struct ext4_prealloc_space,
			      pa_node.inode_node);
		BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);

		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* this shouldn't happen often - nobody should
			 * use preallocation while we're discarding it */
			spin_unlock(&pa->pa_lock);
			write_unlock(&ei->i_prealloc_lock);
			ext4_msg(sb, KERN_ERR,
				 "uh-oh! used pa while discarding");
			WARN_ON(1);
			schedule_timeout_uninterruptible(HZ);
			goto repeat;

		}
		if (pa->pa_deleted == 0) {
			ext4_mb_mark_pa_deleted(sb, pa);
			spin_unlock(&pa->pa_lock);
			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
			list_add(&pa->u.pa_tmp_list, &list);
			continue;
		}

		/* someone is deleting pa right now */
		spin_unlock(&pa->pa_lock);
		write_unlock(&ei->i_prealloc_lock);

		/* we have to wait here because pa_deleted
		 * doesn't mean pa is already unlinked from
		 * the list. As we might be called from
		 * ->clear_inode(), the inode will get freed
		 * and a concurrent thread which is unlinking
		 * the pa from the inode's list may access
		 * already freed memory - bad-bad-bad */

		/* XXX: if this happens too often, we can
		 * add a flag to force wait only in case
		 * of ->clear_inode(), but not in case of
		 * regular truncate */
		schedule_timeout_uninterruptible(HZ);
		goto repeat;
	}
	write_unlock(&ei->i_prealloc_lock);

	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
		BUG_ON(pa->pa_type != MB_INODE_PA);
		group = ext4_get_group_number(sb, pa->pa_pstart);

		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
					     GFP_NOFS|__GFP_NOFAIL);
		if (err) {
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
				       err, group);
			continue;
		}

		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(bitmap_bh)) {
			err = PTR_ERR(bitmap_bh);
			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
				       err, group);
			ext4_mb_unload_buddy(&e4b);
			continue;
		}

		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		put_bh(bitmap_bh);

		list_del(&pa->u.pa_tmp_list);
		ext4_mb_pa_free(pa);
	}
}

static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa;

	BUG_ON(ext4_pspace_cachep == NULL);
	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
	if (!pa)
		return -ENOMEM;
	atomic_set(&pa->pa_count, 1);
	ac->ac_pa = pa;
	return 0;
}
static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;

	BUG_ON(!pa);
	ac->ac_pa = NULL;
	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
	/*
	 * This function is only called due to an error or because the
	 * length of the found blocks is less than the requested length,
	 * so the PA has not been added to grp->bb_prealloc_list and we
	 * don't need to lock it.
	 */
	pa->pa_deleted = 1;
	ext4_mb_pa_free(pa);
}

#ifdef CONFIG_EXT4_DEBUG
static inline void ext4_mb_show_pa(struct super_block *sb)
{
	ext4_group_t i, ngroups;

	if (ext4_emergency_state(sb))
		return;

	ngroups = ext4_get_groups_count(sb);
	mb_debug(sb, "groups: ");
	for (i = 0; i < ngroups; i++) {
		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
		struct ext4_prealloc_space *pa;
		ext4_grpblk_t start;
		struct list_head *cur;

		if (!grp)
			continue;
		ext4_lock_group(sb, i);
		list_for_each(cur, &grp->bb_prealloc_list) {
			pa = list_entry(cur, struct ext4_prealloc_space,
					pa_group_list);
			spin_lock(&pa->pa_lock);
			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
						     NULL, &start);
			spin_unlock(&pa->pa_lock);
			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
				 pa->pa_len);
		}
		ext4_unlock_group(sb, i);
		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
			 grp->bb_fragments);
	}
}

static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;

	if (ext4_emergency_state(sb))
		return;

	mb_debug(sb, "Can't allocate:"
		 " Allocation context details:");
	mb_debug(sb, "status %u flags 0x%x",
		 ac->ac_status, ac->ac_flags);
	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
		 "goal %lu/%lu/%lu@%lu, "
		 "best %lu/%lu/%lu@%lu cr %d",
		 (unsigned long)ac->ac_o_ex.fe_group,
		 (unsigned long)ac->ac_o_ex.fe_start,
		 (unsigned long)ac->ac_o_ex.fe_len,
		 (unsigned long)ac->ac_o_ex.fe_logical,
		 (unsigned long)ac->ac_g_ex.fe_group,
		 (unsigned long)ac->ac_g_ex.fe_start,
		 (unsigned long)ac->ac_g_ex.fe_len,
		 (unsigned long)ac->ac_g_ex.fe_logical,
		 (unsigned long)ac->ac_b_ex.fe_group,
		 (unsigned long)ac->ac_b_ex.fe_start,
		 (unsigned long)ac->ac_b_ex.fe_len,
		 (unsigned long)ac->ac_b_ex.fe_logical,
		 (int)ac->ac_criteria);
	mb_debug(sb, "%u found", ac->ac_found);
	mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa));
	if (ac->ac_pa)
		mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
			 "group pa" : "inode pa");
	ext4_mb_show_pa(sb);
}
#else
static inline void ext4_mb_show_pa(struct super_block *sb)
{
}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	ext4_mb_show_pa(ac->ac_sb);
}
#endif
/*
 * We use locality group preallocation for small files. The size of the
 * file is determined by the current size or the resulting size after
 * allocation, whichever is larger.
 *
 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
 */
static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits = ac->ac_sb->s_blocksize_bits;
	loff_t size, isize;
	bool inode_pa_eligible, group_pa_eligible;

	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
	inode_pa_eligible = true;
	size = extent_logical_end(sbi, &ac->ac_o_ex);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		>> bsbits;

	/* No point in using inode preallocation for closed files */
	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
	    !inode_is_open_for_write(ac->ac_inode))
		inode_pa_eligible = false;

	size = max(size, isize);
	/* Don't use group allocation for large files */
	if (size > sbi->s_mb_stream_request)
		group_pa_eligible = false;

	if (!group_pa_eligible) {
		if (inode_pa_eligible)
			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		else
			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
		return;
	}

	BUG_ON(ac->ac_lg != NULL);
	/*
	 * locality group prealloc space is per cpu. The reason for having
	 * a per-cpu locality group is to reduce the contention between
	 * block requests from multiple CPUs.
	 */
	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);

	/* we're going to use group allocation */
	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;

	/* serialize all allocations in the group */
	mutex_lock(&ac->ac_lg->lg_mutex);
}
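/*
 * Worked example of the policy above, assuming the default
 * s_mb_stream_request of 16 blocks: a write leaving the file at 12 blocks
 * keeps group (locality) preallocation eligible, while one growing it to
 * 64 blocks clears group_pa_eligible and we fall back to
 * EXT4_MB_STREAM_ALLOC, i.e. inode preallocation. Compiled-out sketch of
 * the decision reduced to its essentials:
 */
#if 0
static bool example_uses_group_pa(loff_t size_blocks,
				  unsigned int stream_request)
{
	return size_blocks <= stream_request;	/* small file -> group pa */
}
#endif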
static noinline_for_stack void
ext4_mb_initialize_context(struct ext4_allocation_context *ac,
			   struct ext4_allocation_request *ar)
{
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t group;
	unsigned int len;
	ext4_fsblk_t goal;
	ext4_grpblk_t block;

	/* we can't allocate > group size */
	len = ar->len;

	/* just a dirty hack to filter too big requests */
	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
		len = EXT4_CLUSTERS_PER_GROUP(sb);

	/* start searching from the goal */
	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group, &block);

	/* set up allocation goals */
	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
	ac->ac_status = AC_STATUS_CONTINUE;
	ac->ac_sb = sb;
	ac->ac_inode = ar->inode;
	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
	ac->ac_o_ex.fe_group = group;
	ac->ac_o_ex.fe_start = block;
	ac->ac_o_ex.fe_len = len;
	ac->ac_g_ex = ac->ac_o_ex;
	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
	ac->ac_flags = ar->flags;

	/* we have to define context: we'll work with a file or
	 * locality group. this is a policy, actually */
	ext4_mb_group_or_file(ac);

	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
			"left: %u/%u, right %u/%u to %swritable\n",
		(unsigned) ar->len, (unsigned) ar->logical,
		(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
		(unsigned) ar->lleft, (unsigned) ar->pleft,
		(unsigned) ar->lright, (unsigned) ar->pright,
		inode_is_open_for_write(ar->inode) ? "" : "non-");
}

static noinline_for_stack void
ext4_mb_discard_lg_preallocations(struct super_block *sb,
				  struct ext4_locality_group *lg,
				  int order, int total_entries)
{
	ext4_group_t group = 0;
	struct ext4_buddy e4b;
	LIST_HEAD(discard_list);
	struct ext4_prealloc_space *pa, *tmp;

	mb_debug(sb, "discard locality group preallocation\n");

	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
				pa_node.lg_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/*
			 * This is the pa that we just used
			 * for block allocation. So don't
			 * free that
			 */
			spin_unlock(&pa->pa_lock);
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		/* only lg prealloc space */
		BUG_ON(pa->pa_type != MB_GROUP_PA);

		/* seems this one can be freed ... */
		ext4_mb_mark_pa_deleted(sb, pa);
		spin_unlock(&pa->pa_lock);

		list_del_rcu(&pa->pa_node.lg_list);
		list_add(&pa->u.pa_tmp_list, &discard_list);

		total_entries--;
		if (total_entries <= 5) {
			/*
			 * we want to keep only 5 entries
			 * allowing it to grow to 8. This
			 * makes sure we don't call discard
			 * again soon for this list.
			 */
			break;
		}
	}
	spin_unlock(&lg->lg_prealloc_lock);

	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
		int err;

		group = ext4_get_group_number(sb, pa->pa_pstart);
		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
					     GFP_NOFS|__GFP_NOFAIL);
		if (err) {
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
				       err, group);
			continue;
		}
		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_group_pa(&e4b, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}
/*
 * We have incremented pa_count. So it cannot be freed at this
 * point. Also we hold lg_mutex. So no parallel allocation is
 * possible from this lg. That means pa_free cannot be updated.
 *
 * A parallel ext4_mb_discard_group_preallocations is possible,
 * which can cause the lg_prealloc_list to be updated.
 */
static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
{
	int order, added = 0, lg_prealloc_count = 1;
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;

	order = fls(pa->pa_free) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;
	/* Add the prealloc space to lg */
	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
				pa_node.lg_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted) {
			spin_unlock(&tmp_pa->pa_lock);
			continue;
		}
		if (!added && pa->pa_free < tmp_pa->pa_free) {
			/* Add to the tail of the previous entry */
			list_add_tail_rcu(&pa->pa_node.lg_list,
					  &tmp_pa->pa_node.lg_list);
			added = 1;
			/*
			 * we want to count the total
			 * number of entries in the list
			 */
		}
		spin_unlock(&tmp_pa->pa_lock);
		lg_prealloc_count++;
	}
	if (!added)
		list_add_tail_rcu(&pa->pa_node.lg_list,
				  &lg->lg_prealloc_list[order]);
	spin_unlock(&lg->lg_prealloc_lock);

	/* Now trim the list to be not more than 8 elements */
	if (lg_prealloc_count > 8)
		ext4_mb_discard_lg_preallocations(sb, lg,
						  order, lg_prealloc_count);
}
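/*
 * Example of the bucket math above, compiled out: a pa with pa_free in
 * [2^i, 2^(i+1)) lands in lg_prealloc_list[i], clamped to the table size.
 */
#if 0
static int example_lg_bucket(int pa_free)
{
	int order = fls(pa_free) - 1;	/* e.g. pa_free = 100 -> order 6 */

	return min(order, PREALLOC_TB_SIZE - 1);
}
#endif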
/*
 * release all resources used in allocation
 */
static void ext4_mb_release_context(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_prealloc_space *pa = ac->ac_pa;

	if (pa) {
		if (pa->pa_type == MB_GROUP_PA) {
			/* see comment in ext4_mb_use_group_pa() */
			spin_lock(&pa->pa_lock);
			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_free -= ac->ac_b_ex.fe_len;
			pa->pa_len -= ac->ac_b_ex.fe_len;
			spin_unlock(&pa->pa_lock);

			/*
			 * We want to add the pa to the right bucket.
			 * Remove it from the list and while adding
			 * make sure the list to which we are adding
			 * doesn't grow big.
			 */
			if (likely(pa->pa_free)) {
				spin_lock(pa->pa_node_lock.lg_lock);
				list_del_rcu(&pa->pa_node.lg_list);
				spin_unlock(pa->pa_node_lock.lg_lock);
				ext4_mb_add_n_trim(ac);
			}
		}

		ext4_mb_put_pa(ac, ac->ac_sb, pa);
	}
	if (ac->ac_bitmap_folio)
		folio_put(ac->ac_bitmap_folio);
	if (ac->ac_buddy_folio)
		folio_put(ac->ac_buddy_folio);
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		mutex_unlock(&ac->ac_lg->lg_mutex);
	ext4_mb_collect_stats(ac);
}

static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
{
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int ret;
	int freed = 0, busy = 0;
	int retry = 0;

	trace_ext4_mb_discard_preallocations(sb, needed);

	if (needed == 0)
		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
repeat:
	for (i = 0; i < ngroups && needed > 0; i++) {
		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
		freed += ret;
		needed -= ret;
		cond_resched();
	}

	if (needed > 0 && busy && ++retry < 3) {
		busy = 0;
		goto repeat;
	}

	return freed;
}

static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
			struct ext4_allocation_context *ac, u64 *seq)
{
	int freed;
	u64 seq_retry = 0;
	bool ret = false;

	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
	if (freed) {
		ret = true;
		goto out_dbg;
	}
	seq_retry = ext4_get_discard_pa_seq_sum();
	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
		*seq = seq_retry;
		ret = true;
	}

out_dbg:
	mb_debug(sb, "freed %d, retry ? %s\n", freed, str_yes_no(ret));
	return ret;
}
/*
 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
 * linearly starting at the goal block and also excludes the blocks which
 * are going to be in use after fast commit replay.
 */
static ext4_fsblk_t
ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
{
	struct buffer_head *bitmap_bh;
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t group, nr;
	ext4_grpblk_t blkoff;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_fsblk_t goal, block;
	struct ext4_super_block *es = sbi->s_es;

	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);

	ar->len = 0;
	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(bitmap_bh)) {
			*errp = PTR_ERR(bitmap_bh);
			pr_warn("Failed to read block bitmap\n");
			return 0;
		}

		while (1) {
			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
						  blkoff);
			if (i >= max)
				break;
			if (ext4_fc_replay_check_excluded(sb,
				ext4_group_first_block_no(sb, group) +
				EXT4_C2B(sbi, i))) {
				blkoff = i + 1;
			} else
				break;
		}
		brelse(bitmap_bh);
		if (i < max)
			break;

		if (++group >= ext4_get_groups_count(sb))
			group = 0;

		blkoff = 0;
	}

	if (i >= max) {
		*errp = -ENOSPC;
		return 0;
	}

	block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
	ext4_mb_mark_bb(sb, block, 1, true);
	ar->len = 1;

	*errp = 0;
	return block;
}
6265 */ 6266 while (ar->len && 6267 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 6268 6269 /* let others to free the space */ 6270 cond_resched(); 6271 ar->len = ar->len >> 1; 6272 } 6273 if (!ar->len) { 6274 ext4_mb_show_pa(sb); 6275 *errp = -ENOSPC; 6276 return 0; 6277 } 6278 reserv_clstrs = ar->len; 6279 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 6280 dquot_alloc_block_nofail(ar->inode, 6281 EXT4_C2B(sbi, ar->len)); 6282 } else { 6283 while (ar->len && 6284 dquot_alloc_block(ar->inode, 6285 EXT4_C2B(sbi, ar->len))) { 6286 6287 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 6288 ar->len--; 6289 } 6290 } 6291 inquota = ar->len; 6292 if (ar->len == 0) { 6293 *errp = -EDQUOT; 6294 goto out; 6295 } 6296 } 6297 6298 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 6299 if (!ac) { 6300 ar->len = 0; 6301 *errp = -ENOMEM; 6302 goto out; 6303 } 6304 6305 ext4_mb_initialize_context(ac, ar); 6306 6307 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 6308 seq = this_cpu_read(discard_pa_seq); 6309 if (!ext4_mb_use_preallocated(ac)) { 6310 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 6311 ext4_mb_normalize_request(ac, ar); 6312 6313 *errp = ext4_mb_pa_alloc(ac); 6314 if (*errp) 6315 goto errout; 6316 repeat: 6317 /* allocate space in core */ 6318 *errp = ext4_mb_regular_allocator(ac); 6319 /* 6320 * pa allocated above is added to grp->bb_prealloc_list only 6321 * when we were able to allocate some block i.e. when 6322 * ac->ac_status == AC_STATUS_FOUND. 6323 * And error from above mean ac->ac_status != AC_STATUS_FOUND 6324 * So we have to free this pa here itself. 6325 */ 6326 if (*errp) { 6327 ext4_mb_pa_put_free(ac); 6328 ext4_discard_allocated_blocks(ac); 6329 goto errout; 6330 } 6331 if (ac->ac_status == AC_STATUS_FOUND && 6332 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 6333 ext4_mb_pa_put_free(ac); 6334 } 6335 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 6336 *errp = ext4_mb_mark_diskspace_used(ac, handle); 6337 if (*errp) { 6338 ext4_discard_allocated_blocks(ac); 6339 goto errout; 6340 } else { 6341 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 6342 ar->len = ac->ac_b_ex.fe_len; 6343 } 6344 } else { 6345 if (++retries < 3 && 6346 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 6347 goto repeat; 6348 /* 6349 * If block allocation fails then the pa allocated above 6350 * needs to be freed here itself. 6351 */ 6352 ext4_mb_pa_put_free(ac); 6353 *errp = -ENOSPC; 6354 } 6355 6356 if (*errp) { 6357 errout: 6358 ac->ac_b_ex.fe_len = 0; 6359 ar->len = 0; 6360 ext4_mb_show_ac(ac); 6361 } 6362 ext4_mb_release_context(ac); 6363 kmem_cache_free(ext4_ac_cachep, ac); 6364 out: 6365 if (inquota && ar->len < inquota) 6366 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 6367 /* release any reserved blocks */ 6368 if (reserv_clstrs) 6369 percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs); 6370 6371 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 6372 6373 return block; 6374 } 6375 6376 /* 6377 * We can merge two free data extents only if the physical blocks 6378 * are contiguous, AND the extents were freed by the same transaction, 6379 * AND the blocks are associated with the same group. 
/*
 * We can merge two free data extents only if the physical blocks
 * are contiguous, AND the extents were freed by the same transaction,
 * AND the blocks are associated with the same group.
 */
static inline bool
ext4_freed_extents_can_be_merged(struct ext4_free_data *entry1,
				 struct ext4_free_data *entry2)
{
	if (entry1->efd_tid != entry2->efd_tid)
		return false;
	if (entry1->efd_start_cluster + entry1->efd_count !=
	    entry2->efd_start_cluster)
		return false;
	if (WARN_ON_ONCE(entry1->efd_group != entry2->efd_group))
		return false;
	return true;
}

static inline void
ext4_merge_freed_extents(struct ext4_sb_info *sbi, struct rb_root *root,
			 struct ext4_free_data *entry1,
			 struct ext4_free_data *entry2)
{
	entry1->efd_count += entry2->efd_count;
	spin_lock(&sbi->s_md_lock);
	list_del(&entry2->efd_list);
	spin_unlock(&sbi->s_md_lock);
	rb_erase(&entry2->efd_node, root);
	kmem_cache_free(ext4_free_data_cachep, entry2);
}

static inline void
ext4_try_merge_freed_extent_prev(struct ext4_sb_info *sbi, struct rb_root *root,
				 struct ext4_free_data *entry)
{
	struct ext4_free_data *prev;
	struct rb_node *node;

	node = rb_prev(&entry->efd_node);
	if (!node)
		return;

	prev = rb_entry(node, struct ext4_free_data, efd_node);
	if (ext4_freed_extents_can_be_merged(prev, entry))
		ext4_merge_freed_extents(sbi, root, prev, entry);
}

static inline void
ext4_try_merge_freed_extent_next(struct ext4_sb_info *sbi, struct rb_root *root,
				 struct ext4_free_data *entry)
{
	struct ext4_free_data *next;
	struct rb_node *node;

	node = rb_next(&entry->efd_node);
	if (!node)
		return;

	next = rb_entry(node, struct ext4_free_data, efd_node);
	if (ext4_freed_extents_can_be_merged(entry, next))
		ext4_merge_freed_extents(sbi, root, entry, next);
}
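/*
 * Compiled-out example of the three merge conditions above: same
 * transaction, physically contiguous, same group.
 */
#if 0
static bool example_can_merge(void)
{
	struct ext4_free_data a = {
		.efd_tid = 42, .efd_group = 7,
		.efd_start_cluster = 100, .efd_count = 50,
	};
	struct ext4_free_data b = {
		.efd_tid = 42, .efd_group = 7,
		.efd_start_cluster = 150, .efd_count = 10,
	};

	/* same tid, contiguous (100 + 50 == 150), same group */
	return ext4_freed_extents_can_be_merged(&a, &b);	/* true */
}
#endif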
static noinline_for_stack void
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
		      struct ext4_free_data *new_entry)
{
	ext4_group_t group = e4b->bd_group;
	ext4_grpblk_t cluster;
	ext4_grpblk_t clusters = new_entry->efd_count;
	struct ext4_free_data *entry = NULL;
	struct ext4_group_info *db = e4b->bd_info;
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct rb_root *root = &db->bb_free_root;
	struct rb_node **n = &root->rb_node;
	struct rb_node *parent = NULL, *new_node;

	BUG_ON(!ext4_handle_valid(handle));
	BUG_ON(e4b->bd_bitmap_folio == NULL);
	BUG_ON(e4b->bd_buddy_folio == NULL);

	new_node = &new_entry->efd_node;
	cluster = new_entry->efd_start_cluster;

	if (!*n) {
		/* first free block extent. We need to
		 * protect buddy cache from being freed,
		 * otherwise we'll refresh it from
		 * on-disk bitmap and lose not-yet-available
		 * blocks */
		folio_get(e4b->bd_buddy_folio);
		folio_get(e4b->bd_bitmap_folio);
	}
	while (*n) {
		parent = *n;
		entry = rb_entry(parent, struct ext4_free_data, efd_node);
		if (cluster < entry->efd_start_cluster)
			n = &(*n)->rb_left;
		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
			n = &(*n)->rb_right;
		else {
			ext4_grp_locked_error(sb, group, 0,
				ext4_group_first_block_no(sb, group) +
				EXT4_C2B(sbi, cluster),
				"Block already on to-be-freed list");
			kmem_cache_free(ext4_free_data_cachep, new_entry);
			return;
		}
	}

	atomic_add(clusters, &sbi->s_mb_free_pending);
	if (!entry)
		goto insert;

	/* Now try to see if the extent can be merged with prev and next */
	if (ext4_freed_extents_can_be_merged(new_entry, entry)) {
		entry->efd_start_cluster = cluster;
		entry->efd_count += new_entry->efd_count;
		kmem_cache_free(ext4_free_data_cachep, new_entry);
		ext4_try_merge_freed_extent_prev(sbi, root, entry);
		return;
	}
	if (ext4_freed_extents_can_be_merged(entry, new_entry)) {
		entry->efd_count += new_entry->efd_count;
		kmem_cache_free(ext4_free_data_cachep, new_entry);
		ext4_try_merge_freed_extent_next(sbi, root, entry);
		return;
	}
insert:
	rb_link_node(new_node, parent, n);
	rb_insert_color(new_node, root);

	spin_lock(&sbi->s_md_lock);
	list_add_tail(&new_entry->efd_list,
		      &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
	spin_unlock(&sbi->s_md_lock);
}
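/*
 * Note on the list index used above: s_freed_data_list[] has two heads
 * and an entry is queued on slot (efd_tid & 1), so extents freed by even
 * and odd transactions are kept apart and commit can drain one parity
 * while the other keeps filling. Compiled-out illustration:
 */
#if 0
static struct list_head *example_freed_list(struct ext4_sb_info *sbi,
					    tid_t tid)
{
	return &sbi->s_freed_data_list[tid & 1];
}
#endif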
static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
				    unsigned long count)
{
	struct super_block *sb = inode->i_sb;
	ext4_group_t group;
	ext4_grpblk_t blkoff;

	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
	ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
			     EXT4_MB_BITMAP_MARKED_CHECK |
			     EXT4_MB_SYNC_UPDATE,
			     NULL);
}

/**
 * ext4_mb_clear_bb() -- helper function for freeing blocks.
 *			Used by ext4_free_blocks()
 * @handle:	handle for this transaction
 * @inode:	inode
 * @block:	starting physical block to be freed
 * @count:	number of blocks to be freed
 * @flags:	flags used by ext4_free_blocks
 */
static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
			     ext4_fsblk_t block, unsigned long count,
			     int flags)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_group_info *grp;
	unsigned int overflow;
	ext4_grpblk_t bit;
	ext4_group_t block_group;
	struct ext4_sb_info *sbi;
	struct ext4_buddy e4b;
	unsigned int count_clusters;
	int err = 0;
	int mark_flags = 0;
	ext4_grpblk_t changed;

	sbi = EXT4_SB(sb);

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks in system zone - "
			   "Block = %llu, count = %lu", block, count);
		/* err = 0. ext4_std_error should be a no op */
		goto error_out;
	}
	flags |= EXT4_FREE_BLOCKS_VALIDATED;

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);

	grp = ext4_get_group_info(sb, block_group);
	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
		return;

	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = EXT4_C2B(sbi, bit) + count -
			EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
	}
	count_clusters = EXT4_NUM_B2C(sbi, count);
	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);

	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
				     GFP_NOFS|__GFP_NOFAIL);
	if (err)
		goto error_out;

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks in system zone - "
			   "Block = %llu, count = %lu", block, count);
		/* err = 0. ext4_std_error should be a no op */
		goto error_clean;
	}

#ifdef AGGRESSIVE_CHECK
	mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
#endif
	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
				   count_clusters, mark_flags, &changed);

	if (err && changed == 0)
		goto error_clean;

#ifdef AGGRESSIVE_CHECK
	BUG_ON(changed != count_clusters);
#endif

	/*
	 * We need to make sure we don't reuse the freed block until after the
	 * transaction is committed. We make an exception if the inode is to be
	 * written in writeback mode since writeback mode has weak data
	 * consistency guarantees.
	 */
	if (ext4_handle_valid(handle) &&
	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
	     !ext4_should_writeback_data(inode))) {
		struct ext4_free_data *new_entry;
		/*
		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
		 * to fail.
		 */
		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
					     GFP_NOFS|__GFP_NOFAIL);
		new_entry->efd_start_cluster = bit;
		new_entry->efd_group = block_group;
		new_entry->efd_count = count_clusters;
		new_entry->efd_tid = handle->h_transaction->t_tid;

		ext4_lock_group(sb, block_group);
		ext4_mb_free_metadata(handle, &e4b, new_entry);
	} else {
		if (test_opt(sb, DISCARD)) {
			err = ext4_issue_discard(sb, block_group, bit,
						 count_clusters);
			/*
			 * Ignore EOPNOTSUPP error. This is consistent with
			 * what happens when using journal.
			 */
			if (err == -EOPNOTSUPP)
				err = 0;
			if (err)
				ext4_msg(sb, KERN_WARNING, "discard request in"
					 " group:%u block:%d count:%lu failed"
					 " with %d", block_group, bit, count,
					 err);
		}

		EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);

		ext4_lock_group(sb, block_group);
		mb_free_blocks(inode, &e4b, bit, count_clusters);
	}

	ext4_unlock_group(sb, block_group);

	/*
	 * on a bigalloc file system, defer the s_freeclusters_counter
	 * update to the caller (ext4_remove_space and friends) so they
	 * can determine if a cluster freed here should be rereserved
	 */
	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
		percpu_counter_add(&sbi->s_freeclusters_counter,
				   count_clusters);
	}

	if (overflow && !err) {
		block += count;
		count = overflow;
		ext4_mb_unload_buddy(&e4b);
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
		goto do_more;
	}

error_clean:
	ext4_mb_unload_buddy(&e4b);
error_out:
	ext4_std_error(sb, err);
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:	handle for this transaction
 * @inode:	inode
 * @bh:		optional buffer of the block to be freed
 * @block:	starting physical block to be freed
 * @count:	number of blocks to be freed
 * @flags:	flags used by ext4_free_blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
		      struct buffer_head *bh, ext4_fsblk_t block,
		      unsigned long count, int flags)
{
	struct super_block *sb = inode->i_sb;
	unsigned int overflow;
	struct ext4_sb_info *sbi;

	sbi = EXT4_SB(sb);

	if (bh) {
		if (block)
			BUG_ON(block != bh->b_blocknr);
		else
			block = bh->b_blocknr;
	}

	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
		ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
		return;
	}

	might_sleep();

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		return;
	}
	flags |= EXT4_FREE_BLOCKS_VALIDATED;

	ext4_debug("freeing block %llu\n", block);
	trace_ext4_free_blocks(inode, block, count, flags);

	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
		BUG_ON(count > 1);

		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
			    inode, bh, block);
	}

	/*
	 * If the extent to be freed does not begin on a cluster
	 * boundary, we need to deal with partial clusters at the
	 * beginning and end of the extent. Normally we will free
	 * blocks at the beginning or the end unless we are explicitly
	 * requested to avoid doing so.
	 */
	overflow = EXT4_PBLK_COFF(sbi, block);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
			overflow = sbi->s_cluster_ratio - overflow;
			block += overflow;
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else {
			block -= overflow;
			count += overflow;
		}
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
	}
	overflow = EXT4_LBLK_COFF(sbi, count);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else
			count += sbi->s_cluster_ratio - overflow;
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
	}

	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
		int i;
		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;

		for (i = 0; i < count; i++) {
			cond_resched();
			if (is_metadata)
				bh = sb_find_get_block_nonatomic(inode->i_sb,
								 block + i);
			ext4_forget(handle, is_metadata, inode, bh, block + i);
		}
	}

	ext4_mb_clear_bb(handle, inode, block, count, flags);
}
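/*
 * Worked example of the rounding done above, assuming a bigalloc cluster
 * ratio of 16 (numbers invented): freeing blocks 100..119 gives
 * EXT4_PBLK_COFF(sbi, 100) = 4, so by default the range is widened left
 * to block 96 (count 24); the tail then has EXT4_LBLK_COFF(sbi, 24) = 8,
 * so it is widened right to 32 blocks, i.e. whole clusters 6 and 7.
 * Compiled-out sketch of the same arithmetic:
 */
#if 0
static void example_widen(ext4_fsblk_t *block, unsigned long *count,
			  unsigned int ratio)
{
	unsigned int off = *block & (ratio - 1);	/* EXT4_PBLK_COFF */

	*block -= off;
	*count += off;
	if (*count & (ratio - 1))			/* EXT4_LBLK_COFF */
		*count += ratio - (*count & (ratio - 1));
}
#endif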
/**
 * ext4_group_add_blocks() -- Add given blocks to an existing group
 * @handle:	handle to this transaction
 * @sb:		super block
 * @block:	start physical block to add to the block group
 * @count:	number of blocks to free
 *
 * This marks the blocks as free in the bitmap and buddy.
 */
int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
			  ext4_fsblk_t block, unsigned long count)
{
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_buddy e4b;
	int err = 0;
	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
	unsigned long cluster_count = last_cluster - first_cluster + 1;
	ext4_grpblk_t changed;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	if (cluster_count == 0)
		return 0;

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
		ext4_warning(sb, "too many blocks added to group %u",
			     block_group);
		err = -EINVAL;
		goto error_out;
	}

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_out;

	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		err = -EINVAL;
		goto error_clean;
	}

	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
				   cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
				   &changed);
	if (err && changed == 0)
		goto error_clean;

	if (changed != cluster_count)
		ext4_error(sb, "bit already cleared in group %u", block_group);

	ext4_lock_group(sb, block_group);
	mb_free_blocks(NULL, &e4b, bit, cluster_count);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   changed);

error_clean:
	ext4_mb_unload_buddy(&e4b);
error_out:
	ext4_std_error(sb, err);
	return err;
}

/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb:		super block for the file system
 * @start:	starting block of the free extent in the alloc. group
 * @count:	number of blocks to TRIM
 * @e4b:	ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static int ext4_trim_extent(struct super_block *sb,
		int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	ext4_group_t group = e4b->bd_group;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}
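/*
 * The helper above deliberately drops the group lock around
 * ext4_issue_discard(), which may block. The extent stays reserved in the
 * buddy via mb_mark_used() for the duration and is returned with
 * mb_free_blocks() after relocking, so nobody can allocate from the range
 * while it is being trimmed.
 */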
static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
					   ext4_group_t grp)
{
	unsigned long nr_clusters_in_group;

	if (grp < (ext4_get_groups_count(sb) - 1))
		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
	else
		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
					ext4_group_first_block_no(sb, grp))
				       >> EXT4_CLUSTER_BITS(sb);

	return nr_clusters_in_group - 1;
}

static bool ext4_trim_interrupted(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count, last, origin_start;
	bool set_trimmed = false;
	void *bitmap;

	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return 0;

	last = ext4_last_grp_cluster(sb, e4b->bd_group);
	bitmap = e4b->bd_bitmap;
	if (start == 0 && max >= last)
		set_trimmed = true;
	origin_start = start;
	start = max(e4b->bd_info->bb_first_free, start);
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;

		next = mb_find_next_bit(bitmap, last + 1, start);
		if (origin_start == 0 && next >= last)
			set_trimmed = true;

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				return count;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (ext4_trim_interrupted())
			return count;

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	if (set_trimmed)
		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);

	return count;
}

/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:			super block for file system
 * @group:		group to be trimmed
 * @start:		first group block to examine
 * @max:		last group block to examine
 * @minblocks:		minimum extent block count
 *
 * ext4_trim_all_free walks through group's block bitmap searching for free
 * extents. When a free extent is found, it is marked as used in the group
 * buddy bitmap. Then a TRIM command is issued on this extent and the extent
 * is freed in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
	else
		ret = 0;

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}
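/*
 * Worked example of the unit handling in ext4_trim_fs() below (numbers
 * invented): on a 4KiB-block filesystem, an fstrim_range of start = 1MiB,
 * len = 1GiB, minlen = 64KiB becomes start block 256, a span of 262144
 * blocks, and a 16-block (cluster-scaled) minimum extent. Compiled-out
 * sketch of the conversions:
 */
#if 0
static void example_trim_units(struct super_block *sb,
			       struct fstrim_range *range)
{
	u64 start = range->start >> sb->s_blocksize_bits;
	u64 end = start + (range->len >> sb->s_blocksize_bits) - 1;
	u64 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				  range->minlen >> sb->s_blocksize_bits);

	pr_info("trim blocks %llu-%llu, min extent %llu clusters\n",
		start, end, minlen);
}
#endif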
/**
 * ext4_trim_fs() -- trim ioctl handle function
 * @sb:			superblock for filesystem
 * @range:		fstrim_range structure
 *
 * start:	First Byte to trim
 * len:		number of Bytes to trim from start
 * minlen:	minimum extent length in Bytes
 * ext4_trim_fs goes through all allocation groups containing Bytes from
 * start to start+len. For each such group, the ext4_trim_all_free function
 * is invoked to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point in trying to trim less than the discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				      discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
	if (end >= max_blks - 1)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	for (group = first_group; group <= last_group; group++) {
		if (ext4_trim_interrupted())
			break;
		grp = ext4_get_group_info(sb, group);
		if (!grp)
			continue;
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all groups except the last one, the last cluster is
		 * always EXT4_CLUSTERS_PER_GROUP(sb) - 1, so we only need to
		 * change it for the last group; note that last_cluster was
		 * already computed earlier by ext4_get_group_no_and_offset()
		 */
		if (group == last_group)
			end = last_cluster;
		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}

/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			first,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	meta_formatter,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			start, next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = max(e4b.bd_info->bb_first_free, first);
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	if (meta_formatter && start != first) {
		if (start > end)
			start = end;
		ext4_unlock_group(sb, group);
		error = meta_formatter(sb, group, first, start - first,
				       priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);
	}
	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}

#ifdef CONFIG_EXT4_KUNIT_TESTS
#include "mballoc-test.c"
#endif