// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide whether to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we keep small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we don't
 * modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
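 *
 * For example (illustrative, assuming a 4k block size): with the default
 * s_mb_stream_request of 16 blocks, a file whose size stays under 16 blocks
 * (64KiB) keeps using the per-CPU locality group preallocation, while larger
 * files switch to inode preallocation as described above.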
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. It consists of the block
 * bitmap and the buddy information, which are stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can hold information for groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) lists. Group-infos are
 *    placed in the appropriate lists.
 *
 * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
 *
 *    Locking: sbi->s_mb_rb_lock (rwlock)
 *
 *    This is a red-black tree of group infos, sorted by average fragment
 *    size (which is calculated as ext4_group_info->bb_free /
 *    ext4_group_info->bb_fragments).
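 *
 *    As an illustration: a group with bb_free = 512 free clusters spread
 *    over bb_fragments = 8 free extents has an average fragment size of
 *    512 / 8 = 64 clusters, and that value determines its position in
 *    this tree.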
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the
 * above data structures to decide the order in which groups are to be
 * traversed for fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform a CR = 0 lookup in O(1) time.
 *
 * At CR = 1, we only consider groups where average fragment size > request
 * size. So, we look up a group which has an average fragment size just above
 * or equal to the request size using our rb tree (data structure 2) in
 * O(log N) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR 0 and CR 1
 * phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks of
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria of whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations not
 * being close to each other. And so, the underlying device may get filled up
 * in a non-linear fashion. While that may not matter on non-rotational
 * devices, for rotational devices it may result in higher seek times.
 * "mb_linear_limit" tells mballoc how many groups it should search linearly
 * before consulting the above data structures for more efficient lookups.
 * For non-rotational devices, this value defaults to 0 and for rotational
 * devices it is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both of the prealloc spaces are populated as above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
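 *
 * To make the CR lookups above concrete (illustrative): a request for 8
 * clusters (order 3) at CR = 0 starts at s_mb_largest_free_orders[3] and
 * walks towards higher orders until a good group is found; at CR = 1 the
 * same request descends the rb tree looking for a group whose average
 * fragment size is just above or equal to 8 clusters.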
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the number
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual used
 *        bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set and/or a
 *     PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to its allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data. given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again, PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard is possible
 *  - a PA is referenced until the block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness.
no need to modify buddy 308 * in this case, but we should care about concurrent init 309 * 310 */ 311 312 /* 313 * Logic in few words: 314 * 315 * - allocation: 316 * load group 317 * find blocks 318 * mark bits in on-disk bitmap 319 * release group 320 * 321 * - use preallocation: 322 * find proper PA (per-inode or group) 323 * load group 324 * mark bits in on-disk bitmap 325 * release group 326 * release PA 327 * 328 * - free: 329 * load group 330 * mark bits in on-disk bitmap 331 * release group 332 * 333 * - discard preallocations in group: 334 * mark PAs deleted 335 * move them onto local list 336 * load on-disk bitmap 337 * load group 338 * remove PA from object (inode or locality group) 339 * mark free blocks in-core 340 * 341 * - discard inode's preallocations: 342 */ 343 344 /* 345 * Locking rules 346 * 347 * Locks: 348 * - bitlock on a group (group) 349 * - object (inode/locality) (object) 350 * - per-pa lock (pa) 351 * - cr0 lists lock (cr0) 352 * - cr1 tree lock (cr1) 353 * 354 * Paths: 355 * - new pa 356 * object 357 * group 358 * 359 * - find and use pa: 360 * pa 361 * 362 * - release consumed pa: 363 * pa 364 * group 365 * object 366 * 367 * - generate in-core bitmap: 368 * group 369 * pa 370 * 371 * - discard all for given object (inode, locality group): 372 * object 373 * pa 374 * group 375 * 376 * - discard all for given group: 377 * group 378 * pa 379 * group 380 * object 381 * 382 * - allocation path (ext4_mb_regular_allocator) 383 * group 384 * cr0/cr1 385 */ 386 static struct kmem_cache *ext4_pspace_cachep; 387 static struct kmem_cache *ext4_ac_cachep; 388 static struct kmem_cache *ext4_free_data_cachep; 389 390 /* We create slab caches for groupinfo data structures based on the 391 * superblock block size. There will be one per mounted filesystem for 392 * each unique s_blocksize_bits */ 393 #define NR_GRPINFO_CACHES 8 394 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; 395 396 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { 397 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", 398 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", 399 "ext4_groupinfo_64k", "ext4_groupinfo_128k" 400 }; 401 402 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 403 ext4_group_t group); 404 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 405 ext4_group_t group); 406 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); 407 408 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 409 ext4_group_t group, int cr); 410 411 static int ext4_try_to_trim_range(struct super_block *sb, 412 struct ext4_buddy *e4b, ext4_grpblk_t start, 413 ext4_grpblk_t max, ext4_grpblk_t minblocks); 414 415 /* 416 * The algorithm using this percpu seq counter goes below: 417 * 1. We sample the percpu discard_pa_seq counter before trying for block 418 * allocation in ext4_mb_new_blocks(). 419 * 2. We increment this percpu discard_pa_seq counter when we either allocate 420 * or free these blocks i.e. while marking those blocks as used/free in 421 * mb_mark_used()/mb_free_blocks(). 422 * 3. We also increment this percpu seq counter when we successfully identify 423 * that the bb_prealloc_list is not empty and hence proceed for discarding 424 * of those PAs inside ext4_mb_discard_group_preallocations(). 
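 *
 * A rough sketch of how the counter is consumed (simplified from
 * ext4_mb_new_blocks()):
 *
 *	seq = this_cpu_read(discard_pa_seq);
 *	try the block allocation;
 *	if (it failed && no blocks were freed &&
 *	    ext4_get_discard_pa_seq_sum() != seq)
 *		retry, since PAs were discarded or blocks freed concurrently;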
425 * 426 * Now to make sure that the regular fast path of block allocation is not 427 * affected, as a small optimization we only sample the percpu seq counter 428 * on that cpu. Only when the block allocation fails and when freed blocks 429 * found were 0, that is when we sample percpu seq counter for all cpus using 430 * below function ext4_get_discard_pa_seq_sum(). This happens after making 431 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty. 432 */ 433 static DEFINE_PER_CPU(u64, discard_pa_seq); 434 static inline u64 ext4_get_discard_pa_seq_sum(void) 435 { 436 int __cpu; 437 u64 __seq = 0; 438 439 for_each_possible_cpu(__cpu) 440 __seq += per_cpu(discard_pa_seq, __cpu); 441 return __seq; 442 } 443 444 static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 445 { 446 #if BITS_PER_LONG == 64 447 *bit += ((unsigned long) addr & 7UL) << 3; 448 addr = (void *) ((unsigned long) addr & ~7UL); 449 #elif BITS_PER_LONG == 32 450 *bit += ((unsigned long) addr & 3UL) << 3; 451 addr = (void *) ((unsigned long) addr & ~3UL); 452 #else 453 #error "how many bits you are?!" 454 #endif 455 return addr; 456 } 457 458 static inline int mb_test_bit(int bit, void *addr) 459 { 460 /* 461 * ext4_test_bit on architecture like powerpc 462 * needs unsigned long aligned address 463 */ 464 addr = mb_correct_addr_and_bit(&bit, addr); 465 return ext4_test_bit(bit, addr); 466 } 467 468 static inline void mb_set_bit(int bit, void *addr) 469 { 470 addr = mb_correct_addr_and_bit(&bit, addr); 471 ext4_set_bit(bit, addr); 472 } 473 474 static inline void mb_clear_bit(int bit, void *addr) 475 { 476 addr = mb_correct_addr_and_bit(&bit, addr); 477 ext4_clear_bit(bit, addr); 478 } 479 480 static inline int mb_test_and_clear_bit(int bit, void *addr) 481 { 482 addr = mb_correct_addr_and_bit(&bit, addr); 483 return ext4_test_and_clear_bit(bit, addr); 484 } 485 486 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 487 { 488 int fix = 0, ret, tmpmax; 489 addr = mb_correct_addr_and_bit(&fix, addr); 490 tmpmax = max + fix; 491 start += fix; 492 493 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 494 if (ret > max) 495 return max; 496 return ret; 497 } 498 499 static inline int mb_find_next_bit(void *addr, int max, int start) 500 { 501 int fix = 0, ret, tmpmax; 502 addr = mb_correct_addr_and_bit(&fix, addr); 503 tmpmax = max + fix; 504 start += fix; 505 506 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 507 if (ret > max) 508 return max; 509 return ret; 510 } 511 512 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 513 { 514 char *bb; 515 516 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 517 BUG_ON(max == NULL); 518 519 if (order > e4b->bd_blkbits + 1) { 520 *max = 0; 521 return NULL; 522 } 523 524 /* at order 0 we see each particular block */ 525 if (order == 0) { 526 *max = 1 << (e4b->bd_blkbits + 3); 527 return e4b->bd_bitmap; 528 } 529 530 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 531 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 532 533 return bb; 534 } 535 536 #ifdef DOUBLE_CHECK 537 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 538 int first, int count) 539 { 540 int i; 541 struct super_block *sb = e4b->bd_sb; 542 543 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 544 return; 545 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 546 for (i = 0; i < count; i++) { 547 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 548 ext4_fsblk_t blocknr; 549 550 blocknr = 
ext4_group_first_block_no(sb, e4b->bd_group); 551 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 552 ext4_grp_locked_error(sb, e4b->bd_group, 553 inode ? inode->i_ino : 0, 554 blocknr, 555 "freeing block already freed " 556 "(bit %u)", 557 first + i); 558 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 559 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 560 } 561 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 562 } 563 } 564 565 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 566 { 567 int i; 568 569 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 570 return; 571 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 572 for (i = 0; i < count; i++) { 573 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 574 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 575 } 576 } 577 578 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 579 { 580 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 581 return; 582 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 583 unsigned char *b1, *b2; 584 int i; 585 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 586 b2 = (unsigned char *) bitmap; 587 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 588 if (b1[i] != b2[i]) { 589 ext4_msg(e4b->bd_sb, KERN_ERR, 590 "corruption in group %u " 591 "at byte %u(%u): %x in copy != %x " 592 "on disk/prealloc", 593 e4b->bd_group, i, i * 8, b1[i], b2[i]); 594 BUG(); 595 } 596 } 597 } 598 } 599 600 static void mb_group_bb_bitmap_alloc(struct super_block *sb, 601 struct ext4_group_info *grp, ext4_group_t group) 602 { 603 struct buffer_head *bh; 604 605 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); 606 if (!grp->bb_bitmap) 607 return; 608 609 bh = ext4_read_block_bitmap(sb, group); 610 if (IS_ERR_OR_NULL(bh)) { 611 kfree(grp->bb_bitmap); 612 grp->bb_bitmap = NULL; 613 return; 614 } 615 616 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); 617 put_bh(bh); 618 } 619 620 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 621 { 622 kfree(grp->bb_bitmap); 623 } 624 625 #else 626 static inline void mb_free_blocks_double(struct inode *inode, 627 struct ext4_buddy *e4b, int first, int count) 628 { 629 return; 630 } 631 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 632 int first, int count) 633 { 634 return; 635 } 636 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 637 { 638 return; 639 } 640 641 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, 642 struct ext4_group_info *grp, ext4_group_t group) 643 { 644 return; 645 } 646 647 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 648 { 649 return; 650 } 651 #endif 652 653 #ifdef AGGRESSIVE_CHECK 654 655 #define MB_CHECK_ASSERT(assert) \ 656 do { \ 657 if (!(assert)) { \ 658 printk(KERN_EMERG \ 659 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 660 function, file, line, # assert); \ 661 BUG(); \ 662 } \ 663 } while (0) 664 665 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 666 const char *function, int line) 667 { 668 struct super_block *sb = e4b->bd_sb; 669 int order = e4b->bd_blkbits + 1; 670 int max; 671 int max2; 672 int i; 673 int j; 674 int k; 675 int count; 676 struct ext4_group_info *grp; 677 int fragments = 0; 678 int fstart; 679 struct list_head *cur; 680 void *buddy; 681 void *buddy2; 682 683 if (e4b->bd_info->bb_check_counter++ % 10) 684 return 0; 685 686 while (order > 1) { 687 buddy = mb_find_buddy(e4b, order, &max); 688 MB_CHECK_ASSERT(buddy); 689 buddy2 = mb_find_buddy(e4b, order - 
1, &max2); 690 MB_CHECK_ASSERT(buddy2); 691 MB_CHECK_ASSERT(buddy != buddy2); 692 MB_CHECK_ASSERT(max * 2 == max2); 693 694 count = 0; 695 for (i = 0; i < max; i++) { 696 697 if (mb_test_bit(i, buddy)) { 698 /* only single bit in buddy2 may be 1 */ 699 if (!mb_test_bit(i << 1, buddy2)) { 700 MB_CHECK_ASSERT( 701 mb_test_bit((i<<1)+1, buddy2)); 702 } else if (!mb_test_bit((i << 1) + 1, buddy2)) { 703 MB_CHECK_ASSERT( 704 mb_test_bit(i << 1, buddy2)); 705 } 706 continue; 707 } 708 709 /* both bits in buddy2 must be 1 */ 710 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 711 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 712 713 for (j = 0; j < (1 << order); j++) { 714 k = (i * (1 << order)) + j; 715 MB_CHECK_ASSERT( 716 !mb_test_bit(k, e4b->bd_bitmap)); 717 } 718 count++; 719 } 720 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 721 order--; 722 } 723 724 fstart = -1; 725 buddy = mb_find_buddy(e4b, 0, &max); 726 for (i = 0; i < max; i++) { 727 if (!mb_test_bit(i, buddy)) { 728 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 729 if (fstart == -1) { 730 fragments++; 731 fstart = i; 732 } 733 continue; 734 } 735 fstart = -1; 736 /* check used bits only */ 737 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 738 buddy2 = mb_find_buddy(e4b, j, &max2); 739 k = i >> j; 740 MB_CHECK_ASSERT(k < max2); 741 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 742 } 743 } 744 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 745 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 746 747 grp = ext4_get_group_info(sb, e4b->bd_group); 748 list_for_each(cur, &grp->bb_prealloc_list) { 749 ext4_group_t groupnr; 750 struct ext4_prealloc_space *pa; 751 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 752 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 753 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 754 for (i = 0; i < pa->pa_len; i++) 755 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 756 } 757 return 0; 758 } 759 #undef MB_CHECK_ASSERT 760 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 761 __FILE__, __func__, __LINE__) 762 #else 763 #define mb_check_buddy(e4b) 764 #endif 765 766 /* 767 * Divide blocks started from @first with length @len into 768 * smaller chunks with power of 2 blocks. 769 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 770 * then increase bb_counters[] for corresponded chunk size. 
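 *
 * For example (illustrative): @first = 10, @len = 22 is split into a
 * 2-cluster chunk at 10 (order 1), a 4-cluster chunk at 12 (order 2) and a
 * 16-cluster chunk at 16 (order 4), incrementing bb_counters[1],
 * bb_counters[2] and bb_counters[4] by one each.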
771 */ 772 static void ext4_mb_mark_free_simple(struct super_block *sb, 773 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 774 struct ext4_group_info *grp) 775 { 776 struct ext4_sb_info *sbi = EXT4_SB(sb); 777 ext4_grpblk_t min; 778 ext4_grpblk_t max; 779 ext4_grpblk_t chunk; 780 unsigned int border; 781 782 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 783 784 border = 2 << sb->s_blocksize_bits; 785 786 while (len > 0) { 787 /* find how many blocks can be covered since this position */ 788 max = ffs(first | border) - 1; 789 790 /* find how many blocks of power 2 we need to mark */ 791 min = fls(len) - 1; 792 793 if (max < min) 794 min = max; 795 chunk = 1 << min; 796 797 /* mark multiblock chunks only */ 798 grp->bb_counters[min]++; 799 if (min > 0) 800 mb_clear_bit(first >> min, 801 buddy + sbi->s_mb_offsets[min]); 802 803 len -= chunk; 804 first += chunk; 805 } 806 } 807 808 static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new, 809 int (*cmp)(struct rb_node *, struct rb_node *)) 810 { 811 struct rb_node **iter = &root->rb_node, *parent = NULL; 812 813 while (*iter) { 814 parent = *iter; 815 if (cmp(new, *iter) > 0) 816 iter = &((*iter)->rb_left); 817 else 818 iter = &((*iter)->rb_right); 819 } 820 821 rb_link_node(new, parent, iter); 822 rb_insert_color(new, root); 823 } 824 825 static int 826 ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2) 827 { 828 struct ext4_group_info *grp1 = rb_entry(rb1, 829 struct ext4_group_info, 830 bb_avg_fragment_size_rb); 831 struct ext4_group_info *grp2 = rb_entry(rb2, 832 struct ext4_group_info, 833 bb_avg_fragment_size_rb); 834 int num_frags_1, num_frags_2; 835 836 num_frags_1 = grp1->bb_fragments ? 837 grp1->bb_free / grp1->bb_fragments : 0; 838 num_frags_2 = grp2->bb_fragments ? 839 grp2->bb_free / grp2->bb_fragments : 0; 840 841 return (num_frags_2 - num_frags_1); 842 } 843 844 /* 845 * Reinsert grpinfo into the avg_fragment_size tree with new average 846 * fragment size. 847 */ 848 static void 849 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) 850 { 851 struct ext4_sb_info *sbi = EXT4_SB(sb); 852 853 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0) 854 return; 855 856 write_lock(&sbi->s_mb_rb_lock); 857 if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) { 858 rb_erase(&grp->bb_avg_fragment_size_rb, 859 &sbi->s_mb_avg_fragment_size_root); 860 RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb); 861 } 862 863 ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root, 864 &grp->bb_avg_fragment_size_rb, 865 ext4_mb_avg_fragment_size_cmp); 866 write_unlock(&sbi->s_mb_rb_lock); 867 } 868 869 /* 870 * Choose next group by traversing largest_free_order lists. Updates *new_cr if 871 * cr level needs an update. 
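 *
 * For example (roughly): a request with ac_2order = 3 starts at
 * s_mb_largest_free_orders[3] and walks the lists of increasing order until
 * some group passes ext4_mb_good_group(); if none does, *new_cr is set to 1
 * so the caller falls back to the average-fragment-size search.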
872 */ 873 static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac, 874 int *new_cr, ext4_group_t *group, ext4_group_t ngroups) 875 { 876 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 877 struct ext4_group_info *iter, *grp; 878 int i; 879 880 if (ac->ac_status == AC_STATUS_FOUND) 881 return; 882 883 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED)) 884 atomic_inc(&sbi->s_bal_cr0_bad_suggestions); 885 886 grp = NULL; 887 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 888 if (list_empty(&sbi->s_mb_largest_free_orders[i])) 889 continue; 890 read_lock(&sbi->s_mb_largest_free_orders_locks[i]); 891 if (list_empty(&sbi->s_mb_largest_free_orders[i])) { 892 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 893 continue; 894 } 895 grp = NULL; 896 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i], 897 bb_largest_free_order_node) { 898 if (sbi->s_mb_stats) 899 atomic64_inc(&sbi->s_bal_cX_groups_considered[0]); 900 if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) { 901 grp = iter; 902 break; 903 } 904 } 905 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 906 if (grp) 907 break; 908 } 909 910 if (!grp) { 911 /* Increment cr and search again */ 912 *new_cr = 1; 913 } else { 914 *group = grp->bb_group; 915 ac->ac_last_optimal_group = *group; 916 ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED; 917 } 918 } 919 920 /* 921 * Choose next group by traversing average fragment size tree. Updates *new_cr 922 * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that 923 * the linear search should continue for one iteration since there's lock 924 * contention on the rb tree lock. 925 */ 926 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac, 927 int *new_cr, ext4_group_t *group, ext4_group_t ngroups) 928 { 929 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 930 int avg_fragment_size, best_so_far; 931 struct rb_node *node, *found; 932 struct ext4_group_info *grp; 933 934 /* 935 * If there is contention on the lock, instead of waiting for the lock 936 * to become available, just continue searching lineraly. We'll resume 937 * our rb tree search later starting at ac->ac_last_optimal_group. 938 */ 939 if (!read_trylock(&sbi->s_mb_rb_lock)) { 940 ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR; 941 return; 942 } 943 944 if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) { 945 if (sbi->s_mb_stats) 946 atomic_inc(&sbi->s_bal_cr1_bad_suggestions); 947 /* We have found something at CR 1 in the past */ 948 grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group); 949 for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL; 950 found = rb_next(found)) { 951 grp = rb_entry(found, struct ext4_group_info, 952 bb_avg_fragment_size_rb); 953 if (sbi->s_mb_stats) 954 atomic64_inc(&sbi->s_bal_cX_groups_considered[1]); 955 if (likely(ext4_mb_good_group(ac, grp->bb_group, 1))) 956 break; 957 } 958 goto done; 959 } 960 961 node = sbi->s_mb_avg_fragment_size_root.rb_node; 962 best_so_far = 0; 963 found = NULL; 964 965 while (node) { 966 grp = rb_entry(node, struct ext4_group_info, 967 bb_avg_fragment_size_rb); 968 avg_fragment_size = 0; 969 if (ext4_mb_good_group(ac, grp->bb_group, 1)) { 970 avg_fragment_size = grp->bb_fragments ? 
971 grp->bb_free / grp->bb_fragments : 0; 972 if (!best_so_far || avg_fragment_size < best_so_far) { 973 best_so_far = avg_fragment_size; 974 found = node; 975 } 976 } 977 if (avg_fragment_size > ac->ac_g_ex.fe_len) 978 node = node->rb_right; 979 else 980 node = node->rb_left; 981 } 982 983 done: 984 if (found) { 985 grp = rb_entry(found, struct ext4_group_info, 986 bb_avg_fragment_size_rb); 987 *group = grp->bb_group; 988 ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED; 989 } else { 990 *new_cr = 2; 991 } 992 993 read_unlock(&sbi->s_mb_rb_lock); 994 ac->ac_last_optimal_group = *group; 995 } 996 997 static inline int should_optimize_scan(struct ext4_allocation_context *ac) 998 { 999 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) 1000 return 0; 1001 if (ac->ac_criteria >= 2) 1002 return 0; 1003 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) 1004 return 0; 1005 return 1; 1006 } 1007 1008 /* 1009 * Return next linear group for allocation. If linear traversal should not be 1010 * performed, this function just returns the same group 1011 */ 1012 static int 1013 next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups) 1014 { 1015 if (!should_optimize_scan(ac)) 1016 goto inc_and_return; 1017 1018 if (ac->ac_groups_linear_remaining) { 1019 ac->ac_groups_linear_remaining--; 1020 goto inc_and_return; 1021 } 1022 1023 if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) { 1024 ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR; 1025 goto inc_and_return; 1026 } 1027 1028 return group; 1029 inc_and_return: 1030 /* 1031 * Artificially restricted ngroups for non-extent 1032 * files makes group > ngroups possible on first loop. 1033 */ 1034 return group + 1 >= ngroups ? 0 : group + 1; 1035 } 1036 1037 /* 1038 * ext4_mb_choose_next_group: choose next group for allocation. 1039 * 1040 * @ac Allocation Context 1041 * @new_cr This is an output parameter. If the there is no good group 1042 * available at current CR level, this field is updated to indicate 1043 * the new cr level that should be used. 1044 * @group This is an input / output parameter. As an input it indicates the 1045 * next group that the allocator intends to use for allocation. As 1046 * output, this field indicates the next group that should be used as 1047 * determined by the optimization functions. 1048 * @ngroups Total number of groups 1049 */ 1050 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, 1051 int *new_cr, ext4_group_t *group, ext4_group_t ngroups) 1052 { 1053 *new_cr = ac->ac_criteria; 1054 1055 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) 1056 return; 1057 1058 if (*new_cr == 0) { 1059 ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups); 1060 } else if (*new_cr == 1) { 1061 ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups); 1062 } else { 1063 /* 1064 * TODO: For CR=2, we can arrange groups in an rb tree sorted by 1065 * bb_free. But until that happens, we should never come here. 1066 */ 1067 WARN_ON(1); 1068 } 1069 } 1070 1071 /* 1072 * Cache the order of the largest free extent we have available in this block 1073 * group. 
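 *
 * For example (illustrative): if bb_counters[] is {3, 1, 0, 2, 0, ...}, the
 * cached largest free order is 3, and with "mb_optimize_scan" enabled the
 * group is (re)linked onto sbi->s_mb_largest_free_orders[3].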
1074 */ 1075 static void 1076 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 1077 { 1078 struct ext4_sb_info *sbi = EXT4_SB(sb); 1079 int i; 1080 1081 if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) { 1082 write_lock(&sbi->s_mb_largest_free_orders_locks[ 1083 grp->bb_largest_free_order]); 1084 list_del_init(&grp->bb_largest_free_order_node); 1085 write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1086 grp->bb_largest_free_order]); 1087 } 1088 grp->bb_largest_free_order = -1; /* uninit */ 1089 1090 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) { 1091 if (grp->bb_counters[i] > 0) { 1092 grp->bb_largest_free_order = i; 1093 break; 1094 } 1095 } 1096 if (test_opt2(sb, MB_OPTIMIZE_SCAN) && 1097 grp->bb_largest_free_order >= 0 && grp->bb_free) { 1098 write_lock(&sbi->s_mb_largest_free_orders_locks[ 1099 grp->bb_largest_free_order]); 1100 list_add_tail(&grp->bb_largest_free_order_node, 1101 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1102 write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1103 grp->bb_largest_free_order]); 1104 } 1105 } 1106 1107 static noinline_for_stack 1108 void ext4_mb_generate_buddy(struct super_block *sb, 1109 void *buddy, void *bitmap, ext4_group_t group) 1110 { 1111 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1112 struct ext4_sb_info *sbi = EXT4_SB(sb); 1113 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1114 ext4_grpblk_t i = 0; 1115 ext4_grpblk_t first; 1116 ext4_grpblk_t len; 1117 unsigned free = 0; 1118 unsigned fragments = 0; 1119 unsigned long long period = get_cycles(); 1120 1121 /* initialize buddy from bitmap which is aggregation 1122 * of on-disk bitmap and preallocations */ 1123 i = mb_find_next_zero_bit(bitmap, max, 0); 1124 grp->bb_first_free = i; 1125 while (i < max) { 1126 fragments++; 1127 first = i; 1128 i = mb_find_next_bit(bitmap, max, i); 1129 len = i - first; 1130 free += len; 1131 if (len > 1) 1132 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1133 else 1134 grp->bb_counters[0]++; 1135 if (i < max) 1136 i = mb_find_next_zero_bit(bitmap, max, i); 1137 } 1138 grp->bb_fragments = fragments; 1139 1140 if (free != grp->bb_free) { 1141 ext4_grp_locked_error(sb, group, 0, 0, 1142 "block bitmap and bg descriptor " 1143 "inconsistent: %u vs %u free clusters", 1144 free, grp->bb_free); 1145 /* 1146 * If we intend to continue, we consider group descriptor 1147 * corrupt and update bb_free using bitmap value 1148 */ 1149 grp->bb_free = free; 1150 ext4_mark_group_bitmap_corrupted(sb, group, 1151 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1152 } 1153 mb_set_largest_free_order(sb, grp); 1154 1155 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1156 1157 period = get_cycles() - period; 1158 atomic_inc(&sbi->s_mb_buddies_generated); 1159 atomic64_add(period, &sbi->s_mb_generation_time); 1160 mb_update_avg_fragment_size(sb, grp); 1161 } 1162 1163 /* The buddy information is attached the buddy cache inode 1164 * for convenience. The information regarding each group 1165 * is loaded via ext4_mb_load_buddy. The information involve 1166 * block bitmap and buddy information. The information are 1167 * stored in the inode as 1168 * 1169 * { page } 1170 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1171 * 1172 * 1173 * one block each for bitmap and buddy information. 1174 * So for each group we take up 2 blocks. A page can 1175 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 
1176 * So it can have information regarding groups_per_page which 1177 * is blocks_per_page/2 1178 * 1179 * Locking note: This routine takes the block group lock of all groups 1180 * for this page; do not hold this lock when calling this routine! 1181 */ 1182 1183 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1184 { 1185 ext4_group_t ngroups; 1186 int blocksize; 1187 int blocks_per_page; 1188 int groups_per_page; 1189 int err = 0; 1190 int i; 1191 ext4_group_t first_group, group; 1192 int first_block; 1193 struct super_block *sb; 1194 struct buffer_head *bhs; 1195 struct buffer_head **bh = NULL; 1196 struct inode *inode; 1197 char *data; 1198 char *bitmap; 1199 struct ext4_group_info *grinfo; 1200 1201 inode = page->mapping->host; 1202 sb = inode->i_sb; 1203 ngroups = ext4_get_groups_count(sb); 1204 blocksize = i_blocksize(inode); 1205 blocks_per_page = PAGE_SIZE / blocksize; 1206 1207 mb_debug(sb, "init page %lu\n", page->index); 1208 1209 groups_per_page = blocks_per_page >> 1; 1210 if (groups_per_page == 0) 1211 groups_per_page = 1; 1212 1213 /* allocate buffer_heads to read bitmaps */ 1214 if (groups_per_page > 1) { 1215 i = sizeof(struct buffer_head *) * groups_per_page; 1216 bh = kzalloc(i, gfp); 1217 if (bh == NULL) { 1218 err = -ENOMEM; 1219 goto out; 1220 } 1221 } else 1222 bh = &bhs; 1223 1224 first_group = page->index * blocks_per_page / 2; 1225 1226 /* read all groups the page covers into the cache */ 1227 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1228 if (group >= ngroups) 1229 break; 1230 1231 grinfo = ext4_get_group_info(sb, group); 1232 /* 1233 * If page is uptodate then we came here after online resize 1234 * which added some new uninitialized group info structs, so 1235 * we must skip all initialized uptodate buddies on the page, 1236 * which may be currently in use by an allocating task. 
1237 */ 1238 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 1239 bh[i] = NULL; 1240 continue; 1241 } 1242 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 1243 if (IS_ERR(bh[i])) { 1244 err = PTR_ERR(bh[i]); 1245 bh[i] = NULL; 1246 goto out; 1247 } 1248 mb_debug(sb, "read bitmap for group %u\n", group); 1249 } 1250 1251 /* wait for I/O completion */ 1252 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1253 int err2; 1254 1255 if (!bh[i]) 1256 continue; 1257 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 1258 if (!err) 1259 err = err2; 1260 } 1261 1262 first_block = page->index * blocks_per_page; 1263 for (i = 0; i < blocks_per_page; i++) { 1264 group = (first_block + i) >> 1; 1265 if (group >= ngroups) 1266 break; 1267 1268 if (!bh[group - first_group]) 1269 /* skip initialized uptodate buddy */ 1270 continue; 1271 1272 if (!buffer_verified(bh[group - first_group])) 1273 /* Skip faulty bitmaps */ 1274 continue; 1275 err = 0; 1276 1277 /* 1278 * data carry information regarding this 1279 * particular group in the format specified 1280 * above 1281 * 1282 */ 1283 data = page_address(page) + (i * blocksize); 1284 bitmap = bh[group - first_group]->b_data; 1285 1286 /* 1287 * We place the buddy block and bitmap block 1288 * close together 1289 */ 1290 if ((first_block + i) & 1) { 1291 /* this is block of buddy */ 1292 BUG_ON(incore == NULL); 1293 mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1294 group, page->index, i * blocksize); 1295 trace_ext4_mb_buddy_bitmap_load(sb, group); 1296 grinfo = ext4_get_group_info(sb, group); 1297 grinfo->bb_fragments = 0; 1298 memset(grinfo->bb_counters, 0, 1299 sizeof(*grinfo->bb_counters) * 1300 (MB_NUM_ORDERS(sb))); 1301 /* 1302 * incore got set to the group block bitmap below 1303 */ 1304 ext4_lock_group(sb, group); 1305 /* init the buddy */ 1306 memset(data, 0xff, blocksize); 1307 ext4_mb_generate_buddy(sb, data, incore, group); 1308 ext4_unlock_group(sb, group); 1309 incore = NULL; 1310 } else { 1311 /* this is block of bitmap */ 1312 BUG_ON(incore != NULL); 1313 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1314 group, page->index, i * blocksize); 1315 trace_ext4_mb_bitmap_load(sb, group); 1316 1317 /* see comments in ext4_mb_put_pa() */ 1318 ext4_lock_group(sb, group); 1319 memcpy(data, bitmap, blocksize); 1320 1321 /* mark all preallocated blks used in in-core bitmap */ 1322 ext4_mb_generate_from_pa(sb, data, group); 1323 ext4_mb_generate_from_freelist(sb, data, group); 1324 ext4_unlock_group(sb, group); 1325 1326 /* set incore so that the buddy information can be 1327 * generated using this 1328 */ 1329 incore = data; 1330 } 1331 } 1332 SetPageUptodate(page); 1333 1334 out: 1335 if (bh) { 1336 for (i = 0; i < groups_per_page; i++) 1337 brelse(bh[i]); 1338 if (bh != &bhs) 1339 kfree(bh); 1340 } 1341 return err; 1342 } 1343 1344 /* 1345 * Lock the buddy and bitmap pages. This make sure other parallel init_group 1346 * on the same buddy page doesn't happen whild holding the buddy page lock. 1347 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 1348 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 
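 *
 * For example (illustrative, assuming a 4k block size): with 4k pages each
 * block gets its own page, so group N's bitmap is at page index 2*N and its
 * buddy at 2*N + 1; with 64k pages, blocks_per_page is 16, both blocks of a
 * group share one page and e4b->bd_buddy_page stays NULL.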
1349 */ 1350 static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1351 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1352 { 1353 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 1354 int block, pnum, poff; 1355 int blocks_per_page; 1356 struct page *page; 1357 1358 e4b->bd_buddy_page = NULL; 1359 e4b->bd_bitmap_page = NULL; 1360 1361 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1362 /* 1363 * the buddy cache inode stores the block bitmap 1364 * and buddy information in consecutive blocks. 1365 * So for each group we need two blocks. 1366 */ 1367 block = group * 2; 1368 pnum = block / blocks_per_page; 1369 poff = block % blocks_per_page; 1370 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1371 if (!page) 1372 return -ENOMEM; 1373 BUG_ON(page->mapping != inode->i_mapping); 1374 e4b->bd_bitmap_page = page; 1375 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1376 1377 if (blocks_per_page >= 2) { 1378 /* buddy and bitmap are on the same page */ 1379 return 0; 1380 } 1381 1382 block++; 1383 pnum = block / blocks_per_page; 1384 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1385 if (!page) 1386 return -ENOMEM; 1387 BUG_ON(page->mapping != inode->i_mapping); 1388 e4b->bd_buddy_page = page; 1389 return 0; 1390 } 1391 1392 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1393 { 1394 if (e4b->bd_bitmap_page) { 1395 unlock_page(e4b->bd_bitmap_page); 1396 put_page(e4b->bd_bitmap_page); 1397 } 1398 if (e4b->bd_buddy_page) { 1399 unlock_page(e4b->bd_buddy_page); 1400 put_page(e4b->bd_buddy_page); 1401 } 1402 } 1403 1404 /* 1405 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1406 * block group lock of all groups for this page; do not hold the BG lock when 1407 * calling this routine! 1408 */ 1409 static noinline_for_stack 1410 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1411 { 1412 1413 struct ext4_group_info *this_grp; 1414 struct ext4_buddy e4b; 1415 struct page *page; 1416 int ret = 0; 1417 1418 might_sleep(); 1419 mb_debug(sb, "init group %u\n", group); 1420 this_grp = ext4_get_group_info(sb, group); 1421 /* 1422 * This ensures that we don't reinit the buddy cache 1423 * page which map to the group from which we are already 1424 * allocating. If we are looking at the buddy cache we would 1425 * have taken a reference using ext4_mb_load_buddy and that 1426 * would have pinned buddy page to page cache. 1427 * The call to ext4_mb_get_buddy_page_lock will mark the 1428 * page accessed. 
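 *
 * Roughly, the sequence below is: lock both pages, initialize the bitmap
 * page via ext4_mb_init_cache(page, NULL), and, if the buddy lives on a
 * separate page, initialize it via ext4_mb_init_cache(page, e4b.bd_bitmap)
 * reusing the bitmap that was just set up.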
1429 */ 1430 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 1431 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1432 /* 1433 * somebody initialized the group 1434 * return without doing anything 1435 */ 1436 goto err; 1437 } 1438 1439 page = e4b.bd_bitmap_page; 1440 ret = ext4_mb_init_cache(page, NULL, gfp); 1441 if (ret) 1442 goto err; 1443 if (!PageUptodate(page)) { 1444 ret = -EIO; 1445 goto err; 1446 } 1447 1448 if (e4b.bd_buddy_page == NULL) { 1449 /* 1450 * If both the bitmap and buddy are in 1451 * the same page we don't need to force 1452 * init the buddy 1453 */ 1454 ret = 0; 1455 goto err; 1456 } 1457 /* init buddy cache */ 1458 page = e4b.bd_buddy_page; 1459 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 1460 if (ret) 1461 goto err; 1462 if (!PageUptodate(page)) { 1463 ret = -EIO; 1464 goto err; 1465 } 1466 err: 1467 ext4_mb_put_buddy_page_lock(&e4b); 1468 return ret; 1469 } 1470 1471 /* 1472 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1473 * block group lock of all groups for this page; do not hold the BG lock when 1474 * calling this routine! 1475 */ 1476 static noinline_for_stack int 1477 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1478 struct ext4_buddy *e4b, gfp_t gfp) 1479 { 1480 int blocks_per_page; 1481 int block; 1482 int pnum; 1483 int poff; 1484 struct page *page; 1485 int ret; 1486 struct ext4_group_info *grp; 1487 struct ext4_sb_info *sbi = EXT4_SB(sb); 1488 struct inode *inode = sbi->s_buddy_cache; 1489 1490 might_sleep(); 1491 mb_debug(sb, "load group %u\n", group); 1492 1493 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1494 grp = ext4_get_group_info(sb, group); 1495 1496 e4b->bd_blkbits = sb->s_blocksize_bits; 1497 e4b->bd_info = grp; 1498 e4b->bd_sb = sb; 1499 e4b->bd_group = group; 1500 e4b->bd_buddy_page = NULL; 1501 e4b->bd_bitmap_page = NULL; 1502 1503 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1504 /* 1505 * we need full data about the group 1506 * to make a good selection 1507 */ 1508 ret = ext4_mb_init_group(sb, group, gfp); 1509 if (ret) 1510 return ret; 1511 } 1512 1513 /* 1514 * the buddy cache inode stores the block bitmap 1515 * and buddy information in consecutive blocks. 1516 * So for each group we need two blocks. 1517 */ 1518 block = group * 2; 1519 pnum = block / blocks_per_page; 1520 poff = block % blocks_per_page; 1521 1522 /* we could use find_or_create_page(), but it locks page 1523 * what we'd like to avoid in fast path ... */ 1524 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1525 if (page == NULL || !PageUptodate(page)) { 1526 if (page) 1527 /* 1528 * drop the page reference and try 1529 * to get the page with lock. If we 1530 * are not uptodate that implies 1531 * somebody just created the page but 1532 * is yet to initialize the same. So 1533 * wait for it to initialize. 
1534 */ 1535 put_page(page); 1536 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1537 if (page) { 1538 BUG_ON(page->mapping != inode->i_mapping); 1539 if (!PageUptodate(page)) { 1540 ret = ext4_mb_init_cache(page, NULL, gfp); 1541 if (ret) { 1542 unlock_page(page); 1543 goto err; 1544 } 1545 mb_cmp_bitmaps(e4b, page_address(page) + 1546 (poff * sb->s_blocksize)); 1547 } 1548 unlock_page(page); 1549 } 1550 } 1551 if (page == NULL) { 1552 ret = -ENOMEM; 1553 goto err; 1554 } 1555 if (!PageUptodate(page)) { 1556 ret = -EIO; 1557 goto err; 1558 } 1559 1560 /* Pages marked accessed already */ 1561 e4b->bd_bitmap_page = page; 1562 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1563 1564 block++; 1565 pnum = block / blocks_per_page; 1566 poff = block % blocks_per_page; 1567 1568 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1569 if (page == NULL || !PageUptodate(page)) { 1570 if (page) 1571 put_page(page); 1572 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1573 if (page) { 1574 BUG_ON(page->mapping != inode->i_mapping); 1575 if (!PageUptodate(page)) { 1576 ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1577 gfp); 1578 if (ret) { 1579 unlock_page(page); 1580 goto err; 1581 } 1582 } 1583 unlock_page(page); 1584 } 1585 } 1586 if (page == NULL) { 1587 ret = -ENOMEM; 1588 goto err; 1589 } 1590 if (!PageUptodate(page)) { 1591 ret = -EIO; 1592 goto err; 1593 } 1594 1595 /* Pages marked accessed already */ 1596 e4b->bd_buddy_page = page; 1597 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1598 1599 return 0; 1600 1601 err: 1602 if (page) 1603 put_page(page); 1604 if (e4b->bd_bitmap_page) 1605 put_page(e4b->bd_bitmap_page); 1606 if (e4b->bd_buddy_page) 1607 put_page(e4b->bd_buddy_page); 1608 e4b->bd_buddy = NULL; 1609 e4b->bd_bitmap = NULL; 1610 return ret; 1611 } 1612 1613 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1614 struct ext4_buddy *e4b) 1615 { 1616 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1617 } 1618 1619 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1620 { 1621 if (e4b->bd_bitmap_page) 1622 put_page(e4b->bd_bitmap_page); 1623 if (e4b->bd_buddy_page) 1624 put_page(e4b->bd_buddy_page); 1625 } 1626 1627 1628 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1629 { 1630 int order = 1, max; 1631 void *bb; 1632 1633 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1634 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1635 1636 while (order <= e4b->bd_blkbits + 1) { 1637 bb = mb_find_buddy(e4b, order, &max); 1638 if (!mb_test_bit(block >> order, bb)) { 1639 /* this block is part of buddy of order 'order' */ 1640 return order; 1641 } 1642 order++; 1643 } 1644 return 0; 1645 } 1646 1647 static void mb_clear_bits(void *bm, int cur, int len) 1648 { 1649 __u32 *addr; 1650 1651 len = cur + len; 1652 while (cur < len) { 1653 if ((cur & 31) == 0 && (len - cur) >= 32) { 1654 /* fast path: clear whole word at once */ 1655 addr = bm + (cur >> 3); 1656 *addr = 0; 1657 cur += 32; 1658 continue; 1659 } 1660 mb_clear_bit(cur, bm); 1661 cur++; 1662 } 1663 } 1664 1665 /* clear bits in given range 1666 * will return first found zero bit if any, -1 otherwise 1667 */ 1668 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1669 { 1670 __u32 *addr; 1671 int zero_bit = -1; 1672 1673 len = cur + len; 1674 while (cur < len) { 1675 if ((cur & 31) == 0 && (len - cur) >= 32) { 1676 /* fast path: clear whole word at once */ 1677 addr = bm + (cur >> 3); 1678 if (*addr != (__u32)(-1) 
&& zero_bit == -1) 1679 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1680 *addr = 0; 1681 cur += 32; 1682 continue; 1683 } 1684 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1685 zero_bit = cur; 1686 cur++; 1687 } 1688 1689 return zero_bit; 1690 } 1691 1692 void mb_set_bits(void *bm, int cur, int len) 1693 { 1694 __u32 *addr; 1695 1696 len = cur + len; 1697 while (cur < len) { 1698 if ((cur & 31) == 0 && (len - cur) >= 32) { 1699 /* fast path: set whole word at once */ 1700 addr = bm + (cur >> 3); 1701 *addr = 0xffffffff; 1702 cur += 32; 1703 continue; 1704 } 1705 mb_set_bit(cur, bm); 1706 cur++; 1707 } 1708 } 1709 1710 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1711 { 1712 if (mb_test_bit(*bit + side, bitmap)) { 1713 mb_clear_bit(*bit, bitmap); 1714 (*bit) -= side; 1715 return 1; 1716 } 1717 else { 1718 (*bit) += side; 1719 mb_set_bit(*bit, bitmap); 1720 return -1; 1721 } 1722 } 1723 1724 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1725 { 1726 int max; 1727 int order = 1; 1728 void *buddy = mb_find_buddy(e4b, order, &max); 1729 1730 while (buddy) { 1731 void *buddy2; 1732 1733 /* Bits in range [first; last] are known to be set since 1734 * corresponding blocks were allocated. Bits in range 1735 * (first; last) will stay set because they form buddies on 1736 * upper layer. We just deal with borders if they don't 1737 * align with upper layer and then go up. 1738 * Releasing entire group is all about clearing 1739 * single bit of highest order buddy. 1740 */ 1741 1742 /* Example: 1743 * --------------------------------- 1744 * | 1 | 1 | 1 | 1 | 1745 * --------------------------------- 1746 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1747 * --------------------------------- 1748 * 0 1 2 3 4 5 6 7 1749 * \_____________________/ 1750 * 1751 * Neither [1] nor [6] is aligned to above layer. 1752 * Left neighbour [0] is free, so mark it busy, 1753 * decrease bb_counters and extend range to 1754 * [0; 6] 1755 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1756 * mark [6] free, increase bb_counters and shrink range to 1757 * [0; 5]. 1758 * Then shift range to [0; 2], go up and do the same. 1759 */ 1760 1761 1762 if (first & 1) 1763 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1764 if (!(last & 1)) 1765 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1766 if (first > last) 1767 break; 1768 order++; 1769 1770 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { 1771 mb_clear_bits(buddy, first, last - first + 1); 1772 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1773 break; 1774 } 1775 first >>= 1; 1776 last >>= 1; 1777 buddy = buddy2; 1778 } 1779 } 1780 1781 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1782 int first, int count) 1783 { 1784 int left_is_free = 0; 1785 int right_is_free = 0; 1786 int block; 1787 int last = first + count - 1; 1788 struct super_block *sb = e4b->bd_sb; 1789 1790 if (WARN_ON(count == 0)) 1791 return; 1792 BUG_ON(last >= (sb->s_blocksize << 3)); 1793 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1794 /* Don't bother if the block group is corrupt. 
*/ 1795 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1796 return; 1797 1798 mb_check_buddy(e4b); 1799 mb_free_blocks_double(inode, e4b, first, count); 1800 1801 this_cpu_inc(discard_pa_seq); 1802 e4b->bd_info->bb_free += count; 1803 if (first < e4b->bd_info->bb_first_free) 1804 e4b->bd_info->bb_first_free = first; 1805 1806 /* access memory sequentially: check left neighbour, 1807 * clear range and then check right neighbour 1808 */ 1809 if (first != 0) 1810 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1811 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1812 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1813 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1814 1815 if (unlikely(block != -1)) { 1816 struct ext4_sb_info *sbi = EXT4_SB(sb); 1817 ext4_fsblk_t blocknr; 1818 1819 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1820 blocknr += EXT4_C2B(sbi, block); 1821 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 1822 ext4_grp_locked_error(sb, e4b->bd_group, 1823 inode ? inode->i_ino : 0, 1824 blocknr, 1825 "freeing already freed block (bit %u); block bitmap corrupt.", 1826 block); 1827 ext4_mark_group_bitmap_corrupted( 1828 sb, e4b->bd_group, 1829 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1830 } 1831 goto done; 1832 } 1833 1834 /* let's maintain fragments counter */ 1835 if (left_is_free && right_is_free) 1836 e4b->bd_info->bb_fragments--; 1837 else if (!left_is_free && !right_is_free) 1838 e4b->bd_info->bb_fragments++; 1839 1840 /* buddy[0] == bd_bitmap is a special case, so handle 1841 * it right away and let mb_buddy_mark_free stay free of 1842 * zero order checks. 1843 * Check if neighbours are to be coaleasced, 1844 * adjust bitmap bb_counters and borders appropriately. 1845 */ 1846 if (first & 1) { 1847 first += !left_is_free; 1848 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1849 } 1850 if (!(last & 1)) { 1851 last -= !right_is_free; 1852 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; 1853 } 1854 1855 if (first <= last) 1856 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1857 1858 done: 1859 mb_set_largest_free_order(sb, e4b->bd_info); 1860 mb_update_avg_fragment_size(sb, e4b->bd_info); 1861 mb_check_buddy(e4b); 1862 } 1863 1864 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1865 int needed, struct ext4_free_extent *ex) 1866 { 1867 int next = block; 1868 int max, order; 1869 void *buddy; 1870 1871 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1872 BUG_ON(ex == NULL); 1873 1874 buddy = mb_find_buddy(e4b, 0, &max); 1875 BUG_ON(buddy == NULL); 1876 BUG_ON(block >= max); 1877 if (mb_test_bit(block, buddy)) { 1878 ex->fe_len = 0; 1879 ex->fe_start = 0; 1880 ex->fe_group = 0; 1881 return 0; 1882 } 1883 1884 /* find actual order */ 1885 order = mb_find_order_for_block(e4b, block); 1886 block = block >> order; 1887 1888 ex->fe_len = 1 << order; 1889 ex->fe_start = block << order; 1890 ex->fe_group = e4b->bd_group; 1891 1892 /* calc difference from given start */ 1893 next = next - ex->fe_start; 1894 ex->fe_len -= next; 1895 ex->fe_start += next; 1896 1897 while (needed > ex->fe_len && 1898 mb_find_buddy(e4b, order, &max)) { 1899 1900 if (block + 1 >= max) 1901 break; 1902 1903 next = (block + 1) * (1 << order); 1904 if (mb_test_bit(next, e4b->bd_bitmap)) 1905 break; 1906 1907 order = mb_find_order_for_block(e4b, next); 1908 1909 block = next >> order; 1910 ex->fe_len += 1 << order; 1911 } 1912 1913 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 1914 /* Should never happen! 
(but apparently sometimes does?!?) */ 1915 WARN_ON(1); 1916 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1917 "corruption or bug in mb_find_extent " 1918 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 1919 block, order, needed, ex->fe_group, ex->fe_start, 1920 ex->fe_len, ex->fe_logical); 1921 ex->fe_len = 0; 1922 ex->fe_start = 0; 1923 ex->fe_group = 0; 1924 } 1925 return ex->fe_len; 1926 } 1927 1928 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1929 { 1930 int ord; 1931 int mlen = 0; 1932 int max = 0; 1933 int cur; 1934 int start = ex->fe_start; 1935 int len = ex->fe_len; 1936 unsigned ret = 0; 1937 int len0 = len; 1938 void *buddy; 1939 1940 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1941 BUG_ON(e4b->bd_group != ex->fe_group); 1942 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1943 mb_check_buddy(e4b); 1944 mb_mark_used_double(e4b, start, len); 1945 1946 this_cpu_inc(discard_pa_seq); 1947 e4b->bd_info->bb_free -= len; 1948 if (e4b->bd_info->bb_first_free == start) 1949 e4b->bd_info->bb_first_free += len; 1950 1951 /* let's maintain fragments counter */ 1952 if (start != 0) 1953 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1954 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1955 max = !mb_test_bit(start + len, e4b->bd_bitmap); 1956 if (mlen && max) 1957 e4b->bd_info->bb_fragments++; 1958 else if (!mlen && !max) 1959 e4b->bd_info->bb_fragments--; 1960 1961 /* let's maintain buddy itself */ 1962 while (len) { 1963 ord = mb_find_order_for_block(e4b, start); 1964 1965 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1966 /* the whole chunk may be allocated at once! */ 1967 mlen = 1 << ord; 1968 buddy = mb_find_buddy(e4b, ord, &max); 1969 BUG_ON((start >> ord) >= max); 1970 mb_set_bit(start >> ord, buddy); 1971 e4b->bd_info->bb_counters[ord]--; 1972 start += mlen; 1973 len -= mlen; 1974 BUG_ON(len < 0); 1975 continue; 1976 } 1977 1978 /* store for history */ 1979 if (ret == 0) 1980 ret = len | (ord << 16); 1981 1982 /* we have to split large buddy */ 1983 BUG_ON(ord <= 0); 1984 buddy = mb_find_buddy(e4b, ord, &max); 1985 mb_set_bit(start >> ord, buddy); 1986 e4b->bd_info->bb_counters[ord]--; 1987 1988 ord--; 1989 cur = (start >> ord) & ~1U; 1990 buddy = mb_find_buddy(e4b, ord, &max); 1991 mb_clear_bit(cur, buddy); 1992 mb_clear_bit(cur + 1, buddy); 1993 e4b->bd_info->bb_counters[ord]++; 1994 e4b->bd_info->bb_counters[ord]++; 1995 } 1996 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1997 1998 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 1999 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 2000 mb_check_buddy(e4b); 2001 2002 return ret; 2003 } 2004 2005 /* 2006 * Must be called under group lock! 2007 */ 2008 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 2009 struct ext4_buddy *e4b) 2010 { 2011 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2012 int ret; 2013 2014 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 2015 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2016 2017 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2018 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2019 ret = mb_mark_used(e4b, &ac->ac_b_ex); 2020 2021 /* preallocation can change ac_b_ex, thus we store actually 2022 * allocated blocks for history */ 2023 ac->ac_f_ex = ac->ac_b_ex; 2024 2025 ac->ac_status = AC_STATUS_FOUND; 2026 ac->ac_tail = ret & 0xffff; 2027 ac->ac_buddy = ret >> 16; 2028 2029 /* 2030 * take the page reference. 
We want the page to be pinned 2031 * so that we don't get an ext4_mb_init_cache() call for this 2032 * group until we update the bitmap. That would mean we 2033 * double allocate blocks. The reference is dropped 2034 * in ext4_mb_release_context() 2035 */ 2036 ac->ac_bitmap_page = e4b->bd_bitmap_page; 2037 get_page(ac->ac_bitmap_page); 2038 ac->ac_buddy_page = e4b->bd_buddy_page; 2039 get_page(ac->ac_buddy_page); 2040 /* store last allocated for subsequent stream allocation */ 2041 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2042 spin_lock(&sbi->s_md_lock); 2043 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2044 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2045 spin_unlock(&sbi->s_md_lock); 2046 } 2047 /* 2048 * As we've just preallocated more space than 2049 * the user originally requested, we store the allocated 2050 * space in a special descriptor. 2051 */ 2052 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 2053 ext4_mb_new_preallocation(ac); 2054 2055 } 2056 2057 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2058 struct ext4_buddy *e4b, 2059 int finish_group) 2060 { 2061 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2062 struct ext4_free_extent *bex = &ac->ac_b_ex; 2063 struct ext4_free_extent *gex = &ac->ac_g_ex; 2064 struct ext4_free_extent ex; 2065 int max; 2066 2067 if (ac->ac_status == AC_STATUS_FOUND) 2068 return; 2069 /* 2070 * We don't want to scan for a whole year 2071 */ 2072 if (ac->ac_found > sbi->s_mb_max_to_scan && 2073 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2074 ac->ac_status = AC_STATUS_BREAK; 2075 return; 2076 } 2077 2078 /* 2079 * Haven't found a good chunk so far, let's continue 2080 */ 2081 if (bex->fe_len < gex->fe_len) 2082 return; 2083 2084 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2085 && bex->fe_group == e4b->bd_group) { 2086 /* recheck the chunk's availability - we don't know 2087 * whether it was found within this lock-unlock 2088 * period or not */ 2089 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); 2090 if (max >= gex->fe_len) { 2091 ext4_mb_use_best_found(ac, e4b); 2092 return; 2093 } 2094 } 2095 } 2096 2097 /* 2098 * The routine checks whether the found extent is good enough. If it is, 2099 * then the extent gets marked used and a flag is set in the context 2100 * to stop scanning. Otherwise, the extent is compared with the 2101 * previously found extent and, if the new one is better, it is stored 2102 * in the context. Later, the best found extent will be used if 2103 * mballoc can't find a good enough extent. 2104 * 2105 * FIXME: real allocation policy is to be designed yet!
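 *
 * As an illustrative example (numbers invented here, not taken from the
 * code): with a goal of 16 clusters, a 16-cluster extent is taken
 * immediately; while the best extent found so far is only 8 clusters, a
 * 12-cluster extent replaces it; once the best found is 24 clusters, an
 * 18-cluster extent (still satisfying the goal, but closer to it)
 * replaces it, whereas a 12-cluster extent does not.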
2106 */ 2107 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2108 struct ext4_free_extent *ex, 2109 struct ext4_buddy *e4b) 2110 { 2111 struct ext4_free_extent *bex = &ac->ac_b_ex; 2112 struct ext4_free_extent *gex = &ac->ac_g_ex; 2113 2114 BUG_ON(ex->fe_len <= 0); 2115 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2116 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2117 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2118 2119 ac->ac_found++; 2120 2121 /* 2122 * The special case - take what you catch first 2123 */ 2124 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2125 *bex = *ex; 2126 ext4_mb_use_best_found(ac, e4b); 2127 return; 2128 } 2129 2130 /* 2131 * Let's check whether the chuck is good enough 2132 */ 2133 if (ex->fe_len == gex->fe_len) { 2134 *bex = *ex; 2135 ext4_mb_use_best_found(ac, e4b); 2136 return; 2137 } 2138 2139 /* 2140 * If this is first found extent, just store it in the context 2141 */ 2142 if (bex->fe_len == 0) { 2143 *bex = *ex; 2144 return; 2145 } 2146 2147 /* 2148 * If new found extent is better, store it in the context 2149 */ 2150 if (bex->fe_len < gex->fe_len) { 2151 /* if the request isn't satisfied, any found extent 2152 * larger than previous best one is better */ 2153 if (ex->fe_len > bex->fe_len) 2154 *bex = *ex; 2155 } else if (ex->fe_len > gex->fe_len) { 2156 /* if the request is satisfied, then we try to find 2157 * an extent that still satisfy the request, but is 2158 * smaller than previous one */ 2159 if (ex->fe_len < bex->fe_len) 2160 *bex = *ex; 2161 } 2162 2163 ext4_mb_check_limits(ac, e4b, 0); 2164 } 2165 2166 static noinline_for_stack 2167 int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2168 struct ext4_buddy *e4b) 2169 { 2170 struct ext4_free_extent ex = ac->ac_b_ex; 2171 ext4_group_t group = ex.fe_group; 2172 int max; 2173 int err; 2174 2175 BUG_ON(ex.fe_len <= 0); 2176 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2177 if (err) 2178 return err; 2179 2180 ext4_lock_group(ac->ac_sb, group); 2181 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2182 2183 if (max > 0) { 2184 ac->ac_b_ex = ex; 2185 ext4_mb_use_best_found(ac, e4b); 2186 } 2187 2188 ext4_unlock_group(ac->ac_sb, group); 2189 ext4_mb_unload_buddy(e4b); 2190 2191 return 0; 2192 } 2193 2194 static noinline_for_stack 2195 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2196 struct ext4_buddy *e4b) 2197 { 2198 ext4_group_t group = ac->ac_g_ex.fe_group; 2199 int max; 2200 int err; 2201 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2202 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2203 struct ext4_free_extent ex; 2204 2205 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 2206 return 0; 2207 if (grp->bb_free == 0) 2208 return 0; 2209 2210 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2211 if (err) 2212 return err; 2213 2214 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2215 ext4_mb_unload_buddy(e4b); 2216 return 0; 2217 } 2218 2219 ext4_lock_group(ac->ac_sb, group); 2220 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2221 ac->ac_g_ex.fe_len, &ex); 2222 ex.fe_logical = 0xDEADFA11; /* debug value */ 2223 2224 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 2225 ext4_fsblk_t start; 2226 2227 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 2228 ex.fe_start; 2229 /* use do_div to get remainder (would be 64-bit modulo) */ 2230 if (do_div(start, sbi->s_stripe) == 0) { 2231 ac->ac_found++; 2232 ac->ac_b_ex = ex; 2233 
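/* goal extent starts on a stripe boundary, so take it as the best found */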
ext4_mb_use_best_found(ac, e4b); 2234 } 2235 } else if (max >= ac->ac_g_ex.fe_len) { 2236 BUG_ON(ex.fe_len <= 0); 2237 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2238 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2239 ac->ac_found++; 2240 ac->ac_b_ex = ex; 2241 ext4_mb_use_best_found(ac, e4b); 2242 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2243 /* Sometimes, caller may want to merge even small 2244 * number of blocks to an existing extent */ 2245 BUG_ON(ex.fe_len <= 0); 2246 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2247 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2248 ac->ac_found++; 2249 ac->ac_b_ex = ex; 2250 ext4_mb_use_best_found(ac, e4b); 2251 } 2252 ext4_unlock_group(ac->ac_sb, group); 2253 ext4_mb_unload_buddy(e4b); 2254 2255 return 0; 2256 } 2257 2258 /* 2259 * The routine scans buddy structures (not bitmap!) from given order 2260 * to max order and tries to find big enough chunk to satisfy the req 2261 */ 2262 static noinline_for_stack 2263 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2264 struct ext4_buddy *e4b) 2265 { 2266 struct super_block *sb = ac->ac_sb; 2267 struct ext4_group_info *grp = e4b->bd_info; 2268 void *buddy; 2269 int i; 2270 int k; 2271 int max; 2272 2273 BUG_ON(ac->ac_2order <= 0); 2274 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2275 if (grp->bb_counters[i] == 0) 2276 continue; 2277 2278 buddy = mb_find_buddy(e4b, i, &max); 2279 BUG_ON(buddy == NULL); 2280 2281 k = mb_find_next_zero_bit(buddy, max, 0); 2282 if (k >= max) { 2283 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2284 "%d free clusters of order %d. But found 0", 2285 grp->bb_counters[i], i); 2286 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2287 e4b->bd_group, 2288 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2289 break; 2290 } 2291 ac->ac_found++; 2292 2293 ac->ac_b_ex.fe_len = 1 << i; 2294 ac->ac_b_ex.fe_start = k << i; 2295 ac->ac_b_ex.fe_group = e4b->bd_group; 2296 2297 ext4_mb_use_best_found(ac, e4b); 2298 2299 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2300 2301 if (EXT4_SB(sb)->s_mb_stats) 2302 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2303 2304 break; 2305 } 2306 } 2307 2308 /* 2309 * The routine scans the group and measures all found extents. 2310 * In order to optimize scanning, caller must pass number of 2311 * free blocks in the group, so the routine can know upper limit. 2312 */ 2313 static noinline_for_stack 2314 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2315 struct ext4_buddy *e4b) 2316 { 2317 struct super_block *sb = ac->ac_sb; 2318 void *bitmap = e4b->bd_bitmap; 2319 struct ext4_free_extent ex; 2320 int i; 2321 int free; 2322 2323 free = e4b->bd_info->bb_free; 2324 if (WARN_ON(free <= 0)) 2325 return; 2326 2327 i = e4b->bd_info->bb_first_free; 2328 2329 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2330 i = mb_find_next_zero_bit(bitmap, 2331 EXT4_CLUSTERS_PER_GROUP(sb), i); 2332 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 2333 /* 2334 * IF we have corrupt bitmap, we won't find any 2335 * free blocks even though group info says we 2336 * have free blocks 2337 */ 2338 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2339 "%d free clusters as per " 2340 "group info. 
But bitmap says 0", 2341 free); 2342 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2343 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2344 break; 2345 } 2346 2347 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2348 if (WARN_ON(ex.fe_len <= 0)) 2349 break; 2350 if (free < ex.fe_len) { 2351 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2352 "%d free clusters as per " 2353 "group info. But got %d blocks", 2354 free, ex.fe_len); 2355 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2356 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2357 /* 2358 * The number of free blocks differs. This mostly 2359 * indicates that the bitmap is corrupt. So exit 2360 * without claiming the space. 2361 */ 2362 break; 2363 } 2364 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2365 ext4_mb_measure_extent(ac, &ex, e4b); 2366 2367 i += ex.fe_len; 2368 free -= ex.fe_len; 2369 } 2370 2371 ext4_mb_check_limits(ac, e4b, 1); 2372 } 2373 2374 /* 2375 * This is a special case for storage arrays like raid5, where 2376 * we try to find stripe-aligned chunks for stripe-size-multiple requests. 2377 */ 2378 static noinline_for_stack 2379 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2380 struct ext4_buddy *e4b) 2381 { 2382 struct super_block *sb = ac->ac_sb; 2383 struct ext4_sb_info *sbi = EXT4_SB(sb); 2384 void *bitmap = e4b->bd_bitmap; 2385 struct ext4_free_extent ex; 2386 ext4_fsblk_t first_group_block; 2387 ext4_fsblk_t a; 2388 ext4_grpblk_t i; 2389 int max; 2390 2391 BUG_ON(sbi->s_stripe == 0); 2392 2393 /* find the first stripe-aligned block in the group */ 2394 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2395 2396 a = first_group_block + sbi->s_stripe - 1; 2397 do_div(a, sbi->s_stripe); 2398 i = (a * sbi->s_stripe) - first_group_block; 2399 2400 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2401 if (!mb_test_bit(i, bitmap)) { 2402 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2403 if (max >= sbi->s_stripe) { 2404 ac->ac_found++; 2405 ex.fe_logical = 0xDEADF00D; /* debug value */ 2406 ac->ac_b_ex = ex; 2407 ext4_mb_use_best_found(ac, e4b); 2408 break; 2409 } 2410 } 2411 i += sbi->s_stripe; 2412 } 2413 } 2414 2415 /* 2416 * This is also called BEFORE we load the buddy bitmap. 2417 * Returns true or false, indicating whether the group is suitable 2418 * for the allocation or not.
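 *
 * In brief, mirroring the checks below: cr 0 only accepts a group whose
 * largest free buddy order can satisfy a power-of-two request, cr 1
 * requires the average free extent (bb_free / bb_fragments) to cover the
 * goal length, cr 2 only requires enough free clusters in total, and
 * cr 3 accepts any group that has free space.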
2419 */ 2420 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2421 ext4_group_t group, int cr) 2422 { 2423 ext4_grpblk_t free, fragments; 2424 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2425 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2426 2427 BUG_ON(cr < 0 || cr >= 4); 2428 2429 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2430 return false; 2431 2432 free = grp->bb_free; 2433 if (free == 0) 2434 return false; 2435 2436 fragments = grp->bb_fragments; 2437 if (fragments == 0) 2438 return false; 2439 2440 switch (cr) { 2441 case 0: 2442 BUG_ON(ac->ac_2order == 0); 2443 2444 /* Avoid using the first bg of a flexgroup for data files */ 2445 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2446 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2447 ((group % flex_size) == 0)) 2448 return false; 2449 2450 if (free < ac->ac_g_ex.fe_len) 2451 return false; 2452 2453 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 2454 return true; 2455 2456 if (grp->bb_largest_free_order < ac->ac_2order) 2457 return false; 2458 2459 return true; 2460 case 1: 2461 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2462 return true; 2463 break; 2464 case 2: 2465 if (free >= ac->ac_g_ex.fe_len) 2466 return true; 2467 break; 2468 case 3: 2469 return true; 2470 default: 2471 BUG(); 2472 } 2473 2474 return false; 2475 } 2476 2477 /* 2478 * This could return negative error code if something goes wrong 2479 * during ext4_mb_init_group(). This should not be called with 2480 * ext4_lock_group() held. 2481 * 2482 * Note: because we are conditionally operating with the group lock in 2483 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2484 * function using __acquire and __release. This means we need to be 2485 * super careful before messing with the error path handling via "goto 2486 * out"! 2487 */ 2488 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2489 ext4_group_t group, int cr) 2490 { 2491 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2492 struct super_block *sb = ac->ac_sb; 2493 struct ext4_sb_info *sbi = EXT4_SB(sb); 2494 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2495 ext4_grpblk_t free; 2496 int ret = 0; 2497 2498 if (sbi->s_mb_stats) 2499 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2500 if (should_lock) { 2501 ext4_lock_group(sb, group); 2502 __release(ext4_group_lock_ptr(sb, group)); 2503 } 2504 free = grp->bb_free; 2505 if (free == 0) 2506 goto out; 2507 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2508 goto out; 2509 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2510 goto out; 2511 if (should_lock) { 2512 __acquire(ext4_group_lock_ptr(sb, group)); 2513 ext4_unlock_group(sb, group); 2514 } 2515 2516 /* We only do this if the grp has never been initialized */ 2517 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2518 struct ext4_group_desc *gdp = 2519 ext4_get_group_desc(sb, group, NULL); 2520 int ret; 2521 2522 /* cr=0/1 is a very optimistic search to find large 2523 * good chunks almost for free. If buddy data is not 2524 * ready, then this optimization makes no sense. But 2525 * we never skip the first block group in a flex_bg, 2526 * since this gets used for metadata block allocation, 2527 * and we want to make sure we locate metadata blocks 2528 * in the first block group in the flex_bg if possible. 
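 *
 * In other words, the check below skips a not-yet-initialized group at
 * cr 0/1 unless it is the first group of its flex_bg or its bitmap is
 * marked BLOCK_UNINIT on disk (so initializing it needs no extra I/O).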
2529 */ 2530 if (cr < 2 && 2531 (!sbi->s_log_groups_per_flex || 2532 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2533 !(ext4_has_group_desc_csum(sb) && 2534 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2535 return 0; 2536 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 2537 if (ret) 2538 return ret; 2539 } 2540 2541 if (should_lock) { 2542 ext4_lock_group(sb, group); 2543 __release(ext4_group_lock_ptr(sb, group)); 2544 } 2545 ret = ext4_mb_good_group(ac, group, cr); 2546 out: 2547 if (should_lock) { 2548 __acquire(ext4_group_lock_ptr(sb, group)); 2549 ext4_unlock_group(sb, group); 2550 } 2551 return ret; 2552 } 2553 2554 /* 2555 * Start prefetching @nr block bitmaps starting at @group. 2556 * Return the next group which needs to be prefetched. 2557 */ 2558 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2559 unsigned int nr, int *cnt) 2560 { 2561 ext4_group_t ngroups = ext4_get_groups_count(sb); 2562 struct buffer_head *bh; 2563 struct blk_plug plug; 2564 2565 blk_start_plug(&plug); 2566 while (nr-- > 0) { 2567 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2568 NULL); 2569 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2570 2571 /* 2572 * Prefetch block groups with free blocks; but don't 2573 * bother if the group is marked uninitialized on disk, since 2574 * it won't require I/O to read. Also only try to 2575 * prefetch once, so we avoid a getblk() call, which can 2576 * be expensive. 2577 */ 2578 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2579 EXT4_MB_GRP_NEED_INIT(grp) && 2580 ext4_free_group_clusters(sb, gdp) > 0 && 2581 !(ext4_has_group_desc_csum(sb) && 2582 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2583 bh = ext4_read_block_bitmap_nowait(sb, group, true); 2584 if (bh && !IS_ERR(bh)) { 2585 if (!buffer_uptodate(bh) && cnt) 2586 (*cnt)++; 2587 brelse(bh); 2588 } 2589 } 2590 if (++group >= ngroups) 2591 group = 0; 2592 } 2593 blk_finish_plug(&plug); 2594 return group; 2595 } 2596 2597 /* 2598 * Prefetching reads the block bitmap into the buffer cache; but we 2599 * need to make sure that the buddy bitmap in the page cache has been 2600 * initialized. Note that ext4_mb_init_group() will block if the 2601 * prefetch I/O has not yet completed, and will issue the read itself 2602 * if ext4_mb_prefetch() did not start the I/O at all. 2603 * 2604 * TODO: We should actually kick off the buddy bitmap setup in a work 2605 * queue when the buffer I/O is completed, so that we don't block 2606 * waiting for the block allocation bitmap read to finish when 2607 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
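 *
 * Note that this function walks the @nr groups *preceding* @group
 * (wrapping around from group 0 to the last group), i.e. the groups that
 * ext4_mb_prefetch() just submitted reads for, and initializes their
 * buddy data if that is still needed.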
2608 */ 2609 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2610 unsigned int nr) 2611 { 2612 while (nr-- > 0) { 2613 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2614 NULL); 2615 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2616 2617 if (!group) 2618 group = ext4_get_groups_count(sb); 2619 group--; 2620 grp = ext4_get_group_info(sb, group); 2621 2622 if (EXT4_MB_GRP_NEED_INIT(grp) && 2623 ext4_free_group_clusters(sb, gdp) > 0 && 2624 !(ext4_has_group_desc_csum(sb) && 2625 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2626 if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2627 break; 2628 } 2629 } 2630 } 2631 2632 static noinline_for_stack int 2633 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2634 { 2635 ext4_group_t prefetch_grp = 0, ngroups, group, i; 2636 int cr = -1; 2637 int err = 0, first_err = 0; 2638 unsigned int nr = 0, prefetch_ios = 0; 2639 struct ext4_sb_info *sbi; 2640 struct super_block *sb; 2641 struct ext4_buddy e4b; 2642 int lost; 2643 2644 sb = ac->ac_sb; 2645 sbi = EXT4_SB(sb); 2646 ngroups = ext4_get_groups_count(sb); 2647 /* non-extent files are limited to low blocks/groups */ 2648 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2649 ngroups = sbi->s_blockfile_groups; 2650 2651 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2652 2653 /* first, try the goal */ 2654 err = ext4_mb_find_by_goal(ac, &e4b); 2655 if (err || ac->ac_status == AC_STATUS_FOUND) 2656 goto out; 2657 2658 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2659 goto out; 2660 2661 /* 2662 * ac->ac_2order is set only if the fe_len is a power of 2. 2663 * If ac->ac_2order is set, we also set the criteria to 0 so that we 2664 * try an exact allocation using the buddy data. 2665 */ 2666 i = fls(ac->ac_g_ex.fe_len); 2667 ac->ac_2order = 0; 2668 /* 2669 * We search using buddy data only if the order of the request 2670 * is greater than or equal to sbi->s_mb_order2_reqs. 2671 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req. 2672 * We also support searching for power-of-two requests only for 2673 * requests up to the maximum buddy size we have constructed. 2674 */ 2675 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 2676 /* 2677 * This should tell if fe_len is exactly a power of 2 2678 */ 2679 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2680 ac->ac_2order = array_index_nospec(i - 1, 2681 MB_NUM_ORDERS(sb)); 2682 } 2683 2684 /* if stream allocation is enabled, use the global goal */ 2685 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2686 /* TBD: may be hot point */ 2687 spin_lock(&sbi->s_md_lock); 2688 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2689 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2690 spin_unlock(&sbi->s_md_lock); 2691 } 2692 2693 /* Let's just scan groups to find more or less suitable blocks */ 2694 cr = ac->ac_2order ?
0 : 1; 2695 /* 2696 * cr == 0 try to get exact allocation, 2697 * cr == 3 try to get anything 2698 */ 2699 repeat: 2700 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2701 ac->ac_criteria = cr; 2702 /* 2703 * searching for the right group start 2704 * from the goal value specified 2705 */ 2706 group = ac->ac_g_ex.fe_group; 2707 ac->ac_last_optimal_group = group; 2708 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2709 prefetch_grp = group; 2710 2711 for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups), 2712 i++) { 2713 int ret = 0, new_cr; 2714 2715 cond_resched(); 2716 2717 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups); 2718 if (new_cr != cr) { 2719 cr = new_cr; 2720 goto repeat; 2721 } 2722 2723 /* 2724 * Batch reads of the block allocation bitmaps 2725 * to get multiple READs in flight; limit 2726 * prefetching at cr=0/1, otherwise mballoc can 2727 * spend a lot of time loading imperfect groups 2728 */ 2729 if ((prefetch_grp == group) && 2730 (cr > 1 || 2731 prefetch_ios < sbi->s_mb_prefetch_limit)) { 2732 unsigned int curr_ios = prefetch_ios; 2733 2734 nr = sbi->s_mb_prefetch; 2735 if (ext4_has_feature_flex_bg(sb)) { 2736 nr = 1 << sbi->s_log_groups_per_flex; 2737 nr -= group & (nr - 1); 2738 nr = min(nr, sbi->s_mb_prefetch); 2739 } 2740 prefetch_grp = ext4_mb_prefetch(sb, group, 2741 nr, &prefetch_ios); 2742 if (prefetch_ios == curr_ios) 2743 nr = 0; 2744 } 2745 2746 /* This now checks without needing the buddy page */ 2747 ret = ext4_mb_good_group_nolock(ac, group, cr); 2748 if (ret <= 0) { 2749 if (!first_err) 2750 first_err = ret; 2751 continue; 2752 } 2753 2754 err = ext4_mb_load_buddy(sb, group, &e4b); 2755 if (err) 2756 goto out; 2757 2758 ext4_lock_group(sb, group); 2759 2760 /* 2761 * We need to check again after locking the 2762 * block group 2763 */ 2764 ret = ext4_mb_good_group(ac, group, cr); 2765 if (ret == 0) { 2766 ext4_unlock_group(sb, group); 2767 ext4_mb_unload_buddy(&e4b); 2768 continue; 2769 } 2770 2771 ac->ac_groups_scanned++; 2772 if (cr == 0) 2773 ext4_mb_simple_scan_group(ac, &e4b); 2774 else if (cr == 1 && sbi->s_stripe && 2775 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2776 ext4_mb_scan_aligned(ac, &e4b); 2777 else 2778 ext4_mb_complex_scan_group(ac, &e4b); 2779 2780 ext4_unlock_group(sb, group); 2781 ext4_mb_unload_buddy(&e4b); 2782 2783 if (ac->ac_status != AC_STATUS_CONTINUE) 2784 break; 2785 } 2786 /* Processed all groups and haven't found blocks */ 2787 if (sbi->s_mb_stats && i == ngroups) 2788 atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2789 } 2790 2791 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2792 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2793 /* 2794 * We've been searching too long. Let's try to allocate 2795 * the best chunk we've found so far 2796 */ 2797 ext4_mb_try_best_found(ac, &e4b); 2798 if (ac->ac_status != AC_STATUS_FOUND) { 2799 /* 2800 * Someone more lucky has already allocated it. 
2801 * The only thing we can do is just take first 2802 * found block(s) 2803 */ 2804 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 2805 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2806 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2807 ac->ac_b_ex.fe_len, lost); 2808 2809 ac->ac_b_ex.fe_group = 0; 2810 ac->ac_b_ex.fe_start = 0; 2811 ac->ac_b_ex.fe_len = 0; 2812 ac->ac_status = AC_STATUS_CONTINUE; 2813 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2814 cr = 3; 2815 goto repeat; 2816 } 2817 } 2818 2819 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2820 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2821 out: 2822 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2823 err = first_err; 2824 2825 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2826 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2827 ac->ac_flags, cr, err); 2828 2829 if (nr) 2830 ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2831 2832 return err; 2833 } 2834 2835 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2836 { 2837 struct super_block *sb = pde_data(file_inode(seq->file)); 2838 ext4_group_t group; 2839 2840 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2841 return NULL; 2842 group = *pos + 1; 2843 return (void *) ((unsigned long) group); 2844 } 2845 2846 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2847 { 2848 struct super_block *sb = pde_data(file_inode(seq->file)); 2849 ext4_group_t group; 2850 2851 ++*pos; 2852 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2853 return NULL; 2854 group = *pos + 1; 2855 return (void *) ((unsigned long) group); 2856 } 2857 2858 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2859 { 2860 struct super_block *sb = pde_data(file_inode(seq->file)); 2861 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2862 int i; 2863 int err, buddy_loaded = 0; 2864 struct ext4_buddy e4b; 2865 struct ext4_group_info *grinfo; 2866 unsigned char blocksize_bits = min_t(unsigned char, 2867 sb->s_blocksize_bits, 2868 EXT4_MAX_BLOCK_LOG_SIZE); 2869 struct sg { 2870 struct ext4_group_info info; 2871 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2872 } sg; 2873 2874 group--; 2875 if (group == 0) 2876 seq_puts(seq, "#group: free frags first [" 2877 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2878 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2879 2880 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2881 sizeof(struct ext4_group_info); 2882 2883 grinfo = ext4_get_group_info(sb, group); 2884 /* Load the group info in memory only if not already loaded. */ 2885 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2886 err = ext4_mb_load_buddy(sb, group, &e4b); 2887 if (err) { 2888 seq_printf(seq, "#%-5u: I/O error\n", group); 2889 return 0; 2890 } 2891 buddy_loaded = 1; 2892 } 2893 2894 memcpy(&sg, ext4_get_group_info(sb, group), i); 2895 2896 if (buddy_loaded) 2897 ext4_mb_unload_buddy(&e4b); 2898 2899 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2900 sg.info.bb_fragments, sg.info.bb_first_free); 2901 for (i = 0; i <= 13; i++) 2902 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
2903 sg.info.bb_counters[i] : 0); 2904 seq_puts(seq, " ]\n"); 2905 2906 return 0; 2907 } 2908 2909 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2910 { 2911 } 2912 2913 const struct seq_operations ext4_mb_seq_groups_ops = { 2914 .start = ext4_mb_seq_groups_start, 2915 .next = ext4_mb_seq_groups_next, 2916 .stop = ext4_mb_seq_groups_stop, 2917 .show = ext4_mb_seq_groups_show, 2918 }; 2919 2920 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2921 { 2922 struct super_block *sb = (struct super_block *)seq->private; 2923 struct ext4_sb_info *sbi = EXT4_SB(sb); 2924 2925 seq_puts(seq, "mballoc:\n"); 2926 if (!sbi->s_mb_stats) { 2927 seq_puts(seq, "\tmb stats collection turned off.\n"); 2928 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2929 return 0; 2930 } 2931 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2932 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2933 2934 seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2935 2936 seq_puts(seq, "\tcr0_stats:\n"); 2937 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); 2938 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2939 atomic64_read(&sbi->s_bal_cX_groups_considered[0])); 2940 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2941 atomic64_read(&sbi->s_bal_cX_failed[0])); 2942 seq_printf(seq, "\t\tbad_suggestions: %u\n", 2943 atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2944 2945 seq_puts(seq, "\tcr1_stats:\n"); 2946 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); 2947 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2948 atomic64_read(&sbi->s_bal_cX_groups_considered[1])); 2949 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2950 atomic64_read(&sbi->s_bal_cX_failed[1])); 2951 seq_printf(seq, "\t\tbad_suggestions: %u\n", 2952 atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2953 2954 seq_puts(seq, "\tcr2_stats:\n"); 2955 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); 2956 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2957 atomic64_read(&sbi->s_bal_cX_groups_considered[2])); 2958 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2959 atomic64_read(&sbi->s_bal_cX_failed[2])); 2960 2961 seq_puts(seq, "\tcr3_stats:\n"); 2962 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); 2963 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2964 atomic64_read(&sbi->s_bal_cX_groups_considered[3])); 2965 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2966 atomic64_read(&sbi->s_bal_cX_failed[3])); 2967 seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2968 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 2969 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2970 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2971 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2972 2973 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2974 atomic_read(&sbi->s_mb_buddies_generated), 2975 ext4_get_groups_count(sb)); 2976 seq_printf(seq, "\tbuddies_time_used: %llu\n", 2977 atomic64_read(&sbi->s_mb_generation_time)); 2978 seq_printf(seq, "\tpreallocated: %u\n", 2979 atomic_read(&sbi->s_mb_preallocated)); 2980 seq_printf(seq, "\tdiscarded: %u\n", 2981 atomic_read(&sbi->s_mb_discarded)); 2982 return 0; 2983 } 2984 2985 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 2986 
__acquires(&EXT4_SB(sb)->s_mb_rb_lock) 2987 { 2988 struct super_block *sb = pde_data(file_inode(seq->file)); 2989 unsigned long position; 2990 2991 read_lock(&EXT4_SB(sb)->s_mb_rb_lock); 2992 2993 if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1) 2994 return NULL; 2995 position = *pos + 1; 2996 return (void *) ((unsigned long) position); 2997 } 2998 2999 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3000 { 3001 struct super_block *sb = pde_data(file_inode(seq->file)); 3002 unsigned long position; 3003 3004 ++*pos; 3005 if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1) 3006 return NULL; 3007 position = *pos + 1; 3008 return (void *) ((unsigned long) position); 3009 } 3010 3011 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3012 { 3013 struct super_block *sb = pde_data(file_inode(seq->file)); 3014 struct ext4_sb_info *sbi = EXT4_SB(sb); 3015 unsigned long position = ((unsigned long) v); 3016 struct ext4_group_info *grp; 3017 struct rb_node *n; 3018 unsigned int count, min, max; 3019 3020 position--; 3021 if (position >= MB_NUM_ORDERS(sb)) { 3022 seq_puts(seq, "fragment_size_tree:\n"); 3023 n = rb_first(&sbi->s_mb_avg_fragment_size_root); 3024 if (!n) { 3025 seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n"); 3026 return 0; 3027 } 3028 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb); 3029 min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0; 3030 count = 1; 3031 while (rb_next(n)) { 3032 count++; 3033 n = rb_next(n); 3034 } 3035 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb); 3036 max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0; 3037 3038 seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n", 3039 min, max, count); 3040 return 0; 3041 } 3042 3043 if (position == 0) { 3044 seq_printf(seq, "optimize_scan: %d\n", 3045 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); 3046 seq_puts(seq, "max_free_order_lists:\n"); 3047 } 3048 count = 0; 3049 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3050 bb_largest_free_order_node) 3051 count++; 3052 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3053 (unsigned int)position, count); 3054 3055 return 0; 3056 } 3057 3058 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3059 __releases(&EXT4_SB(sb)->s_mb_rb_lock) 3060 { 3061 struct super_block *sb = pde_data(file_inode(seq->file)); 3062 3063 read_unlock(&EXT4_SB(sb)->s_mb_rb_lock); 3064 } 3065 3066 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3067 .start = ext4_mb_seq_structs_summary_start, 3068 .next = ext4_mb_seq_structs_summary_next, 3069 .stop = ext4_mb_seq_structs_summary_stop, 3070 .show = ext4_mb_seq_structs_summary_show, 3071 }; 3072 3073 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3074 { 3075 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3076 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3077 3078 BUG_ON(!cachep); 3079 return cachep; 3080 } 3081 3082 /* 3083 * Allocate the top-level s_group_info array for the specified number 3084 * of groups 3085 */ 3086 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3087 { 3088 struct ext4_sb_info *sbi = EXT4_SB(sb); 3089 unsigned size; 3090 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3091 3092 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3093 EXT4_DESC_PER_BLOCK_BITS(sb); 3094 if (size <= sbi->s_group_info_size) 3095 return 0; 3096 3097 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3098 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3099 if (!new_groupinfo) { 3100 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3101 return -ENOMEM; 3102 } 3103 rcu_read_lock(); 3104 old_groupinfo = rcu_dereference(sbi->s_group_info); 3105 if (old_groupinfo) 3106 memcpy(new_groupinfo, old_groupinfo, 3107 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3108 rcu_read_unlock(); 3109 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3110 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3111 if (old_groupinfo) 3112 ext4_kvfree_array_rcu(old_groupinfo); 3113 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3114 sbi->s_group_info_size); 3115 return 0; 3116 } 3117 3118 /* Create and initialize ext4_group_info data for the given group. */ 3119 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3120 struct ext4_group_desc *desc) 3121 { 3122 int i; 3123 int metalen = 0; 3124 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3125 struct ext4_sb_info *sbi = EXT4_SB(sb); 3126 struct ext4_group_info **meta_group_info; 3127 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3128 3129 /* 3130 * First check if this group is the first of a reserved block. 
3131 * If it's true, we have to allocate a new table of pointers 3132 * to ext4_group_info structures 3133 */ 3134 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3135 metalen = sizeof(*meta_group_info) << 3136 EXT4_DESC_PER_BLOCK_BITS(sb); 3137 meta_group_info = kmalloc(metalen, GFP_NOFS); 3138 if (meta_group_info == NULL) { 3139 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3140 "for a buddy group"); 3141 goto exit_meta_group_info; 3142 } 3143 rcu_read_lock(); 3144 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3145 rcu_read_unlock(); 3146 } 3147 3148 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3149 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3150 3151 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3152 if (meta_group_info[i] == NULL) { 3153 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3154 goto exit_group_info; 3155 } 3156 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3157 &(meta_group_info[i]->bb_state)); 3158 3159 /* 3160 * initialize bb_free to be able to skip 3161 * empty groups without initialization 3162 */ 3163 if (ext4_has_group_desc_csum(sb) && 3164 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3165 meta_group_info[i]->bb_free = 3166 ext4_free_clusters_after_init(sb, group, desc); 3167 } else { 3168 meta_group_info[i]->bb_free = 3169 ext4_free_group_clusters(sb, desc); 3170 } 3171 3172 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3173 init_rwsem(&meta_group_info[i]->alloc_sem); 3174 meta_group_info[i]->bb_free_root = RB_ROOT; 3175 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 3176 RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb); 3177 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3178 meta_group_info[i]->bb_group = group; 3179 3180 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3181 return 0; 3182 3183 exit_group_info: 3184 /* If a meta_group_info table has been allocated, release it now */ 3185 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3186 struct ext4_group_info ***group_info; 3187 3188 rcu_read_lock(); 3189 group_info = rcu_dereference(sbi->s_group_info); 3190 kfree(group_info[idx]); 3191 group_info[idx] = NULL; 3192 rcu_read_unlock(); 3193 } 3194 exit_meta_group_info: 3195 return -ENOMEM; 3196 } /* ext4_mb_add_groupinfo */ 3197 3198 static int ext4_mb_init_backend(struct super_block *sb) 3199 { 3200 ext4_group_t ngroups = ext4_get_groups_count(sb); 3201 ext4_group_t i; 3202 struct ext4_sb_info *sbi = EXT4_SB(sb); 3203 int err; 3204 struct ext4_group_desc *desc; 3205 struct ext4_group_info ***group_info; 3206 struct kmem_cache *cachep; 3207 3208 err = ext4_mb_alloc_groupinfo(sb, ngroups); 3209 if (err) 3210 return err; 3211 3212 sbi->s_buddy_cache = new_inode(sb); 3213 if (sbi->s_buddy_cache == NULL) { 3214 ext4_msg(sb, KERN_ERR, "can't get new inode"); 3215 goto err_freesgi; 3216 } 3217 /* To avoid potentially colliding with an valid on-disk inode number, 3218 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 3219 * not in the inode hash, so it should never be found by iget(), but 3220 * this will avoid confusion if it ever shows up during debugging. 
*/ 3221 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3222 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 3223 for (i = 0; i < ngroups; i++) { 3224 cond_resched(); 3225 desc = ext4_get_group_desc(sb, i, NULL); 3226 if (desc == NULL) { 3227 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3228 goto err_freebuddy; 3229 } 3230 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 3231 goto err_freebuddy; 3232 } 3233 3234 if (ext4_has_feature_flex_bg(sb)) { 3235 /* a single flex group is supposed to be read by a single IO. 3236 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3237 * unsigned integer, so the maximum shift is 32. 3238 */ 3239 if (sbi->s_es->s_log_groups_per_flex >= 32) { 3240 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3241 goto err_freebuddy; 3242 } 3243 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 3244 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3245 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3246 } else { 3247 sbi->s_mb_prefetch = 32; 3248 } 3249 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3250 sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3251 /* now many real IOs to prefetch within a single allocation at cr=0 3252 * given cr=0 is an CPU-related optimization we shouldn't try to 3253 * load too many groups, at some point we should start to use what 3254 * we've got in memory. 3255 * with an average random access time 5ms, it'd take a second to get 3256 * 200 groups (* N with flex_bg), so let's make this limit 4 3257 */ 3258 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3259 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3260 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3261 3262 return 0; 3263 3264 err_freebuddy: 3265 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3266 while (i-- > 0) 3267 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 3268 i = sbi->s_group_info_size; 3269 rcu_read_lock(); 3270 group_info = rcu_dereference(sbi->s_group_info); 3271 while (i-- > 0) 3272 kfree(group_info[i]); 3273 rcu_read_unlock(); 3274 iput(sbi->s_buddy_cache); 3275 err_freesgi: 3276 rcu_read_lock(); 3277 kvfree(rcu_dereference(sbi->s_group_info)); 3278 rcu_read_unlock(); 3279 return -ENOMEM; 3280 } 3281 3282 static void ext4_groupinfo_destroy_slabs(void) 3283 { 3284 int i; 3285 3286 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 3287 kmem_cache_destroy(ext4_groupinfo_caches[i]); 3288 ext4_groupinfo_caches[i] = NULL; 3289 } 3290 } 3291 3292 static int ext4_groupinfo_create_slab(size_t size) 3293 { 3294 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 3295 int slab_size; 3296 int blocksize_bits = order_base_2(size); 3297 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3298 struct kmem_cache *cachep; 3299 3300 if (cache_index >= NR_GRPINFO_CACHES) 3301 return -EINVAL; 3302 3303 if (unlikely(cache_index < 0)) 3304 cache_index = 0; 3305 3306 mutex_lock(&ext4_grpinfo_slab_create_mutex); 3307 if (ext4_groupinfo_caches[cache_index]) { 3308 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3309 return 0; /* Already created */ 3310 } 3311 3312 slab_size = offsetof(struct ext4_group_info, 3313 bb_counters[blocksize_bits + 2]); 3314 3315 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 3316 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 3317 NULL); 3318 3319 ext4_groupinfo_caches[cache_index] = cachep; 3320 3321 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3322 if (!cachep) { 3323 printk(KERN_EMERG 3324 "EXT4-fs: no memory for groupinfo slab cache\n"); 3325 
return -ENOMEM; 3326 } 3327 3328 return 0; 3329 } 3330 3331 static void ext4_discard_work(struct work_struct *work) 3332 { 3333 struct ext4_sb_info *sbi = container_of(work, 3334 struct ext4_sb_info, s_discard_work); 3335 struct super_block *sb = sbi->s_sb; 3336 struct ext4_free_data *fd, *nfd; 3337 struct ext4_buddy e4b; 3338 struct list_head discard_list; 3339 ext4_group_t grp, load_grp; 3340 int err = 0; 3341 3342 INIT_LIST_HEAD(&discard_list); 3343 spin_lock(&sbi->s_md_lock); 3344 list_splice_init(&sbi->s_discard_list, &discard_list); 3345 spin_unlock(&sbi->s_md_lock); 3346 3347 load_grp = UINT_MAX; 3348 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 3349 /* 3350 * If filesystem is umounting or no memory or suffering 3351 * from no space, give up the discard 3352 */ 3353 if ((sb->s_flags & SB_ACTIVE) && !err && 3354 !atomic_read(&sbi->s_retry_alloc_pending)) { 3355 grp = fd->efd_group; 3356 if (grp != load_grp) { 3357 if (load_grp != UINT_MAX) 3358 ext4_mb_unload_buddy(&e4b); 3359 3360 err = ext4_mb_load_buddy(sb, grp, &e4b); 3361 if (err) { 3362 kmem_cache_free(ext4_free_data_cachep, fd); 3363 load_grp = UINT_MAX; 3364 continue; 3365 } else { 3366 load_grp = grp; 3367 } 3368 } 3369 3370 ext4_lock_group(sb, grp); 3371 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 3372 fd->efd_start_cluster + fd->efd_count - 1, 1); 3373 ext4_unlock_group(sb, grp); 3374 } 3375 kmem_cache_free(ext4_free_data_cachep, fd); 3376 } 3377 3378 if (load_grp != UINT_MAX) 3379 ext4_mb_unload_buddy(&e4b); 3380 } 3381 3382 int ext4_mb_init(struct super_block *sb) 3383 { 3384 struct ext4_sb_info *sbi = EXT4_SB(sb); 3385 unsigned i, j; 3386 unsigned offset, offset_incr; 3387 unsigned max; 3388 int ret; 3389 3390 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3391 3392 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3393 if (sbi->s_mb_offsets == NULL) { 3394 ret = -ENOMEM; 3395 goto out; 3396 } 3397 3398 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3399 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3400 if (sbi->s_mb_maxs == NULL) { 3401 ret = -ENOMEM; 3402 goto out; 3403 } 3404 3405 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 3406 if (ret < 0) 3407 goto out; 3408 3409 /* order 0 is regular bitmap */ 3410 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3411 sbi->s_mb_offsets[0] = 0; 3412 3413 i = 1; 3414 offset = 0; 3415 offset_incr = 1 << (sb->s_blocksize_bits - 1); 3416 max = sb->s_blocksize << 2; 3417 do { 3418 sbi->s_mb_offsets[i] = offset; 3419 sbi->s_mb_maxs[i] = max; 3420 offset += offset_incr; 3421 offset_incr = offset_incr >> 1; 3422 max = max >> 1; 3423 i++; 3424 } while (i < MB_NUM_ORDERS(sb)); 3425 3426 sbi->s_mb_avg_fragment_size_root = RB_ROOT; 3427 sbi->s_mb_largest_free_orders = 3428 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3429 GFP_KERNEL); 3430 if (!sbi->s_mb_largest_free_orders) { 3431 ret = -ENOMEM; 3432 goto out; 3433 } 3434 sbi->s_mb_largest_free_orders_locks = 3435 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3436 GFP_KERNEL); 3437 if (!sbi->s_mb_largest_free_orders_locks) { 3438 ret = -ENOMEM; 3439 goto out; 3440 } 3441 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3442 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); 3443 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); 3444 } 3445 rwlock_init(&sbi->s_mb_rb_lock); 3446 3447 spin_lock_init(&sbi->s_md_lock); 3448 sbi->s_mb_free_pending = 0; 3449 INIT_LIST_HEAD(&sbi->s_freed_data_list); 3450 INIT_LIST_HEAD(&sbi->s_discard_list); 3451 INIT_WORK(&sbi->s_discard_work, ext4_discard_work); 3452 
atomic_set(&sbi->s_retry_alloc_pending, 0); 3453 3454 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 3455 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 3456 sbi->s_mb_stats = MB_DEFAULT_STATS; 3457 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 3458 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 3459 sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC; 3460 /* 3461 * The default group preallocation is 512, which for 4k block 3462 * sizes translates to 2 megabytes. However for bigalloc file 3463 * systems, this is probably too big (i.e, if the cluster size 3464 * is 1 megabyte, then group preallocation size becomes half a 3465 * gigabyte!). As a default, we will keep a two megabyte 3466 * group pralloc size for cluster sizes up to 64k, and after 3467 * that, we will force a minimum group preallocation size of 3468 * 32 clusters. This translates to 8 megs when the cluster 3469 * size is 256k, and 32 megs when the cluster size is 1 meg, 3470 * which seems reasonable as a default. 3471 */ 3472 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 3473 sbi->s_cluster_bits, 32); 3474 /* 3475 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 3476 * to the lowest multiple of s_stripe which is bigger than 3477 * the s_mb_group_prealloc as determined above. We want 3478 * the preallocation size to be an exact multiple of the 3479 * RAID stripe size so that preallocations don't fragment 3480 * the stripes. 3481 */ 3482 if (sbi->s_stripe > 1) { 3483 sbi->s_mb_group_prealloc = roundup( 3484 sbi->s_mb_group_prealloc, sbi->s_stripe); 3485 } 3486 3487 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3488 if (sbi->s_locality_groups == NULL) { 3489 ret = -ENOMEM; 3490 goto out; 3491 } 3492 for_each_possible_cpu(i) { 3493 struct ext4_locality_group *lg; 3494 lg = per_cpu_ptr(sbi->s_locality_groups, i); 3495 mutex_init(&lg->lg_mutex); 3496 for (j = 0; j < PREALLOC_TB_SIZE; j++) 3497 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3498 spin_lock_init(&lg->lg_prealloc_lock); 3499 } 3500 3501 if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev))) 3502 sbi->s_mb_max_linear_groups = 0; 3503 else 3504 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 3505 /* init file for buddy data */ 3506 ret = ext4_mb_init_backend(sb); 3507 if (ret != 0) 3508 goto out_free_locality_groups; 3509 3510 return 0; 3511 3512 out_free_locality_groups: 3513 free_percpu(sbi->s_locality_groups); 3514 sbi->s_locality_groups = NULL; 3515 out: 3516 kfree(sbi->s_mb_largest_free_orders); 3517 kfree(sbi->s_mb_largest_free_orders_locks); 3518 kfree(sbi->s_mb_offsets); 3519 sbi->s_mb_offsets = NULL; 3520 kfree(sbi->s_mb_maxs); 3521 sbi->s_mb_maxs = NULL; 3522 return ret; 3523 } 3524 3525 /* need to called with the ext4 group lock held */ 3526 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3527 { 3528 struct ext4_prealloc_space *pa; 3529 struct list_head *cur, *tmp; 3530 int count = 0; 3531 3532 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3533 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3534 list_del(&pa->pa_group_list); 3535 count++; 3536 kmem_cache_free(ext4_pspace_cachep, pa); 3537 } 3538 return count; 3539 } 3540 3541 int ext4_mb_release(struct super_block *sb) 3542 { 3543 ext4_group_t ngroups = ext4_get_groups_count(sb); 3544 ext4_group_t i; 3545 int num_meta_group_infos; 3546 struct ext4_group_info *grinfo, ***group_info; 3547 struct ext4_sb_info *sbi = EXT4_SB(sb); 3548 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 
3549 int count; 3550 3551 if (test_opt(sb, DISCARD)) { 3552 /* 3553 * wait the discard work to drain all of ext4_free_data 3554 */ 3555 flush_work(&sbi->s_discard_work); 3556 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 3557 } 3558 3559 if (sbi->s_group_info) { 3560 for (i = 0; i < ngroups; i++) { 3561 cond_resched(); 3562 grinfo = ext4_get_group_info(sb, i); 3563 mb_group_bb_bitmap_free(grinfo); 3564 ext4_lock_group(sb, i); 3565 count = ext4_mb_cleanup_pa(grinfo); 3566 if (count) 3567 mb_debug(sb, "mballoc: %d PAs left\n", 3568 count); 3569 ext4_unlock_group(sb, i); 3570 kmem_cache_free(cachep, grinfo); 3571 } 3572 num_meta_group_infos = (ngroups + 3573 EXT4_DESC_PER_BLOCK(sb) - 1) >> 3574 EXT4_DESC_PER_BLOCK_BITS(sb); 3575 rcu_read_lock(); 3576 group_info = rcu_dereference(sbi->s_group_info); 3577 for (i = 0; i < num_meta_group_infos; i++) 3578 kfree(group_info[i]); 3579 kvfree(group_info); 3580 rcu_read_unlock(); 3581 } 3582 kfree(sbi->s_mb_largest_free_orders); 3583 kfree(sbi->s_mb_largest_free_orders_locks); 3584 kfree(sbi->s_mb_offsets); 3585 kfree(sbi->s_mb_maxs); 3586 iput(sbi->s_buddy_cache); 3587 if (sbi->s_mb_stats) { 3588 ext4_msg(sb, KERN_INFO, 3589 "mballoc: %u blocks %u reqs (%u success)", 3590 atomic_read(&sbi->s_bal_allocated), 3591 atomic_read(&sbi->s_bal_reqs), 3592 atomic_read(&sbi->s_bal_success)); 3593 ext4_msg(sb, KERN_INFO, 3594 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3595 "%u 2^N hits, %u breaks, %u lost", 3596 atomic_read(&sbi->s_bal_ex_scanned), 3597 atomic_read(&sbi->s_bal_groups_scanned), 3598 atomic_read(&sbi->s_bal_goals), 3599 atomic_read(&sbi->s_bal_2orders), 3600 atomic_read(&sbi->s_bal_breaks), 3601 atomic_read(&sbi->s_mb_lost_chunks)); 3602 ext4_msg(sb, KERN_INFO, 3603 "mballoc: %u generated and it took %llu", 3604 atomic_read(&sbi->s_mb_buddies_generated), 3605 atomic64_read(&sbi->s_mb_generation_time)); 3606 ext4_msg(sb, KERN_INFO, 3607 "mballoc: %u preallocated, %u discarded", 3608 atomic_read(&sbi->s_mb_preallocated), 3609 atomic_read(&sbi->s_mb_discarded)); 3610 } 3611 3612 free_percpu(sbi->s_locality_groups); 3613 3614 return 0; 3615 } 3616 3617 static inline int ext4_issue_discard(struct super_block *sb, 3618 ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3619 struct bio **biop) 3620 { 3621 ext4_fsblk_t discard_block; 3622 3623 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3624 ext4_group_first_block_no(sb, block_group)); 3625 count = EXT4_C2B(EXT4_SB(sb), count); 3626 trace_ext4_discard_blocks(sb, 3627 (unsigned long long) discard_block, count); 3628 if (biop) { 3629 return __blkdev_issue_discard(sb->s_bdev, 3630 (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3631 (sector_t)count << (sb->s_blocksize_bits - 9), 3632 GFP_NOFS, 0, biop); 3633 } else 3634 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3635 } 3636 3637 static void ext4_free_data_in_buddy(struct super_block *sb, 3638 struct ext4_free_data *entry) 3639 { 3640 struct ext4_buddy e4b; 3641 struct ext4_group_info *db; 3642 int err, count = 0, count2 = 0; 3643 3644 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3645 entry->efd_count, entry->efd_group, entry); 3646 3647 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3648 /* we expect to find existing buddy because it's pinned */ 3649 BUG_ON(err != 0); 3650 3651 spin_lock(&EXT4_SB(sb)->s_md_lock); 3652 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3653 spin_unlock(&EXT4_SB(sb)->s_md_lock); 3654 3655 db = e4b.bd_info; 3656 /* there are blocks to put in 
buddy to make them really free */ 3657 count += entry->efd_count; 3658 count2++; 3659 ext4_lock_group(sb, entry->efd_group); 3660 /* Take it out of per group rb tree */ 3661 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3662 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3663 3664 /* 3665 * Clear the trimmed flag for the group so that the next 3666 * ext4_trim_fs can trim it. 3667 * If the volume is mounted with -o discard, online discard 3668 * is supported and the free blocks will be trimmed online. 3669 */ 3670 if (!test_opt(sb, DISCARD)) 3671 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3672 3673 if (!db->bb_free_root.rb_node) { 3674 /* No more items in the per group rb tree 3675 * balance refcounts from ext4_mb_free_metadata() 3676 */ 3677 put_page(e4b.bd_buddy_page); 3678 put_page(e4b.bd_bitmap_page); 3679 } 3680 ext4_unlock_group(sb, entry->efd_group); 3681 ext4_mb_unload_buddy(&e4b); 3682 3683 mb_debug(sb, "freed %d blocks in %d structures\n", count, 3684 count2); 3685 } 3686 3687 /* 3688 * This function is called by the jbd2 layer once the commit has finished, 3689 * so we know we can free the blocks that were released with that commit. 3690 */ 3691 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3692 { 3693 struct ext4_sb_info *sbi = EXT4_SB(sb); 3694 struct ext4_free_data *entry, *tmp; 3695 struct list_head freed_data_list; 3696 struct list_head *cut_pos = NULL; 3697 bool wake; 3698 3699 INIT_LIST_HEAD(&freed_data_list); 3700 3701 spin_lock(&sbi->s_md_lock); 3702 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3703 if (entry->efd_tid != commit_tid) 3704 break; 3705 cut_pos = &entry->efd_list; 3706 } 3707 if (cut_pos) 3708 list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3709 cut_pos); 3710 spin_unlock(&sbi->s_md_lock); 3711 3712 list_for_each_entry(entry, &freed_data_list, efd_list) 3713 ext4_free_data_in_buddy(sb, entry); 3714 3715 if (test_opt(sb, DISCARD)) { 3716 spin_lock(&sbi->s_md_lock); 3717 wake = list_empty(&sbi->s_discard_list); 3718 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 3719 spin_unlock(&sbi->s_md_lock); 3720 if (wake) 3721 queue_work(system_unbound_wq, &sbi->s_discard_work); 3722 } else { 3723 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 3724 kmem_cache_free(ext4_free_data_cachep, entry); 3725 } 3726 } 3727 3728 int __init ext4_init_mballoc(void) 3729 { 3730 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 3731 SLAB_RECLAIM_ACCOUNT); 3732 if (ext4_pspace_cachep == NULL) 3733 goto out; 3734 3735 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 3736 SLAB_RECLAIM_ACCOUNT); 3737 if (ext4_ac_cachep == NULL) 3738 goto out_pa_free; 3739 3740 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 3741 SLAB_RECLAIM_ACCOUNT); 3742 if (ext4_free_data_cachep == NULL) 3743 goto out_ac_free; 3744 3745 return 0; 3746 3747 out_ac_free: 3748 kmem_cache_destroy(ext4_ac_cachep); 3749 out_pa_free: 3750 kmem_cache_destroy(ext4_pspace_cachep); 3751 out: 3752 return -ENOMEM; 3753 } 3754 3755 void ext4_exit_mballoc(void) 3756 { 3757 /* 3758 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 3759 * before destroying the slab cache. 
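	 * The descriptors on ext4_pspace_cachep are freed from
	 * ext4_mb_pa_callback() via call_rcu(); without the barrier a
	 * pending callback could still run after kmem_cache_destroy()
	 * below and free an object into an already destroyed cache.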
3760 */ 3761 rcu_barrier(); 3762 kmem_cache_destroy(ext4_pspace_cachep); 3763 kmem_cache_destroy(ext4_ac_cachep); 3764 kmem_cache_destroy(ext4_free_data_cachep); 3765 ext4_groupinfo_destroy_slabs(); 3766 } 3767 3768 3769 /* 3770 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3771 * Returns 0 if success or error code 3772 */ 3773 static noinline_for_stack int 3774 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 3775 handle_t *handle, unsigned int reserv_clstrs) 3776 { 3777 struct buffer_head *bitmap_bh = NULL; 3778 struct ext4_group_desc *gdp; 3779 struct buffer_head *gdp_bh; 3780 struct ext4_sb_info *sbi; 3781 struct super_block *sb; 3782 ext4_fsblk_t block; 3783 int err, len; 3784 3785 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3786 BUG_ON(ac->ac_b_ex.fe_len <= 0); 3787 3788 sb = ac->ac_sb; 3789 sbi = EXT4_SB(sb); 3790 3791 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 3792 if (IS_ERR(bitmap_bh)) { 3793 err = PTR_ERR(bitmap_bh); 3794 bitmap_bh = NULL; 3795 goto out_err; 3796 } 3797 3798 BUFFER_TRACE(bitmap_bh, "getting write access"); 3799 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 3800 EXT4_JTR_NONE); 3801 if (err) 3802 goto out_err; 3803 3804 err = -EIO; 3805 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3806 if (!gdp) 3807 goto out_err; 3808 3809 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3810 ext4_free_group_clusters(sb, gdp)); 3811 3812 BUFFER_TRACE(gdp_bh, "get_write_access"); 3813 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 3814 if (err) 3815 goto out_err; 3816 3817 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3818 3819 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3820 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 3821 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 3822 "fs metadata", block, block+len); 3823 /* File system mounted not to panic on error 3824 * Fix the bitmap and return EFSCORRUPTED 3825 * We leak some of the blocks here. 3826 */ 3827 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3828 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3829 ac->ac_b_ex.fe_len); 3830 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3831 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3832 if (!err) 3833 err = -EFSCORRUPTED; 3834 goto out_err; 3835 } 3836 3837 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3838 #ifdef AGGRESSIVE_CHECK 3839 { 3840 int i; 3841 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 3842 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 3843 bitmap_bh->b_data)); 3844 } 3845 } 3846 #endif 3847 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3848 ac->ac_b_ex.fe_len); 3849 if (ext4_has_group_desc_csum(sb) && 3850 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3851 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3852 ext4_free_group_clusters_set(sb, gdp, 3853 ext4_free_clusters_after_init(sb, 3854 ac->ac_b_ex.fe_group, gdp)); 3855 } 3856 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 3857 ext4_free_group_clusters_set(sb, gdp, len); 3858 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 3859 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 3860 3861 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3862 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 3863 /* 3864 * Now reduce the dirty block count also. 
Should not go negative 3865 */ 3866 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 3867 /* release all the reserved blocks if non delalloc */ 3868 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 3869 reserv_clstrs); 3870 3871 if (sbi->s_log_groups_per_flex) { 3872 ext4_group_t flex_group = ext4_flex_group(sbi, 3873 ac->ac_b_ex.fe_group); 3874 atomic64_sub(ac->ac_b_ex.fe_len, 3875 &sbi_array_rcu_deref(sbi, s_flex_groups, 3876 flex_group)->free_clusters); 3877 } 3878 3879 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3880 if (err) 3881 goto out_err; 3882 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 3883 3884 out_err: 3885 brelse(bitmap_bh); 3886 return err; 3887 } 3888 3889 /* 3890 * Idempotent helper for Ext4 fast commit replay path to set the state of 3891 * blocks in bitmaps and update counters. 3892 */ 3893 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 3894 int len, int state) 3895 { 3896 struct buffer_head *bitmap_bh = NULL; 3897 struct ext4_group_desc *gdp; 3898 struct buffer_head *gdp_bh; 3899 struct ext4_sb_info *sbi = EXT4_SB(sb); 3900 ext4_group_t group; 3901 ext4_grpblk_t blkoff; 3902 int i, err; 3903 int already; 3904 unsigned int clen, clen_changed, thisgrp_len; 3905 3906 while (len > 0) { 3907 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 3908 3909 /* 3910 * Check to see if we are freeing blocks across a group 3911 * boundary. 3912 * In case of flex_bg, this can happen that (block, len) may 3913 * span across more than one group. In that case we need to 3914 * get the corresponding group metadata to work with. 3915 * For this we have goto again loop. 3916 */ 3917 thisgrp_len = min_t(unsigned int, (unsigned int)len, 3918 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 3919 clen = EXT4_NUM_B2C(sbi, thisgrp_len); 3920 3921 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 3922 ext4_error(sb, "Marking blocks in system zone - " 3923 "Block = %llu, len = %u", 3924 block, thisgrp_len); 3925 bitmap_bh = NULL; 3926 break; 3927 } 3928 3929 bitmap_bh = ext4_read_block_bitmap(sb, group); 3930 if (IS_ERR(bitmap_bh)) { 3931 err = PTR_ERR(bitmap_bh); 3932 bitmap_bh = NULL; 3933 break; 3934 } 3935 3936 err = -EIO; 3937 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 3938 if (!gdp) 3939 break; 3940 3941 ext4_lock_group(sb, group); 3942 already = 0; 3943 for (i = 0; i < clen; i++) 3944 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 3945 !state) 3946 already++; 3947 3948 clen_changed = clen - already; 3949 if (state) 3950 mb_set_bits(bitmap_bh->b_data, blkoff, clen); 3951 else 3952 mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 3953 if (ext4_has_group_desc_csum(sb) && 3954 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3955 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3956 ext4_free_group_clusters_set(sb, gdp, 3957 ext4_free_clusters_after_init(sb, group, gdp)); 3958 } 3959 if (state) 3960 clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 3961 else 3962 clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 3963 3964 ext4_free_group_clusters_set(sb, gdp, clen); 3965 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); 3966 ext4_group_desc_csum_set(sb, group, gdp); 3967 3968 ext4_unlock_group(sb, group); 3969 3970 if (sbi->s_log_groups_per_flex) { 3971 ext4_group_t flex_group = ext4_flex_group(sbi, group); 3972 struct flex_groups *fg = sbi_array_rcu_deref(sbi, 3973 s_flex_groups, flex_group); 3974 3975 if (state) 3976 atomic64_sub(clen_changed, &fg->free_clusters); 3977 else 3978 
atomic64_add(clen_changed, &fg->free_clusters); 3979 3980 } 3981 3982 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 3983 if (err) 3984 break; 3985 sync_dirty_buffer(bitmap_bh); 3986 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 3987 sync_dirty_buffer(gdp_bh); 3988 if (err) 3989 break; 3990 3991 block += thisgrp_len; 3992 len -= thisgrp_len; 3993 brelse(bitmap_bh); 3994 BUG_ON(len < 0); 3995 } 3996 3997 if (err) 3998 brelse(bitmap_bh); 3999 } 4000 4001 /* 4002 * here we normalize request for locality group 4003 * Group request are normalized to s_mb_group_prealloc, which goes to 4004 * s_strip if we set the same via mount option. 4005 * s_mb_group_prealloc can be configured via 4006 * /sys/fs/ext4/<partition>/mb_group_prealloc 4007 * 4008 * XXX: should we try to preallocate more than the group has now? 4009 */ 4010 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 4011 { 4012 struct super_block *sb = ac->ac_sb; 4013 struct ext4_locality_group *lg = ac->ac_lg; 4014 4015 BUG_ON(lg == NULL); 4016 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 4017 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 4018 } 4019 4020 /* 4021 * Normalization means making request better in terms of 4022 * size and alignment 4023 */ 4024 static noinline_for_stack void 4025 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4026 struct ext4_allocation_request *ar) 4027 { 4028 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4029 int bsbits, max; 4030 ext4_lblk_t end; 4031 loff_t size, start_off; 4032 loff_t orig_size __maybe_unused; 4033 ext4_lblk_t start; 4034 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4035 struct ext4_prealloc_space *pa; 4036 4037 /* do normalize only data requests, metadata requests 4038 do not need preallocation */ 4039 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4040 return; 4041 4042 /* sometime caller may want exact blocks */ 4043 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4044 return; 4045 4046 /* caller may indicate that preallocation isn't 4047 * required (it's a tail, for example) */ 4048 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4049 return; 4050 4051 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4052 ext4_mb_normalize_group_request(ac); 4053 return ; 4054 } 4055 4056 bsbits = ac->ac_sb->s_blocksize_bits; 4057 4058 /* first, let's learn actual file size 4059 * given current request is allocated */ 4060 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4061 size = size << bsbits; 4062 if (size < i_size_read(ac->ac_inode)) 4063 size = i_size_read(ac->ac_inode); 4064 orig_size = size; 4065 4066 /* max size of free chunks */ 4067 max = 2 << bsbits; 4068 4069 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 4070 (req <= (size) || max <= (chunk_size)) 4071 4072 /* first, try to predict filesize */ 4073 /* XXX: should this table be tunable? 
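	 *
	 * For example (all sizes are in bytes until the final ">> bsbits"
	 * conversion back to blocks): a request that would leave the file
	 * at 100K falls into the 128K bucket, so the goal becomes 128K
	 * worth of blocks starting at logical block 0; a request in the
	 * 1M..4M range keeps only the 2M-aligned part of its logical
	 * offset,
	 *	start_off = (fe_logical >> (21 - bsbits)) << 21,
	 * and asks for a 2M goal.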
*/ 4074 start_off = 0; 4075 if (size <= 16 * 1024) { 4076 size = 16 * 1024; 4077 } else if (size <= 32 * 1024) { 4078 size = 32 * 1024; 4079 } else if (size <= 64 * 1024) { 4080 size = 64 * 1024; 4081 } else if (size <= 128 * 1024) { 4082 size = 128 * 1024; 4083 } else if (size <= 256 * 1024) { 4084 size = 256 * 1024; 4085 } else if (size <= 512 * 1024) { 4086 size = 512 * 1024; 4087 } else if (size <= 1024 * 1024) { 4088 size = 1024 * 1024; 4089 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4090 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4091 (21 - bsbits)) << 21; 4092 size = 2 * 1024 * 1024; 4093 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4094 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4095 (22 - bsbits)) << 22; 4096 size = 4 * 1024 * 1024; 4097 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 4098 (8<<20)>>bsbits, max, 8 * 1024)) { 4099 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4100 (23 - bsbits)) << 23; 4101 size = 8 * 1024 * 1024; 4102 } else { 4103 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4104 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 4105 ac->ac_o_ex.fe_len) << bsbits; 4106 } 4107 size = size >> bsbits; 4108 start = start_off >> bsbits; 4109 4110 /* don't cover already allocated blocks in selected range */ 4111 if (ar->pleft && start <= ar->lleft) { 4112 size -= ar->lleft + 1 - start; 4113 start = ar->lleft + 1; 4114 } 4115 if (ar->pright && start + size - 1 >= ar->lright) 4116 size -= start + size - ar->lright; 4117 4118 /* 4119 * Trim allocation request for filesystems with artificially small 4120 * groups. 4121 */ 4122 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4123 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4124 4125 end = start + size; 4126 4127 /* check we don't cross already preallocated blocks */ 4128 rcu_read_lock(); 4129 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4130 ext4_lblk_t pa_end; 4131 4132 if (pa->pa_deleted) 4133 continue; 4134 spin_lock(&pa->pa_lock); 4135 if (pa->pa_deleted) { 4136 spin_unlock(&pa->pa_lock); 4137 continue; 4138 } 4139 4140 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 4141 pa->pa_len); 4142 4143 /* PA must not overlap original request */ 4144 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 4145 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 4146 4147 /* skip PAs this normalized request doesn't overlap with */ 4148 if (pa->pa_lstart >= end || pa_end <= start) { 4149 spin_unlock(&pa->pa_lock); 4150 continue; 4151 } 4152 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 4153 4154 /* adjust start or end to be adjacent to this pa */ 4155 if (pa_end <= ac->ac_o_ex.fe_logical) { 4156 BUG_ON(pa_end < start); 4157 start = pa_end; 4158 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 4159 BUG_ON(pa->pa_lstart > end); 4160 end = pa->pa_lstart; 4161 } 4162 spin_unlock(&pa->pa_lock); 4163 } 4164 rcu_read_unlock(); 4165 size = end - start; 4166 4167 /* XXX: extra loop to check we really don't overlap preallocations */ 4168 rcu_read_lock(); 4169 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4170 ext4_lblk_t pa_end; 4171 4172 spin_lock(&pa->pa_lock); 4173 if (pa->pa_deleted == 0) { 4174 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 4175 pa->pa_len); 4176 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 4177 } 4178 spin_unlock(&pa->pa_lock); 4179 } 4180 rcu_read_unlock(); 4181 4182 if (start + size <= ac->ac_o_ex.fe_logical && 4183 start > ac->ac_o_ex.fe_logical) { 4184 ext4_msg(ac->ac_sb, KERN_ERR, 4185 "start %lu, size %lu, 
fe_logical %lu", 4186 (unsigned long) start, (unsigned long) size, 4187 (unsigned long) ac->ac_o_ex.fe_logical); 4188 BUG(); 4189 } 4190 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4191 4192 /* now prepare goal request */ 4193 4194 /* XXX: is it better to align blocks WRT to logical 4195 * placement or satisfy big request as is */ 4196 ac->ac_g_ex.fe_logical = start; 4197 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4198 4199 /* define goal start in order to merge */ 4200 if (ar->pright && (ar->lright == (start + size))) { 4201 /* merge to the right */ 4202 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4203 &ac->ac_f_ex.fe_group, 4204 &ac->ac_f_ex.fe_start); 4205 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4206 } 4207 if (ar->pleft && (ar->lleft + 1 == start)) { 4208 /* merge to the left */ 4209 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4210 &ac->ac_f_ex.fe_group, 4211 &ac->ac_f_ex.fe_start); 4212 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4213 } 4214 4215 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 4216 orig_size, start); 4217 } 4218 4219 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4220 { 4221 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4222 4223 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4224 atomic_inc(&sbi->s_bal_reqs); 4225 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4226 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4227 atomic_inc(&sbi->s_bal_success); 4228 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4229 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4230 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4231 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4232 atomic_inc(&sbi->s_bal_goals); 4233 if (ac->ac_found > sbi->s_mb_max_to_scan) 4234 atomic_inc(&sbi->s_bal_breaks); 4235 } 4236 4237 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4238 trace_ext4_mballoc_alloc(ac); 4239 else 4240 trace_ext4_mballoc_prealloc(ac); 4241 } 4242 4243 /* 4244 * Called on failure; free up any blocks from the inode PA for this 4245 * context. We don't need this for MB_GROUP_PA because we only change 4246 * pa_free in ext4_mb_release_context(), but on failure, we've already 4247 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4248 */ 4249 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4250 { 4251 struct ext4_prealloc_space *pa = ac->ac_pa; 4252 struct ext4_buddy e4b; 4253 int err; 4254 4255 if (pa == NULL) { 4256 if (ac->ac_f_ex.fe_len == 0) 4257 return; 4258 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4259 if (err) { 4260 /* 4261 * This should never happen since we pin the 4262 * pages in the ext4_allocation_context so 4263 * ext4_mb_load_buddy() should never fail. 
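		 * The pin comes from ac->ac_bitmap_page and
		 * ac->ac_buddy_page, which are grabbed when the winning
		 * extent is found and only dropped in
		 * ext4_mb_release_context().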
4264 */ 4265 WARN(1, "mb_load_buddy failed (%d)", err); 4266 return; 4267 } 4268 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4269 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4270 ac->ac_f_ex.fe_len); 4271 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4272 ext4_mb_unload_buddy(&e4b); 4273 return; 4274 } 4275 if (pa->pa_type == MB_INODE_PA) 4276 pa->pa_free += ac->ac_b_ex.fe_len; 4277 } 4278 4279 /* 4280 * use blocks preallocated to inode 4281 */ 4282 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4283 struct ext4_prealloc_space *pa) 4284 { 4285 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4286 ext4_fsblk_t start; 4287 ext4_fsblk_t end; 4288 int len; 4289 4290 /* found preallocated blocks, use them */ 4291 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4292 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4293 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4294 len = EXT4_NUM_B2C(sbi, end - start); 4295 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4296 &ac->ac_b_ex.fe_start); 4297 ac->ac_b_ex.fe_len = len; 4298 ac->ac_status = AC_STATUS_FOUND; 4299 ac->ac_pa = pa; 4300 4301 BUG_ON(start < pa->pa_pstart); 4302 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4303 BUG_ON(pa->pa_free < len); 4304 pa->pa_free -= len; 4305 4306 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4307 } 4308 4309 /* 4310 * use blocks preallocated to locality group 4311 */ 4312 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4313 struct ext4_prealloc_space *pa) 4314 { 4315 unsigned int len = ac->ac_o_ex.fe_len; 4316 4317 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4318 &ac->ac_b_ex.fe_group, 4319 &ac->ac_b_ex.fe_start); 4320 ac->ac_b_ex.fe_len = len; 4321 ac->ac_status = AC_STATUS_FOUND; 4322 ac->ac_pa = pa; 4323 4324 /* we don't correct pa_pstart or pa_plen here to avoid 4325 * possible race when the group is being loaded concurrently 4326 * instead we correct pa later, after blocks are marked 4327 * in on-disk bitmap -- see ext4_mb_release_context() 4328 * Other CPUs are prevented from allocating from this pa by lg_mutex 4329 */ 4330 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 4331 pa->pa_lstart-len, len, pa); 4332 } 4333 4334 /* 4335 * Return the prealloc space that have minimal distance 4336 * from the goal block. @cpa is the prealloc 4337 * space that is having currently known minimal distance 4338 * from the goal block. 
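 *
 * Callers fold this over a list of candidate PAs, roughly like this
 * (illustrative sketch only; the real loop in ext4_mb_use_preallocated()
 * also takes pa_lock and checks pa_deleted and pa_free):
 *
 *	cpa = NULL;
 *	list_for_each_entry(pa, &lg->lg_prealloc_list[i], pa_inode_list)
 *		cpa = ext4_mb_check_group_pa(goal_block, pa, cpa);
 *	if (cpa)
 *		ext4_mb_use_group_pa(ac, cpa);
 *
 * The returned PA already holds the reference taken here.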
4339 */ 4340 static struct ext4_prealloc_space * 4341 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 4342 struct ext4_prealloc_space *pa, 4343 struct ext4_prealloc_space *cpa) 4344 { 4345 ext4_fsblk_t cur_distance, new_distance; 4346 4347 if (cpa == NULL) { 4348 atomic_inc(&pa->pa_count); 4349 return pa; 4350 } 4351 cur_distance = abs(goal_block - cpa->pa_pstart); 4352 new_distance = abs(goal_block - pa->pa_pstart); 4353 4354 if (cur_distance <= new_distance) 4355 return cpa; 4356 4357 /* drop the previous reference */ 4358 atomic_dec(&cpa->pa_count); 4359 atomic_inc(&pa->pa_count); 4360 return pa; 4361 } 4362 4363 /* 4364 * search goal blocks in preallocated space 4365 */ 4366 static noinline_for_stack bool 4367 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4368 { 4369 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4370 int order, i; 4371 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4372 struct ext4_locality_group *lg; 4373 struct ext4_prealloc_space *pa, *cpa = NULL; 4374 ext4_fsblk_t goal_block; 4375 4376 /* only data can be preallocated */ 4377 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4378 return false; 4379 4380 /* first, try per-file preallocation */ 4381 rcu_read_lock(); 4382 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4383 4384 /* all fields in this condition don't change, 4385 * so we can skip locking for them */ 4386 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 4387 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 4388 EXT4_C2B(sbi, pa->pa_len))) 4389 continue; 4390 4391 /* non-extent files can't have physical blocks past 2^32 */ 4392 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4393 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 4394 EXT4_MAX_BLOCK_FILE_PHYS)) 4395 continue; 4396 4397 /* found preallocated blocks, use them */ 4398 spin_lock(&pa->pa_lock); 4399 if (pa->pa_deleted == 0 && pa->pa_free) { 4400 atomic_inc(&pa->pa_count); 4401 ext4_mb_use_inode_pa(ac, pa); 4402 spin_unlock(&pa->pa_lock); 4403 ac->ac_criteria = 10; 4404 rcu_read_unlock(); 4405 return true; 4406 } 4407 spin_unlock(&pa->pa_lock); 4408 } 4409 rcu_read_unlock(); 4410 4411 /* can we use group allocation? */ 4412 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 4413 return false; 4414 4415 /* inode may have no locality group for some reason */ 4416 lg = ac->ac_lg; 4417 if (lg == NULL) 4418 return false; 4419 order = fls(ac->ac_o_ex.fe_len) - 1; 4420 if (order > PREALLOC_TB_SIZE - 1) 4421 /* The max size of hash table is PREALLOC_TB_SIZE */ 4422 order = PREALLOC_TB_SIZE - 1; 4423 4424 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 4425 /* 4426 * search for the prealloc space that is having 4427 * minimal distance from the goal block. 4428 */ 4429 for (i = order; i < PREALLOC_TB_SIZE; i++) { 4430 rcu_read_lock(); 4431 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 4432 pa_inode_list) { 4433 spin_lock(&pa->pa_lock); 4434 if (pa->pa_deleted == 0 && 4435 pa->pa_free >= ac->ac_o_ex.fe_len) { 4436 4437 cpa = ext4_mb_check_group_pa(goal_block, 4438 pa, cpa); 4439 } 4440 spin_unlock(&pa->pa_lock); 4441 } 4442 rcu_read_unlock(); 4443 } 4444 if (cpa) { 4445 ext4_mb_use_group_pa(ac, cpa); 4446 ac->ac_criteria = 20; 4447 return true; 4448 } 4449 return false; 4450 } 4451 4452 /* 4453 * the function goes through all block freed in the group 4454 * but not yet committed and marks them used in in-core bitmap. 
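 * Those clusters sit on the group's bb_free_root rb-tree (see
 * ext4_mb_free_metadata()) until the transaction that freed them
 * commits, so they must not be handed out again yet and are therefore
 * shown as in use here.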
4455 * buddy must be generated from this bitmap 4456 * Need to be called with the ext4 group lock held 4457 */ 4458 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 4459 ext4_group_t group) 4460 { 4461 struct rb_node *n; 4462 struct ext4_group_info *grp; 4463 struct ext4_free_data *entry; 4464 4465 grp = ext4_get_group_info(sb, group); 4466 n = rb_first(&(grp->bb_free_root)); 4467 4468 while (n) { 4469 entry = rb_entry(n, struct ext4_free_data, efd_node); 4470 mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 4471 n = rb_next(n); 4472 } 4473 return; 4474 } 4475 4476 /* 4477 * the function goes through all preallocation in this group and marks them 4478 * used in in-core bitmap. buddy must be generated from this bitmap 4479 * Need to be called with ext4 group lock held 4480 */ 4481 static noinline_for_stack 4482 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4483 ext4_group_t group) 4484 { 4485 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4486 struct ext4_prealloc_space *pa; 4487 struct list_head *cur; 4488 ext4_group_t groupnr; 4489 ext4_grpblk_t start; 4490 int preallocated = 0; 4491 int len; 4492 4493 /* all form of preallocation discards first load group, 4494 * so the only competing code is preallocation use. 4495 * we don't need any locking here 4496 * notice we do NOT ignore preallocations with pa_deleted 4497 * otherwise we could leave used blocks available for 4498 * allocation in buddy when concurrent ext4_mb_put_pa() 4499 * is dropping preallocation 4500 */ 4501 list_for_each(cur, &grp->bb_prealloc_list) { 4502 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4503 spin_lock(&pa->pa_lock); 4504 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4505 &groupnr, &start); 4506 len = pa->pa_len; 4507 spin_unlock(&pa->pa_lock); 4508 if (unlikely(len == 0)) 4509 continue; 4510 BUG_ON(groupnr != group); 4511 mb_set_bits(bitmap, start, len); 4512 preallocated += len; 4513 } 4514 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4515 } 4516 4517 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 4518 struct ext4_prealloc_space *pa) 4519 { 4520 struct ext4_inode_info *ei; 4521 4522 if (pa->pa_deleted) { 4523 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 4524 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 4525 pa->pa_len); 4526 return; 4527 } 4528 4529 pa->pa_deleted = 1; 4530 4531 if (pa->pa_type == MB_INODE_PA) { 4532 ei = EXT4_I(pa->pa_inode); 4533 atomic_dec(&ei->i_prealloc_active); 4534 } 4535 } 4536 4537 static void ext4_mb_pa_callback(struct rcu_head *head) 4538 { 4539 struct ext4_prealloc_space *pa; 4540 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 4541 4542 BUG_ON(atomic_read(&pa->pa_count)); 4543 BUG_ON(pa->pa_deleted == 0); 4544 kmem_cache_free(ext4_pspace_cachep, pa); 4545 } 4546 4547 /* 4548 * drops a reference to preallocated space descriptor 4549 * if this was the last reference and the space is consumed 4550 */ 4551 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4552 struct super_block *sb, struct ext4_prealloc_space *pa) 4553 { 4554 ext4_group_t grp; 4555 ext4_fsblk_t grp_blk; 4556 4557 /* in this short window concurrent discard can set pa_deleted */ 4558 spin_lock(&pa->pa_lock); 4559 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 4560 spin_unlock(&pa->pa_lock); 4561 return; 4562 } 4563 4564 if (pa->pa_deleted == 1) { 4565 spin_unlock(&pa->pa_lock); 4566 return; 4567 } 4568 4569 
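	/*
	 * At this point we have dropped the last reference and the PA is
	 * fully consumed (pa_free == 0) but not yet deleted, so mark it
	 * deleted while still holding pa_lock and then unlink it from the
	 * group and from its owning inode / locality group below.
	 */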
ext4_mb_mark_pa_deleted(sb, pa); 4570 spin_unlock(&pa->pa_lock); 4571 4572 grp_blk = pa->pa_pstart; 4573 /* 4574 * If doing group-based preallocation, pa_pstart may be in the 4575 * next group when pa is used up 4576 */ 4577 if (pa->pa_type == MB_GROUP_PA) 4578 grp_blk--; 4579 4580 grp = ext4_get_group_number(sb, grp_blk); 4581 4582 /* 4583 * possible race: 4584 * 4585 * P1 (buddy init) P2 (regular allocation) 4586 * find block B in PA 4587 * copy on-disk bitmap to buddy 4588 * mark B in on-disk bitmap 4589 * drop PA from group 4590 * mark all PAs in buddy 4591 * 4592 * thus, P1 initializes buddy with B available. to prevent this 4593 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 4594 * against that pair 4595 */ 4596 ext4_lock_group(sb, grp); 4597 list_del(&pa->pa_group_list); 4598 ext4_unlock_group(sb, grp); 4599 4600 spin_lock(pa->pa_obj_lock); 4601 list_del_rcu(&pa->pa_inode_list); 4602 spin_unlock(pa->pa_obj_lock); 4603 4604 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4605 } 4606 4607 /* 4608 * creates new preallocated space for given inode 4609 */ 4610 static noinline_for_stack void 4611 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 4612 { 4613 struct super_block *sb = ac->ac_sb; 4614 struct ext4_sb_info *sbi = EXT4_SB(sb); 4615 struct ext4_prealloc_space *pa; 4616 struct ext4_group_info *grp; 4617 struct ext4_inode_info *ei; 4618 4619 /* preallocate only when found space is larger then requested */ 4620 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4621 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4622 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 4623 BUG_ON(ac->ac_pa == NULL); 4624 4625 pa = ac->ac_pa; 4626 4627 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 4628 int winl; 4629 int wins; 4630 int win; 4631 int offs; 4632 4633 /* we can't allocate as much as normalizer wants. 
4634 * so, found space must get proper lstart 4635 * to cover original request */ 4636 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 4637 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4638 4639 /* we're limited by original request in that 4640 * logical block must be covered any way 4641 * winl is window we can move our chunk within */ 4642 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 4643 4644 /* also, we should cover whole original request */ 4645 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 4646 4647 /* the smallest one defines real window */ 4648 win = min(winl, wins); 4649 4650 offs = ac->ac_o_ex.fe_logical % 4651 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4652 if (offs && offs < win) 4653 win = offs; 4654 4655 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 4656 EXT4_NUM_B2C(sbi, win); 4657 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4658 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 4659 } 4660 4661 /* preallocation can change ac_b_ex, thus we store actually 4662 * allocated blocks for history */ 4663 ac->ac_f_ex = ac->ac_b_ex; 4664 4665 pa->pa_lstart = ac->ac_b_ex.fe_logical; 4666 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4667 pa->pa_len = ac->ac_b_ex.fe_len; 4668 pa->pa_free = pa->pa_len; 4669 spin_lock_init(&pa->pa_lock); 4670 INIT_LIST_HEAD(&pa->pa_inode_list); 4671 INIT_LIST_HEAD(&pa->pa_group_list); 4672 pa->pa_deleted = 0; 4673 pa->pa_type = MB_INODE_PA; 4674 4675 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4676 pa->pa_len, pa->pa_lstart); 4677 trace_ext4_mb_new_inode_pa(ac, pa); 4678 4679 ext4_mb_use_inode_pa(ac, pa); 4680 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4681 4682 ei = EXT4_I(ac->ac_inode); 4683 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4684 4685 pa->pa_obj_lock = &ei->i_prealloc_lock; 4686 pa->pa_inode = ac->ac_inode; 4687 4688 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4689 4690 spin_lock(pa->pa_obj_lock); 4691 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 4692 spin_unlock(pa->pa_obj_lock); 4693 atomic_inc(&ei->i_prealloc_active); 4694 } 4695 4696 /* 4697 * creates new preallocated space for locality group inodes belongs to 4698 */ 4699 static noinline_for_stack void 4700 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 4701 { 4702 struct super_block *sb = ac->ac_sb; 4703 struct ext4_locality_group *lg; 4704 struct ext4_prealloc_space *pa; 4705 struct ext4_group_info *grp; 4706 4707 /* preallocate only when found space is larger then requested */ 4708 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4709 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4710 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 4711 BUG_ON(ac->ac_pa == NULL); 4712 4713 pa = ac->ac_pa; 4714 4715 /* preallocation can change ac_b_ex, thus we store actually 4716 * allocated blocks for history */ 4717 ac->ac_f_ex = ac->ac_b_ex; 4718 4719 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4720 pa->pa_lstart = pa->pa_pstart; 4721 pa->pa_len = ac->ac_b_ex.fe_len; 4722 pa->pa_free = pa->pa_len; 4723 spin_lock_init(&pa->pa_lock); 4724 INIT_LIST_HEAD(&pa->pa_inode_list); 4725 INIT_LIST_HEAD(&pa->pa_group_list); 4726 pa->pa_deleted = 0; 4727 pa->pa_type = MB_GROUP_PA; 4728 4729 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4730 pa->pa_len, pa->pa_lstart); 4731 trace_ext4_mb_new_group_pa(ac, pa); 4732 4733 ext4_mb_use_group_pa(ac, pa); 4734 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 4735 4736 grp = ext4_get_group_info(sb, 
ac->ac_b_ex.fe_group); 4737 lg = ac->ac_lg; 4738 BUG_ON(lg == NULL); 4739 4740 pa->pa_obj_lock = &lg->lg_prealloc_lock; 4741 pa->pa_inode = NULL; 4742 4743 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4744 4745 /* 4746 * We will later add the new pa to the right bucket 4747 * after updating the pa_free in ext4_mb_release_context 4748 */ 4749 } 4750 4751 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 4752 { 4753 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4754 ext4_mb_new_group_pa(ac); 4755 else 4756 ext4_mb_new_inode_pa(ac); 4757 } 4758 4759 /* 4760 * finds all unused blocks in on-disk bitmap, frees them in 4761 * in-core bitmap and buddy. 4762 * @pa must be unlinked from inode and group lists, so that 4763 * nobody else can find/use it. 4764 * the caller MUST hold group/inode locks. 4765 * TODO: optimize the case when there are no in-core structures yet 4766 */ 4767 static noinline_for_stack int 4768 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 4769 struct ext4_prealloc_space *pa) 4770 { 4771 struct super_block *sb = e4b->bd_sb; 4772 struct ext4_sb_info *sbi = EXT4_SB(sb); 4773 unsigned int end; 4774 unsigned int next; 4775 ext4_group_t group; 4776 ext4_grpblk_t bit; 4777 unsigned long long grp_blk_start; 4778 int free = 0; 4779 4780 BUG_ON(pa->pa_deleted == 0); 4781 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4782 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 4783 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4784 end = bit + pa->pa_len; 4785 4786 while (bit < end) { 4787 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 4788 if (bit >= end) 4789 break; 4790 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 4791 mb_debug(sb, "free preallocated %u/%u in group %u\n", 4792 (unsigned) ext4_group_first_block_no(sb, group) + bit, 4793 (unsigned) next - bit, (unsigned) group); 4794 free += next - bit; 4795 4796 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 4797 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 4798 EXT4_C2B(sbi, bit)), 4799 next - bit); 4800 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 4801 bit = next + 1; 4802 } 4803 if (free != pa->pa_free) { 4804 ext4_msg(e4b->bd_sb, KERN_CRIT, 4805 "pa %p: logic %lu, phys. %lu, len %d", 4806 pa, (unsigned long) pa->pa_lstart, 4807 (unsigned long) pa->pa_pstart, 4808 pa->pa_len); 4809 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 4810 free, pa->pa_free); 4811 /* 4812 * pa is already deleted so we use the value obtained 4813 * from the bitmap and continue. 
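		 * A mismatch between the two counts means the on-disk
		 * bitmap and the PA accounting disagree, i.e. the group is
		 * corrupted; ext4_grp_locked_error() above has already
		 * reported it.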
4814 */ 4815 } 4816 atomic_add(free, &sbi->s_mb_discarded); 4817 4818 return 0; 4819 } 4820 4821 static noinline_for_stack int 4822 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 4823 struct ext4_prealloc_space *pa) 4824 { 4825 struct super_block *sb = e4b->bd_sb; 4826 ext4_group_t group; 4827 ext4_grpblk_t bit; 4828 4829 trace_ext4_mb_release_group_pa(sb, pa); 4830 BUG_ON(pa->pa_deleted == 0); 4831 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4832 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4833 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 4834 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 4835 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 4836 4837 return 0; 4838 } 4839 4840 /* 4841 * releases all preallocations in given group 4842 * 4843 * first, we need to decide discard policy: 4844 * - when do we discard 4845 * 1) ENOSPC 4846 * - how many do we discard 4847 * 1) how many requested 4848 */ 4849 static noinline_for_stack int 4850 ext4_mb_discard_group_preallocations(struct super_block *sb, 4851 ext4_group_t group, int *busy) 4852 { 4853 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4854 struct buffer_head *bitmap_bh = NULL; 4855 struct ext4_prealloc_space *pa, *tmp; 4856 struct list_head list; 4857 struct ext4_buddy e4b; 4858 int err; 4859 int free = 0; 4860 4861 mb_debug(sb, "discard preallocation for group %u\n", group); 4862 if (list_empty(&grp->bb_prealloc_list)) 4863 goto out_dbg; 4864 4865 bitmap_bh = ext4_read_block_bitmap(sb, group); 4866 if (IS_ERR(bitmap_bh)) { 4867 err = PTR_ERR(bitmap_bh); 4868 ext4_error_err(sb, -err, 4869 "Error %d reading block bitmap for %u", 4870 err, group); 4871 goto out_dbg; 4872 } 4873 4874 err = ext4_mb_load_buddy(sb, group, &e4b); 4875 if (err) { 4876 ext4_warning(sb, "Error %d loading buddy information for %u", 4877 err, group); 4878 put_bh(bitmap_bh); 4879 goto out_dbg; 4880 } 4881 4882 INIT_LIST_HEAD(&list); 4883 ext4_lock_group(sb, group); 4884 list_for_each_entry_safe(pa, tmp, 4885 &grp->bb_prealloc_list, pa_group_list) { 4886 spin_lock(&pa->pa_lock); 4887 if (atomic_read(&pa->pa_count)) { 4888 spin_unlock(&pa->pa_lock); 4889 *busy = 1; 4890 continue; 4891 } 4892 if (pa->pa_deleted) { 4893 spin_unlock(&pa->pa_lock); 4894 continue; 4895 } 4896 4897 /* seems this one can be freed ... */ 4898 ext4_mb_mark_pa_deleted(sb, pa); 4899 4900 if (!free) 4901 this_cpu_inc(discard_pa_seq); 4902 4903 /* we can trust pa_free ... 
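	 * here: pa_count dropped to zero and pa_deleted was just set under
	 * pa_lock, so no allocator can still pick this PA and change
	 * pa_free under us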
*/ 4904 free += pa->pa_free; 4905 4906 spin_unlock(&pa->pa_lock); 4907 4908 list_del(&pa->pa_group_list); 4909 list_add(&pa->u.pa_tmp_list, &list); 4910 } 4911 4912 /* now free all selected PAs */ 4913 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4914 4915 /* remove from object (inode or locality group) */ 4916 spin_lock(pa->pa_obj_lock); 4917 list_del_rcu(&pa->pa_inode_list); 4918 spin_unlock(pa->pa_obj_lock); 4919 4920 if (pa->pa_type == MB_GROUP_PA) 4921 ext4_mb_release_group_pa(&e4b, pa); 4922 else 4923 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4924 4925 list_del(&pa->u.pa_tmp_list); 4926 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4927 } 4928 4929 ext4_unlock_group(sb, group); 4930 ext4_mb_unload_buddy(&e4b); 4931 put_bh(bitmap_bh); 4932 out_dbg: 4933 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 4934 free, group, grp->bb_free); 4935 return free; 4936 } 4937 4938 /* 4939 * releases all non-used preallocated blocks for given inode 4940 * 4941 * It's important to discard preallocations under i_data_sem 4942 * We don't want another block to be served from the prealloc 4943 * space when we are discarding the inode prealloc space. 4944 * 4945 * FIXME!! Make sure it is valid at all the call sites 4946 */ 4947 void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 4948 { 4949 struct ext4_inode_info *ei = EXT4_I(inode); 4950 struct super_block *sb = inode->i_sb; 4951 struct buffer_head *bitmap_bh = NULL; 4952 struct ext4_prealloc_space *pa, *tmp; 4953 ext4_group_t group = 0; 4954 struct list_head list; 4955 struct ext4_buddy e4b; 4956 int err; 4957 4958 if (!S_ISREG(inode->i_mode)) { 4959 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 4960 return; 4961 } 4962 4963 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 4964 return; 4965 4966 mb_debug(sb, "discard preallocation for inode %lu\n", 4967 inode->i_ino); 4968 trace_ext4_discard_preallocations(inode, 4969 atomic_read(&ei->i_prealloc_active), needed); 4970 4971 INIT_LIST_HEAD(&list); 4972 4973 if (needed == 0) 4974 needed = UINT_MAX; 4975 4976 repeat: 4977 /* first, collect all pa's in the inode */ 4978 spin_lock(&ei->i_prealloc_lock); 4979 while (!list_empty(&ei->i_prealloc_list) && needed) { 4980 pa = list_entry(ei->i_prealloc_list.prev, 4981 struct ext4_prealloc_space, pa_inode_list); 4982 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 4983 spin_lock(&pa->pa_lock); 4984 if (atomic_read(&pa->pa_count)) { 4985 /* this shouldn't happen often - nobody should 4986 * use preallocation while we're discarding it */ 4987 spin_unlock(&pa->pa_lock); 4988 spin_unlock(&ei->i_prealloc_lock); 4989 ext4_msg(sb, KERN_ERR, 4990 "uh-oh! used pa while discarding"); 4991 WARN_ON(1); 4992 schedule_timeout_uninterruptible(HZ); 4993 goto repeat; 4994 4995 } 4996 if (pa->pa_deleted == 0) { 4997 ext4_mb_mark_pa_deleted(sb, pa); 4998 spin_unlock(&pa->pa_lock); 4999 list_del_rcu(&pa->pa_inode_list); 5000 list_add(&pa->u.pa_tmp_list, &list); 5001 needed--; 5002 continue; 5003 } 5004 5005 /* someone is deleting pa right now */ 5006 spin_unlock(&pa->pa_lock); 5007 spin_unlock(&ei->i_prealloc_lock); 5008 5009 /* we have to wait here because pa_deleted 5010 * doesn't mean pa is already unlinked from 5011 * the list. 
as we might be called from 5012 * ->clear_inode() the inode will get freed 5013 * and concurrent thread which is unlinking 5014 * pa from inode's list may access already 5015 * freed memory, bad-bad-bad */ 5016 5017 /* XXX: if this happens too often, we can 5018 * add a flag to force wait only in case 5019 * of ->clear_inode(), but not in case of 5020 * regular truncate */ 5021 schedule_timeout_uninterruptible(HZ); 5022 goto repeat; 5023 } 5024 spin_unlock(&ei->i_prealloc_lock); 5025 5026 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5027 BUG_ON(pa->pa_type != MB_INODE_PA); 5028 group = ext4_get_group_number(sb, pa->pa_pstart); 5029 5030 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5031 GFP_NOFS|__GFP_NOFAIL); 5032 if (err) { 5033 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 5034 err, group); 5035 continue; 5036 } 5037 5038 bitmap_bh = ext4_read_block_bitmap(sb, group); 5039 if (IS_ERR(bitmap_bh)) { 5040 err = PTR_ERR(bitmap_bh); 5041 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 5042 err, group); 5043 ext4_mb_unload_buddy(&e4b); 5044 continue; 5045 } 5046 5047 ext4_lock_group(sb, group); 5048 list_del(&pa->pa_group_list); 5049 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5050 ext4_unlock_group(sb, group); 5051 5052 ext4_mb_unload_buddy(&e4b); 5053 put_bh(bitmap_bh); 5054 5055 list_del(&pa->u.pa_tmp_list); 5056 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5057 } 5058 } 5059 5060 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 5061 { 5062 struct ext4_prealloc_space *pa; 5063 5064 BUG_ON(ext4_pspace_cachep == NULL); 5065 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 5066 if (!pa) 5067 return -ENOMEM; 5068 atomic_set(&pa->pa_count, 1); 5069 ac->ac_pa = pa; 5070 return 0; 5071 } 5072 5073 static void ext4_mb_pa_free(struct ext4_allocation_context *ac) 5074 { 5075 struct ext4_prealloc_space *pa = ac->ac_pa; 5076 5077 BUG_ON(!pa); 5078 ac->ac_pa = NULL; 5079 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 5080 kmem_cache_free(ext4_pspace_cachep, pa); 5081 } 5082 5083 #ifdef CONFIG_EXT4_DEBUG 5084 static inline void ext4_mb_show_pa(struct super_block *sb) 5085 { 5086 ext4_group_t i, ngroups; 5087 5088 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5089 return; 5090 5091 ngroups = ext4_get_groups_count(sb); 5092 mb_debug(sb, "groups: "); 5093 for (i = 0; i < ngroups; i++) { 5094 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5095 struct ext4_prealloc_space *pa; 5096 ext4_grpblk_t start; 5097 struct list_head *cur; 5098 ext4_lock_group(sb, i); 5099 list_for_each(cur, &grp->bb_prealloc_list) { 5100 pa = list_entry(cur, struct ext4_prealloc_space, 5101 pa_group_list); 5102 spin_lock(&pa->pa_lock); 5103 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5104 NULL, &start); 5105 spin_unlock(&pa->pa_lock); 5106 mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5107 pa->pa_len); 5108 } 5109 ext4_unlock_group(sb, i); 5110 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5111 grp->bb_fragments); 5112 } 5113 } 5114 5115 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5116 { 5117 struct super_block *sb = ac->ac_sb; 5118 5119 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5120 return; 5121 5122 mb_debug(sb, "Can't allocate:" 5123 " Allocation context details:"); 5124 mb_debug(sb, "status %u flags 0x%x", 5125 ac->ac_status, ac->ac_flags); 5126 mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5127 "goal %lu/%lu/%lu@%lu, " 5128 "best %lu/%lu/%lu@%lu cr %d", 5129 (unsigned long)ac->ac_o_ex.fe_group, 5130 (unsigned 
long)ac->ac_o_ex.fe_start, 5131 (unsigned long)ac->ac_o_ex.fe_len, 5132 (unsigned long)ac->ac_o_ex.fe_logical, 5133 (unsigned long)ac->ac_g_ex.fe_group, 5134 (unsigned long)ac->ac_g_ex.fe_start, 5135 (unsigned long)ac->ac_g_ex.fe_len, 5136 (unsigned long)ac->ac_g_ex.fe_logical, 5137 (unsigned long)ac->ac_b_ex.fe_group, 5138 (unsigned long)ac->ac_b_ex.fe_start, 5139 (unsigned long)ac->ac_b_ex.fe_len, 5140 (unsigned long)ac->ac_b_ex.fe_logical, 5141 (int)ac->ac_criteria); 5142 mb_debug(sb, "%u found", ac->ac_found); 5143 ext4_mb_show_pa(sb); 5144 } 5145 #else 5146 static inline void ext4_mb_show_pa(struct super_block *sb) 5147 { 5148 return; 5149 } 5150 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5151 { 5152 ext4_mb_show_pa(ac->ac_sb); 5153 return; 5154 } 5155 #endif 5156 5157 /* 5158 * We use locality group preallocation for small size file. The size of the 5159 * file is determined by the current size or the resulting size after 5160 * allocation which ever is larger 5161 * 5162 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5163 */ 5164 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5165 { 5166 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5167 int bsbits = ac->ac_sb->s_blocksize_bits; 5168 loff_t size, isize; 5169 5170 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5171 return; 5172 5173 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 5174 return; 5175 5176 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 5177 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 5178 >> bsbits; 5179 5180 if ((size == isize) && !ext4_fs_is_busy(sbi) && 5181 !inode_is_open_for_write(ac->ac_inode)) { 5182 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 5183 return; 5184 } 5185 5186 if (sbi->s_mb_group_prealloc <= 0) { 5187 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5188 return; 5189 } 5190 5191 /* don't use group allocation for large files */ 5192 size = max(size, isize); 5193 if (size > sbi->s_mb_stream_request) { 5194 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5195 return; 5196 } 5197 5198 BUG_ON(ac->ac_lg != NULL); 5199 /* 5200 * locality group prealloc space are per cpu. The reason for having 5201 * per cpu locality group is to reduce the contention between block 5202 * request from multiple CPUs. 
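	 * Each CPU gets its own struct ext4_locality_group (an
	 * alloc_percpu() array hanging off sbi->s_locality_groups), and
	 * everything that goes through the group-PA path serializes on
	 * that group's lg_mutex, which is taken just below and released
	 * in ext4_mb_release_context().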
5203 */ 5204 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5205 5206 /* we're going to use group allocation */ 5207 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5208 5209 /* serialize all allocations in the group */ 5210 mutex_lock(&ac->ac_lg->lg_mutex); 5211 } 5212 5213 static noinline_for_stack int 5214 ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5215 struct ext4_allocation_request *ar) 5216 { 5217 struct super_block *sb = ar->inode->i_sb; 5218 struct ext4_sb_info *sbi = EXT4_SB(sb); 5219 struct ext4_super_block *es = sbi->s_es; 5220 ext4_group_t group; 5221 unsigned int len; 5222 ext4_fsblk_t goal; 5223 ext4_grpblk_t block; 5224 5225 /* we can't allocate > group size */ 5226 len = ar->len; 5227 5228 /* just a dirty hack to filter too big requests */ 5229 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 5230 len = EXT4_CLUSTERS_PER_GROUP(sb); 5231 5232 /* start searching from the goal */ 5233 goal = ar->goal; 5234 if (goal < le32_to_cpu(es->s_first_data_block) || 5235 goal >= ext4_blocks_count(es)) 5236 goal = le32_to_cpu(es->s_first_data_block); 5237 ext4_get_group_no_and_offset(sb, goal, &group, &block); 5238 5239 /* set up allocation goals */ 5240 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5241 ac->ac_status = AC_STATUS_CONTINUE; 5242 ac->ac_sb = sb; 5243 ac->ac_inode = ar->inode; 5244 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5245 ac->ac_o_ex.fe_group = group; 5246 ac->ac_o_ex.fe_start = block; 5247 ac->ac_o_ex.fe_len = len; 5248 ac->ac_g_ex = ac->ac_o_ex; 5249 ac->ac_flags = ar->flags; 5250 5251 /* we have to define context: we'll work with a file or 5252 * locality group. this is a policy, actually */ 5253 ext4_mb_group_or_file(ac); 5254 5255 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5256 "left: %u/%u, right %u/%u to %swritable\n", 5257 (unsigned) ar->len, (unsigned) ar->logical, 5258 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5259 (unsigned) ar->lleft, (unsigned) ar->pleft, 5260 (unsigned) ar->lright, (unsigned) ar->pright, 5261 inode_is_open_for_write(ar->inode) ? "" : "non-"); 5262 return 0; 5263 5264 } 5265 5266 static noinline_for_stack void 5267 ext4_mb_discard_lg_preallocations(struct super_block *sb, 5268 struct ext4_locality_group *lg, 5269 int order, int total_entries) 5270 { 5271 ext4_group_t group = 0; 5272 struct ext4_buddy e4b; 5273 struct list_head discard_list; 5274 struct ext4_prealloc_space *pa, *tmp; 5275 5276 mb_debug(sb, "discard locality group preallocation\n"); 5277 5278 INIT_LIST_HEAD(&discard_list); 5279 5280 spin_lock(&lg->lg_prealloc_lock); 5281 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 5282 pa_inode_list, 5283 lockdep_is_held(&lg->lg_prealloc_lock)) { 5284 spin_lock(&pa->pa_lock); 5285 if (atomic_read(&pa->pa_count)) { 5286 /* 5287 * This is the pa that we just used 5288 * for block allocation. So don't 5289 * free that 5290 */ 5291 spin_unlock(&pa->pa_lock); 5292 continue; 5293 } 5294 if (pa->pa_deleted) { 5295 spin_unlock(&pa->pa_lock); 5296 continue; 5297 } 5298 /* only lg prealloc space */ 5299 BUG_ON(pa->pa_type != MB_GROUP_PA); 5300 5301 /* seems this one can be freed ... */ 5302 ext4_mb_mark_pa_deleted(sb, pa); 5303 spin_unlock(&pa->pa_lock); 5304 5305 list_del_rcu(&pa->pa_inode_list); 5306 list_add(&pa->u.pa_tmp_list, &discard_list); 5307 5308 total_entries--; 5309 if (total_entries <= 5) { 5310 /* 5311 * we want to keep only 5 entries 5312 * allowing it to grow to 8. This 5313 * mak sure we don't call discard 5314 * soon for this list. 
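			 * In other words there is hysteresis:
			 * ext4_mb_add_n_trim() only calls us once the list
			 * has grown past 8 entries, and we stop once it is
			 * back down to 5, so the list bounces between those
			 * two bounds instead of being trimmed on every
			 * insertion.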
5315 */ 5316 break; 5317 } 5318 } 5319 spin_unlock(&lg->lg_prealloc_lock); 5320 5321 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 5322 int err; 5323 5324 group = ext4_get_group_number(sb, pa->pa_pstart); 5325 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5326 GFP_NOFS|__GFP_NOFAIL); 5327 if (err) { 5328 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 5329 err, group); 5330 continue; 5331 } 5332 ext4_lock_group(sb, group); 5333 list_del(&pa->pa_group_list); 5334 ext4_mb_release_group_pa(&e4b, pa); 5335 ext4_unlock_group(sb, group); 5336 5337 ext4_mb_unload_buddy(&e4b); 5338 list_del(&pa->u.pa_tmp_list); 5339 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5340 } 5341 } 5342 5343 /* 5344 * We have incremented pa_count. So it cannot be freed at this 5345 * point. Also we hold lg_mutex. So no parallel allocation is 5346 * possible from this lg. That means pa_free cannot be updated. 5347 * 5348 * A parallel ext4_mb_discard_group_preallocations is possible. 5349 * which can cause the lg_prealloc_list to be updated. 5350 */ 5351 5352 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 5353 { 5354 int order, added = 0, lg_prealloc_count = 1; 5355 struct super_block *sb = ac->ac_sb; 5356 struct ext4_locality_group *lg = ac->ac_lg; 5357 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 5358 5359 order = fls(pa->pa_free) - 1; 5360 if (order > PREALLOC_TB_SIZE - 1) 5361 /* The max size of hash table is PREALLOC_TB_SIZE */ 5362 order = PREALLOC_TB_SIZE - 1; 5363 /* Add the prealloc space to lg */ 5364 spin_lock(&lg->lg_prealloc_lock); 5365 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 5366 pa_inode_list, 5367 lockdep_is_held(&lg->lg_prealloc_lock)) { 5368 spin_lock(&tmp_pa->pa_lock); 5369 if (tmp_pa->pa_deleted) { 5370 spin_unlock(&tmp_pa->pa_lock); 5371 continue; 5372 } 5373 if (!added && pa->pa_free < tmp_pa->pa_free) { 5374 /* Add to the tail of the previous entry */ 5375 list_add_tail_rcu(&pa->pa_inode_list, 5376 &tmp_pa->pa_inode_list); 5377 added = 1; 5378 /* 5379 * we want to count the total 5380 * number of entries in the list 5381 */ 5382 } 5383 spin_unlock(&tmp_pa->pa_lock); 5384 lg_prealloc_count++; 5385 } 5386 if (!added) 5387 list_add_tail_rcu(&pa->pa_inode_list, 5388 &lg->lg_prealloc_list[order]); 5389 spin_unlock(&lg->lg_prealloc_lock); 5390 5391 /* Now trim the list to be not more than 8 elements */ 5392 if (lg_prealloc_count > 8) { 5393 ext4_mb_discard_lg_preallocations(sb, lg, 5394 order, lg_prealloc_count); 5395 return; 5396 } 5397 return ; 5398 } 5399 5400 /* 5401 * if per-inode prealloc list is too long, trim some PA 5402 */ 5403 static void ext4_mb_trim_inode_pa(struct inode *inode) 5404 { 5405 struct ext4_inode_info *ei = EXT4_I(inode); 5406 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5407 int count, delta; 5408 5409 count = atomic_read(&ei->i_prealloc_active); 5410 delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; 5411 if (count > sbi->s_mb_max_inode_prealloc + delta) { 5412 count -= sbi->s_mb_max_inode_prealloc; 5413 ext4_discard_preallocations(inode, count); 5414 } 5415 } 5416 5417 /* 5418 * release all resource we used in allocation 5419 */ 5420 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5421 { 5422 struct inode *inode = ac->ac_inode; 5423 struct ext4_inode_info *ei = EXT4_I(inode); 5424 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5425 struct ext4_prealloc_space *pa = ac->ac_pa; 5426 if (pa) { 5427 if (pa->pa_type == MB_GROUP_PA) { 5428 /* see comment in 
ext4_mb_use_group_pa() */ 5429 spin_lock(&pa->pa_lock); 5430 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5431 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5432 pa->pa_free -= ac->ac_b_ex.fe_len; 5433 pa->pa_len -= ac->ac_b_ex.fe_len; 5434 spin_unlock(&pa->pa_lock); 5435 5436 /* 5437 * We want to add the pa to the right bucket. 5438 * Remove it from the list and while adding 5439 * make sure the list to which we are adding 5440 * doesn't grow big. 5441 */ 5442 if (likely(pa->pa_free)) { 5443 spin_lock(pa->pa_obj_lock); 5444 list_del_rcu(&pa->pa_inode_list); 5445 spin_unlock(pa->pa_obj_lock); 5446 ext4_mb_add_n_trim(ac); 5447 } 5448 } 5449 5450 if (pa->pa_type == MB_INODE_PA) { 5451 /* 5452 * treat per-inode prealloc list as a lru list, then try 5453 * to trim the least recently used PA. 5454 */ 5455 spin_lock(pa->pa_obj_lock); 5456 list_move(&pa->pa_inode_list, &ei->i_prealloc_list); 5457 spin_unlock(pa->pa_obj_lock); 5458 } 5459 5460 ext4_mb_put_pa(ac, ac->ac_sb, pa); 5461 } 5462 if (ac->ac_bitmap_page) 5463 put_page(ac->ac_bitmap_page); 5464 if (ac->ac_buddy_page) 5465 put_page(ac->ac_buddy_page); 5466 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5467 mutex_unlock(&ac->ac_lg->lg_mutex); 5468 ext4_mb_collect_stats(ac); 5469 ext4_mb_trim_inode_pa(inode); 5470 return 0; 5471 } 5472 5473 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5474 { 5475 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5476 int ret; 5477 int freed = 0, busy = 0; 5478 int retry = 0; 5479 5480 trace_ext4_mb_discard_preallocations(sb, needed); 5481 5482 if (needed == 0) 5483 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 5484 repeat: 5485 for (i = 0; i < ngroups && needed > 0; i++) { 5486 ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5487 freed += ret; 5488 needed -= ret; 5489 cond_resched(); 5490 } 5491 5492 if (needed > 0 && busy && ++retry < 3) { 5493 busy = 0; 5494 goto repeat; 5495 } 5496 5497 return freed; 5498 } 5499 5500 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 5501 struct ext4_allocation_context *ac, u64 *seq) 5502 { 5503 int freed; 5504 u64 seq_retry = 0; 5505 bool ret = false; 5506 5507 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 5508 if (freed) { 5509 ret = true; 5510 goto out_dbg; 5511 } 5512 seq_retry = ext4_get_discard_pa_seq_sum(); 5513 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 5514 ac->ac_flags |= EXT4_MB_STRICT_CHECK; 5515 *seq = seq_retry; 5516 ret = true; 5517 } 5518 5519 out_dbg: 5520 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? 
"yes" : "no"); 5521 return ret; 5522 } 5523 5524 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 5525 struct ext4_allocation_request *ar, int *errp); 5526 5527 /* 5528 * Main entry point into mballoc to allocate blocks 5529 * it tries to use preallocation first, then falls back 5530 * to usual allocation 5531 */ 5532 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 5533 struct ext4_allocation_request *ar, int *errp) 5534 { 5535 struct ext4_allocation_context *ac = NULL; 5536 struct ext4_sb_info *sbi; 5537 struct super_block *sb; 5538 ext4_fsblk_t block = 0; 5539 unsigned int inquota = 0; 5540 unsigned int reserv_clstrs = 0; 5541 u64 seq; 5542 5543 might_sleep(); 5544 sb = ar->inode->i_sb; 5545 sbi = EXT4_SB(sb); 5546 5547 trace_ext4_request_blocks(ar); 5548 if (sbi->s_mount_state & EXT4_FC_REPLAY) 5549 return ext4_mb_new_blocks_simple(handle, ar, errp); 5550 5551 /* Allow to use superuser reservation for quota file */ 5552 if (ext4_is_quota_file(ar->inode)) 5553 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 5554 5555 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 5556 /* Without delayed allocation we need to verify 5557 * there is enough free blocks to do block allocation 5558 * and verify allocation doesn't exceed the quota limits. 5559 */ 5560 while (ar->len && 5561 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 5562 5563 /* let others to free the space */ 5564 cond_resched(); 5565 ar->len = ar->len >> 1; 5566 } 5567 if (!ar->len) { 5568 ext4_mb_show_pa(sb); 5569 *errp = -ENOSPC; 5570 return 0; 5571 } 5572 reserv_clstrs = ar->len; 5573 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 5574 dquot_alloc_block_nofail(ar->inode, 5575 EXT4_C2B(sbi, ar->len)); 5576 } else { 5577 while (ar->len && 5578 dquot_alloc_block(ar->inode, 5579 EXT4_C2B(sbi, ar->len))) { 5580 5581 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 5582 ar->len--; 5583 } 5584 } 5585 inquota = ar->len; 5586 if (ar->len == 0) { 5587 *errp = -EDQUOT; 5588 goto out; 5589 } 5590 } 5591 5592 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 5593 if (!ac) { 5594 ar->len = 0; 5595 *errp = -ENOMEM; 5596 goto out; 5597 } 5598 5599 *errp = ext4_mb_initialize_context(ac, ar); 5600 if (*errp) { 5601 ar->len = 0; 5602 goto out; 5603 } 5604 5605 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 5606 seq = this_cpu_read(discard_pa_seq); 5607 if (!ext4_mb_use_preallocated(ac)) { 5608 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 5609 ext4_mb_normalize_request(ac, ar); 5610 5611 *errp = ext4_mb_pa_alloc(ac); 5612 if (*errp) 5613 goto errout; 5614 repeat: 5615 /* allocate space in core */ 5616 *errp = ext4_mb_regular_allocator(ac); 5617 /* 5618 * pa allocated above is added to grp->bb_prealloc_list only 5619 * when we were able to allocate some block i.e. when 5620 * ac->ac_status == AC_STATUS_FOUND. 5621 * And error from above mean ac->ac_status != AC_STATUS_FOUND 5622 * So we have to free this pa here itself. 
5623 */ 5624 if (*errp) { 5625 ext4_mb_pa_free(ac); 5626 ext4_discard_allocated_blocks(ac); 5627 goto errout; 5628 } 5629 if (ac->ac_status == AC_STATUS_FOUND && 5630 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 5631 ext4_mb_pa_free(ac); 5632 } 5633 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 5634 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 5635 if (*errp) { 5636 ext4_discard_allocated_blocks(ac); 5637 goto errout; 5638 } else { 5639 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5640 ar->len = ac->ac_b_ex.fe_len; 5641 } 5642 } else { 5643 if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 5644 goto repeat; 5645 /* 5646 * If block allocation fails then the pa allocated above 5647 * needs to be freed here itself. 5648 */ 5649 ext4_mb_pa_free(ac); 5650 *errp = -ENOSPC; 5651 } 5652 5653 errout: 5654 if (*errp) { 5655 ac->ac_b_ex.fe_len = 0; 5656 ar->len = 0; 5657 ext4_mb_show_ac(ac); 5658 } 5659 ext4_mb_release_context(ac); 5660 out: 5661 if (ac) 5662 kmem_cache_free(ext4_ac_cachep, ac); 5663 if (inquota && ar->len < inquota) 5664 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 5665 if (!ar->len) { 5666 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 5667 /* release all the reserved blocks if non delalloc */ 5668 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 5669 reserv_clstrs); 5670 } 5671 5672 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 5673 5674 return block; 5675 } 5676 5677 /* 5678 * We can merge two free data extents only if the physical blocks 5679 * are contiguous, AND the extents were freed by the same transaction, 5680 * AND the blocks are associated with the same group. 5681 */ 5682 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 5683 struct ext4_free_data *entry, 5684 struct ext4_free_data *new_entry, 5685 struct rb_root *entry_rb_root) 5686 { 5687 if ((entry->efd_tid != new_entry->efd_tid) || 5688 (entry->efd_group != new_entry->efd_group)) 5689 return; 5690 if (entry->efd_start_cluster + entry->efd_count == 5691 new_entry->efd_start_cluster) { 5692 new_entry->efd_start_cluster = entry->efd_start_cluster; 5693 new_entry->efd_count += entry->efd_count; 5694 } else if (new_entry->efd_start_cluster + new_entry->efd_count == 5695 entry->efd_start_cluster) { 5696 new_entry->efd_count += entry->efd_count; 5697 } else 5698 return; 5699 spin_lock(&sbi->s_md_lock); 5700 list_del(&entry->efd_list); 5701 spin_unlock(&sbi->s_md_lock); 5702 rb_erase(&entry->efd_node, entry_rb_root); 5703 kmem_cache_free(ext4_free_data_cachep, entry); 5704 } 5705 5706 static noinline_for_stack int 5707 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 5708 struct ext4_free_data *new_entry) 5709 { 5710 ext4_group_t group = e4b->bd_group; 5711 ext4_grpblk_t cluster; 5712 ext4_grpblk_t clusters = new_entry->efd_count; 5713 struct ext4_free_data *entry; 5714 struct ext4_group_info *db = e4b->bd_info; 5715 struct super_block *sb = e4b->bd_sb; 5716 struct ext4_sb_info *sbi = EXT4_SB(sb); 5717 struct rb_node **n = &db->bb_free_root.rb_node, *node; 5718 struct rb_node *parent = NULL, *new_node; 5719 5720 BUG_ON(!ext4_handle_valid(handle)); 5721 BUG_ON(e4b->bd_bitmap_page == NULL); 5722 BUG_ON(e4b->bd_buddy_page == NULL); 5723 5724 new_node = &new_entry->efd_node; 5725 cluster = new_entry->efd_start_cluster; 5726 5727 if (!*n) { 5728 /* first free block exent. 
We need to 5729 protect buddy cache from being freed, 5730 * otherwise we'll refresh it from 5731 * on-disk bitmap and lose not-yet-available 5732 * blocks */ 5733 get_page(e4b->bd_buddy_page); 5734 get_page(e4b->bd_bitmap_page); 5735 } 5736 while (*n) { 5737 parent = *n; 5738 entry = rb_entry(parent, struct ext4_free_data, efd_node); 5739 if (cluster < entry->efd_start_cluster) 5740 n = &(*n)->rb_left; 5741 else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 5742 n = &(*n)->rb_right; 5743 else { 5744 ext4_grp_locked_error(sb, group, 0, 5745 ext4_group_first_block_no(sb, group) + 5746 EXT4_C2B(sbi, cluster), 5747 "Block already on to-be-freed list"); 5748 kmem_cache_free(ext4_free_data_cachep, new_entry); 5749 return 0; 5750 } 5751 } 5752 5753 rb_link_node(new_node, parent, n); 5754 rb_insert_color(new_node, &db->bb_free_root); 5755 5756 /* Now try to see the extent can be merged to left and right */ 5757 node = rb_prev(new_node); 5758 if (node) { 5759 entry = rb_entry(node, struct ext4_free_data, efd_node); 5760 ext4_try_merge_freed_extent(sbi, entry, new_entry, 5761 &(db->bb_free_root)); 5762 } 5763 5764 node = rb_next(new_node); 5765 if (node) { 5766 entry = rb_entry(node, struct ext4_free_data, efd_node); 5767 ext4_try_merge_freed_extent(sbi, entry, new_entry, 5768 &(db->bb_free_root)); 5769 } 5770 5771 spin_lock(&sbi->s_md_lock); 5772 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 5773 sbi->s_mb_free_pending += clusters; 5774 spin_unlock(&sbi->s_md_lock); 5775 return 0; 5776 } 5777 5778 /* 5779 * Simple allocator for Ext4 fast commit replay path. It searches for blocks 5780 * linearly starting at the goal block and also excludes the blocks which 5781 * are going to be in use after fast commit replay. 5782 */ 5783 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 5784 struct ext4_allocation_request *ar, int *errp) 5785 { 5786 struct buffer_head *bitmap_bh; 5787 struct super_block *sb = ar->inode->i_sb; 5788 ext4_group_t group; 5789 ext4_grpblk_t blkoff; 5790 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 5791 ext4_grpblk_t i = 0; 5792 ext4_fsblk_t goal, block; 5793 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 5794 5795 goal = ar->goal; 5796 if (goal < le32_to_cpu(es->s_first_data_block) || 5797 goal >= ext4_blocks_count(es)) 5798 goal = le32_to_cpu(es->s_first_data_block); 5799 5800 ar->len = 0; 5801 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 5802 for (; group < ext4_get_groups_count(sb); group++) { 5803 bitmap_bh = ext4_read_block_bitmap(sb, group); 5804 if (IS_ERR(bitmap_bh)) { 5805 *errp = PTR_ERR(bitmap_bh); 5806 pr_warn("Failed to read block bitmap\n"); 5807 return 0; 5808 } 5809 5810 ext4_get_group_no_and_offset(sb, 5811 max(ext4_group_first_block_no(sb, group), goal), 5812 NULL, &blkoff); 5813 while (1) { 5814 i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 5815 blkoff); 5816 if (i >= max) 5817 break; 5818 if (ext4_fc_replay_check_excluded(sb, 5819 ext4_group_first_block_no(sb, group) + i)) { 5820 blkoff = i + 1; 5821 } else 5822 break; 5823 } 5824 brelse(bitmap_bh); 5825 if (i < max) 5826 break; 5827 } 5828 5829 if (group >= ext4_get_groups_count(sb) || i >= max) { 5830 *errp = -ENOSPC; 5831 return 0; 5832 } 5833 5834 block = ext4_group_first_block_no(sb, group) + i; 5835 ext4_mb_mark_bb(sb, block, 1, 1); 5836 ar->len = 1; 5837 5838 return block; 5839 } 5840 5841 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 5842 unsigned long count) 5843 { 5844 struct buffer_head *bitmap_bh; 5845 
struct super_block *sb = inode->i_sb; 5846 struct ext4_group_desc *gdp; 5847 struct buffer_head *gdp_bh; 5848 ext4_group_t group; 5849 ext4_grpblk_t blkoff; 5850 int already_freed = 0, err, i; 5851 5852 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 5853 bitmap_bh = ext4_read_block_bitmap(sb, group); 5854 if (IS_ERR(bitmap_bh)) { 5855 err = PTR_ERR(bitmap_bh); 5856 pr_warn("Failed to read block bitmap\n"); 5857 return; 5858 } 5859 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 5860 if (!gdp) 5861 return; 5862 5863 for (i = 0; i < count; i++) { 5864 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 5865 already_freed++; 5866 } 5867 mb_clear_bits(bitmap_bh->b_data, blkoff, count); 5868 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 5869 if (err) 5870 return; 5871 ext4_free_group_clusters_set( 5872 sb, gdp, ext4_free_group_clusters(sb, gdp) + 5873 count - already_freed); 5874 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); 5875 ext4_group_desc_csum_set(sb, group, gdp); 5876 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 5877 sync_dirty_buffer(bitmap_bh); 5878 sync_dirty_buffer(gdp_bh); 5879 brelse(bitmap_bh); 5880 } 5881 5882 /** 5883 * ext4_mb_clear_bb() -- helper function for freeing blocks. 5884 * Used by ext4_free_blocks() 5885 * @handle: handle for this transaction 5886 * @inode: inode 5887 * @block: starting physical block to be freed 5888 * @count: number of blocks to be freed 5889 * @flags: flags used by ext4_free_blocks 5890 */ 5891 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 5892 ext4_fsblk_t block, unsigned long count, 5893 int flags) 5894 { 5895 struct buffer_head *bitmap_bh = NULL; 5896 struct super_block *sb = inode->i_sb; 5897 struct ext4_group_desc *gdp; 5898 unsigned int overflow; 5899 ext4_grpblk_t bit; 5900 struct buffer_head *gd_bh; 5901 ext4_group_t block_group; 5902 struct ext4_sb_info *sbi; 5903 struct ext4_buddy e4b; 5904 unsigned int count_clusters; 5905 int err = 0; 5906 int ret; 5907 5908 sbi = EXT4_SB(sb); 5909 5910 do_more: 5911 overflow = 0; 5912 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 5913 5914 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 5915 ext4_get_group_info(sb, block_group)))) 5916 return; 5917 5918 /* 5919 * Check to see if we are freeing blocks across a group 5920 * boundary. 5921 */ 5922 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 5923 overflow = EXT4_C2B(sbi, bit) + count - 5924 EXT4_BLOCKS_PER_GROUP(sb); 5925 count -= overflow; 5926 } 5927 count_clusters = EXT4_NUM_B2C(sbi, count); 5928 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 5929 if (IS_ERR(bitmap_bh)) { 5930 err = PTR_ERR(bitmap_bh); 5931 bitmap_bh = NULL; 5932 goto error_return; 5933 } 5934 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 5935 if (!gdp) { 5936 err = -EIO; 5937 goto error_return; 5938 } 5939 5940 if (!ext4_inode_block_valid(inode, block, count)) { 5941 ext4_error(sb, "Freeing blocks in system zone - " 5942 "Block = %llu, count = %lu", block, count); 5943 /* err = 0. ext4_std_error should be a no op */ 5944 goto error_return; 5945 } 5946 5947 BUFFER_TRACE(bitmap_bh, "getting write access"); 5948 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 5949 EXT4_JTR_NONE); 5950 if (err) 5951 goto error_return; 5952 5953 /* 5954 * We are about to modify some metadata. 
Call the journal APIs 5955 * to unshare ->b_data if a currently-committing transaction is 5956 * using it 5957 */ 5958 BUFFER_TRACE(gd_bh, "get_write_access"); 5959 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 5960 if (err) 5961 goto error_return; 5962 #ifdef AGGRESSIVE_CHECK 5963 { 5964 int i; 5965 for (i = 0; i < count_clusters; i++) 5966 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 5967 } 5968 #endif 5969 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 5970 5971 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 5972 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 5973 GFP_NOFS|__GFP_NOFAIL); 5974 if (err) 5975 goto error_return; 5976 5977 /* 5978 * We need to make sure we don't reuse the freed block until after the 5979 * transaction is committed. We make an exception if the inode is to be 5980 * written in writeback mode since writeback mode has weak data 5981 * consistency guarantees. 5982 */ 5983 if (ext4_handle_valid(handle) && 5984 ((flags & EXT4_FREE_BLOCKS_METADATA) || 5985 !ext4_should_writeback_data(inode))) { 5986 struct ext4_free_data *new_entry; 5987 /* 5988 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 5989 * to fail. 5990 */ 5991 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 5992 GFP_NOFS|__GFP_NOFAIL); 5993 new_entry->efd_start_cluster = bit; 5994 new_entry->efd_group = block_group; 5995 new_entry->efd_count = count_clusters; 5996 new_entry->efd_tid = handle->h_transaction->t_tid; 5997 5998 ext4_lock_group(sb, block_group); 5999 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 6000 ext4_mb_free_metadata(handle, &e4b, new_entry); 6001 } else { 6002 /* need to update group_info->bb_free and bitmap 6003 * with group lock held. 
generate_buddy look at 6004 * them with group lock_held 6005 */ 6006 if (test_opt(sb, DISCARD)) { 6007 err = ext4_issue_discard(sb, block_group, bit, count, 6008 NULL); 6009 if (err && err != -EOPNOTSUPP) 6010 ext4_msg(sb, KERN_WARNING, "discard request in" 6011 " group:%u block:%d count:%lu failed" 6012 " with %d", block_group, bit, count, 6013 err); 6014 } else 6015 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6016 6017 ext4_lock_group(sb, block_group); 6018 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 6019 mb_free_blocks(inode, &e4b, bit, count_clusters); 6020 } 6021 6022 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6023 ext4_free_group_clusters_set(sb, gdp, ret); 6024 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); 6025 ext4_group_desc_csum_set(sb, block_group, gdp); 6026 ext4_unlock_group(sb, block_group); 6027 6028 if (sbi->s_log_groups_per_flex) { 6029 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6030 atomic64_add(count_clusters, 6031 &sbi_array_rcu_deref(sbi, s_flex_groups, 6032 flex_group)->free_clusters); 6033 } 6034 6035 /* 6036 * on a bigalloc file system, defer the s_freeclusters_counter 6037 * update to the caller (ext4_remove_space and friends) so they 6038 * can determine if a cluster freed here should be rereserved 6039 */ 6040 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 6041 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 6042 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 6043 percpu_counter_add(&sbi->s_freeclusters_counter, 6044 count_clusters); 6045 } 6046 6047 ext4_mb_unload_buddy(&e4b); 6048 6049 /* We dirtied the bitmap block */ 6050 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 6051 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 6052 6053 /* And the group descriptor block */ 6054 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 6055 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6056 if (!err) 6057 err = ret; 6058 6059 if (overflow && !err) { 6060 block += count; 6061 count = overflow; 6062 put_bh(bitmap_bh); 6063 goto do_more; 6064 } 6065 error_return: 6066 brelse(bitmap_bh); 6067 ext4_std_error(sb, err); 6068 return; 6069 } 6070 6071 /** 6072 * ext4_free_blocks() -- Free given blocks and update quota 6073 * @handle: handle for this transaction 6074 * @inode: inode 6075 * @bh: optional buffer of the block to be freed 6076 * @block: starting physical block to be freed 6077 * @count: number of blocks to be freed 6078 * @flags: flags used by ext4_free_blocks 6079 */ 6080 void ext4_free_blocks(handle_t *handle, struct inode *inode, 6081 struct buffer_head *bh, ext4_fsblk_t block, 6082 unsigned long count, int flags) 6083 { 6084 struct super_block *sb = inode->i_sb; 6085 unsigned int overflow; 6086 struct ext4_sb_info *sbi; 6087 6088 sbi = EXT4_SB(sb); 6089 6090 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 6091 ext4_free_blocks_simple(inode, block, count); 6092 return; 6093 } 6094 6095 might_sleep(); 6096 if (bh) { 6097 if (block) 6098 BUG_ON(block != bh->b_blocknr); 6099 else 6100 block = bh->b_blocknr; 6101 } 6102 6103 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6104 !ext4_inode_block_valid(inode, block, count)) { 6105 ext4_error(sb, "Freeing blocks not in datazone - " 6106 "block = %llu, count = %lu", block, count); 6107 return; 6108 } 6109 6110 ext4_debug("freeing block %llu\n", block); 6111 trace_ext4_free_blocks(inode, block, count, flags); 6112 6113 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6114 BUG_ON(count > 1); 6115 6116 ext4_forget(handle, flags & 
EXT4_FREE_BLOCKS_METADATA, 6117 inode, bh, block); 6118 } 6119 6120 /* 6121 * If the extent to be freed does not begin on a cluster 6122 * boundary, we need to deal with partial clusters at the 6123 * beginning and end of the extent. Normally we will free 6124 * blocks at the beginning or the end unless we are explicitly 6125 * requested to avoid doing so. 6126 */ 6127 overflow = EXT4_PBLK_COFF(sbi, block); 6128 if (overflow) { 6129 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 6130 overflow = sbi->s_cluster_ratio - overflow; 6131 block += overflow; 6132 if (count > overflow) 6133 count -= overflow; 6134 else 6135 return; 6136 } else { 6137 block -= overflow; 6138 count += overflow; 6139 } 6140 } 6141 overflow = EXT4_LBLK_COFF(sbi, count); 6142 if (overflow) { 6143 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 6144 if (count > overflow) 6145 count -= overflow; 6146 else 6147 return; 6148 } else 6149 count += sbi->s_cluster_ratio - overflow; 6150 } 6151 6152 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6153 int i; 6154 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 6155 6156 for (i = 0; i < count; i++) { 6157 cond_resched(); 6158 if (is_metadata) 6159 bh = sb_find_get_block(inode->i_sb, block + i); 6160 ext4_forget(handle, is_metadata, inode, bh, block + i); 6161 } 6162 } 6163 6164 ext4_mb_clear_bb(handle, inode, block, count, flags); 6165 return; 6166 } 6167 6168 /** 6169 * ext4_group_add_blocks() -- Add given blocks to an existing group 6170 * @handle: handle to this transaction 6171 * @sb: super block 6172 * @block: start physical block to add to the block group 6173 * @count: number of blocks to free 6174 * 6175 * This marks the blocks as free in the bitmap and buddy. 6176 */ 6177 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 6178 ext4_fsblk_t block, unsigned long count) 6179 { 6180 struct buffer_head *bitmap_bh = NULL; 6181 struct buffer_head *gd_bh; 6182 ext4_group_t block_group; 6183 ext4_grpblk_t bit; 6184 unsigned int i; 6185 struct ext4_group_desc *desc; 6186 struct ext4_sb_info *sbi = EXT4_SB(sb); 6187 struct ext4_buddy e4b; 6188 int err = 0, ret, free_clusters_count; 6189 ext4_grpblk_t clusters_freed; 6190 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6191 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6192 unsigned long cluster_count = last_cluster - first_cluster + 1; 6193 6194 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 6195 6196 if (count == 0) 6197 return 0; 6198 6199 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6200 /* 6201 * Check to see if we are freeing blocks across a group 6202 * boundary. 
6203 */ 6204 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6205 ext4_warning(sb, "too many blocks added to group %u", 6206 block_group); 6207 err = -EINVAL; 6208 goto error_return; 6209 } 6210 6211 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 6212 if (IS_ERR(bitmap_bh)) { 6213 err = PTR_ERR(bitmap_bh); 6214 bitmap_bh = NULL; 6215 goto error_return; 6216 } 6217 6218 desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6219 if (!desc) { 6220 err = -EIO; 6221 goto error_return; 6222 } 6223 6224 if (!ext4_sb_block_valid(sb, NULL, block, count)) { 6225 ext4_error(sb, "Adding blocks in system zones - " 6226 "Block = %llu, count = %lu", 6227 block, count); 6228 err = -EINVAL; 6229 goto error_return; 6230 } 6231 6232 BUFFER_TRACE(bitmap_bh, "getting write access"); 6233 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6234 EXT4_JTR_NONE); 6235 if (err) 6236 goto error_return; 6237 6238 /* 6239 * We are about to modify some metadata. Call the journal APIs 6240 * to unshare ->b_data if a currently-committing transaction is 6241 * using it 6242 */ 6243 BUFFER_TRACE(gd_bh, "get_write_access"); 6244 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6245 if (err) 6246 goto error_return; 6247 6248 for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 6249 BUFFER_TRACE(bitmap_bh, "clear bit"); 6250 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 6251 ext4_error(sb, "bit already cleared for block %llu", 6252 (ext4_fsblk_t)(block + i)); 6253 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 6254 } else { 6255 clusters_freed++; 6256 } 6257 } 6258 6259 err = ext4_mb_load_buddy(sb, block_group, &e4b); 6260 if (err) 6261 goto error_return; 6262 6263 /* 6264 * need to update group_info->bb_free and bitmap 6265 * with group lock held. generate_buddy look at 6266 * them with group lock_held 6267 */ 6268 ext4_lock_group(sb, block_group); 6269 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6270 mb_free_blocks(NULL, &e4b, bit, cluster_count); 6271 free_clusters_count = clusters_freed + 6272 ext4_free_group_clusters(sb, desc); 6273 ext4_free_group_clusters_set(sb, desc, free_clusters_count); 6274 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); 6275 ext4_group_desc_csum_set(sb, block_group, desc); 6276 ext4_unlock_group(sb, block_group); 6277 percpu_counter_add(&sbi->s_freeclusters_counter, 6278 clusters_freed); 6279 6280 if (sbi->s_log_groups_per_flex) { 6281 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6282 atomic64_add(clusters_freed, 6283 &sbi_array_rcu_deref(sbi, s_flex_groups, 6284 flex_group)->free_clusters); 6285 } 6286 6287 ext4_mb_unload_buddy(&e4b); 6288 6289 /* We dirtied the bitmap block */ 6290 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 6291 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 6292 6293 /* And the group descriptor block */ 6294 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 6295 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6296 if (!err) 6297 err = ret; 6298 6299 error_return: 6300 brelse(bitmap_bh); 6301 ext4_std_error(sb, err); 6302 return err; 6303 } 6304 6305 /** 6306 * ext4_trim_extent -- function to TRIM one single free extent in the group 6307 * @sb: super block for the file system 6308 * @start: starting block of the free extent in the alloc. group 6309 * @count: number of blocks to TRIM 6310 * @e4b: ext4 buddy for the group 6311 * 6312 * Trim "count" blocks starting at "start" in the "group". 
To ensure that no 6313 * one will allocate those blocks, mark them as used in the buddy bitmap. This must 6314 * be called under the group lock. 6315 */ 6316 static int ext4_trim_extent(struct super_block *sb, 6317 int start, int count, struct ext4_buddy *e4b) 6318 __releases(bitlock) 6319 __acquires(bitlock) 6320 { 6321 struct ext4_free_extent ex; 6322 ext4_group_t group = e4b->bd_group; 6323 int ret = 0; 6324 6325 trace_ext4_trim_extent(sb, group, start, count); 6326 6327 assert_spin_locked(ext4_group_lock_ptr(sb, group)); 6328 6329 ex.fe_start = start; 6330 ex.fe_group = group; 6331 ex.fe_len = count; 6332 6333 /* 6334 * Mark blocks used, so no one can reuse them while 6335 * being trimmed. 6336 */ 6337 mb_mark_used(e4b, &ex); 6338 ext4_unlock_group(sb, group); 6339 ret = ext4_issue_discard(sb, group, start, count, NULL); 6340 ext4_lock_group(sb, group); 6341 mb_free_blocks(NULL, e4b, start, ex.fe_len); 6342 return ret; 6343 } 6344 6345 static int ext4_try_to_trim_range(struct super_block *sb, 6346 struct ext4_buddy *e4b, ext4_grpblk_t start, 6347 ext4_grpblk_t max, ext4_grpblk_t minblocks) 6348 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6349 __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 6350 { 6351 ext4_grpblk_t next, count, free_count; 6352 void *bitmap; 6353 6354 bitmap = e4b->bd_bitmap; 6355 start = (e4b->bd_info->bb_first_free > start) ? 6356 e4b->bd_info->bb_first_free : start; 6357 count = 0; 6358 free_count = 0; 6359 6360 while (start <= max) { 6361 start = mb_find_next_zero_bit(bitmap, max + 1, start); 6362 if (start > max) 6363 break; 6364 next = mb_find_next_bit(bitmap, max + 1, start); 6365 6366 if ((next - start) >= minblocks) { 6367 int ret = ext4_trim_extent(sb, start, next - start, e4b); 6368 6369 if (ret && ret != -EOPNOTSUPP) 6370 break; 6371 count += next - start; 6372 } 6373 free_count += next - start; 6374 start = next + 1; 6375 6376 if (fatal_signal_pending(current)) { 6377 count = -ERESTARTSYS; 6378 break; 6379 } 6380 6381 if (need_resched()) { 6382 ext4_unlock_group(sb, e4b->bd_group); 6383 cond_resched(); 6384 ext4_lock_group(sb, e4b->bd_group); 6385 } 6386 6387 if ((e4b->bd_info->bb_free - free_count) < minblocks) 6388 break; 6389 } 6390 6391 return count; 6392 } 6393 6394 /** 6395 * ext4_trim_all_free -- function to trim all free space in alloc. group 6396 * @sb: super block for file system 6397 * @group: group to be trimmed 6398 * @start: first group block to examine 6399 * @max: last group block to examine 6400 * @minblocks: minimum extent block count 6401 * 6402 * ext4_trim_all_free walks through the group's block bitmap searching for free 6403 * extents. When a free extent is found, it is marked as used in the group buddy 6404 * bitmap. Then a TRIM command is issued on this extent and the extent is freed in 6405 * the group buddy bitmap.
6406 */ 6407 static ext4_grpblk_t 6408 ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 6409 ext4_grpblk_t start, ext4_grpblk_t max, 6410 ext4_grpblk_t minblocks) 6411 { 6412 struct ext4_buddy e4b; 6413 int ret; 6414 6415 trace_ext4_trim_all_free(sb, group, start, max); 6416 6417 ret = ext4_mb_load_buddy(sb, group, &e4b); 6418 if (ret) { 6419 ext4_warning(sb, "Error %d loading buddy information for %u", 6420 ret, group); 6421 return ret; 6422 } 6423 6424 ext4_lock_group(sb, group); 6425 6426 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || 6427 minblocks < EXT4_SB(sb)->s_last_trim_minblks) { 6428 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); 6429 if (ret >= 0) 6430 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 6431 } else { 6432 ret = 0; 6433 } 6434 6435 ext4_unlock_group(sb, group); 6436 ext4_mb_unload_buddy(&e4b); 6437 6438 ext4_debug("trimmed %d blocks in the group %d\n", 6439 ret, group); 6440 6441 return ret; 6442 } 6443 6444 /** 6445 * ext4_trim_fs() -- trim ioctl handle function 6446 * @sb: superblock for filesystem 6447 * @range: fstrim_range structure 6448 * 6449 * start: First Byte to trim 6450 * len: number of Bytes to trim from start 6451 * minlen: minimum extent length in Bytes 6452 * ext4_trim_fs goes through all allocation groups containing Bytes from 6453 * start to start+len. For each such a group ext4_trim_all_free function 6454 * is invoked to trim all free space. 6455 */ 6456 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 6457 { 6458 struct request_queue *q = bdev_get_queue(sb->s_bdev); 6459 struct ext4_group_info *grp; 6460 ext4_group_t group, first_group, last_group; 6461 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 6462 uint64_t start, end, minlen, trimmed = 0; 6463 ext4_fsblk_t first_data_blk = 6464 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 6465 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 6466 int ret = 0; 6467 6468 start = range->start >> sb->s_blocksize_bits; 6469 end = start + (range->len >> sb->s_blocksize_bits) - 1; 6470 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 6471 range->minlen >> sb->s_blocksize_bits); 6472 6473 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 6474 start >= max_blks || 6475 range->len < sb->s_blocksize) 6476 return -EINVAL; 6477 /* No point to try to trim less than discard granularity */ 6478 if (range->minlen < q->limits.discard_granularity) { 6479 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 6480 q->limits.discard_granularity >> sb->s_blocksize_bits); 6481 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) 6482 goto out; 6483 } 6484 if (end >= max_blks) 6485 end = max_blks - 1; 6486 if (end <= first_data_blk) 6487 goto out; 6488 if (start < first_data_blk) 6489 start = first_data_blk; 6490 6491 /* Determine first and last group to examine based on start and end */ 6492 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 6493 &first_group, &first_cluster); 6494 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 6495 &last_group, &last_cluster); 6496 6497 /* end now represents the last cluster to discard in this group */ 6498 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 6499 6500 for (group = first_group; group <= last_group; group++) { 6501 grp = ext4_get_group_info(sb, group); 6502 /* We only do this if the grp has never been initialized */ 6503 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 6504 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 6505 if (ret) 6506 break; 6507 } 6508 6509 /* 6510 * For all the groups except the last one, last cluster will 6511 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, 
so we only need to 6512 * change it for the last group, note that last_cluster is 6513 * already computed earlier by ext4_get_group_no_and_offset() 6514 */ 6515 if (group == last_group) 6516 end = last_cluster; 6517 6518 if (grp->bb_free >= minlen) { 6519 cnt = ext4_trim_all_free(sb, group, first_cluster, 6520 end, minlen); 6521 if (cnt < 0) { 6522 ret = cnt; 6523 break; 6524 } 6525 trimmed += cnt; 6526 } 6527 6528 /* 6529 * For every group except the first one, we are sure 6530 * that the first cluster to discard will be cluster #0. 6531 */ 6532 first_cluster = 0; 6533 } 6534 6535 if (!ret) 6536 EXT4_SB(sb)->s_last_trim_minblks = minlen; 6537 6538 out: 6539 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 6540 return ret; 6541 } 6542 6543 /* Iterate all the free extents in the group. */ 6544 int 6545 ext4_mballoc_query_range( 6546 struct super_block *sb, 6547 ext4_group_t group, 6548 ext4_grpblk_t start, 6549 ext4_grpblk_t end, 6550 ext4_mballoc_query_range_fn formatter, 6551 void *priv) 6552 { 6553 void *bitmap; 6554 ext4_grpblk_t next; 6555 struct ext4_buddy e4b; 6556 int error; 6557 6558 error = ext4_mb_load_buddy(sb, group, &e4b); 6559 if (error) 6560 return error; 6561 bitmap = e4b.bd_bitmap; 6562 6563 ext4_lock_group(sb, group); 6564 6565 start = (e4b.bd_info->bb_first_free > start) ? 6566 e4b.bd_info->bb_first_free : start; 6567 if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 6568 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 6569 6570 while (start <= end) { 6571 start = mb_find_next_zero_bit(bitmap, end + 1, start); 6572 if (start > end) 6573 break; 6574 next = mb_find_next_bit(bitmap, end + 1, start); 6575 6576 ext4_unlock_group(sb, group); 6577 error = formatter(sb, group, start, next - start, priv); 6578 if (error) 6579 goto out_unload; 6580 ext4_lock_group(sb, group); 6581 6582 start = next + 1; 6583 } 6584 6585 ext4_unlock_group(sb, group); 6586 out_unload: 6587 ext4_mb_unload_buddy(&e4b); 6588 6589 return error; 6590 } 6591
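
/*
 * Worked example for the fstrim_range conversion done in ext4_trim_fs()
 * above.  The figures are purely illustrative and assume a 4 KiB block
 * size, no bigalloc (so one cluster equals one block) and the default
 * 32768 blocks per group:
 *
 *   range->start  = 1 GiB    ->  start  = (1 GiB >> 12)        = block 262144
 *   range->len    = 512 MiB  ->  end    = 262144 + 131072 - 1  = block 393215
 *   range->minlen = 1 MiB    ->  minlen = 256 clusters
 *
 * ext4_get_group_no_and_offset() then yields first_group = 8 (cluster 0)
 * and last_group = 11 (cluster 32767), so groups 8 through 11 are walked
 * and only free extents of at least 256 clusters are discarded.
 */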
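
/*
 * A minimal sketch of an ext4_mballoc_query_range_fn callback, assuming
 * only the signature implied by the formatter(sb, group, start,
 * next - start, priv) call in ext4_mballoc_query_range() above.  The
 * function name and the use of priv as a running total are hypothetical;
 * the point is the contract: the callback is invoked once per free
 * extent with the group unlocked, and a non-zero return value stops the
 * walk.  A caller could use it roughly as:
 *
 *	ext4_grpblk_t total = 0;
 *	ext4_mballoc_query_range(sb, group, 0,
 *				 EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *				 ext4_mb_count_free_fmt, &total);
 */
static int __maybe_unused
ext4_mb_count_free_fmt(struct super_block *sb, ext4_group_t group,
		       ext4_grpblk_t start, ext4_grpblk_t len, void *priv)
{
	ext4_grpblk_t *total = priv;	/* running free-cluster count */

	*total += len;
	return 0;
}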