/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
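
/*
 * Worked example of the calculation above (illustrative values, not from
 * any particular filesystem): with s_first_data_block == 0, 32768 blocks
 * per group and no bigalloc (s_cluster_bits == 0), block 98304 yields
 * group 98304 / 32768 == 3 and offset 98304 % 32768 == 0, i.e. the first
 * bit of group 3's bitmap.  With bigalloc the remainder is further shifted
 * right by s_cluster_bits so the offset indexes clusters, not blocks.
 */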

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
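
/*
 * Worked example (hypothetical geometry): a filesystem with 100000 blocks,
 * 32768 blocks per group and s_first_data_block == 0 has four groups; the
 * last group (group 3) covers only 100000 - 3 * 32768 = 1696 blocks, and it
 * is that remainder, rather than EXT4_BLOCKS_PER_GROUP(sb), which gets
 * converted to clusters above.
 */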

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation,
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
			sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * the blocksize * 8 (which is the size of the bitmap), set the
	 * rest of the block bitmap to 1.
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
}
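
/*
 * Illustrative result (assuming 4KiB blocks, the default 32768 blocks per
 * group, no flex_bg and no bigalloc): the freshly initialized bitmap has
 * bits set for the superblock/GDT copy (if this group carries one), the
 * two allocation bitmaps and the s_itb_per_group inode table blocks, and
 * is clear everywhere else.  Since a 4KiB bitmap holds exactly 32768 bits,
 * ext4_mark_bitmap_end() only sets padding bits in the last group, where
 * fewer than EXT4_BLOCKS_PER_GROUP(sb) blocks remain.
 */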

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
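
/*
 * Example of the descriptor lookup above (hypothetical numbers): with
 * 4KiB blocks and 32-byte descriptors, EXT4_DESC_PER_BLOCK(sb) == 128, so
 * block group 200 lives in descriptor block 200 >> 7 == 1, at index
 * 200 & 127 == 72, i.e. 72 * 32 == 2304 bytes into that buffer.
 */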

static int ext4_valid_block_bitmap(struct super_block *sb,
				   struct ext4_group_desc *desc,
				   unsigned int block_group,
				   struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so the bitmap
		 * validation is skipped for those groups; otherwise we
		 * would also have to read the block group where the
		 * bitmaps are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether the block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block numbers are set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
		   block_group, bitmap_blk);
	return 0;
}

/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and the inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit: if bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read.  We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	trace_ext4_read_block_bitmap_load(sb, block_group);
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * file system was mounted not to panic on error,
	 * so continue with a corrupt bitmap
	 */
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));

	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}
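
/*
 * Rough example of the check above (made-up numbers): with 1000 free
 * clusters, 100 clusters already reserved as dirty and 50 root-reserved
 * clusters, an unprivileged request for 900 clusters fails because
 * 1000 < 50 + 900 + 100, while the same request made with
 * EXT4_MB_USE_ROOT_BLOCKS, by the resuid/resgid owner, or with
 * CAP_SYS_RESOURCE only has to satisfy 1000 >= 900 + 100 and succeeds.
 */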

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
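
/*
 * Callers typically wrap an allocating operation in a retry loop along
 * these lines (illustrative sketch only; ext4_do_alloc_operation() is a
 * made-up stand-in for the caller's own allocating call):
 *
 *	int retries = 0;
 * retry:
 *	err = ext4_do_alloc_operation(handle, inode, ...);
 *	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 *		goto retry;
 */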

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:  handle to this transaction
 * @inode:   file inode
 * @goal:    given target block (filesystem wide)
 * @flags:   allocation flags passed on to ext4_mb_new_blocks()
 * @count:   pointer to total number of clusters needed
 * @errp:    error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}
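
/*
 * With the sparse_super feature (the usual case), only groups 0, 1 and the
 * powers of 3, 5 and 7 carry a superblock/GDT backup, e.g. groups
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, and so on; every other group returns
 * 0 from ext4_bg_has_super().
 */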

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size - 1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
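
/*
 * Example of the colour calculation above (hypothetical values, and
 * assuming the group is not the truncated final one): with 32768 blocks
 * per group and a task whose pid satisfies pid % 16 == 9, the returned
 * goal is bg_start + 9 * (32768 / 16) == bg_start + 18432, so concurrent
 * allocators tend to start their searches in different sixteenths of the
 * group.
 */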