/*
 * linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
                        ext4_group_t block_group)
{
        ext4_group_t actual_group;

        ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
        if (actual_group == block_group)
                return 1;
        return 0;
}

static int ext4_group_used_meta_blocks(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        ext4_fsblk_t tmp;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        /* block bitmap, inode bitmap, and inode table blocks */
        int used_blocks = sbi->s_itb_per_group + 2;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
                                        block_group))
                        used_blocks--;

                if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
                                        block_group))
                        used_blocks--;

                tmp = ext4_inode_table(sb, gdp);
                for (; tmp < ext4_inode_table(sb, gdp) +
                                sbi->s_itb_per_group; tmp++) {
                        if (!ext4_block_in_group(sb, tmp, block_group))
                                used_blocks -= 1;
                }
        }
        return used_blocks;
}

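/*
 * Illustrative numbers only (they depend on the mke2fs parameters used):
 * with 4 KiB blocks, 256-byte inodes and the common 16 KiB-per-inode
 * ratio, each 32768-block group holds 8192 inodes, so s_itb_per_group is
 * 512 and a group that contains its own bitmaps and inode table is
 * charged 512 + 2 = 514 metadata blocks by ext4_group_used_meta_blocks().
 * With FLEX_BG, any of those blocks that live in another group are not
 * charged here.
 */
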
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                 ext4_group_t block_group, struct ext4_group_desc *gdp)
{
        int bit, bit_max;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        unsigned free_blocks, group_blocks;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (bh) {
                J_ASSERT_BH(bh, buffer_locked(bh));

                /* If the checksum is bad, mark all blocks used to prevent
                 * allocation, essentially implementing a per-group
                 * read-only flag. */
                if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                        ext4_error(sb, "Checksum bad for group %u",
                                        block_group);
                        ext4_free_blks_set(sb, gdp, 0);
                        ext4_free_inodes_set(sb, gdp, 0);
                        ext4_itable_unused_set(sb, gdp, 0);
                        memset(bh->b_data, 0xff, sb->s_blocksize);
                        return 0;
                }
                memset(bh->b_data, 0, sb->s_blocksize);
        }

        /* Check for superblock and gdt backups in this group */
        bit_max = ext4_bg_has_super(sb, block_group);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (bit_max) {
                        bit_max += ext4_bg_num_gdb(sb, block_group);
                        bit_max +=
                                le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                bit_max += ext4_bg_num_gdb(sb, block_group);
        }

        if (block_group == ngroups - 1) {
                /*
                 * Even though mke2fs always initializes the first and last
                 * group, if some other tool enables EXT4_BG_BLOCK_UNINIT we
                 * need to make sure we calculate the right free block count.
                 */
                group_blocks = ext4_blocks_count(sbi->s_es) -
                        ext4_group_first_block_no(sb, ngroups - 1);
        } else {
                group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
        }

        free_blocks = group_blocks - bit_max;

        if (bh) {
                ext4_fsblk_t start, tmp;
                int flex_bg = 0;

                for (bit = 0; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);

                start = ext4_group_first_block_no(sb, block_group);

                if (EXT4_HAS_INCOMPAT_FEATURE(sb,
                                              EXT4_FEATURE_INCOMPAT_FLEX_BG))
                        flex_bg = 1;

                /* Set bits for block and inode bitmaps, and inode table */
                tmp = ext4_block_bitmap(sb, gdp);
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(tmp - start, bh->b_data);

                tmp = ext4_inode_bitmap(sb, gdp);
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(tmp - start, bh->b_data);

                tmp = ext4_inode_table(sb, gdp);
                for (; tmp < ext4_inode_table(sb, gdp) +
                                sbi->s_itb_per_group; tmp++) {
                        if (!flex_bg ||
                            ext4_block_in_group(sb, tmp, block_group))
                                ext4_set_bit(tmp - start, bh->b_data);
                }
                /*
                 * Also, if the number of blocks within the group is less
                 * than blocksize * 8 (the size of the bitmap), set the
                 * rest of the block bitmap to 1.
                 */
                mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
        }
        return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
}

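/*
 * Rough example of the bit_max calculation above (hypothetical layout,
 * assuming 4 KiB blocks and 32-byte group descriptors): a 1 TiB filesystem
 * has 8192 groups, so the descriptor table spans 8192 * 32 / 4096 = 64
 * blocks.  A group that carries a backup therefore starts with
 * 1 (superblock) + 64 (gdt) + s_reserved_gdt_blocks bits already set, and
 * free_blocks = 32768 - bit_max before the group's own metadata blocks are
 * subtracted.
 */
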
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count for the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);

                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}

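/*
 * Typical caller pattern (sketch only; see ext4_add_groupblocks() below
 * for a real user):
 *
 *	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
 *	if (!desc)
 *		goto error_return;
 *	... get journal write access to gd_bh before modifying desc ...
 */
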
static int ext4_valid_block_bitmap(struct super_block *sb,
                                        struct ext4_group_desc *desc,
                                        unsigned int block_group,
                                        struct buffer_head *bh)
{
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_fsblk_t bitmap_blk;
        ext4_fsblk_t group_first_block;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                /* With FLEX_BG, the inode/block bitmaps and the inode table
                 * may not be in the group at all, so bitmap validation is
                 * skipped for those groups; verifying them would require
                 * also reading the block group where the bitmaps actually
                 * reside.
                 */
                return 1;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether the block bitmap block number is set */
        bitmap_blk = ext4_block_bitmap(sb, desc);
        offset = bitmap_blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                goto err_out;

        /* check whether the inode bitmap block number is set */
        bitmap_blk = ext4_inode_bitmap(sb, desc);
        offset = bitmap_blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                goto err_out;

        /* check whether the inode table block numbers are set */
        bitmap_blk = ext4_inode_table(sb, desc);
        offset = bitmap_blk - group_first_block;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                                offset + EXT4_SB(sb)->s_itb_per_group,
                                offset);
        if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
                /* good bitmap for inode tables */
                return 1;

err_out:
        ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
                        block_group, bitmap_blk);
        return 0;
}

/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and the inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }

        if (bitmap_uptodate(bh))
                return bh;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * If the group is not uninit and bh is uptodate,
                 * the bitmap is also uptodate.
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                return bh;
        }
        /*
         * Submit the buffer_head for read.  We can safely mark the
         * bitmap as uptodate now.  We do it here so the bitmap uptodate
         * bit gets set with the buffer lock held.
         */
        set_bitmap_uptodate(bh);
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }
        ext4_valid_block_bitmap(sb, desc, block_group, bh);
        /*
         * The file system was mounted not to panic on errors,
         * so continue with the corrupt bitmap.
         */
        return bh;
}

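/*
 * Usage sketch: callers such as ext4_add_groupblocks() and
 * ext4_count_free_blocks() below do
 *
 *	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
 *	if (!bitmap_bh)
 *		goto error_return;
 *	...
 *	brelse(bitmap_bh);
 *
 * i.e. the returned buffer_head holds a reference that must be released.
 */
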
/**
 * ext4_add_groupblocks() -- Add given blocks to an existing group
 * @handle:		handle to this transaction
 * @sb:			super block
 * @block:		start physical block to add to the block group
 * @count:		number of blocks to free
 *
 * This marks the blocks as free in the bitmap.  We ask mballoc to reload
 * the buddy after this by setting the group's EXT4_GROUP_INFO_NEED_INIT_BIT
 * flag.
 */
void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
                         ext4_fsblk_t block, unsigned long count)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        ext4_group_t block_group;
        ext4_grpblk_t bit;
        unsigned int i;
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int err = 0, ret, blk_free_count;
        ext4_grpblk_t blocks_freed;
        struct ext4_group_info *grp;

        ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

        ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
        grp = ext4_get_group_info(sb, block_group);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
                goto error_return;
        }
        bitmap_bh = ext4_read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        desc = ext4_get_group_desc(sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

        if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
            in_range(ext4_inode_bitmap(sb, desc), block, count) ||
            in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
            in_range(block + count - 1, ext4_inode_table(sb, desc),
                     sbi->s_itb_per_group)) {
                ext4_error(sb, "Adding blocks in system zones - "
                           "Block = %llu, count = %lu",
                           block, count);
                goto error_return;
        }

        /*
         * We are about to add blocks to the bitmap,
         * so we need undo access.
         */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext4_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it.
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;
        /*
         * Make sure we don't allow a parallel init on other groups in the
         * same buddy cache.
         */
        down_write(&grp->alloc_sem);
        for (i = 0, blocks_freed = 0; i < count; i++) {
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        ext4_error(sb, "bit already cleared for block %llu",
                                   (ext4_fsblk_t)(block + i));
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        blocks_freed++;
                }
        }
        ext4_lock_group(sb, block_group);
        blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
        ext4_free_blks_set(sb, desc, blk_free_count);
        desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
        ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);

        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
                atomic_add(blocks_freed,
                           &sbi->s_flex_groups[flex_group].free_blocks);
        }
        /*
         * Request a reload of the buddy with the
         * new bitmap information.
         */
        set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
        grp->bb_free += blocks_freed;
        up_write(&grp->alloc_sem);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
        if (!err)
                err = ret;

error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, err);
        return;
}

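/*
 * Note on the group-boundary check in ext4_add_groupblocks() above: with
 * the common 32768 blocks per group, a request starting at bit 32760 with
 * count = 16 would spill into the next group, so it is rejected before
 * any bitmap is touched.
 */
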
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if the filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
        s64 free_blocks, dirty_blocks, root_blocks;
        struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
        struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

        free_blocks = percpu_counter_read_positive(fbc);
        dirty_blocks = percpu_counter_read_positive(dbc);
        root_blocks = ext4_r_blocks_count(sbi->s_es);

        if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
                                                EXT4_FREEBLOCKS_WATERMARK) {
                free_blocks = percpu_counter_sum_positive(fbc);
                dirty_blocks = percpu_counter_sum_positive(dbc);
                if (dirty_blocks < 0) {
                        printk(KERN_CRIT "Dirty block accounting "
                                        "went wrong %lld\n",
                                        (long long)dirty_blocks);
                }
        }
        /* Check whether we have space after
         * accounting for current dirty blocks & root reserved blocks.
         */
        if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
                return 1;

        /* Hm, nope.  Are (enough) root reserved blocks available? */
        if (sbi->s_resuid == current_fsuid() ||
            ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE)) {
                if (free_blocks >= (nblocks + dirty_blocks))
                        return 1;
        }

        return 0;
}

int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
                                                s64 nblocks)
{
        if (ext4_has_free_blocks(sbi, nblocks)) {
                percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
                return 0;
        } else
                return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
            (*retries)++ > 3 ||
            !EXT4_SB(sb)->s_journal)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

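/*
 * Typical retry loop in an allocation path (sketch only, not taken from
 * this file):
 *
 *	int retries = 0;
 * retry:
 *	handle = ext4_journal_start(inode, needed);
 *	...
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */
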
/*
 * ext4_new_meta_blocks() -- allocate block(s) for metadata (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		pointer to total number of blocks needed
 * @errp:		error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                ext4_fsblk_t goal, unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks.  We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
                spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
                EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                dquot_alloc_block_nofail(inode, ar.len);
        }
        return ret;
}

/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_blks_count(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext4_count_free(bitmap_bh, sb->s_blocksize);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_blks_count(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
                ", computed = %llu, %llu\n", ext4_free_blocks_count(es),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_blks_count(sb, gdp);
        }

        return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
        if (group <= 1)
                return 1;
        if (!(group & 1))
                return 0;
        return (test_root(group, 7) || test_root(group, 5) ||
                test_root(group, 3));
}

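/*
 * With SPARSE_SUPER, ext4_group_sparse() returns true only for groups
 * 0, 1 and powers of 3, 5 and 7, i.e. 0, 1, 3, 5, 7, 9, 25, 27, 49,
 * 81, 125, ...; all other groups carry no superblock/descriptor backup.
 */
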
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
                        !ext4_group_sparse(group))
                return 0;
        return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                        ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                        ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
                        metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}