/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
        int i;

        if (start_bit >= end_bit)
                return;

        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
                ext4_set_bit(i, bitmap);
        if (i < end_bit)
                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
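/*
 * Example (illustrative figures): with start_bit == 12 and end_bit == 32,
 * the loop above sets bits 12..15 one at a time with the endian-safe
 * ext4_set_bit(), stopping at the next byte boundary since
 * ((12 + 7) & ~7UL) == 16.  The remaining whole bytes, bits 16..31, are
 * then filled in by a single memset(bitmap + 2, 0xff, 2).
 */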
/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
                                       struct buffer_head *bh,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If checksum is bad mark all blocks and inodes in use to prevent
         * allocation, essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                ext4_free_blks_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
                return 0;
        }

        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
        ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                             bh->b_data);

        return EXT4_INODES_PER_GROUP(sb);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
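/*
 * Note on the two "uptodate" flags used below: buffer_uptodate() is the
 * generic buffer-cache flag saying the buffer matches the on-disk block,
 * while bitmap_uptodate() is ext4-private state saying the bitmap
 * contents are valid for allocation decisions.  The latter can be true
 * without any disk read at all, e.g. when the bitmap has just been
 * constructed in memory for an EXT4_BG_INODE_UNINIT group.
 */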
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;

        bitmap_blk = ext4_inode_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot read inode bitmap - "
                           "block_group = %u, inode_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }
        if (bitmap_uptodate(bh))
                return bh;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        }

        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
        ext4_unlock_group(sb, block_group);

        if (buffer_uptodate(bh)) {
                /*
                 * If not uninit, and bh is uptodate,
                 * the bitmap is also uptodate.
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                return bh;
        }
        /*
         * Submit the buffer_head for read.  We can
         * safely mark the bitmap as uptodate now.
         * We do it here so the bitmap uptodate bit
         * gets set with the buffer lock held.
         */
        trace_ext4_load_inode_bitmap(sb, block_group);
        set_bitmap_uptodate(bh);
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
                ext4_error(sb, "Cannot read inode bitmap - "
                           "block_group = %u, inode_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }
        return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        int is_directory;
        unsigned long ino;
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *bh2;
        ext4_group_t block_group;
        unsigned long bit;
        struct ext4_group_desc *gdp;
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi;
        int fatal = 0, err, count, cleared;

        if (atomic_read(&inode->i_count) > 1) {
                printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
                       atomic_read(&inode->i_count));
                return;
        }
        if (inode->i_nlink) {
                printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
                       inode->i_nlink);
                return;
        }
        if (!sb) {
                printk(KERN_ERR "ext4_free_inode: inode on "
                       "nonexistent device\n");
                return;
        }
        sbi = EXT4_SB(sb);

        ino = inode->i_ino;
        ext4_debug("freeing inode %lu\n", ino);
        trace_ext4_free_inode(inode);

        /*
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
        dquot_initialize(inode);
        ext4_xattr_delete_inode(handle, inode);
        dquot_free_inode(inode);
        dquot_drop(inode);

        is_directory = S_ISDIR(inode->i_mode);

        /* Do this BEFORE marking the inode not in use or returning an error */
        ext4_clear_inode(inode);

        es = EXT4_SB(sb)->s_es;
        if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
                ext4_error(sb, "reserved or nonexistent inode %lu", ino);
                goto error_return;
        }
        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;

        BUFFER_TRACE(bitmap_bh, "get_write_access");
        fatal = ext4_journal_get_write_access(handle, bitmap_bh);
        if (fatal)
                goto error_return;

        fatal = -ESRCH;
        gdp = ext4_get_group_desc(sb, block_group, &bh2);
        if (gdp) {
                BUFFER_TRACE(bh2, "get_write_access");
                fatal = ext4_journal_get_write_access(handle, bh2);
        }
        ext4_lock_group(sb, block_group);
        cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
        if (fatal || !cleared) {
                ext4_unlock_group(sb, block_group);
                goto out;
        }

        count = ext4_free_inodes_count(sb, gdp) + 1;
        ext4_free_inodes_set(sb, gdp, count);
        if (is_directory) {
                count = ext4_used_dirs_count(sb, gdp) - 1;
                ext4_used_dirs_set(sb, gdp, count);
                percpu_counter_dec(&sbi->s_dirs_counter);
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
        ext4_unlock_group(sb, block_group);

        percpu_counter_inc(&sbi->s_freeinodes_counter);
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t f = ext4_flex_group(sbi, block_group);

                atomic_inc(&sbi->s_flex_groups[f].free_inodes);
                if (is_directory)
                        atomic_dec(&sbi->s_flex_groups[f].used_dirs);
        }
        BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
        fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
        if (cleared) {
                BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
                if (!fatal)
                        fatal = err;
                ext4_mark_super_dirty(sb);
        } else
                ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, fatal);
}
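/*
 * Inode numbers are 1-based, so inode ino lives in block group
 * (ino - 1) / EXT4_INODES_PER_GROUP(sb), at bit
 * (ino - 1) % EXT4_INODES_PER_GROUP(sb) of that group's inode bitmap.
 * For example (illustrative figures), with 8192 inodes per group,
 * inode 8195 is bit 2 of group 1.  ext4_free_inode() above, and
 * ext4_new_inode() and ext4_orphan_get() below, all rely on this mapping.
 */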
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
                          ext4_group_t *best_group)
{
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        unsigned int freei, avefreei;
        struct ext4_group_desc *desc, *best_desc = NULL;
        ext4_group_t group;
        int ret = -1;

        freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
        avefreei = freei / ngroups;

        for (group = 0; group < ngroups; group++) {
                desc = ext4_get_group_desc(sb, group, NULL);
                if (!desc || !ext4_free_inodes_count(sb, desc))
                        continue;
                if (ext4_free_inodes_count(sb, desc) < avefreei)
                        continue;
                if (!best_desc ||
                    (ext4_free_blks_count(sb, desc) >
                     ext4_free_blks_count(sb, best_desc))) {
                        *best_group = group;
                        best_desc = desc;
                        ret = 0;
                }
        }
        return ret;
}

#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
                           ext4_group_t *best_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *desc;
        struct flex_groups *flex_group = sbi->s_flex_groups;
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        int flex_size = ext4_flex_bg_size(sbi);
        ext4_group_t best_flex = parent_fbg_group;
        int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
        int flexbg_free_blocks;
        int flex_freeb_ratio;
        ext4_group_t n_fbg_groups;
        ext4_group_t i;

        n_fbg_groups = (ngroups + flex_size - 1) >>
                sbi->s_log_groups_per_flex;

find_close_to_parent:
        flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
        flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
        if (atomic_read(&flex_group[best_flex].free_inodes) &&
            flex_freeb_ratio > free_block_ratio)
                goto found_flexbg;

        if (best_flex && best_flex == parent_fbg_group) {
                best_flex--;
                goto find_close_to_parent;
        }

        for (i = 0; i < n_fbg_groups; i++) {
                if (i == parent_fbg_group || i == parent_fbg_group - 1)
                        continue;

                flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
                flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

                if (flex_freeb_ratio > free_block_ratio &&
                    (atomic_read(&flex_group[i].free_inodes))) {
                        best_flex = i;
                        goto found_flexbg;
                }

                if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
                    ((atomic_read(&flex_group[i].free_blocks) >
                      atomic_read(&flex_group[best_flex].free_blocks)) &&
                     atomic_read(&flex_group[i].free_inodes)))
                        best_flex = i;
        }

        if (!atomic_read(&flex_group[best_flex].free_inodes) ||
            !atomic_read(&flex_group[best_flex].free_blocks))
                return -1;

found_flexbg:
        for (i = best_flex * flex_size; i < ngroups &&
                     i < (best_flex + 1) * flex_size; i++) {
                desc = ext4_get_group_desc(sb, i, NULL);
                if (ext4_free_inodes_count(sb, desc)) {
                        *best_group = i;
                        goto out;
                }
        }

        return -1;
out:
        return 0;
}

struct orlov_stats {
        __u32 free_inodes;
        __u32 free_blocks;
        __u32 used_dirs;
};
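/*
 * A flex_bg is a power-of-two run of block groups whose metadata is
 * clustered together.  For example (illustrative figures), with
 * s_log_groups_per_flex == 4, ext4_flex_bg_size() is 16: block groups
 * 0..15 form flex_bg 0, groups 16..31 form flex_bg 1, and in general
 * the flex_bg number is group >> s_log_groups_per_flex.
 */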
/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
                            int flex_size, struct orlov_stats *stats)
{
        struct ext4_group_desc *desc;
        struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

        if (flex_size > 1) {
                stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
                stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
                stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
                return;
        }

        desc = ext4_get_group_desc(sb, g, NULL);
        if (desc) {
                stats->free_inodes = ext4_free_inodes_count(sb, desc);
                stats->free_blocks = ext4_free_blks_count(sb, desc);
                stats->used_dirs = ext4_used_dirs_count(sb, desc);
        } else {
                stats->free_inodes = 0;
                stats->free_blocks = 0;
                stats->used_dirs = 0;
        }
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * For the rest, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group, int mode,
                            const struct qstr *qstr)
{
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_group_t real_ngroups = ext4_get_groups_count(sb);
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        ext4_fsblk_t freeb, avefreeb;
        unsigned int ndirs;
        int max_dirs, min_inodes;
        ext4_grpblk_t min_blocks;
        ext4_group_t i, grp, g, ngroups;
        struct ext4_group_desc *desc;
        struct orlov_stats stats;
        int flex_size = ext4_flex_bg_size(sbi);
        struct dx_hash_info hinfo;

        ngroups = real_ngroups;
        if (flex_size > 1) {
                ngroups = (real_ngroups + flex_size - 1) >>
                        sbi->s_log_groups_per_flex;
                parent_group >>= sbi->s_log_groups_per_flex;
        }

        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
        avefreei = freei / ngroups;
        freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        avefreeb = freeb;
        do_div(avefreeb, ngroups);
        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

        if (S_ISDIR(mode) &&
            ((parent == sb->s_root->d_inode) ||
             (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
                int best_ndir = inodes_per_group;
                int ret = -1;

                if (qstr) {
                        hinfo.hash_version = DX_HASH_HALF_MD4;
                        hinfo.seed = sbi->s_hash_seed;
                        ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
                        grp = hinfo.hash;
                } else
                        get_random_bytes(&grp, sizeof(grp));
                parent_group = (unsigned)grp % ngroups;
                for (i = 0; i < ngroups; i++) {
                        g = (parent_group + i) % ngroups;
                        get_orlov_stats(sb, g, flex_size, &stats);
                        if (!stats.free_inodes)
                                continue;
                        if (stats.used_dirs >= best_ndir)
                                continue;
                        if (stats.free_inodes < avefreei)
                                continue;
                        if (stats.free_blocks < avefreeb)
                                continue;
                        grp = g;
                        ret = 0;
                        best_ndir = stats.used_dirs;
                }
                if (ret)
                        goto fallback;
        found_flex_bg:
                if (flex_size == 1) {
                        *group = grp;
                        return 0;
                }

                /*
                 * We pack inodes at the beginning of the flexgroup's
                 * inode tables.  Block allocation decisions will do
                 * something similar, although regular files will
                 * start at 2nd block group of the flexgroup.  See
                 * ext4_ext_find_goal() and ext4_find_near().
                 */
                grp *= flex_size;
                for (i = 0; i < flex_size; i++) {
                        if (grp+i >= real_ngroups)
                                break;
                        desc = ext4_get_group_desc(sb, grp+i, NULL);
                        if (desc && ext4_free_inodes_count(sb, desc)) {
                                *group = grp+i;
                                return 0;
                        }
                }
                goto fallback;
        }

        max_dirs = ndirs / ngroups + inodes_per_group / 16;
        min_inodes = avefreei - inodes_per_group*flex_size / 4;
        if (min_inodes < 1)
                min_inodes = 1;
        min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;

        /*
         * Start looking in the flex group where we last allocated an
         * inode for this parent directory
         */
        if (EXT4_I(parent)->i_last_alloc_group != ~0) {
                parent_group = EXT4_I(parent)->i_last_alloc_group;
                if (flex_size > 1)
                        parent_group >>= sbi->s_log_groups_per_flex;
        }

        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                get_orlov_stats(sb, grp, flex_size, &stats);
                if (stats.used_dirs >= max_dirs)
                        continue;
                if (stats.free_inodes < min_inodes)
                        continue;
                if (stats.free_blocks < min_blocks)
                        continue;
                goto found_flex_bg;
        }

fallback:
        ngroups = real_ngroups;
        avefreei = freei / ngroups;
fallback_retry:
        parent_group = EXT4_I(parent)->i_block_group;
        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, grp, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_inodes_count(sb, desc) >= avefreei) {
                        *group = grp;
                        return 0;
                }
        }

        if (avefreei) {
                /*
                 * The free-inodes counter is approximate, and for really small
                 * filesystems the above test can fail to find any blockgroups
                 */
                avefreei = 0;
                goto fallback_retry;
        }

        return -1;
}
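/*
 * Example of the thresholds above (illustrative figures): for a
 * non-topdir directory on a filesystem with 128 block groups
 * (flex_size == 1), 8192 inodes per group, and 12800 directories in
 * total, max_dirs = 12800/128 + 8192/16 = 612 and min_inodes =
 * avefreei - 8192/4.  A group is acceptable only while it holds fewer
 * than 612 directories and keeps that reserve of free inodes and
 * free blocks.
 */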
static int find_group_other(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group, int mode)
{
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

        /*
         * Try to place the inode in the same flex group as its
         * parent.  If we can't find space, use the Orlov algorithm to
         * find another flex group, and store that information in the
         * parent directory's inode information so that future
         * allocations use that flex group.
         */
        if (flex_size > 1) {
                int retry = 0;

        try_again:
                parent_group &= ~(flex_size-1);
                last = parent_group + flex_size;
                if (last > ngroups)
                        last = ngroups;
                for (i = parent_group; i < last; i++) {
                        desc = ext4_get_group_desc(sb, i, NULL);
                        if (desc && ext4_free_inodes_count(sb, desc)) {
                                *group = i;
                                return 0;
                        }
                }
                if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
                        retry = 1;
                        parent_group = EXT4_I(parent)->i_last_alloc_group;
                        goto try_again;
                }
                /*
                 * If this didn't work, use the Orlov search algorithm
                 * to find a new flex group; we pass in the mode to
                 * avoid the topdir algorithms.
                 */
                *group = parent_group + flex_size;
                if (*group > ngroups)
                        *group = 0;
                return find_group_orlov(sb, parent, group, mode, NULL);
        }

        /*
         * Try to place the inode in its parent directory
         */
        *group = parent_group;
        desc = ext4_get_group_desc(sb, *group, NULL);
        if (desc && ext4_free_inodes_count(sb, desc) &&
            ext4_free_blks_count(sb, desc))
                return 0;

        /*
         * We're going to place this inode in a different blockgroup from its
         * parent.  We want to cause files in a common directory to all land in
         * the same blockgroup.  But we want files which are in a different
         * directory which shares a blockgroup with our parent to land in a
         * different blockgroup.
         *
         * So add our directory's i_ino into the starting point for the hash.
         */
        *group = (*group + parent->i_ino) % ngroups;

        /*
         * Use a quadratic hash to find a group with a free inode and some free
         * blocks.
         */
        for (i = 1; i < ngroups; i <<= 1) {
                *group += i;
                if (*group >= ngroups)
                        *group -= ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_blks_count(sb, desc))
                        return 0;
        }

        /*
         * That failed: try linear search for a free inode, even if that group
         * has no free blocks.
         */
        *group = parent_group;
        for (i = 0; i < ngroups; i++) {
                if (++*group >= ngroups)
                        *group = 0;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc))
                        return 0;
        }

        return -1;
}
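/*
 * Note on the "quadratic hash" above: i doubles on every pass and is
 * added to *group, so the probe visits cumulative offsets 1, 3, 7,
 * 15, ... from the hashed starting group.  For example (illustrative
 * figures), starting from group 10 it tries groups 11, 13, 17, 25, ...
 * modulo ngroups before the final linear scan takes over.
 */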
/*
 * Claim the inode from the inode bitmap.  If the group
 * is uninit we need to take the group's ext4_group_lock
 * and clear the uninit flag.  The inode bitmap update
 * and group desc uninit flag clear should be done
 * while holding ext4_group_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode.
 */
static int ext4_claim_inode(struct super_block *sb,
                            struct buffer_head *inode_bitmap_bh,
                            unsigned long ino, ext4_group_t group, int mode)
{
        int free = 0, retval = 0, count;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

        /*
         * We have to be sure that new inode allocation does not race with
         * inode table initialization, because otherwise we may end up
         * allocating and writing a new inode right before sb_issue_zeroout
         * takes place and overwriting our new inode with zeroes.  So we
         * take alloc_sem to prevent it.
         */
        down_read(&grp->alloc_sem);
        ext4_lock_group(sb, group);
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
                goto err_ret;
        }
        ino++;
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
            ino > EXT4_INODES_PER_GROUP(sb)) {
                ext4_unlock_group(sb, group);
                up_read(&grp->alloc_sem);
                ext4_error(sb, "reserved inode or inode > inodes count - "
                           "block_group = %u, inode=%lu", group,
                           ino + group * EXT4_INODES_PER_GROUP(sb));
                return 1;
        }
        /* If we didn't allocate from within the initialized part of the inode
         * table then we need to initialize up to this inode. */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
                        /* When marking the block group with
                         * ~EXT4_BG_INODE_UNINIT we don't want to depend
                         * on the value of bg_itable_unused even though
                         * mke2fs could have initialized the same for us.
                         * Instead we calculate the value below.
                         */
                        free = 0;
                } else {
                        free = EXT4_INODES_PER_GROUP(sb) -
                                ext4_itable_unused_count(sb, gdp);
                }

                /*
                 * Check the relative inode number against the last used
                 * relative inode number in this group.  If it is greater
                 * we need to update the bg_itable_unused count.
                 */
                if (ino > free)
                        ext4_itable_unused_set(sb, gdp,
                                        (EXT4_INODES_PER_GROUP(sb) - ino));
        }
        count = ext4_free_inodes_count(sb, gdp) - 1;
        ext4_free_inodes_set(sb, gdp, count);
        if (S_ISDIR(mode)) {
                count = ext4_used_dirs_count(sb, gdp) + 1;
                ext4_used_dirs_set(sb, gdp, count);
                if (sbi->s_log_groups_per_flex) {
                        ext4_group_t f = ext4_flex_group(sbi, group);

                        atomic_inc(&sbi->s_flex_groups[f].used_dirs);
                }
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
        ext4_unlock_group(sb, group);
        up_read(&grp->alloc_sem);
        return retval;
}
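/*
 * Example of the bg_itable_unused update above (illustrative figures):
 * in a group with 8192 inodes of which the first 64 lie in the
 * initialized part of the inode table (bg_itable_unused == 8128, so
 * free == 64), claiming relative inode 100 (1-based) makes ino > free,
 * and bg_itable_unused is rewritten to 8192 - 100 = 8092, recording
 * that inodes 101..8192 are still uninitialized on disk.
 */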
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
                             const struct qstr *qstr, __u32 goal)
{
        struct super_block *sb;
        struct buffer_head *inode_bitmap_bh = NULL;
        struct buffer_head *group_desc_bh;
        ext4_group_t ngroups, group = 0;
        unsigned long ino = 0;
        struct inode *inode;
        struct ext4_group_desc *gdp = NULL;
        struct ext4_inode_info *ei;
        struct ext4_sb_info *sbi;
        int ret2, err = 0;
        struct inode *ret;
        ext4_group_t i;
        int free = 0;
        static int once = 1;
        ext4_group_t flex_group;

        /* Cannot create files in a deleted directory */
        if (!dir || !dir->i_nlink)
                return ERR_PTR(-EPERM);

        sb = dir->i_sb;
        ngroups = ext4_get_groups_count(sb);
        trace_ext4_request_inode(dir, mode);
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        ei = EXT4_I(inode);
        sbi = EXT4_SB(sb);

        if (!goal)
                goal = sbi->s_inode_goal;

        if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
                group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
                ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
                ret2 = 0;
                goto got_group;
        }

        if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
                ret2 = find_group_flex(sb, dir, &group);
                if (ret2 == -1) {
                        ret2 = find_group_other(sb, dir, &group, mode);
                        if (ret2 == 0 && once) {
                                once = 0;
                                printk(KERN_NOTICE "ext4: find_group_flex "
                                       "failed, fallback succeeded dir %lu\n",
                                       dir->i_ino);
                        }
                }
                goto got_group;
        }

        if (S_ISDIR(mode)) {
                if (test_opt(sb, OLDALLOC))
                        ret2 = find_group_dir(sb, dir, &group);
                else
                        ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
        } else
                ret2 = find_group_other(sb, dir, &group, mode);

got_group:
        EXT4_I(dir)->i_last_alloc_group = group;
        err = -ENOSPC;
        if (ret2 == -1)
                goto out;

        for (i = 0; i < ngroups; i++, ino = 0) {
                err = -EIO;

                gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
                if (!gdp)
                        goto fail;

                brelse(inode_bitmap_bh);
                inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
                if (!inode_bitmap_bh)
                        goto fail;

repeat_in_this_group:
                ino = ext4_find_next_zero_bit((unsigned long *)
                                              inode_bitmap_bh->b_data,
                                              EXT4_INODES_PER_GROUP(sb), ino);

                if (ino < EXT4_INODES_PER_GROUP(sb)) {

                        BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle,
                                                            inode_bitmap_bh);
                        if (err)
                                goto fail;

                        BUFFER_TRACE(group_desc_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle,
                                                            group_desc_bh);
                        if (err)
                                goto fail;
                        if (!ext4_claim_inode(sb, inode_bitmap_bh,
                                              ino, group, mode)) {
                                /* we won it */
                                BUFFER_TRACE(inode_bitmap_bh,
                                        "call ext4_handle_dirty_metadata");
                                err = ext4_handle_dirty_metadata(handle,
                                                                 NULL,
                                                        inode_bitmap_bh);
                                if (err)
                                        goto fail;
                                /* zero bit is inode number 1 */
                                ino++;
                                goto got;
                        }
                        /* we lost it */
                        ext4_handle_release_buffer(handle, inode_bitmap_bh);
                        ext4_handle_release_buffer(handle, group_desc_bh);

                        if (++ino < EXT4_INODES_PER_GROUP(sb))
                                goto repeat_in_this_group;
                }

                /*
                 * This case is possible in a concurrent environment.  It is
                 * very rare.  We cannot repeat the find_group_xxx() call
                 * because that will simply return the same blockgroup,
                 * because the group descriptor metadata has not yet been
                 * updated.  So we just go onto the next blockgroup.
                 */
                if (++group == ngroups)
                        group = 0;
        }
        err = -ENOSPC;
        goto out;
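/*
 * By the time we reach "got" below, ext4_claim_inode() has already set
 * the bitmap bit and updated the group descriptor's free-inode (and,
 * for directories, used-directory) counts under the group lock; what
 * remains is initializing the block bitmap if necessary, dirtying the
 * metadata, and filling in the in-core inode.
 */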
got:
        /* We may have to initialize the block bitmap if it isn't already */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                struct buffer_head *block_bitmap_bh;

                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
                if (err) {
                        brelse(block_bitmap_bh);
                        goto fail;
                }

                free = 0;
                ext4_lock_group(sb, group);
                /* recheck and clear flag under lock if we still need to */
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        free = ext4_free_blocks_after_init(sb, group, gdp);
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_blks_set(sb, gdp, free);
                        gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                                gdp);
                }
                ext4_unlock_group(sb, group);

                /* Don't need to dirty bitmap block if we didn't change it */
                if (free) {
                        BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
                        err = ext4_handle_dirty_metadata(handle,
                                                         NULL, block_bitmap_bh);
                }

                brelse(block_bitmap_bh);
                if (err)
                        goto fail;
        }
        BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
        if (err)
                goto fail;

        percpu_counter_dec(&sbi->s_freeinodes_counter);
        if (S_ISDIR(mode))
                percpu_counter_inc(&sbi->s_dirs_counter);
        ext4_mark_super_dirty(sb);

        if (sbi->s_log_groups_per_flex) {
                flex_group = ext4_flex_group(sbi, group);
                atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
        }

        if (test_opt(sb, GRPID)) {
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = dir->i_gid;
        } else
                inode_init_owner(inode, dir, mode);

        inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
        /* This is the optimal IO size (for stat), not the fs block size */
        inode->i_blocks = 0;
        inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
                                                ext4_current_time(inode);

        memset(ei->i_data, 0, sizeof(ei->i_data));
        ei->i_dir_start_lookup = 0;
        ei->i_disksize = 0;
        /*
         * Don't inherit extent flag from directory, amongst others.  We set
         * extent flag on newly created directory and file only if -o extent
         * mount option is specified.
         */
        ei->i_flags =
                ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
        ei->i_file_acl = 0;
        ei->i_dtime = 0;
        ei->i_block_group = group;
        ei->i_last_alloc_group = ~0;

        ext4_set_inode_flags(inode);
        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
                err = -EINVAL;
                goto fail_drop;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);

        ext4_clear_state_flags(ei);     /* Only relevant on 32-bit archs */
        ext4_set_inode_state(inode, EXT4_STATE_NEW);

        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

        ret = inode;
        dquot_initialize(inode);
        err = dquot_alloc_inode(inode);
        if (err)
                goto fail_drop;

        err = ext4_init_acl(handle, inode, dir);
        if (err)
                goto fail_free_drop;

        err = ext4_init_security(handle, inode, dir, qstr);
        if (err)
                goto fail_free_drop;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
                /* set extent flag only for directory, file and normal symlink */
                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
                        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
                        ext4_ext_tree_init(handle, inode);
                }
        }

        if (ext4_handle_valid(handle)) {
                ei->i_sync_tid = handle->h_transaction->t_tid;
                ei->i_datasync_tid = handle->h_transaction->t_tid;
        }

        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_std_error(sb, err);
                goto fail_free_drop;
        }

        ext4_debug("allocating inode %lu\n", inode->i_ino);
        trace_ext4_allocate_inode(inode, dir, mode);
        goto really_out;
fail:
        ext4_std_error(sb, err);
out:
        iput(inode);
        ret = ERR_PTR(err);
really_out:
        brelse(inode_bitmap_bh);
        return ret;

fail_free_drop:
        dquot_free_inode(inode);

fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
        iput(inode);
        brelse(inode_bitmap_bh);
        return ERR_PTR(err);
}
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
        unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
        ext4_group_t block_group;
        int bit;
        struct buffer_head *bitmap_bh;
        struct inode *inode = NULL;
        long err = -EIO;

        /* Error cases - e2fsck has already cleaned up for us */
        if (ino > max_ino) {
                ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
                goto error;
        }

        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (!bitmap_bh) {
                ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
                goto error;
        }

        /* Having the inode bit set should be a 100% indicator that this
         * is a valid orphan (no e2fsck run on fs).  Orphans also include
         * inodes that were being truncated, so we can't check i_nlink==0.
         */
        if (!ext4_test_bit(bit, bitmap_bh->b_data))
                goto bad_orphan;

        inode = ext4_iget(sb, ino);
        if (IS_ERR(inode))
                goto iget_failed;

        /*
         * If the orphan has i_nlink > 0 then it should be able to be
         * truncated, otherwise it won't be removed from the orphan list
         * during processing and an infinite loop will result.
         */
        if (inode->i_nlink && !ext4_can_truncate(inode))
                goto bad_orphan;

        if (NEXT_ORPHAN(inode) > max_ino)
                goto bad_orphan;
        brelse(bitmap_bh);
        return inode;

iget_failed:
        err = PTR_ERR(inode);
        inode = NULL;
bad_orphan:
        ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
        printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
               ext4_test_bit(bit, bitmap_bh->b_data));
        printk(KERN_NOTICE "inode=%p\n", inode);
        if (inode) {
                printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
                       is_bad_inode(inode));
                printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
                       NEXT_ORPHAN(inode));
                printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
                printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
                /* Avoid freeing blocks if we got a bad deleted inode */
                if (inode->i_nlink == 0)
                        inode->i_blocks = 0;
                iput(inode);
        }
        brelse(bitmap_bh);
error:
        return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
        unsigned long desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_inodes_count(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_inode_bitmap(sb, i);
                if (!bitmap_bh)
                        continue;

                x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
                       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_inodes: "
               "stored = %u, computed = %lu, %lu\n",
               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
        return desc_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_inodes_count(sb, gdp);
                cond_resched();
        }
        return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
        unsigned long count = 0;
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);

        for (i = 0; i < ngroups; i++) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                count += ext4_used_dirs_count(sb, gdp);
        }
        return count;
}
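/*
 * Example for the zeroing below (illustrative figures): used_blks
 * counts the inode-table blocks that may already hold live inodes.
 * Assuming 8192 inodes per group, 16 inodes per block, and
 * bg_itable_unused == 8000, then 192 inodes are in use and
 * used_blks = DIV_ROUND_UP(192, 16) = 12, so zeroing starts 12 blocks
 * into this group's inode table.
 */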
/*
 * Zeroes the not-yet-zeroed inode table - just writes zeroes through
 * the whole inode table.  Must be called without any spinlock held.
 * The only place where it is called from on an active filesystem is the
 * ext4lazyinit thread, so we do not need any special locks; however, we
 * have to prevent inode allocation from the current group, so we take
 * alloc_sem to block ext4_claim_inode until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
                          int barrier)
{
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = NULL;
        struct buffer_head *group_desc_bh;
        handle_t *handle;
        ext4_fsblk_t blk;
        int num, ret = 0, used_blks = 0;

        /* This should not happen, but just to be sure check this */
        if (sb->s_flags & MS_RDONLY) {
                ret = 1;
                goto out;
        }

        gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
        if (!gdp)
                goto out;

        /*
         * We do not need to lock this, because we are the only one
         * handling this flag.
         */
        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
                goto out;

        handle = ext4_journal_start_sb(sb, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        down_write(&grp->alloc_sem);
        /*
         * If the inode bitmap was already initialized there may be some
         * used inodes, so we need to skip blocks with used inodes in
         * the inode table.
         */
        if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
                used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
                            ext4_itable_unused_count(sb, gdp)),
                            sbi->s_inodes_per_block);

        if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
                ext4_error(sb, "Something is wrong with group %u\n"
                           "Used itable blocks: %d\n"
                           "itable unused count: %u\n",
                           group, used_blks,
                           ext4_itable_unused_count(sb, gdp));
                ret = 1;
                goto err_out;
        }

        blk = ext4_inode_table(sb, gdp) + used_blks;
        num = sbi->s_itb_per_group - used_blks;

        BUFFER_TRACE(group_desc_bh, "get_write_access");
        ret = ext4_journal_get_write_access(handle,
                                            group_desc_bh);
        if (ret)
                goto err_out;

        /*
         * Skip zeroout if the inode table is full.  But we set the ZEROED
         * flag anyway, because obviously, when it is full it does not need
         * further zeroing.
         */
        if (unlikely(num == 0))
                goto skip_zeroout;

        ext4_debug("going to zero out inode table in group %d\n",
                   group);
        ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
        if (ret < 0)
                goto err_out;
        if (barrier)
                blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
        ext4_lock_group(sb, group);
        gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
        ext4_unlock_group(sb, group);

        BUFFER_TRACE(group_desc_bh,
                     "call ext4_handle_dirty_metadata");
        ret = ext4_handle_dirty_metadata(handle, NULL,
                                         group_desc_bh);

err_out:
        up_write(&grp->alloc_sem);
        ext4_journal_stop(handle);
out:
        return ret;
}