/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
        int i;

        if (start_bit >= end_bit)
                return;

        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
                ext4_set_bit(i, bitmap);
        if (i < end_bit)
                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
                                ext4_group_t block_group,
                                struct ext4_group_desc *gdp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If the checksum is bad, mark all blocks and inodes in use to prevent
         * allocation, essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                ext4_error(sb, __func__, "Checksum bad for group %u",
                           block_group);
                ext4_free_blks_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
                return 0;
        }

        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
        mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                        bh->b_data);

        return EXT4_INODES_PER_GROUP(sb);
}
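/*
 * Worked example for the two calls above (the numbers are illustrative
 * only): with a 4 KiB bitmap block there are 32768 bits, so if
 * EXT4_INODES_PER_GROUP() is 8192 the memset() zeroes the first 1024 bytes
 * and mark_bitmap_end(8192, 32768, ...) marks bits 8192..32767 as in use.
 * Because 8192 is already byte-aligned, the ext4_set_bit() loop in
 * mark_bitmap_end() does no work and the tail is covered by one memset()
 * of bytes 1024..4095.  This keeps the padding bits past the real inode
 * count from ever looking like free inodes.
 */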
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_inode_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, __func__,
                           "Cannot read inode bitmap - "
                           "block_group = %u, inode_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }
        if (bitmap_uptodate(bh))
                return bh;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        }
        spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
                unlock_buffer(bh);
                return bh;
        }
        spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
        if (buffer_uptodate(bh)) {
                /*
                 * If the group is not uninit and bh is uptodate,
                 * the bitmap is also uptodate.
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                return bh;
        }
        /*
         * Submit the buffer_head for read.  We can safely mark the
         * bitmap as uptodate now.  We do it here so the bitmap uptodate
         * bit gets set with the buffer lock held.
         */
        set_bitmap_uptodate(bh);
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
                ext4_error(sb, __func__,
                           "Cannot read inode bitmap - "
                           "block_group = %u, inode_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }
        return bh;
}
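/*
 * Sketch of the typical calling pattern (this is how ext4_free_inode() and
 * ext4_new_inode() below use the helper): take a reference to the bitmap
 * buffer, use it under journal write access and/or the group's
 * sb_bgl_lock(), and drop the reference with brelse() when finished:
 *
 *      bitmap_bh = ext4_read_inode_bitmap(sb, group);
 *      if (!bitmap_bh)
 *              goto error;
 *      ...
 *      brelse(bitmap_bh);
 */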
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        int is_directory;
        unsigned long ino;
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *bh2;
        ext4_group_t block_group;
        unsigned long bit;
        struct ext4_group_desc *gdp;
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi;
        int fatal = 0, err, count;
        ext4_group_t flex_group;

        if (atomic_read(&inode->i_count) > 1) {
                printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
                       atomic_read(&inode->i_count));
                return;
        }
        if (inode->i_nlink) {
                printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
                       inode->i_nlink);
                return;
        }
        if (!sb) {
                printk(KERN_ERR "ext4_free_inode: inode on "
                       "nonexistent device\n");
                return;
        }
        sbi = EXT4_SB(sb);

        ino = inode->i_ino;
        ext4_debug("freeing inode %lu\n", ino);
        trace_mark(ext4_free_inode,
                   "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu",
                   sb->s_id, inode->i_ino, inode->i_mode,
                   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
                   (unsigned long long) inode->i_blocks);

        /*
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
        DQUOT_INIT(inode);
        ext4_xattr_delete_inode(handle, inode);
        DQUOT_FREE_INODE(inode);
        DQUOT_DROP(inode);

        is_directory = S_ISDIR(inode->i_mode);

        /* Do this BEFORE marking the inode not in use or returning an error */
        clear_inode(inode);

        es = EXT4_SB(sb)->s_es;
        if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
                ext4_error(sb, "ext4_free_inode",
                           "reserved or nonexistent inode %lu", ino);
                goto error_return;
        }
        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;

        BUFFER_TRACE(bitmap_bh, "get_write_access");
        fatal = ext4_journal_get_write_access(handle, bitmap_bh);
        if (fatal)
                goto error_return;

        /* Ok, now we can actually update the inode bitmaps.. */
        if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                   bit, bitmap_bh->b_data))
                ext4_error(sb, "ext4_free_inode",
                           "bit already cleared for inode %lu", ino);
        else {
                gdp = ext4_get_group_desc(sb, block_group, &bh2);

                BUFFER_TRACE(bh2, "get_write_access");
                fatal = ext4_journal_get_write_access(handle, bh2);
                if (fatal)
                        goto error_return;

                if (gdp) {
                        spin_lock(sb_bgl_lock(sbi, block_group));
                        count = ext4_free_inodes_count(sb, gdp) + 1;
                        ext4_free_inodes_set(sb, gdp, count);
                        if (is_directory) {
                                count = ext4_used_dirs_count(sb, gdp) - 1;
                                ext4_used_dirs_set(sb, gdp, count);
                        }
                        gdp->bg_checksum = ext4_group_desc_csum(sbi,
                                                        block_group, gdp);
                        spin_unlock(sb_bgl_lock(sbi, block_group));
                        percpu_counter_inc(&sbi->s_freeinodes_counter);
                        if (is_directory)
                                percpu_counter_dec(&sbi->s_dirs_counter);

                        if (sbi->s_log_groups_per_flex) {
                                flex_group = ext4_flex_group(sbi, block_group);
                                spin_lock(sb_bgl_lock(sbi, flex_group));
                                sbi->s_flex_groups[flex_group].free_inodes++;
                                spin_unlock(sb_bgl_lock(sbi, flex_group));
                        }
                }
                BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, NULL, bh2);
                if (!fatal)
                        fatal = err;
        }
        BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
        if (!fatal)
                fatal = err;
        sb->s_dirt = 1;
error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, fatal);
}
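/*
 * For reference, the inode number <-> (group, bit) mapping used above and
 * again in ext4_new_inode(), with illustrative numbers only: assuming 8192
 * inodes per group, ino 12345 maps to block_group = (12345 - 1) / 8192 = 1
 * and bit = (12345 - 1) % 8192 = 4152.  The reverse direction is
 * ino = group * EXT4_INODES_PER_GROUP(sb) + bit + 1, because inode numbers
 * start at 1 while bitmap bits start at 0.
 */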
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
                          ext4_group_t *best_group)
{
        ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
        unsigned int freei, avefreei;
        struct ext4_group_desc *desc, *best_desc = NULL;
        ext4_group_t group;
        int ret = -1;

        freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
        avefreei = freei / ngroups;

        for (group = 0; group < ngroups; group++) {
                desc = ext4_get_group_desc(sb, group, NULL);
                if (!desc || !ext4_free_inodes_count(sb, desc))
                        continue;
                if (ext4_free_inodes_count(sb, desc) < avefreei)
                        continue;
                if (!best_desc ||
                    (ext4_free_blks_count(sb, desc) >
                     ext4_free_blks_count(sb, best_desc))) {
                        *best_group = group;
                        best_desc = desc;
                        ret = 0;
                }
        }
        return ret;
}
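/*
 * Worked example (illustrative numbers): with four groups reporting 10, 80,
 * 120 and 30 free inodes, freei = 240 and avefreei = 60, so only the groups
 * with 80 and 120 free inodes are considered and the one with more free
 * blocks wins.  find_group_dir() is only used with the "oldalloc" mount
 * option; the Orlov allocator below is the default directory policy (see
 * ext4_new_inode()).
 */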
#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
                           ext4_group_t *best_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *desc;
        struct buffer_head *bh;
        struct flex_groups *flex_group = sbi->s_flex_groups;
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
        ext4_group_t ngroups = sbi->s_groups_count;
        int flex_size = ext4_flex_bg_size(sbi);
        ext4_group_t best_flex = parent_fbg_group;
        int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
        int flexbg_free_blocks;
        int flex_freeb_ratio;
        ext4_group_t n_fbg_groups;
        ext4_group_t i;

        n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
                sbi->s_log_groups_per_flex;

find_close_to_parent:
        flexbg_free_blocks = flex_group[best_flex].free_blocks;
        flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
        if (flex_group[best_flex].free_inodes &&
            flex_freeb_ratio > free_block_ratio)
                goto found_flexbg;

        if (best_flex && best_flex == parent_fbg_group) {
                best_flex--;
                goto find_close_to_parent;
        }

        for (i = 0; i < n_fbg_groups; i++) {
                if (i == parent_fbg_group || i == parent_fbg_group - 1)
                        continue;

                flexbg_free_blocks = flex_group[i].free_blocks;
                flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

                if (flex_freeb_ratio > free_block_ratio &&
                    flex_group[i].free_inodes) {
                        best_flex = i;
                        goto found_flexbg;
                }

                if (flex_group[best_flex].free_inodes == 0 ||
                    (flex_group[i].free_blocks >
                     flex_group[best_flex].free_blocks &&
                     flex_group[i].free_inodes))
                        best_flex = i;
        }

        if (!flex_group[best_flex].free_inodes ||
            !flex_group[best_flex].free_blocks)
                return -1;

found_flexbg:
        for (i = best_flex * flex_size; i < ngroups &&
                     i < (best_flex + 1) * flex_size; i++) {
                desc = ext4_get_group_desc(sb, i, &bh);
                if (ext4_free_inodes_count(sb, desc)) {
                        *best_group = i;
                        goto out;
                }
        }

        return -1;
out:
        return 0;
}
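/*
 * Illustrative numbers for the free_block_ratio test above: with 32768
 * blocks per group and flex_size = 16, blocks_per_flex = 524288.
 * flex_freeb_ratio is a truncated integer percentage, so the parent's flex
 * group is used only while it still has free inodes and more than 10% of
 * its blocks are free; otherwise the loop scans the other n_fbg_groups
 * flex groups for a candidate with a better free-block ratio.
 */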
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large a debt (max_debt).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256
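/*
 * Illustrative max_debt arithmetic for find_group_orlov() below (all the
 * numbers are made up): with 32768 blocks per group, 8192 inodes per group
 * and blocks_per_dir = 100, the divisor is max(100, BLOCK_COST) = 256,
 * giving max_debt = 32768 / 256 = 128.  128 * INODE_COST = 8192 does not
 * exceed inodes_per_group, and 128 already lies within the 1..255 clamp,
 * so max_debt stays 128.
 */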
static int find_group_orlov(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group)
{
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        ext4_group_t ngroups = sbi->s_groups_count;
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        ext4_fsblk_t freeb, avefreeb;
        ext4_fsblk_t blocks_per_dir;
        unsigned int ndirs;
        int max_debt, max_dirs, min_inodes;
        ext4_grpblk_t min_blocks;
        ext4_group_t i;
        struct ext4_group_desc *desc;

        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
        avefreei = freei / ngroups;
        freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        avefreeb = freeb;
        do_div(avefreeb, ngroups);
        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

        if ((parent == sb->s_root->d_inode) ||
            (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
                int best_ndir = inodes_per_group;
                ext4_group_t grp;
                int ret = -1;

                get_random_bytes(&grp, sizeof(grp));
                parent_group = (unsigned)grp % ngroups;
                for (i = 0; i < ngroups; i++) {
                        grp = (parent_group + i) % ngroups;
                        desc = ext4_get_group_desc(sb, grp, NULL);
                        if (!desc || !ext4_free_inodes_count(sb, desc))
                                continue;
                        if (ext4_used_dirs_count(sb, desc) >= best_ndir)
                                continue;
                        if (ext4_free_inodes_count(sb, desc) < avefreei)
                                continue;
                        if (ext4_free_blks_count(sb, desc) < avefreeb)
                                continue;
                        *group = grp;
                        ret = 0;
                        best_ndir = ext4_used_dirs_count(sb, desc);
                }
                if (ret == 0)
                        return ret;
                goto fallback;
        }

        blocks_per_dir = ext4_blocks_count(es) - freeb;
        do_div(blocks_per_dir, ndirs);

        max_dirs = ndirs / ngroups + inodes_per_group / 16;
        min_inodes = avefreei - inodes_per_group / 4;
        min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

        max_debt = EXT4_BLOCKS_PER_GROUP(sb);
        max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
        if (max_debt * INODE_COST > inodes_per_group)
                max_debt = inodes_per_group / INODE_COST;
        if (max_debt > 255)
                max_debt = 255;
        if (max_debt == 0)
                max_debt = 1;

        for (i = 0; i < ngroups; i++) {
                *group = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (!desc || !ext4_free_inodes_count(sb, desc))
                        continue;
                if (ext4_used_dirs_count(sb, desc) >= max_dirs)
                        continue;
                if (ext4_free_inodes_count(sb, desc) < min_inodes)
                        continue;
                if (ext4_free_blks_count(sb, desc) < min_blocks)
                        continue;
                return 0;
        }

fallback:
        for (i = 0; i < ngroups; i++) {
                *group = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_inodes_count(sb, desc) >= avefreei)
                        return 0;
        }

        if (avefreei) {
                /*
                 * The free-inodes counter is approximate, and for really small
                 * filesystems the above test can fail to find any blockgroups
                 */
                avefreei = 0;
                goto fallback;
        }

        return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group)
{
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
        struct ext4_group_desc *desc;
        ext4_group_t i;

        /*
         * Try to place the inode in its parent directory
         */
        *group = parent_group;
        desc = ext4_get_group_desc(sb, *group, NULL);
        if (desc && ext4_free_inodes_count(sb, desc) &&
            ext4_free_blks_count(sb, desc))
                return 0;

        /*
         * We're going to place this inode in a different blockgroup from its
         * parent.  We want to cause files in a common directory to all land in
         * the same blockgroup.  But we want files which are in a different
         * directory which shares a blockgroup with our parent to land in a
         * different blockgroup.
         *
         * So add our directory's i_ino into the starting point for the hash.
         */
        *group = (*group + parent->i_ino) % ngroups;

        /*
         * Use a quadratic hash to find a group with a free inode and some free
         * blocks.
         */
        for (i = 1; i < ngroups; i <<= 1) {
                *group += i;
                if (*group >= ngroups)
                        *group -= ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_blks_count(sb, desc))
                        return 0;
        }

        /*
         * That failed: try linear search for a free inode, even if that group
         * has no free blocks.
         */
        *group = parent_group;
        for (i = 0; i < ngroups; i++) {
                if (++*group >= ngroups)
                        *group = 0;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc))
                        return 0;
        }

        return -1;
}
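/*
 * The "quadratic hash" above probes groups at offsets 1, 3, 7, 15, ...
 * (2^k - 1) from the hashed starting group, wrapping modulo ngroups.
 * Illustrative example: with ngroups = 64, parent_group = 5 and
 * parent->i_ino = 20, the starting group is (5 + 20) % 64 = 25 and the
 * probe sequence is 26, 28, 32, 40, 56, 24 (wrapped) before the code
 * falls back to the linear scan that ignores free-block counts.
 */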
/*
 * Claim the inode from the inode bitmap.  If the group
 * is uninit we need to take the group's sb_bgl_lock
 * and clear the uninit flag.  The inode bitmap update
 * and group desc uninit flag clear should be done
 * while holding sb_bgl_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode.
 */
static int ext4_claim_inode(struct super_block *sb,
                            struct buffer_head *inode_bitmap_bh,
                            unsigned long ino, ext4_group_t group, int mode)
{
        int free = 0, retval = 0, count;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

        spin_lock(sb_bgl_lock(sbi, group));
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
                goto err_ret;
        }
        ino++;
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
            ino > EXT4_INODES_PER_GROUP(sb)) {
                spin_unlock(sb_bgl_lock(sbi, group));
                ext4_error(sb, __func__,
                           "reserved inode or inode > inodes count - "
                           "block_group = %u, inode=%lu", group,
                           ino + group * EXT4_INODES_PER_GROUP(sb));
                return 1;
        }
        /* If we didn't allocate from within the initialized part of the inode
         * table then we need to initialize up to this inode. */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
                        /* When marking the block group with
                         * ~EXT4_BG_INODE_UNINIT we don't want to depend
                         * on the value of bg_itable_unused even though
                         * mke2fs could have initialized the same for us.
                         * Instead we calculate the value below.
                         */
                        free = 0;
                } else {
                        free = EXT4_INODES_PER_GROUP(sb) -
                               ext4_itable_unused_count(sb, gdp);
                }

                /*
                 * Check the relative inode number against the last used
                 * relative inode number in this group.  If it is greater
                 * we need to update the bg_itable_unused count.
                 */
                if (ino > free)
                        ext4_itable_unused_set(sb, gdp,
                                        (EXT4_INODES_PER_GROUP(sb) - ino));
        }
        count = ext4_free_inodes_count(sb, gdp) - 1;
        ext4_free_inodes_set(sb, gdp, count);
        if (S_ISDIR(mode)) {
                count = ext4_used_dirs_count(sb, gdp) + 1;
                ext4_used_dirs_set(sb, gdp, count);
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
        spin_unlock(sb_bgl_lock(sbi, group));
        return retval;
}
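/*
 * bg_itable_unused example (illustrative numbers): with 8192 inodes per
 * group and bg_itable_unused = 8000, the initialized part of the inode
 * table covers the first 192 entries, so free = 192 above.  Claiming
 * relative inode number 200 (ino > free) moves the high-water mark and
 * bg_itable_unused is rewritten as 8192 - 200 = 7992.
 */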
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
        struct super_block *sb;
        struct buffer_head *inode_bitmap_bh = NULL;
        struct buffer_head *group_desc_bh;
        ext4_group_t group = 0;
        unsigned long ino = 0;
        struct inode *inode;
        struct ext4_group_desc *gdp = NULL;
        struct ext4_super_block *es;
        struct ext4_inode_info *ei;
        struct ext4_sb_info *sbi;
        int ret2, err = 0;
        struct inode *ret;
        ext4_group_t i;
        int free = 0;
        ext4_group_t flex_group;

        /* Cannot create files in a deleted directory */
        if (!dir || !dir->i_nlink)
                return ERR_PTR(-EPERM);

        sb = dir->i_sb;
        trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
                   dir->i_ino, mode);
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        ei = EXT4_I(inode);

        sbi = EXT4_SB(sb);
        es = sbi->s_es;

        if (sbi->s_log_groups_per_flex) {
                ret2 = find_group_flex(sb, dir, &group);
                goto got_group;
        }

        if (S_ISDIR(mode)) {
                if (test_opt(sb, OLDALLOC))
                        ret2 = find_group_dir(sb, dir, &group);
                else
                        ret2 = find_group_orlov(sb, dir, &group);
        } else
                ret2 = find_group_other(sb, dir, &group);

got_group:
        err = -ENOSPC;
        if (ret2 == -1)
                goto out;

        for (i = 0; i < sbi->s_groups_count; i++) {
                err = -EIO;

                gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
                if (!gdp)
                        goto fail;

                brelse(inode_bitmap_bh);
                inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
                if (!inode_bitmap_bh)
                        goto fail;

                ino = 0;

repeat_in_this_group:
                ino = ext4_find_next_zero_bit((unsigned long *)
                                              inode_bitmap_bh->b_data,
                                              EXT4_INODES_PER_GROUP(sb), ino);

                if (ino < EXT4_INODES_PER_GROUP(sb)) {

                        BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle,
                                                            inode_bitmap_bh);
                        if (err)
                                goto fail;

                        BUFFER_TRACE(group_desc_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle,
                                                            group_desc_bh);
                        if (err)
                                goto fail;
                        if (!ext4_claim_inode(sb, inode_bitmap_bh,
                                              ino, group, mode)) {
                                /* we won it */
                                BUFFER_TRACE(inode_bitmap_bh,
                                             "call ext4_handle_dirty_metadata");
                                err = ext4_handle_dirty_metadata(handle,
                                                                 inode,
                                                                 inode_bitmap_bh);
                                if (err)
                                        goto fail;
                                /* zero bit is inode number 1 */
                                ino++;
                                goto got;
                        }
                        /* we lost it */
                        ext4_handle_release_buffer(handle, inode_bitmap_bh);
                        ext4_handle_release_buffer(handle, group_desc_bh);

                        if (++ino < EXT4_INODES_PER_GROUP(sb))
                                goto repeat_in_this_group;
                }

                /*
                 * This case is possible in a concurrent environment.  It is
                 * very rare.  We cannot repeat the find_group_xxx() call
                 * because that will simply return the same blockgroup, since
                 * the group descriptor metadata has not yet been updated.
                 * So we just go on to the next blockgroup.
                 */
                if (++group == sbi->s_groups_count)
                        group = 0;
        }
        err = -ENOSPC;
        goto out;

got:
        /* We may have to initialize the block bitmap if it isn't already */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                struct buffer_head *block_bitmap_bh;

                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
                if (err) {
                        brelse(block_bitmap_bh);
                        goto fail;
                }

                free = 0;
                spin_lock(sb_bgl_lock(sbi, group));
                /* recheck and clear flag under lock if we still need to */
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        free = ext4_free_blocks_after_init(sb, group, gdp);
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_blks_set(sb, gdp, free);
                        gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                                gdp);
                }
                spin_unlock(sb_bgl_lock(sbi, group));

                /* Don't need to dirty bitmap block if we didn't change it */
                if (free) {
                        BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
                        err = ext4_handle_dirty_metadata(handle,
                                                         NULL, block_bitmap_bh);
                }

                brelse(block_bitmap_bh);
                if (err)
                        goto fail;
        }
        BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
        if (err)
                goto fail;

        percpu_counter_dec(&sbi->s_freeinodes_counter);
        if (S_ISDIR(mode))
                percpu_counter_inc(&sbi->s_dirs_counter);
        sb->s_dirt = 1;

        if (sbi->s_log_groups_per_flex) {
                flex_group = ext4_flex_group(sbi, group);
                spin_lock(sb_bgl_lock(sbi, flex_group));
                sbi->s_flex_groups[flex_group].free_inodes--;
                spin_unlock(sb_bgl_lock(sbi, flex_group));
        }

        inode->i_uid = current_fsuid();
        if (test_opt(sb, GRPID))
                inode->i_gid = dir->i_gid;
        else if (dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;

        inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
        /* This is the optimal IO size (for stat), not the fs block size */
        inode->i_blocks = 0;
        inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
                                                       ext4_current_time(inode);
        memset(ei->i_data, 0, sizeof(ei->i_data));
        ei->i_dir_start_lookup = 0;
        ei->i_disksize = 0;

        /*
         * Don't inherit extent flag from directory.  We set extent flag on
         * newly created directory and file only if -o extent mount option is
         * specified
         */
        ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
        if (S_ISLNK(mode))
                ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
        /* dirsync only applies to directories */
        if (!S_ISDIR(mode))
                ei->i_flags &= ~EXT4_DIRSYNC_FL;
        ei->i_file_acl = 0;
        ei->i_dtime = 0;
        ei->i_block_group = group;

        ext4_set_inode_flags(inode);
        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
                err = -EINVAL;
                goto fail_drop;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);

        ei->i_state = EXT4_STATE_NEW;

        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

        ret = inode;
        if (DQUOT_ALLOC_INODE(inode)) {
                err = -EDQUOT;
                goto fail_drop;
        }

        err = ext4_init_acl(handle, inode, dir);
        if (err)
                goto fail_free_drop;

        err = ext4_init_security(handle, inode, dir);
        if (err)
                goto fail_free_drop;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
                /* set extent flag only for directory, file and normal symlink */
                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
                        EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
                        ext4_ext_tree_init(handle, inode);
                }
        }

        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_std_error(sb, err);
                goto fail_free_drop;
        }

        ext4_debug("allocating inode %lu\n", inode->i_ino);
        trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
                   sb->s_id, inode->i_ino, dir->i_ino, mode);
        goto really_out;
fail:
        ext4_std_error(sb, err);
out:
        iput(inode);
        ret = ERR_PTR(err);
really_out:
        brelse(inode_bitmap_bh);
        return ret;

fail_free_drop:
        DQUOT_FREE_INODE(inode);

fail_drop:
        DQUOT_DROP(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
        iput(inode);
        brelse(inode_bitmap_bh);
        return ERR_PTR(err);
}
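/*
 * Sketch of the expected calling pattern (the real callers are
 * ext4_create(), ext4_mkdir() and friends in fs/ext4/namei.c): inside a
 * running journal handle the caller allocates the inode, sets up its
 * operations, and links it into the directory; on failure it drops the
 * link count and its reference so the half-created inode is released
 * again through ext4_free_inode():
 *
 *      inode = ext4_new_inode(handle, dir, mode);
 *      if (IS_ERR(inode))
 *              return PTR_ERR(inode);
 *      inode->i_op = ...;
 *      ... add the directory entry, or drop_nlink() + iput() on error ...
 */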
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
        unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
        ext4_group_t block_group;
        int bit;
        struct buffer_head *bitmap_bh;
        struct inode *inode = NULL;
        long err = -EIO;

        /* Error cases - e2fsck has already cleaned up for us */
        if (ino > max_ino) {
                ext4_warning(sb, __func__,
                             "bad orphan ino %lu! e2fsck was run?", ino);
                goto error;
        }

        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (!bitmap_bh) {
                ext4_warning(sb, __func__,
                             "inode bitmap error for orphan %lu", ino);
                goto error;
        }

        /* Having the inode bit set should be a 100% indicator that this
         * is a valid orphan (no e2fsck run on fs).  Orphans also include
         * inodes that were being truncated, so we can't check i_nlink==0.
         */
        if (!ext4_test_bit(bit, bitmap_bh->b_data))
                goto bad_orphan;

        inode = ext4_iget(sb, ino);
        if (IS_ERR(inode))
                goto iget_failed;

        /*
         * If the orphan has i_nlink > 0 then it should be able to be
         * truncated, otherwise it won't be removed from the orphan list
         * during processing and an infinite loop will result.
         */
        if (inode->i_nlink && !ext4_can_truncate(inode))
                goto bad_orphan;

        if (NEXT_ORPHAN(inode) > max_ino)
                goto bad_orphan;
        brelse(bitmap_bh);
        return inode;

iget_failed:
        err = PTR_ERR(inode);
        inode = NULL;
bad_orphan:
        ext4_warning(sb, __func__,
                     "bad orphan inode %lu! e2fsck was run?", ino);
        printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
               ext4_test_bit(bit, bitmap_bh->b_data));
        printk(KERN_NOTICE "inode=%p\n", inode);
        if (inode) {
                printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
                       is_bad_inode(inode));
                printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
                       NEXT_ORPHAN(inode));
                printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
                printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
                /* Avoid freeing blocks if we got a bad deleted inode */
                if (inode->i_nlink == 0)
                        inode->i_blocks = 0;
                iput(inode);
        }
        brelse(bitmap_bh);
error:
        return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
        unsigned long desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_inodes_count(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_inode_bitmap(sb, i);
                if (!bitmap_bh)
                        continue;

                x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
                       i, ext4_free_inodes_count(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_inodes: "
               "stored = %u, computed = %lu, %lu\n",
               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
        return desc_count;
#else
        desc_count = 0;
        for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_inodes_count(sb, gdp);
                cond_resched();
        }
        return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
        unsigned long count = 0;
        ext4_group_t i;

        for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                count += ext4_used_dirs_count(sb, gdp);
        }
        return count;
}