1 /* 2 * linux/fs/ext4/resize.c 3 * 4 * Support for resizing an ext4 filesystem while it is mounted. 5 * 6 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> 7 * 8 * This could probably be made into a module, because it is not often in use. 9 */ 10 11 12 #define EXT4FS_DEBUG 13 14 #include <linux/errno.h> 15 #include <linux/slab.h> 16 17 #include "ext4_jbd2.h" 18 19 int ext4_resize_begin(struct super_block *sb) 20 { 21 int ret = 0; 22 23 if (!capable(CAP_SYS_RESOURCE)) 24 return -EPERM; 25 26 /* 27 * If we are not using the primary superblock/GDT copy don't resize, 28 * because the user tools have no way of handling this. Probably a 29 * bad time to do it anyways. 30 */ 31 if (EXT4_SB(sb)->s_sbh->b_blocknr != 32 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 33 ext4_warning(sb, "won't resize using backup superblock at %llu", 34 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 35 return -EPERM; 36 } 37 38 /* 39 * We are not allowed to do online-resizing on a filesystem mounted 40 * with error, because it can destroy the filesystem easily. 41 */ 42 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 43 ext4_warning(sb, "There are errors in the filesystem, " 44 "so online resizing is not allowed\n"); 45 return -EPERM; 46 } 47 48 if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags)) 49 ret = -EBUSY; 50 51 return ret; 52 } 53 54 void ext4_resize_end(struct super_block *sb) 55 { 56 clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags); 57 smp_mb__after_atomic(); 58 } 59 60 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb, 61 ext4_group_t group) { 62 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) << 63 EXT4_DESC_PER_BLOCK_BITS(sb); 64 } 65 66 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb, 67 ext4_group_t group) { 68 group = ext4_meta_bg_first_group(sb, group); 69 return ext4_group_first_block_no(sb, group); 70 } 71 72 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb, 73 ext4_group_t group) { 74 ext4_grpblk_t overhead; 75 overhead = ext4_bg_num_gdb(sb, group); 76 if (ext4_bg_has_super(sb, group)) 77 overhead += 1 + 78 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 79 return overhead; 80 } 81 82 #define outside(b, first, last) ((b) < (first) || (b) >= (last)) 83 #define inside(b, first, last) ((b) >= (first) && (b) < (last)) 84 85 static int verify_group_input(struct super_block *sb, 86 struct ext4_new_group_data *input) 87 { 88 struct ext4_sb_info *sbi = EXT4_SB(sb); 89 struct ext4_super_block *es = sbi->s_es; 90 ext4_fsblk_t start = ext4_blocks_count(es); 91 ext4_fsblk_t end = start + input->blocks_count; 92 ext4_group_t group = input->group; 93 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; 94 unsigned overhead; 95 ext4_fsblk_t metaend; 96 struct buffer_head *bh = NULL; 97 ext4_grpblk_t free_blocks_count, offset; 98 int err = -EINVAL; 99 100 if (group != sbi->s_groups_count) { 101 ext4_warning(sb, "Cannot add at group %u (only %u groups)", 102 input->group, sbi->s_groups_count); 103 return -EINVAL; 104 } 105 106 overhead = ext4_group_overhead_blocks(sb, group); 107 metaend = start + overhead; 108 input->free_blocks_count = free_blocks_count = 109 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; 110 111 if (test_opt(sb, DEBUG)) 112 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " 113 "(%d free, %u reserved)\n", 114 ext4_bg_has_super(sb, input->group) ? 
"normal" : 115 "no-super", input->group, input->blocks_count, 116 free_blocks_count, input->reserved_blocks); 117 118 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 119 if (offset != 0) 120 ext4_warning(sb, "Last group not full"); 121 else if (input->reserved_blocks > input->blocks_count / 5) 122 ext4_warning(sb, "Reserved blocks too high (%u)", 123 input->reserved_blocks); 124 else if (free_blocks_count < 0) 125 ext4_warning(sb, "Bad blocks count %u", 126 input->blocks_count); 127 else if (!(bh = sb_bread(sb, end - 1))) 128 ext4_warning(sb, "Cannot read last block (%llu)", 129 end - 1); 130 else if (outside(input->block_bitmap, start, end)) 131 ext4_warning(sb, "Block bitmap not in group (block %llu)", 132 (unsigned long long)input->block_bitmap); 133 else if (outside(input->inode_bitmap, start, end)) 134 ext4_warning(sb, "Inode bitmap not in group (block %llu)", 135 (unsigned long long)input->inode_bitmap); 136 else if (outside(input->inode_table, start, end) || 137 outside(itend - 1, start, end)) 138 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", 139 (unsigned long long)input->inode_table, itend - 1); 140 else if (input->inode_bitmap == input->block_bitmap) 141 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", 142 (unsigned long long)input->block_bitmap); 143 else if (inside(input->block_bitmap, input->inode_table, itend)) 144 ext4_warning(sb, "Block bitmap (%llu) in inode table " 145 "(%llu-%llu)", 146 (unsigned long long)input->block_bitmap, 147 (unsigned long long)input->inode_table, itend - 1); 148 else if (inside(input->inode_bitmap, input->inode_table, itend)) 149 ext4_warning(sb, "Inode bitmap (%llu) in inode table " 150 "(%llu-%llu)", 151 (unsigned long long)input->inode_bitmap, 152 (unsigned long long)input->inode_table, itend - 1); 153 else if (inside(input->block_bitmap, start, metaend)) 154 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", 155 (unsigned long long)input->block_bitmap, 156 start, metaend - 1); 157 else if (inside(input->inode_bitmap, start, metaend)) 158 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", 159 (unsigned long long)input->inode_bitmap, 160 start, metaend - 1); 161 else if (inside(input->inode_table, start, metaend) || 162 inside(itend - 1, start, metaend)) 163 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " 164 "(%llu-%llu)", 165 (unsigned long long)input->inode_table, 166 itend - 1, start, metaend - 1); 167 else 168 err = 0; 169 brelse(bh); 170 171 return err; 172 } 173 174 /* 175 * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex 176 * group each time. 177 */ 178 struct ext4_new_flex_group_data { 179 struct ext4_new_group_data *groups; /* new_group_data for groups 180 in the flex group */ 181 __u16 *bg_flags; /* block group flags of groups 182 in @groups */ 183 ext4_group_t count; /* number of groups in @groups 184 */ 185 }; 186 187 /* 188 * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of 189 * @flexbg_size. 190 * 191 * Returns NULL on failure otherwise address of the allocated structure. 
192 */ 193 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) 194 { 195 struct ext4_new_flex_group_data *flex_gd; 196 197 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); 198 if (flex_gd == NULL) 199 goto out3; 200 201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) 202 goto out2; 203 flex_gd->count = flexbg_size; 204 205 flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) * 206 flexbg_size, GFP_NOFS); 207 if (flex_gd->groups == NULL) 208 goto out2; 209 210 flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS); 211 if (flex_gd->bg_flags == NULL) 212 goto out1; 213 214 return flex_gd; 215 216 out1: 217 kfree(flex_gd->groups); 218 out2: 219 kfree(flex_gd); 220 out3: 221 return NULL; 222 } 223 224 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) 225 { 226 kfree(flex_gd->bg_flags); 227 kfree(flex_gd->groups); 228 kfree(flex_gd); 229 } 230 231 /* 232 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps 233 * and inode tables for a flex group. 234 * 235 * This function is used by 64bit-resize. Note that this function allocates 236 * group tables from the 1st group of groups contained by @flexgd, which may 237 * be a partial of a flex group. 238 * 239 * @sb: super block of fs to which the groups belongs 240 * 241 * Returns 0 on a successful allocation of the metadata blocks in the 242 * block group. 243 */ 244 static int ext4_alloc_group_tables(struct super_block *sb, 245 struct ext4_new_flex_group_data *flex_gd, 246 int flexbg_size) 247 { 248 struct ext4_new_group_data *group_data = flex_gd->groups; 249 ext4_fsblk_t start_blk; 250 ext4_fsblk_t last_blk; 251 ext4_group_t src_group; 252 ext4_group_t bb_index = 0; 253 ext4_group_t ib_index = 0; 254 ext4_group_t it_index = 0; 255 ext4_group_t group; 256 ext4_group_t last_group; 257 unsigned overhead; 258 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 259 260 BUG_ON(flex_gd->count == 0 || group_data == NULL); 261 262 src_group = group_data[0].group; 263 last_group = src_group + flex_gd->count - 1; 264 265 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != 266 (last_group & ~(flexbg_size - 1)))); 267 next_group: 268 group = group_data[0].group; 269 if (src_group >= group_data[0].group + flex_gd->count) 270 return -ENOSPC; 271 start_blk = ext4_group_first_block_no(sb, src_group); 272 last_blk = start_blk + group_data[src_group - group].blocks_count; 273 274 overhead = ext4_group_overhead_blocks(sb, src_group); 275 276 start_blk += overhead; 277 278 /* We collect contiguous blocks as much as possible. 
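	 *
	 * Illustrative example (made-up layout, not from a real image): with
	 * flexbg_size == 4 and only the first of the four new groups carrying
	 * a superblock/GDT, the other three have no overhead, so last_blk
	 * grows to cover all four groups and the four block bitmaps, four
	 * inode bitmaps and four inode tables below end up packed back to
	 * back right after that overhead.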
*/ 279 src_group++; 280 for (; src_group <= last_group; src_group++) { 281 overhead = ext4_group_overhead_blocks(sb, src_group); 282 if (overhead == 0) 283 last_blk += group_data[src_group - group].blocks_count; 284 else 285 break; 286 } 287 288 /* Allocate block bitmaps */ 289 for (; bb_index < flex_gd->count; bb_index++) { 290 if (start_blk >= last_blk) 291 goto next_group; 292 group_data[bb_index].block_bitmap = start_blk++; 293 group = ext4_get_group_number(sb, start_blk - 1); 294 group -= group_data[0].group; 295 group_data[group].free_blocks_count--; 296 flex_gd->bg_flags[group] &= uninit_mask; 297 } 298 299 /* Allocate inode bitmaps */ 300 for (; ib_index < flex_gd->count; ib_index++) { 301 if (start_blk >= last_blk) 302 goto next_group; 303 group_data[ib_index].inode_bitmap = start_blk++; 304 group = ext4_get_group_number(sb, start_blk - 1); 305 group -= group_data[0].group; 306 group_data[group].free_blocks_count--; 307 flex_gd->bg_flags[group] &= uninit_mask; 308 } 309 310 /* Allocate inode tables */ 311 for (; it_index < flex_gd->count; it_index++) { 312 unsigned int itb = EXT4_SB(sb)->s_itb_per_group; 313 ext4_fsblk_t next_group_start; 314 315 if (start_blk + itb > last_blk) 316 goto next_group; 317 group_data[it_index].inode_table = start_blk; 318 group = ext4_get_group_number(sb, start_blk); 319 next_group_start = ext4_group_first_block_no(sb, group + 1); 320 group -= group_data[0].group; 321 322 if (start_blk + itb > next_group_start) { 323 flex_gd->bg_flags[group + 1] &= uninit_mask; 324 overhead = start_blk + itb - next_group_start; 325 group_data[group + 1].free_blocks_count -= overhead; 326 itb -= overhead; 327 } 328 329 group_data[group].free_blocks_count -= itb; 330 flex_gd->bg_flags[group] &= uninit_mask; 331 start_blk += EXT4_SB(sb)->s_itb_per_group; 332 } 333 334 if (test_opt(sb, DEBUG)) { 335 int i; 336 group = group_data[0].group; 337 338 printk(KERN_DEBUG "EXT4-fs: adding a flex group with " 339 "%d groups, flexbg size is %d:\n", flex_gd->count, 340 flexbg_size); 341 342 for (i = 0; i < flex_gd->count; i++) { 343 printk(KERN_DEBUG "adding %s group %u: %u " 344 "blocks (%d free)\n", 345 ext4_bg_has_super(sb, group + i) ? "normal" : 346 "no-super", group + i, 347 group_data[i].blocks_count, 348 group_data[i].free_blocks_count); 349 } 350 } 351 return 0; 352 } 353 354 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, 355 ext4_fsblk_t blk) 356 { 357 struct buffer_head *bh; 358 int err; 359 360 bh = sb_getblk(sb, blk); 361 if (unlikely(!bh)) 362 return ERR_PTR(-ENOMEM); 363 BUFFER_TRACE(bh, "get_write_access"); 364 if ((err = ext4_journal_get_write_access(handle, bh))) { 365 brelse(bh); 366 bh = ERR_PTR(err); 367 } else { 368 memset(bh->b_data, 0, sb->s_blocksize); 369 set_buffer_uptodate(bh); 370 } 371 372 return bh; 373 } 374 375 /* 376 * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA. 377 * If that fails, restart the transaction & regain write access for the 378 * buffer head which is used for block_bitmap modifications. 379 */ 380 static int extend_or_restart_transaction(handle_t *handle, int thresh) 381 { 382 int err; 383 384 if (ext4_handle_has_enough_credits(handle, thresh)) 385 return 0; 386 387 err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA); 388 if (err < 0) 389 return err; 390 if (err) { 391 err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA); 392 if (err) 393 return err; 394 } 395 396 return 0; 397 } 398 399 /* 400 * set_flexbg_block_bitmap() mark @count blocks starting from @block used. 
 *
 * Helper function for ext4_setup_new_group_blocks() which sets the bits in
 * the block bitmaps of a new flex group for the blocks used by its own
 * group tables.
 *
 * @sb:		super block
 * @handle:	journal handle
 * @flex_gd:	flex group data
 * @block:	first block to mark as in use
 * @count:	number of blocks to mark
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t block, ext4_group_t count)
{
	ext4_group_t count2;

	ext4_debug("mark blocks [%llu/%u] used\n", block, count);
	for (count2 = count; count > 0; count -= count2, block += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, block);
		start = ext4_group_first_block_no(sb, group);
		group -= flex_gd->groups[0].group;

		count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = extend_or_restart_transaction(handle, 1);
		if (err)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
			   block - start, count2);
		ext4_set_bits(bh->b_data, block - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 * 1. copy super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in block bitmaps for blocks taken by
 *    super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmap for blocks taken by group tables.
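 *
 * For reference, the start of a new group that carries a backup (with
 * meta_bg off, which this sketch assumes) is laid out as
 *
 *	superblock | GDT blocks | reserved GDT blocks | group tables ...
 *
 * The first three pieces are the "overhead" reported by
 * ext4_group_overhead_blocks() and accounted for in step 1; the group
 * tables (bitmaps and inode table) are what step 2 marks via
 * set_flexbg_block_bitmap().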
472 */ 473 static int setup_new_flex_group_blocks(struct super_block *sb, 474 struct ext4_new_flex_group_data *flex_gd) 475 { 476 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group}; 477 ext4_fsblk_t start; 478 ext4_fsblk_t block; 479 struct ext4_sb_info *sbi = EXT4_SB(sb); 480 struct ext4_super_block *es = sbi->s_es; 481 struct ext4_new_group_data *group_data = flex_gd->groups; 482 __u16 *bg_flags = flex_gd->bg_flags; 483 handle_t *handle; 484 ext4_group_t group, count; 485 struct buffer_head *bh = NULL; 486 int reserved_gdb, i, j, err = 0, err2; 487 int meta_bg; 488 489 BUG_ON(!flex_gd->count || !group_data || 490 group_data[0].group != sbi->s_groups_count); 491 492 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 493 meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG); 494 495 /* This transaction may be extended/restarted along the way */ 496 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 497 if (IS_ERR(handle)) 498 return PTR_ERR(handle); 499 500 group = group_data[0].group; 501 for (i = 0; i < flex_gd->count; i++, group++) { 502 unsigned long gdblocks; 503 ext4_grpblk_t overhead; 504 505 gdblocks = ext4_bg_num_gdb(sb, group); 506 start = ext4_group_first_block_no(sb, group); 507 508 if (meta_bg == 0 && !ext4_bg_has_super(sb, group)) 509 goto handle_itb; 510 511 if (meta_bg == 1) { 512 ext4_group_t first_group; 513 first_group = ext4_meta_bg_first_group(sb, group); 514 if (first_group != group + 1 && 515 first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1) 516 goto handle_itb; 517 } 518 519 block = start + ext4_bg_has_super(sb, group); 520 /* Copy all of the GDT blocks into the backup in this group */ 521 for (j = 0; j < gdblocks; j++, block++) { 522 struct buffer_head *gdb; 523 524 ext4_debug("update backup group %#04llx\n", block); 525 err = extend_or_restart_transaction(handle, 1); 526 if (err) 527 goto out; 528 529 gdb = sb_getblk(sb, block); 530 if (unlikely(!gdb)) { 531 err = -ENOMEM; 532 goto out; 533 } 534 535 BUFFER_TRACE(gdb, "get_write_access"); 536 err = ext4_journal_get_write_access(handle, gdb); 537 if (err) { 538 brelse(gdb); 539 goto out; 540 } 541 memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, 542 gdb->b_size); 543 set_buffer_uptodate(gdb); 544 545 err = ext4_handle_dirty_metadata(handle, NULL, gdb); 546 if (unlikely(err)) { 547 brelse(gdb); 548 goto out; 549 } 550 brelse(gdb); 551 } 552 553 /* Zero out all of the reserved backup group descriptor 554 * table blocks 555 */ 556 if (ext4_bg_has_super(sb, group)) { 557 err = sb_issue_zeroout(sb, gdblocks + start + 1, 558 reserved_gdb, GFP_NOFS); 559 if (err) 560 goto out; 561 } 562 563 handle_itb: 564 /* Initialize group tables of the grop @group */ 565 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) 566 goto handle_bb; 567 568 /* Zero out all of the inode table blocks */ 569 block = group_data[i].inode_table; 570 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", 571 block, sbi->s_itb_per_group); 572 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, 573 GFP_NOFS); 574 if (err) 575 goto out; 576 577 handle_bb: 578 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT) 579 goto handle_ib; 580 581 /* Initialize block bitmap of the @group */ 582 block = group_data[i].block_bitmap; 583 err = extend_or_restart_transaction(handle, 1); 584 if (err) 585 goto out; 586 587 bh = bclean(handle, sb, block); 588 if (IS_ERR(bh)) { 589 err = PTR_ERR(bh); 590 bh = NULL; 591 goto out; 592 } 593 overhead = ext4_group_overhead_blocks(sb, group); 594 if (overhead != 0) { 595 
ext4_debug("mark backup superblock %#04llx (+0)\n", 596 start); 597 ext4_set_bits(bh->b_data, 0, overhead); 598 } 599 ext4_mark_bitmap_end(group_data[i].blocks_count, 600 sb->s_blocksize * 8, bh->b_data); 601 err = ext4_handle_dirty_metadata(handle, NULL, bh); 602 if (err) 603 goto out; 604 brelse(bh); 605 606 handle_ib: 607 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 608 continue; 609 610 /* Initialize inode bitmap of the @group */ 611 block = group_data[i].inode_bitmap; 612 err = extend_or_restart_transaction(handle, 1); 613 if (err) 614 goto out; 615 /* Mark unused entries in inode bitmap used */ 616 bh = bclean(handle, sb, block); 617 if (IS_ERR(bh)) { 618 err = PTR_ERR(bh); 619 bh = NULL; 620 goto out; 621 } 622 623 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 624 sb->s_blocksize * 8, bh->b_data); 625 err = ext4_handle_dirty_metadata(handle, NULL, bh); 626 if (err) 627 goto out; 628 brelse(bh); 629 } 630 bh = NULL; 631 632 /* Mark group tables in block bitmap */ 633 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 634 count = group_table_count[j]; 635 start = (&group_data[0].block_bitmap)[j]; 636 block = start; 637 for (i = 1; i < flex_gd->count; i++) { 638 block += group_table_count[j]; 639 if (block == (&group_data[i].block_bitmap)[j]) { 640 count += group_table_count[j]; 641 continue; 642 } 643 err = set_flexbg_block_bitmap(sb, handle, 644 flex_gd, start, count); 645 if (err) 646 goto out; 647 count = group_table_count[j]; 648 start = (&group_data[i].block_bitmap)[j]; 649 block = start; 650 } 651 652 if (count) { 653 err = set_flexbg_block_bitmap(sb, handle, 654 flex_gd, start, count); 655 if (err) 656 goto out; 657 } 658 } 659 660 out: 661 brelse(bh); 662 err2 = ext4_journal_stop(handle); 663 if (err2 && !err) 664 err = err2; 665 666 return err; 667 } 668 669 /* 670 * Iterate through the groups which hold BACKUP superblock/GDT copies in an 671 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before 672 * calling this for the first time. In a sparse filesystem it will be the 673 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 674 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 675 */ 676 static unsigned ext4_list_backups(struct super_block *sb, unsigned *three, 677 unsigned *five, unsigned *seven) 678 { 679 unsigned *min = three; 680 int mult = 3; 681 unsigned ret; 682 683 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 684 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { 685 ret = *min; 686 *min += 1; 687 return ret; 688 } 689 690 if (*five < *min) { 691 min = five; 692 mult = 5; 693 } 694 if (*seven < *min) { 695 min = seven; 696 mult = 7; 697 } 698 699 ret = *min; 700 *min *= mult; 701 702 return ret; 703 } 704 705 /* 706 * Check that all of the backup GDT blocks are held in the primary GDT block. 707 * It is assumed that they are stored in group order. Returns the number of 708 * groups in current filesystem that have BACKUPS, or -ve error code. 
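 *
 * Worked example (restating the check below): with sparse_super enabled,
 * the backup groups walked by ext4_list_backups() are 1, 3, 5, 7, 9, 25,
 * 27, 49, ..., so for a primary reserved GDT block at physical block blk,
 * the entry stored for backup group grp must equal
 *
 *	grp * EXT4_BLOCKS_PER_GROUP(sb) + blk
 *
 * i.e. the block at the same offset inside that backup group.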
709 */ 710 static int verify_reserved_gdb(struct super_block *sb, 711 ext4_group_t end, 712 struct buffer_head *primary) 713 { 714 const ext4_fsblk_t blk = primary->b_blocknr; 715 unsigned three = 1; 716 unsigned five = 5; 717 unsigned seven = 7; 718 unsigned grp; 719 __le32 *p = (__le32 *)primary->b_data; 720 int gdbackups = 0; 721 722 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { 723 if (le32_to_cpu(*p++) != 724 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ 725 ext4_warning(sb, "reserved GDT %llu" 726 " missing grp %d (%llu)", 727 blk, grp, 728 grp * 729 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + 730 blk); 731 return -EINVAL; 732 } 733 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) 734 return -EFBIG; 735 } 736 737 return gdbackups; 738 } 739 740 /* 741 * Called when we need to bring a reserved group descriptor table block into 742 * use from the resize inode. The primary copy of the new GDT block currently 743 * is an indirect block (under the double indirect block in the resize inode). 744 * The new backup GDT blocks will be stored as leaf blocks in this indirect 745 * block, in group order. Even though we know all the block numbers we need, 746 * we check to ensure that the resize inode has actually reserved these blocks. 747 * 748 * Don't need to update the block bitmaps because the blocks are still in use. 749 * 750 * We get all of the error cases out of the way, so that we are sure to not 751 * fail once we start modifying the data on disk, because JBD has no rollback. 752 */ 753 static int add_new_gdb(handle_t *handle, struct inode *inode, 754 ext4_group_t group) 755 { 756 struct super_block *sb = inode->i_sb; 757 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 758 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 759 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; 760 struct buffer_head **o_group_desc, **n_group_desc; 761 struct buffer_head *dind; 762 struct buffer_head *gdb_bh; 763 int gdbackups; 764 struct ext4_iloc iloc; 765 __le32 *data; 766 int err; 767 768 if (test_opt(sb, DEBUG)) 769 printk(KERN_DEBUG 770 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 771 gdb_num); 772 773 gdb_bh = sb_bread(sb, gdblock); 774 if (!gdb_bh) 775 return -EIO; 776 777 gdbackups = verify_reserved_gdb(sb, group, gdb_bh); 778 if (gdbackups < 0) { 779 err = gdbackups; 780 goto exit_bh; 781 } 782 783 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 784 dind = sb_bread(sb, le32_to_cpu(*data)); 785 if (!dind) { 786 err = -EIO; 787 goto exit_bh; 788 } 789 790 data = (__le32 *)dind->b_data; 791 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { 792 ext4_warning(sb, "new group %u GDT block %llu not reserved", 793 group, gdblock); 794 err = -EINVAL; 795 goto exit_dind; 796 } 797 798 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 799 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 800 if (unlikely(err)) 801 goto exit_dind; 802 803 BUFFER_TRACE(gdb_bh, "get_write_access"); 804 err = ext4_journal_get_write_access(handle, gdb_bh); 805 if (unlikely(err)) 806 goto exit_dind; 807 808 BUFFER_TRACE(dind, "get_write_access"); 809 err = ext4_journal_get_write_access(handle, dind); 810 if (unlikely(err)) 811 ext4_std_error(sb, err); 812 813 /* ext4_reserve_inode_write() gets a reference on the iloc */ 814 err = ext4_reserve_inode_write(handle, inode, &iloc); 815 if (unlikely(err)) 816 goto exit_dind; 817 818 n_group_desc = ext4_kvmalloc((gdb_num + 1) * 819 sizeof(struct buffer_head *), 820 GFP_NOFS); 821 if (!n_group_desc) { 822 
err = -ENOMEM; 823 ext4_warning(sb, "not enough memory for %lu groups", 824 gdb_num + 1); 825 goto exit_inode; 826 } 827 828 /* 829 * Finally, we have all of the possible failures behind us... 830 * 831 * Remove new GDT block from inode double-indirect block and clear out 832 * the new GDT block for use (which also "frees" the backup GDT blocks 833 * from the reserved inode). We don't need to change the bitmaps for 834 * these blocks, because they are marked as in-use from being in the 835 * reserved inode, and will become GDT blocks (primary and backup). 836 */ 837 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 838 err = ext4_handle_dirty_metadata(handle, NULL, dind); 839 if (unlikely(err)) { 840 ext4_std_error(sb, err); 841 goto exit_inode; 842 } 843 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; 844 ext4_mark_iloc_dirty(handle, inode, &iloc); 845 memset(gdb_bh->b_data, 0, sb->s_blocksize); 846 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 847 if (unlikely(err)) { 848 ext4_std_error(sb, err); 849 goto exit_inode; 850 } 851 brelse(dind); 852 853 o_group_desc = EXT4_SB(sb)->s_group_desc; 854 memcpy(n_group_desc, o_group_desc, 855 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 856 n_group_desc[gdb_num] = gdb_bh; 857 EXT4_SB(sb)->s_group_desc = n_group_desc; 858 EXT4_SB(sb)->s_gdb_count++; 859 kvfree(o_group_desc); 860 861 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 862 err = ext4_handle_dirty_super(handle, sb); 863 if (err) 864 ext4_std_error(sb, err); 865 866 return err; 867 868 exit_inode: 869 kvfree(n_group_desc); 870 brelse(iloc.bh); 871 exit_dind: 872 brelse(dind); 873 exit_bh: 874 brelse(gdb_bh); 875 876 ext4_debug("leaving with error %d\n", err); 877 return err; 878 } 879 880 /* 881 * add_new_gdb_meta_bg is the sister of add_new_gdb. 882 */ 883 static int add_new_gdb_meta_bg(struct super_block *sb, 884 handle_t *handle, ext4_group_t group) { 885 ext4_fsblk_t gdblock; 886 struct buffer_head *gdb_bh; 887 struct buffer_head **o_group_desc, **n_group_desc; 888 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 889 int err; 890 891 gdblock = ext4_meta_bg_first_block_no(sb, group) + 892 ext4_bg_has_super(sb, group); 893 gdb_bh = sb_bread(sb, gdblock); 894 if (!gdb_bh) 895 return -EIO; 896 n_group_desc = ext4_kvmalloc((gdb_num + 1) * 897 sizeof(struct buffer_head *), 898 GFP_NOFS); 899 if (!n_group_desc) { 900 err = -ENOMEM; 901 ext4_warning(sb, "not enough memory for %lu groups", 902 gdb_num + 1); 903 return err; 904 } 905 906 o_group_desc = EXT4_SB(sb)->s_group_desc; 907 memcpy(n_group_desc, o_group_desc, 908 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 909 n_group_desc[gdb_num] = gdb_bh; 910 EXT4_SB(sb)->s_group_desc = n_group_desc; 911 EXT4_SB(sb)->s_gdb_count++; 912 kvfree(o_group_desc); 913 BUFFER_TRACE(gdb_bh, "get_write_access"); 914 err = ext4_journal_get_write_access(handle, gdb_bh); 915 if (unlikely(err)) 916 brelse(gdb_bh); 917 return err; 918 } 919 920 /* 921 * Called when we are adding a new group which has a backup copy of each of 922 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 923 * We need to add these reserved backup GDT blocks to the resize inode, so 924 * that they are kept for future resizing and not allocated to files. 925 * 926 * Each reserved backup GDT block will go into a different indirect block. 927 * The indirect blocks are actually the primary reserved GDT blocks, 928 * so we know in advance what their block numbers are. 
We only get the 929 * double-indirect block to verify it is pointing to the primary reserved 930 * GDT blocks so we don't overwrite a data block by accident. The reserved 931 * backup GDT blocks are stored in their reserved primary GDT block. 932 */ 933 static int reserve_backup_gdb(handle_t *handle, struct inode *inode, 934 ext4_group_t group) 935 { 936 struct super_block *sb = inode->i_sb; 937 int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 938 struct buffer_head **primary; 939 struct buffer_head *dind; 940 struct ext4_iloc iloc; 941 ext4_fsblk_t blk; 942 __le32 *data, *end; 943 int gdbackups = 0; 944 int res, i; 945 int err; 946 947 primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS); 948 if (!primary) 949 return -ENOMEM; 950 951 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 952 dind = sb_bread(sb, le32_to_cpu(*data)); 953 if (!dind) { 954 err = -EIO; 955 goto exit_free; 956 } 957 958 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; 959 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % 960 EXT4_ADDR_PER_BLOCK(sb)); 961 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); 962 963 /* Get each reserved primary GDT block and verify it holds backups */ 964 for (res = 0; res < reserved_gdb; res++, blk++) { 965 if (le32_to_cpu(*data) != blk) { 966 ext4_warning(sb, "reserved block %llu" 967 " not at offset %ld", 968 blk, 969 (long)(data - (__le32 *)dind->b_data)); 970 err = -EINVAL; 971 goto exit_bh; 972 } 973 primary[res] = sb_bread(sb, blk); 974 if (!primary[res]) { 975 err = -EIO; 976 goto exit_bh; 977 } 978 gdbackups = verify_reserved_gdb(sb, group, primary[res]); 979 if (gdbackups < 0) { 980 brelse(primary[res]); 981 err = gdbackups; 982 goto exit_bh; 983 } 984 if (++data >= end) 985 data = (__le32 *)dind->b_data; 986 } 987 988 for (i = 0; i < reserved_gdb; i++) { 989 BUFFER_TRACE(primary[i], "get_write_access"); 990 if ((err = ext4_journal_get_write_access(handle, primary[i]))) 991 goto exit_bh; 992 } 993 994 if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) 995 goto exit_bh; 996 997 /* 998 * Finally we can add each of the reserved backup GDT blocks from 999 * the new group to its reserved primary GDT block. 1000 */ 1001 blk = group * EXT4_BLOCKS_PER_GROUP(sb); 1002 for (i = 0; i < reserved_gdb; i++) { 1003 int err2; 1004 data = (__le32 *)primary[i]->b_data; 1005 /* printk("reserving backup %lu[%u] = %lu\n", 1006 primary[i]->b_blocknr, gdbackups, 1007 blk + primary[i]->b_blocknr); */ 1008 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); 1009 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); 1010 if (!err) 1011 err = err2; 1012 } 1013 inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9; 1014 ext4_mark_iloc_dirty(handle, inode, &iloc); 1015 1016 exit_bh: 1017 while (--res >= 0) 1018 brelse(primary[res]); 1019 brelse(dind); 1020 1021 exit_free: 1022 kfree(primary); 1023 1024 return err; 1025 } 1026 1027 /* 1028 * Update the backup copies of the ext4 metadata. These don't need to be part 1029 * of the main resize transaction, because e2fsck will re-write them if there 1030 * is a problem (basically only OOM will cause a problem). However, we 1031 * _should_ update the backups if possible, in case the primary gets trashed 1032 * for some reason and we need to run e2fsck from a backup superblock. The 1033 * important part is that the new block and inode counts are in the backup 1034 * superblocks, and the location of the new group metadata in the GDT backups. 
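 *
 * Concretely (this just restates the loop below): with meta_bg off, the
 * copy for backup group N is written at block
 * N * EXT4_BLOCKS_PER_GROUP(sb) + blk_off, i.e. at the same offset inside
 * every sparse backup group; with meta_bg on, it is written to the first
 * block of the group, plus one if that group also carries a superblock.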
1035 * 1036 * We do not need take the s_resize_lock for this, because these 1037 * blocks are not otherwise touched by the filesystem code when it is 1038 * mounted. We don't need to worry about last changing from 1039 * sbi->s_groups_count, because the worst that can happen is that we 1040 * do not copy the full number of backups at this time. The resize 1041 * which changed s_groups_count will backup again. 1042 */ 1043 static void update_backups(struct super_block *sb, int blk_off, char *data, 1044 int size, int meta_bg) 1045 { 1046 struct ext4_sb_info *sbi = EXT4_SB(sb); 1047 ext4_group_t last; 1048 const int bpg = EXT4_BLOCKS_PER_GROUP(sb); 1049 unsigned three = 1; 1050 unsigned five = 5; 1051 unsigned seven = 7; 1052 ext4_group_t group = 0; 1053 int rest = sb->s_blocksize - size; 1054 handle_t *handle; 1055 int err = 0, err2; 1056 1057 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 1058 if (IS_ERR(handle)) { 1059 group = 1; 1060 err = PTR_ERR(handle); 1061 goto exit_err; 1062 } 1063 1064 if (meta_bg == 0) { 1065 group = ext4_list_backups(sb, &three, &five, &seven); 1066 last = sbi->s_groups_count; 1067 } else { 1068 group = ext4_meta_bg_first_group(sb, group) + 1; 1069 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); 1070 } 1071 1072 while (group < sbi->s_groups_count) { 1073 struct buffer_head *bh; 1074 ext4_fsblk_t backup_block; 1075 1076 /* Out of journal space, and can't get more - abort - so sad */ 1077 if (ext4_handle_valid(handle) && 1078 handle->h_buffer_credits == 0 && 1079 ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) && 1080 (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA))) 1081 break; 1082 1083 if (meta_bg == 0) 1084 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; 1085 else 1086 backup_block = (ext4_group_first_block_no(sb, group) + 1087 ext4_bg_has_super(sb, group)); 1088 1089 bh = sb_getblk(sb, backup_block); 1090 if (unlikely(!bh)) { 1091 err = -ENOMEM; 1092 break; 1093 } 1094 ext4_debug("update metadata backup %llu(+%llu)\n", 1095 backup_block, backup_block - 1096 ext4_group_first_block_no(sb, group)); 1097 BUFFER_TRACE(bh, "get_write_access"); 1098 if ((err = ext4_journal_get_write_access(handle, bh))) 1099 break; 1100 lock_buffer(bh); 1101 memcpy(bh->b_data, data, size); 1102 if (rest) 1103 memset(bh->b_data + size, 0, rest); 1104 set_buffer_uptodate(bh); 1105 unlock_buffer(bh); 1106 err = ext4_handle_dirty_metadata(handle, NULL, bh); 1107 if (unlikely(err)) 1108 ext4_std_error(sb, err); 1109 brelse(bh); 1110 1111 if (meta_bg == 0) 1112 group = ext4_list_backups(sb, &three, &five, &seven); 1113 else if (group == last) 1114 break; 1115 else 1116 group = last; 1117 } 1118 if ((err2 = ext4_journal_stop(handle)) && !err) 1119 err = err2; 1120 1121 /* 1122 * Ugh! Need to have e2fsck write the backup copies. It is too 1123 * late to revert the resize, we shouldn't fail just because of 1124 * the backup copies (they are only needed in case of corruption). 1125 * 1126 * However, if we got here we have a journal problem too, so we 1127 * can't really start a transaction to mark the superblock. 1128 * Chicken out and just set the flag on the hope it will be written 1129 * to disk, and if not - we will simply wait until next fsck. 
1130 */ 1131 exit_err: 1132 if (err) { 1133 ext4_warning(sb, "can't update backup for group %u (err %d), " 1134 "forcing fsck on next reboot", group, err); 1135 sbi->s_mount_state &= ~EXT4_VALID_FS; 1136 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 1137 mark_buffer_dirty(sbi->s_sbh); 1138 } 1139 } 1140 1141 /* 1142 * ext4_add_new_descs() adds @count group descriptor of groups 1143 * starting at @group 1144 * 1145 * @handle: journal handle 1146 * @sb: super block 1147 * @group: the group no. of the first group desc to be added 1148 * @resize_inode: the resize inode 1149 * @count: number of group descriptors to be added 1150 */ 1151 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, 1152 ext4_group_t group, struct inode *resize_inode, 1153 ext4_group_t count) 1154 { 1155 struct ext4_sb_info *sbi = EXT4_SB(sb); 1156 struct ext4_super_block *es = sbi->s_es; 1157 struct buffer_head *gdb_bh; 1158 int i, gdb_off, gdb_num, err = 0; 1159 int meta_bg; 1160 1161 meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG); 1162 for (i = 0; i < count; i++, group++) { 1163 int reserved_gdb = ext4_bg_has_super(sb, group) ? 1164 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1165 1166 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1167 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1168 1169 /* 1170 * We will only either add reserved group blocks to a backup group 1171 * or remove reserved blocks for the first group in a new group block. 1172 * Doing both would be mean more complex code, and sane people don't 1173 * use non-sparse filesystems anymore. This is already checked above. 1174 */ 1175 if (gdb_off) { 1176 gdb_bh = sbi->s_group_desc[gdb_num]; 1177 BUFFER_TRACE(gdb_bh, "get_write_access"); 1178 err = ext4_journal_get_write_access(handle, gdb_bh); 1179 1180 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) 1181 err = reserve_backup_gdb(handle, resize_inode, group); 1182 } else if (meta_bg != 0) { 1183 err = add_new_gdb_meta_bg(sb, handle, group); 1184 } else { 1185 err = add_new_gdb(handle, resize_inode, group); 1186 } 1187 if (err) 1188 break; 1189 } 1190 return err; 1191 } 1192 1193 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) 1194 { 1195 struct buffer_head *bh = sb_getblk(sb, block); 1196 if (unlikely(!bh)) 1197 return NULL; 1198 if (!bh_uptodate_or_lock(bh)) { 1199 if (bh_submit_read(bh) < 0) { 1200 brelse(bh); 1201 return NULL; 1202 } 1203 } 1204 1205 return bh; 1206 } 1207 1208 static int ext4_set_bitmap_checksums(struct super_block *sb, 1209 ext4_group_t group, 1210 struct ext4_group_desc *gdp, 1211 struct ext4_new_group_data *group_data) 1212 { 1213 struct buffer_head *bh; 1214 1215 if (!ext4_has_metadata_csum(sb)) 1216 return 0; 1217 1218 bh = ext4_get_bitmap(sb, group_data->inode_bitmap); 1219 if (!bh) 1220 return -EIO; 1221 ext4_inode_bitmap_csum_set(sb, group, gdp, bh, 1222 EXT4_INODES_PER_GROUP(sb) / 8); 1223 brelse(bh); 1224 1225 bh = ext4_get_bitmap(sb, group_data->block_bitmap); 1226 if (!bh) 1227 return -EIO; 1228 ext4_block_bitmap_csum_set(sb, group, gdp, bh); 1229 brelse(bh); 1230 1231 return 0; 1232 } 1233 1234 /* 1235 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg 1236 */ 1237 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, 1238 struct ext4_new_flex_group_data *flex_gd) 1239 { 1240 struct ext4_new_group_data *group_data = flex_gd->groups; 1241 struct ext4_group_desc *gdp; 1242 struct ext4_sb_info *sbi = EXT4_SB(sb); 1243 struct buffer_head *gdb_bh; 
1244 ext4_group_t group; 1245 __u16 *bg_flags = flex_gd->bg_flags; 1246 int i, gdb_off, gdb_num, err = 0; 1247 1248 1249 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { 1250 group = group_data->group; 1251 1252 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1253 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1254 1255 /* 1256 * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). 1257 */ 1258 gdb_bh = sbi->s_group_desc[gdb_num]; 1259 /* Update group descriptor block for new group */ 1260 gdp = (struct ext4_group_desc *)(gdb_bh->b_data + 1261 gdb_off * EXT4_DESC_SIZE(sb)); 1262 1263 memset(gdp, 0, EXT4_DESC_SIZE(sb)); 1264 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); 1265 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); 1266 err = ext4_set_bitmap_checksums(sb, group, gdp, group_data); 1267 if (err) { 1268 ext4_std_error(sb, err); 1269 break; 1270 } 1271 1272 ext4_inode_table_set(sb, gdp, group_data->inode_table); 1273 ext4_free_group_clusters_set(sb, gdp, 1274 EXT4_NUM_B2C(sbi, group_data->free_blocks_count)); 1275 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); 1276 if (ext4_has_group_desc_csum(sb)) 1277 ext4_itable_unused_set(sb, gdp, 1278 EXT4_INODES_PER_GROUP(sb)); 1279 gdp->bg_flags = cpu_to_le16(*bg_flags); 1280 ext4_group_desc_csum_set(sb, group, gdp); 1281 1282 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 1283 if (unlikely(err)) { 1284 ext4_std_error(sb, err); 1285 break; 1286 } 1287 1288 /* 1289 * We can allocate memory for mb_alloc based on the new group 1290 * descriptor 1291 */ 1292 err = ext4_mb_add_groupinfo(sb, group, gdp); 1293 if (err) 1294 break; 1295 } 1296 return err; 1297 } 1298 1299 /* 1300 * ext4_update_super() updates the super block so that the newly added 1301 * groups can be seen by the filesystem. 1302 * 1303 * @sb: super block 1304 * @flex_gd: new added groups 1305 */ 1306 static void ext4_update_super(struct super_block *sb, 1307 struct ext4_new_flex_group_data *flex_gd) 1308 { 1309 ext4_fsblk_t blocks_count = 0; 1310 ext4_fsblk_t free_blocks = 0; 1311 ext4_fsblk_t reserved_blocks = 0; 1312 struct ext4_new_group_data *group_data = flex_gd->groups; 1313 struct ext4_sb_info *sbi = EXT4_SB(sb); 1314 struct ext4_super_block *es = sbi->s_es; 1315 int i; 1316 1317 BUG_ON(flex_gd->count == 0 || group_data == NULL); 1318 /* 1319 * Make the new blocks and inodes valid next. We do this before 1320 * increasing the group count so that once the group is enabled, 1321 * all of its blocks and inodes are already valid. 1322 * 1323 * We always allocate group-by-group, then block-by-block or 1324 * inode-by-inode within a group, so enabling these 1325 * blocks/inodes before the group is live won't actually let us 1326 * allocate the new space yet. 
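	 *
	 * The reserved-blocks update further down keeps the existing ratio:
	 * for example (made-up numbers), if 5% of the current filesystem is
	 * reserved for root, roughly 5% of the newly added blocks are added
	 * to the reserved blocks count as well (the calculation goes through
	 * an integer percentage, so it may round down slightly).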
1327 */ 1328 for (i = 0; i < flex_gd->count; i++) { 1329 blocks_count += group_data[i].blocks_count; 1330 free_blocks += group_data[i].free_blocks_count; 1331 } 1332 1333 reserved_blocks = ext4_r_blocks_count(es) * 100; 1334 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es)); 1335 reserved_blocks *= blocks_count; 1336 do_div(reserved_blocks, 100); 1337 1338 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); 1339 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); 1340 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1341 flex_gd->count); 1342 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1343 flex_gd->count); 1344 1345 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); 1346 /* 1347 * We need to protect s_groups_count against other CPUs seeing 1348 * inconsistent state in the superblock. 1349 * 1350 * The precise rules we use are: 1351 * 1352 * * Writers must perform a smp_wmb() after updating all 1353 * dependent data and before modifying the groups count 1354 * 1355 * * Readers must perform an smp_rmb() after reading the groups 1356 * count and before reading any dependent data. 1357 * 1358 * NB. These rules can be relaxed when checking the group count 1359 * while freeing data, as we can only allocate from a block 1360 * group after serialising against the group count, and we can 1361 * only then free after serialising in turn against that 1362 * allocation. 1363 */ 1364 smp_wmb(); 1365 1366 /* Update the global fs size fields */ 1367 sbi->s_groups_count += flex_gd->count; 1368 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 1369 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 1370 1371 /* Update the reserved block counts only once the new group is 1372 * active. */ 1373 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + 1374 reserved_blocks); 1375 1376 /* Update the free space counts */ 1377 percpu_counter_add(&sbi->s_freeclusters_counter, 1378 EXT4_NUM_B2C(sbi, free_blocks)); 1379 percpu_counter_add(&sbi->s_freeinodes_counter, 1380 EXT4_INODES_PER_GROUP(sb) * flex_gd->count); 1381 1382 ext4_debug("free blocks count %llu", 1383 percpu_counter_read(&sbi->s_freeclusters_counter)); 1384 if (EXT4_HAS_INCOMPAT_FEATURE(sb, 1385 EXT4_FEATURE_INCOMPAT_FLEX_BG) && 1386 sbi->s_log_groups_per_flex) { 1387 ext4_group_t flex_group; 1388 flex_group = ext4_flex_group(sbi, group_data[0].group); 1389 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), 1390 &sbi->s_flex_groups[flex_group].free_clusters); 1391 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, 1392 &sbi->s_flex_groups[flex_group].free_inodes); 1393 } 1394 1395 /* 1396 * Update the fs overhead information 1397 */ 1398 ext4_calculate_overhead(sb); 1399 1400 if (test_opt(sb, DEBUG)) 1401 printk(KERN_DEBUG "EXT4-fs: added group %u:" 1402 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, 1403 blocks_count, free_blocks, reserved_blocks); 1404 } 1405 1406 /* Add a flex group to an fs. Ensure we handle all possible error conditions 1407 * _before_ we start modifying the filesystem, because we cannot abort the 1408 * transaction and not have it write the data to disk. 
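 *
 * In outline, the helpers above do the heavy lifting:
 * setup_new_flex_group_blocks() writes the new groups' bitmaps, inode
 * tables and backup SB/GDT copies, ext4_add_new_descs() and
 * ext4_setup_new_descs() install the new group descriptors,
 * ext4_update_super() makes the new blocks and inodes visible, and
 * update_backups() refreshes the backup superblocks and GDT copies.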
1409 */ 1410 static int ext4_flex_group_add(struct super_block *sb, 1411 struct inode *resize_inode, 1412 struct ext4_new_flex_group_data *flex_gd) 1413 { 1414 struct ext4_sb_info *sbi = EXT4_SB(sb); 1415 struct ext4_super_block *es = sbi->s_es; 1416 ext4_fsblk_t o_blocks_count; 1417 ext4_grpblk_t last; 1418 ext4_group_t group; 1419 handle_t *handle; 1420 unsigned reserved_gdb; 1421 int err = 0, err2 = 0, credit; 1422 1423 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); 1424 1425 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 1426 o_blocks_count = ext4_blocks_count(es); 1427 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1428 BUG_ON(last); 1429 1430 err = setup_new_flex_group_blocks(sb, flex_gd); 1431 if (err) 1432 goto exit; 1433 /* 1434 * We will always be modifying at least the superblock and GDT 1435 * block. If we are adding a group past the last current GDT block, 1436 * we will also modify the inode and the dindirect block. If we 1437 * are adding a group with superblock/GDT backups we will also 1438 * modify each of the reserved GDT dindirect blocks. 1439 */ 1440 credit = flex_gd->count * 4 + reserved_gdb; 1441 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1442 if (IS_ERR(handle)) { 1443 err = PTR_ERR(handle); 1444 goto exit; 1445 } 1446 1447 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1448 err = ext4_journal_get_write_access(handle, sbi->s_sbh); 1449 if (err) 1450 goto exit_journal; 1451 1452 group = flex_gd->groups[0].group; 1453 BUG_ON(group != EXT4_SB(sb)->s_groups_count); 1454 err = ext4_add_new_descs(handle, sb, group, 1455 resize_inode, flex_gd->count); 1456 if (err) 1457 goto exit_journal; 1458 1459 err = ext4_setup_new_descs(handle, sb, flex_gd); 1460 if (err) 1461 goto exit_journal; 1462 1463 ext4_update_super(sb, flex_gd); 1464 1465 err = ext4_handle_dirty_super(handle, sb); 1466 1467 exit_journal: 1468 err2 = ext4_journal_stop(handle); 1469 if (!err) 1470 err = err2; 1471 1472 if (!err) { 1473 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1474 int gdb_num_end = ((group + flex_gd->count - 1) / 1475 EXT4_DESC_PER_BLOCK(sb)); 1476 int meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, 1477 EXT4_FEATURE_INCOMPAT_META_BG); 1478 sector_t old_gdb = 0; 1479 1480 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, 1481 sizeof(struct ext4_super_block), 0); 1482 for (; gdb_num <= gdb_num_end; gdb_num++) { 1483 struct buffer_head *gdb_bh; 1484 1485 gdb_bh = sbi->s_group_desc[gdb_num]; 1486 if (old_gdb == gdb_bh->b_blocknr) 1487 continue; 1488 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, 1489 gdb_bh->b_size, meta_bg); 1490 old_gdb = gdb_bh->b_blocknr; 1491 } 1492 } 1493 exit: 1494 return err; 1495 } 1496 1497 static int ext4_setup_next_flex_gd(struct super_block *sb, 1498 struct ext4_new_flex_group_data *flex_gd, 1499 ext4_fsblk_t n_blocks_count, 1500 unsigned long flexbg_size) 1501 { 1502 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1503 struct ext4_new_group_data *group_data = flex_gd->groups; 1504 ext4_fsblk_t o_blocks_count; 1505 ext4_group_t n_group; 1506 ext4_group_t group; 1507 ext4_group_t last_group; 1508 ext4_grpblk_t last; 1509 ext4_grpblk_t blocks_per_group; 1510 unsigned long i; 1511 1512 blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb); 1513 1514 o_blocks_count = ext4_blocks_count(es); 1515 1516 if (o_blocks_count == n_blocks_count) 1517 return 0; 1518 1519 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1520 BUG_ON(last); 1521 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, 
					    &last);

	last_group = group | (flexbg_size - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = blocks_per_group;
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].free_blocks_count = blocks_per_group - overhead;
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != blocks_per_group - 1)) {
		group_data[i - 1].blocks_count = last + 1;
		group_data[i - 1].free_blocks_count -= blocks_per_group -
						       last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1577 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1578 struct inode *inode = NULL; 1579 int gdb_off; 1580 int err; 1581 __u16 bg_flags = 0; 1582 1583 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); 1584 1585 if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, 1586 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { 1587 ext4_warning(sb, "Can't resize non-sparse filesystem further"); 1588 return -EPERM; 1589 } 1590 1591 if (ext4_blocks_count(es) + input->blocks_count < 1592 ext4_blocks_count(es)) { 1593 ext4_warning(sb, "blocks_count overflow"); 1594 return -EINVAL; 1595 } 1596 1597 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 1598 le32_to_cpu(es->s_inodes_count)) { 1599 ext4_warning(sb, "inodes_count overflow"); 1600 return -EINVAL; 1601 } 1602 1603 if (reserved_gdb || gdb_off == 0) { 1604 if (!EXT4_HAS_COMPAT_FEATURE(sb, 1605 EXT4_FEATURE_COMPAT_RESIZE_INODE) 1606 || !le16_to_cpu(es->s_reserved_gdt_blocks)) { 1607 ext4_warning(sb, 1608 "No reserved GDT blocks, can't resize"); 1609 return -EPERM; 1610 } 1611 inode = ext4_iget(sb, EXT4_RESIZE_INO); 1612 if (IS_ERR(inode)) { 1613 ext4_warning(sb, "Error opening resize inode"); 1614 return PTR_ERR(inode); 1615 } 1616 } 1617 1618 1619 err = verify_group_input(sb, input); 1620 if (err) 1621 goto out; 1622 1623 err = ext4_alloc_flex_bg_array(sb, input->group + 1); 1624 if (err) 1625 goto out; 1626 1627 err = ext4_mb_alloc_groupinfo(sb, input->group + 1); 1628 if (err) 1629 goto out; 1630 1631 flex_gd.count = 1; 1632 flex_gd.groups = input; 1633 flex_gd.bg_flags = &bg_flags; 1634 err = ext4_flex_group_add(sb, inode, &flex_gd); 1635 out: 1636 iput(inode); 1637 return err; 1638 } /* ext4_group_add */ 1639 1640 /* 1641 * extend a group without checking assuming that checking has been done. 1642 */ 1643 static int ext4_group_extend_no_check(struct super_block *sb, 1644 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) 1645 { 1646 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1647 handle_t *handle; 1648 int err = 0, err2; 1649 1650 /* We will update the superblock, one block bitmap, and 1651 * one group descriptor via ext4_group_add_blocks(). 
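	 * That is one journalled buffer each for the superblock, the block
	 * bitmap and the group descriptor, which is why 3 credits are
	 * requested from ext4_journal_start_sb() below.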
1652 */ 1653 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3); 1654 if (IS_ERR(handle)) { 1655 err = PTR_ERR(handle); 1656 ext4_warning(sb, "error %d on journal start", err); 1657 return err; 1658 } 1659 1660 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 1661 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 1662 if (err) { 1663 ext4_warning(sb, "error %d on journal write access", err); 1664 goto errout; 1665 } 1666 1667 ext4_blocks_count_set(es, o_blocks_count + add); 1668 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add); 1669 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, 1670 o_blocks_count + add); 1671 /* We add the blocks to the bitmap and set the group need init bit */ 1672 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add); 1673 if (err) 1674 goto errout; 1675 ext4_handle_dirty_super(handle, sb); 1676 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, 1677 o_blocks_count + add); 1678 errout: 1679 err2 = ext4_journal_stop(handle); 1680 if (err2 && !err) 1681 err = err2; 1682 1683 if (!err) { 1684 if (test_opt(sb, DEBUG)) 1685 printk(KERN_DEBUG "EXT4-fs: extended group to %llu " 1686 "blocks\n", ext4_blocks_count(es)); 1687 update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, 1688 (char *)es, sizeof(struct ext4_super_block), 0); 1689 } 1690 return err; 1691 } 1692 1693 /* 1694 * Extend the filesystem to the new number of blocks specified. This entry 1695 * point is only used to extend the current filesystem to the end of the last 1696 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" 1697 * for emergencies (because it has no dependencies on reserved blocks). 1698 * 1699 * If we _really_ wanted, we could use default values to call ext4_group_add() 1700 * allow the "remount" trick to work for arbitrary resizing, assuming enough 1701 * GDT blocks are reserved to grow to the desired size. 1702 */ 1703 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, 1704 ext4_fsblk_t n_blocks_count) 1705 { 1706 ext4_fsblk_t o_blocks_count; 1707 ext4_grpblk_t last; 1708 ext4_grpblk_t add; 1709 struct buffer_head *bh; 1710 int err; 1711 ext4_group_t group; 1712 1713 o_blocks_count = ext4_blocks_count(es); 1714 1715 if (test_opt(sb, DEBUG)) 1716 ext4_msg(sb, KERN_DEBUG, 1717 "extending last group from %llu to %llu blocks", 1718 o_blocks_count, n_blocks_count); 1719 1720 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) 1721 return 0; 1722 1723 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { 1724 ext4_msg(sb, KERN_ERR, 1725 "filesystem too large to resize to %llu blocks safely", 1726 n_blocks_count); 1727 if (sizeof(sector_t) < 8) 1728 ext4_warning(sb, "CONFIG_LBDAF not enabled"); 1729 return -EINVAL; 1730 } 1731 1732 if (n_blocks_count < o_blocks_count) { 1733 ext4_warning(sb, "can't shrink FS - resize aborted"); 1734 return -EINVAL; 1735 } 1736 1737 /* Handle the remaining blocks in the last group only. 
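	 *
	 * Worked example (made-up numbers): with 32768 blocks per group and
	 * o_blocks_count ending at offset last == 100 within its group, at
	 * most add = 32768 - 100 = 32668 blocks can be appended to that
	 * group; add is clamped below so we never grow past n_blocks_count.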
*/ 1738 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1739 1740 if (last == 0) { 1741 ext4_warning(sb, "need to use ext2online to resize further"); 1742 return -EPERM; 1743 } 1744 1745 add = EXT4_BLOCKS_PER_GROUP(sb) - last; 1746 1747 if (o_blocks_count + add < o_blocks_count) { 1748 ext4_warning(sb, "blocks_count overflow"); 1749 return -EINVAL; 1750 } 1751 1752 if (o_blocks_count + add > n_blocks_count) 1753 add = n_blocks_count - o_blocks_count; 1754 1755 if (o_blocks_count + add < n_blocks_count) 1756 ext4_warning(sb, "will only finish group (%llu blocks, %u new)", 1757 o_blocks_count + add, add); 1758 1759 /* See if the device is actually as big as what was requested */ 1760 bh = sb_bread(sb, o_blocks_count + add - 1); 1761 if (!bh) { 1762 ext4_warning(sb, "can't read last block, resize aborted"); 1763 return -ENOSPC; 1764 } 1765 brelse(bh); 1766 1767 err = ext4_group_extend_no_check(sb, o_blocks_count, add); 1768 return err; 1769 } /* ext4_group_extend */ 1770 1771 1772 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups) 1773 { 1774 return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); 1775 } 1776 1777 /* 1778 * Release the resize inode and drop the resize_inode feature if there 1779 * are no more reserved gdt blocks, and then convert the file system 1780 * to enable meta_bg 1781 */ 1782 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode) 1783 { 1784 handle_t *handle; 1785 struct ext4_sb_info *sbi = EXT4_SB(sb); 1786 struct ext4_super_block *es = sbi->s_es; 1787 struct ext4_inode_info *ei = EXT4_I(inode); 1788 ext4_fsblk_t nr; 1789 int i, ret, err = 0; 1790 int credits = 1; 1791 1792 ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg"); 1793 if (inode) { 1794 if (es->s_reserved_gdt_blocks) { 1795 ext4_error(sb, "Unexpected non-zero " 1796 "s_reserved_gdt_blocks"); 1797 return -EPERM; 1798 } 1799 1800 /* Do a quick sanity check of the resize inode */ 1801 if (inode->i_blocks != 1 << (inode->i_blkbits - 9)) 1802 goto invalid_resize_inode; 1803 for (i = 0; i < EXT4_N_BLOCKS; i++) { 1804 if (i == EXT4_DIND_BLOCK) { 1805 if (ei->i_data[i]) 1806 continue; 1807 else 1808 goto invalid_resize_inode; 1809 } 1810 if (ei->i_data[i]) 1811 goto invalid_resize_inode; 1812 } 1813 credits += 3; /* block bitmap, bg descriptor, resize inode */ 1814 } 1815 1816 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits); 1817 if (IS_ERR(handle)) 1818 return PTR_ERR(handle); 1819 1820 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1821 err = ext4_journal_get_write_access(handle, sbi->s_sbh); 1822 if (err) 1823 goto errout; 1824 1825 EXT4_CLEAR_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE); 1826 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG); 1827 sbi->s_es->s_first_meta_bg = 1828 cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count)); 1829 1830 err = ext4_handle_dirty_super(handle, sb); 1831 if (err) { 1832 ext4_std_error(sb, err); 1833 goto errout; 1834 } 1835 1836 if (inode) { 1837 nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]); 1838 ext4_free_blocks(handle, inode, NULL, nr, 1, 1839 EXT4_FREE_BLOCKS_METADATA | 1840 EXT4_FREE_BLOCKS_FORGET); 1841 ei->i_data[EXT4_DIND_BLOCK] = 0; 1842 inode->i_blocks = 0; 1843 1844 err = ext4_mark_inode_dirty(handle, inode); 1845 if (err) 1846 ext4_std_error(sb, err); 1847 } 1848 1849 errout: 1850 ret = ext4_journal_stop(handle); 1851 if (!err) 1852 err = ret; 1853 return ret; 1854 1855 invalid_resize_inode: 1856 ext4_error(sb, "corrupted/inconsistent resize 
inode"); 1857 return -EINVAL; 1858 } 1859 1860 /* 1861 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count 1862 * 1863 * @sb: super block of the fs to be resized 1864 * @n_blocks_count: the number of blocks resides in the resized fs 1865 */ 1866 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) 1867 { 1868 struct ext4_new_flex_group_data *flex_gd = NULL; 1869 struct ext4_sb_info *sbi = EXT4_SB(sb); 1870 struct ext4_super_block *es = sbi->s_es; 1871 struct buffer_head *bh; 1872 struct inode *resize_inode = NULL; 1873 ext4_grpblk_t add, offset; 1874 unsigned long n_desc_blocks; 1875 unsigned long o_desc_blocks; 1876 ext4_group_t o_group; 1877 ext4_group_t n_group; 1878 ext4_fsblk_t o_blocks_count; 1879 ext4_fsblk_t n_blocks_count_retry = 0; 1880 unsigned long last_update_time = 0; 1881 int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex; 1882 int meta_bg; 1883 1884 /* See if the device is actually as big as what was requested */ 1885 bh = sb_bread(sb, n_blocks_count - 1); 1886 if (!bh) { 1887 ext4_warning(sb, "can't read last block, resize aborted"); 1888 return -ENOSPC; 1889 } 1890 brelse(bh); 1891 1892 retry: 1893 o_blocks_count = ext4_blocks_count(es); 1894 1895 ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu " 1896 "to %llu blocks", o_blocks_count, n_blocks_count); 1897 1898 if (n_blocks_count < o_blocks_count) { 1899 /* On-line shrinking not supported */ 1900 ext4_warning(sb, "can't shrink FS - resize aborted"); 1901 return -EINVAL; 1902 } 1903 1904 if (n_blocks_count == o_blocks_count) 1905 /* Nothing need to do */ 1906 return 0; 1907 1908 n_group = ext4_get_group_number(sb, n_blocks_count - 1); 1909 if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { 1910 ext4_warning(sb, "resize would cause inodes_count overflow"); 1911 return -EINVAL; 1912 } 1913 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset); 1914 1915 n_desc_blocks = num_desc_blocks(sb, n_group + 1); 1916 o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count); 1917 1918 meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG); 1919 1920 if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE)) { 1921 if (meta_bg) { 1922 ext4_error(sb, "resize_inode and meta_bg enabled " 1923 "simultaneously"); 1924 return -EINVAL; 1925 } 1926 if (n_desc_blocks > o_desc_blocks + 1927 le16_to_cpu(es->s_reserved_gdt_blocks)) { 1928 n_blocks_count_retry = n_blocks_count; 1929 n_desc_blocks = o_desc_blocks + 1930 le16_to_cpu(es->s_reserved_gdt_blocks); 1931 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); 1932 n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb); 1933 n_group--; /* set to last group number */ 1934 } 1935 1936 if (!resize_inode) 1937 resize_inode = ext4_iget(sb, EXT4_RESIZE_INO); 1938 if (IS_ERR(resize_inode)) { 1939 ext4_warning(sb, "Error opening resize inode"); 1940 return PTR_ERR(resize_inode); 1941 } 1942 } 1943 1944 if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) { 1945 err = ext4_convert_meta_bg(sb, resize_inode); 1946 if (err) 1947 goto out; 1948 if (resize_inode) { 1949 iput(resize_inode); 1950 resize_inode = NULL; 1951 } 1952 if (n_blocks_count_retry) { 1953 n_blocks_count = n_blocks_count_retry; 1954 n_blocks_count_retry = 0; 1955 goto retry; 1956 } 1957 } 1958 1959 /* extend the last group */ 1960 if (n_group == o_group) 1961 add = n_blocks_count - o_blocks_count; 1962 else 1963 add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1); 1964 if (add > 0) { 1965 err = ext4_group_extend_no_check(sb, 
	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1);
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups.  Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
	return err;
}
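
/*
 * Usage note: ext4_resize_fs() is the 64bit-resize entry point and is driven
 * from userspace (e.g. by resize2fs) through the EXT4_IOC_RESIZE_FS ioctl,
 * whose argument is the desired size in filesystem blocks as a __u64.  A
 * minimal sketch of such a caller, assuming a filesystem mounted at /mnt and
 * a made-up target size; as above, the ioctl number is repeated only for
 * illustration and the authoritative definition is in fs/ext4/ext4.h.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/types.h>
 *
 *	#define EXT4_IOC_RESIZE_FS	_IOW('f', 16, __u64)
 *
 *	int main(void)
 *	{
 *		__u64 new_blocks = 26214400;
 *		int fd = open("/mnt", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, EXT4_IOC_RESIZE_FS, &new_blocks)) {
 *			perror("EXT4_IOC_RESIZE_FS");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */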