// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with error, because it can destroy the filesystem easily.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
}

static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group) {
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
					     ext4_group_t group) {
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group) {
	ext4_grpblk_t overhead;
	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}

#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64-bit resize interface to add
 * one flex group at a time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t count;			/* number of groups in @groups
						 */
};

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
 * @flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
		goto out2;
	flex_gd->count = flexbg_size;

	flex_gd->groups = kmalloc_array(flexbg_size,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be a partial flex group.
 *
 * @sb: super block of fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
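	/*
	 * Groups with no superblock/GDT overhead at their start extend the
	 * current contiguous range; the scan below stops at the first group
	 * that does carry such overhead.
	 */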
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%d groups, flexbg size is %d:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks the clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for setup_new_flex_group_blocks() which marks the blocks
 * occupied by group tables as in use in the block bitmaps.
 *
 * @sb:		super block
 * @handle:	journal handle
 * @flex_gd:	flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (count2 = count; count > 0;
	     count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sb, bh,
						    EXT4_JTR_NONE);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		ext4_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 *  1. copy super block and GDT, and initialize group tables if necessary.
 *     In this step, we only set bits in block bitmaps for blocks taken by
 *     the super block and GDT.
 *  2. allocate group tables in block bitmaps, that is, set bits in block
 *     bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1) {
			ext4_group_t first_group;
			first_group = ext4_meta_bg_first_group(sb, group);
			if (first_group != group + 1 &&
			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
				goto handle_itb;
		}

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb,
							    EXT4_JTR_NONE);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
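			/*
			 * The first 'overhead' blocks of this group are taken
			 * by superblock/GDT metadata; mark the clusters they
			 * occupy as in use in the freshly zeroed block bitmap.
			 */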
ext4_debug("mark backup superblock %#04llx (+0)\n", 634 start); 635 ext4_set_bits(bh->b_data, 0, 636 EXT4_NUM_B2C(sbi, overhead)); 637 } 638 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count), 639 sb->s_blocksize * 8, bh->b_data); 640 err = ext4_handle_dirty_metadata(handle, NULL, bh); 641 brelse(bh); 642 if (err) 643 goto out; 644 645 handle_ib: 646 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 647 continue; 648 649 /* Initialize inode bitmap of the @group */ 650 block = group_data[i].inode_bitmap; 651 err = ext4_resize_ensure_credits_batch(handle, 1); 652 if (err < 0) 653 goto out; 654 /* Mark unused entries in inode bitmap used */ 655 bh = bclean(handle, sb, block); 656 if (IS_ERR(bh)) { 657 err = PTR_ERR(bh); 658 goto out; 659 } 660 661 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 662 sb->s_blocksize * 8, bh->b_data); 663 err = ext4_handle_dirty_metadata(handle, NULL, bh); 664 brelse(bh); 665 if (err) 666 goto out; 667 } 668 669 /* Mark group tables in block bitmap */ 670 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 671 count = group_table_count[j]; 672 start = (&group_data[0].block_bitmap)[j]; 673 block = start; 674 for (i = 1; i < flex_gd->count; i++) { 675 block += group_table_count[j]; 676 if (block == (&group_data[i].block_bitmap)[j]) { 677 count += group_table_count[j]; 678 continue; 679 } 680 err = set_flexbg_block_bitmap(sb, handle, 681 flex_gd, 682 EXT4_B2C(sbi, start), 683 EXT4_B2C(sbi, 684 start + count 685 - 1)); 686 if (err) 687 goto out; 688 count = group_table_count[j]; 689 start = (&group_data[i].block_bitmap)[j]; 690 block = start; 691 } 692 693 if (count) { 694 err = set_flexbg_block_bitmap(sb, handle, 695 flex_gd, 696 EXT4_B2C(sbi, start), 697 EXT4_B2C(sbi, 698 start + count 699 - 1)); 700 if (err) 701 goto out; 702 } 703 } 704 705 out: 706 err2 = ext4_journal_stop(handle); 707 if (err2 && !err) 708 err = err2; 709 710 return err; 711 } 712 713 /* 714 * Iterate through the groups which hold BACKUP superblock/GDT copies in an 715 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before 716 * calling this for the first time. In a sparse filesystem it will be the 717 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 718 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 719 */ 720 static unsigned ext4_list_backups(struct super_block *sb, unsigned *three, 721 unsigned *five, unsigned *seven) 722 { 723 unsigned *min = three; 724 int mult = 3; 725 unsigned ret; 726 727 if (!ext4_has_feature_sparse_super(sb)) { 728 ret = *min; 729 *min += 1; 730 return ret; 731 } 732 733 if (*five < *min) { 734 min = five; 735 mult = 5; 736 } 737 if (*seven < *min) { 738 min = seven; 739 mult = 7; 740 } 741 742 ret = *min; 743 *min *= mult; 744 745 return ret; 746 } 747 748 /* 749 * Check that all of the backup GDT blocks are held in the primary GDT block. 750 * It is assumed that they are stored in group order. Returns the number of 751 * groups in current filesystem that have BACKUPS, or -ve error code. 
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb.
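 * It installs the new group descriptor block for a meta_bg filesystem,
 * where the GDT block lives at the start of its meta block group rather
 * than being taken from the resize inode.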
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group) {
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
							 EXT4_JTR_NONE)))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will back them up again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = (ext4_group_first_block_no(sb, group) +
					ext4_bg_has_super(sb, group));

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, bh,
							 EXT4_JTR_NONE)))
			break;
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
							    EXT4_JTR_NONE);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     ext4_group_t group,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;


	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: newly added groups
 */
static void ext4_update_super(struct super_block *sb,
			     struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	lock_buffer(sbi->s_sbh);
	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb);
		sector_t old_gdb = 0;

		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
				       gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
				    struct ext4_new_flex_group_data *flex_gd,
				    ext4_fsblk_t n_blocks_count,
				    unsigned long flexbg_size)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

	last_group = group | (flexbg_size - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
						       last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}


	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */

/*
 * Extend a group without checking, assuming that checking has already
 * been done.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	lock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * and allow the "remount" trick to work for arbitrary resizing, assuming
 * enough GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	int err;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
	return err;
} /* ext4_group_extend */

static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg.
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	if (!err)
		err = ret;
	return err;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}
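
/*
 * For example, assuming 4KiB blocks and 64-byte group descriptors
 * (EXT4_DESC_PER_BLOCK(sb) == 64), converting a filesystem that already
 * has 1000 groups sets s_first_meta_bg = DIV_ROUND_UP(1000, 64) = 16:
 * the existing descriptor blocks keep covering groups 0-1023, while
 * descriptors for group 1024 and beyond are stored meta_bg-style inside
 * their own meta block groups.
 */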

/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
	int meta_bg;

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}
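
	/*
	 * If n_blocks_count was clamped above because the reserved GDT
	 * blocks ran out, the resize proceeds in stages: grow as far as the
	 * resize inode allows, let a later pass convert the filesystem to
	 * meta_bg (via ext4_convert_meta_bg() above) once no further growth
	 * is possible, and then "goto retry" with the saved
	 * n_blocks_count_retry until the requested size is reached.
	 */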

	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space for all the metadata
	 * blocks that it might need to hold.  (We might not need to store
	 * the inode table blocks in the last block group, but there will
	 * be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups.  Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}
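
/*
 * Userspace sketch (illustrative only): online growth is normally driven by
 * resize2fs, which ultimately does something like
 *
 *	__u64 new_blocks = ...;			desired size in fs blocks
 *	ioctl(fd, EXT4_IOC_RESIZE_FS, &new_blocks);
 *
 * with fd referring to the mounted filesystem (e.g. its root directory);
 * the ioctl handler then calls ext4_resize_fs() above.
 */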