1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/ext4/resize.c 4 * 5 * Support for resizing an ext4 filesystem while it is mounted. 6 * 7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> 8 * 9 * This could probably be made into a module, because it is not often in use. 10 */ 11 12 13 #include <linux/errno.h> 14 #include <linux/slab.h> 15 #include <linux/jiffies.h> 16 17 #include "ext4_jbd2.h" 18 19 struct ext4_rcu_ptr { 20 struct rcu_head rcu; 21 void *ptr; 22 }; 23 24 static void ext4_rcu_ptr_callback(struct rcu_head *head) 25 { 26 struct ext4_rcu_ptr *ptr; 27 28 ptr = container_of(head, struct ext4_rcu_ptr, rcu); 29 kvfree(ptr->ptr); 30 kfree(ptr); 31 } 32 33 void ext4_kvfree_array_rcu(void *to_free) 34 { 35 struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); 36 37 if (ptr) { 38 ptr->ptr = to_free; 39 call_rcu(&ptr->rcu, ext4_rcu_ptr_callback); 40 return; 41 } 42 synchronize_rcu(); 43 kvfree(to_free); 44 } 45 46 int ext4_resize_begin(struct super_block *sb) 47 { 48 struct ext4_sb_info *sbi = EXT4_SB(sb); 49 int ret = 0; 50 51 if (!capable(CAP_SYS_RESOURCE)) 52 return -EPERM; 53 54 /* 55 * If the reserved GDT blocks is non-zero, the resize_inode feature 56 * should always be set. 57 */ 58 if (sbi->s_es->s_reserved_gdt_blocks && 59 !ext4_has_feature_resize_inode(sb)) { 60 ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); 61 return -EFSCORRUPTED; 62 } 63 64 /* 65 * If we are not using the primary superblock/GDT copy don't resize, 66 * because the user tools have no way of handling this. Probably a 67 * bad time to do it anyways. 68 */ 69 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != 70 le32_to_cpu(sbi->s_es->s_first_data_block)) { 71 ext4_warning(sb, "won't resize using backup superblock at %llu", 72 (unsigned long long)sbi->s_sbh->b_blocknr); 73 return -EPERM; 74 } 75 76 /* 77 * We are not allowed to do online-resizing on a filesystem mounted 78 * with error, because it can destroy the filesystem easily. 
79 */ 80 if (sbi->s_mount_state & EXT4_ERROR_FS) { 81 ext4_warning(sb, "There are errors in the filesystem, " 82 "so online resizing is not allowed"); 83 return -EPERM; 84 } 85 86 if (ext4_has_feature_sparse_super2(sb)) { 87 ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); 88 return -EOPNOTSUPP; 89 } 90 91 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, 92 &sbi->s_ext4_flags)) 93 ret = -EBUSY; 94 95 return ret; 96 } 97 98 int ext4_resize_end(struct super_block *sb, bool update_backups) 99 { 100 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags); 101 smp_mb__after_atomic(); 102 if (update_backups) 103 return ext4_update_overhead(sb, true); 104 return 0; 105 } 106 107 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb, 108 ext4_group_t group) { 109 ext4_grpblk_t overhead; 110 overhead = ext4_bg_num_gdb(sb, group); 111 if (ext4_bg_has_super(sb, group)) 112 overhead += 1 + 113 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 114 return overhead; 115 } 116 117 #define outside(b, first, last) ((b) < (first) || (b) >= (last)) 118 #define inside(b, first, last) ((b) >= (first) && (b) < (last)) 119 120 static int verify_group_input(struct super_block *sb, 121 struct ext4_new_group_data *input) 122 { 123 struct ext4_sb_info *sbi = EXT4_SB(sb); 124 struct ext4_super_block *es = sbi->s_es; 125 ext4_fsblk_t start = ext4_blocks_count(es); 126 ext4_fsblk_t end = start + input->blocks_count; 127 ext4_group_t group = input->group; 128 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; 129 unsigned overhead; 130 ext4_fsblk_t metaend; 131 struct buffer_head *bh = NULL; 132 ext4_grpblk_t free_blocks_count, offset; 133 int err = -EINVAL; 134 135 if (group != sbi->s_groups_count) { 136 ext4_warning(sb, "Cannot add at group %u (only %u groups)", 137 input->group, sbi->s_groups_count); 138 return -EINVAL; 139 } 140 141 overhead = ext4_group_overhead_blocks(sb, group); 142 metaend = start + overhead; 143 free_blocks_count = input->blocks_count - 2 - overhead - 144 sbi->s_itb_per_group; 145 input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count); 146 147 if (test_opt(sb, DEBUG)) 148 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " 149 "(%d free, %u reserved)\n", 150 ext4_bg_has_super(sb, input->group) ? 
"normal" : 151 "no-super", input->group, input->blocks_count, 152 free_blocks_count, input->reserved_blocks); 153 154 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 155 if (offset != 0) 156 ext4_warning(sb, "Last group not full"); 157 else if (input->reserved_blocks > input->blocks_count / 5) 158 ext4_warning(sb, "Reserved blocks too high (%u)", 159 input->reserved_blocks); 160 else if (free_blocks_count < 0) 161 ext4_warning(sb, "Bad blocks count %u", 162 input->blocks_count); 163 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) { 164 err = PTR_ERR(bh); 165 bh = NULL; 166 ext4_warning(sb, "Cannot read last block (%llu)", 167 end - 1); 168 } else if (outside(input->block_bitmap, start, end)) 169 ext4_warning(sb, "Block bitmap not in group (block %llu)", 170 (unsigned long long)input->block_bitmap); 171 else if (outside(input->inode_bitmap, start, end)) 172 ext4_warning(sb, "Inode bitmap not in group (block %llu)", 173 (unsigned long long)input->inode_bitmap); 174 else if (outside(input->inode_table, start, end) || 175 outside(itend - 1, start, end)) 176 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", 177 (unsigned long long)input->inode_table, itend - 1); 178 else if (input->inode_bitmap == input->block_bitmap) 179 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", 180 (unsigned long long)input->block_bitmap); 181 else if (inside(input->block_bitmap, input->inode_table, itend)) 182 ext4_warning(sb, "Block bitmap (%llu) in inode table " 183 "(%llu-%llu)", 184 (unsigned long long)input->block_bitmap, 185 (unsigned long long)input->inode_table, itend - 1); 186 else if (inside(input->inode_bitmap, input->inode_table, itend)) 187 ext4_warning(sb, "Inode bitmap (%llu) in inode table " 188 "(%llu-%llu)", 189 (unsigned long long)input->inode_bitmap, 190 (unsigned long long)input->inode_table, itend - 1); 191 else if (inside(input->block_bitmap, start, metaend)) 192 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", 193 (unsigned long long)input->block_bitmap, 194 start, metaend - 1); 195 else if (inside(input->inode_bitmap, start, metaend)) 196 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", 197 (unsigned long long)input->inode_bitmap, 198 start, metaend - 1); 199 else if (inside(input->inode_table, start, metaend) || 200 inside(itend - 1, start, metaend)) 201 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " 202 "(%llu-%llu)", 203 (unsigned long long)input->inode_table, 204 itend - 1, start, metaend - 1); 205 else 206 err = 0; 207 brelse(bh); 208 209 return err; 210 } 211 212 /* 213 * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex 214 * group each time. 215 */ 216 struct ext4_new_flex_group_data { 217 struct ext4_new_group_data *groups; /* new_group_data for groups 218 in the flex group */ 219 __u16 *bg_flags; /* block group flags of groups 220 in @groups */ 221 ext4_group_t count; /* number of groups in @groups 222 */ 223 }; 224 225 /* 226 * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of 227 * @flexbg_size. 228 * 229 * Returns NULL on failure otherwise address of the allocated structure. 
230 */ 231 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) 232 { 233 struct ext4_new_flex_group_data *flex_gd; 234 235 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); 236 if (flex_gd == NULL) 237 goto out3; 238 239 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data)) 240 goto out2; 241 flex_gd->count = flexbg_size; 242 243 flex_gd->groups = kmalloc_array(flexbg_size, 244 sizeof(struct ext4_new_group_data), 245 GFP_NOFS); 246 if (flex_gd->groups == NULL) 247 goto out2; 248 249 flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16), 250 GFP_NOFS); 251 if (flex_gd->bg_flags == NULL) 252 goto out1; 253 254 return flex_gd; 255 256 out1: 257 kfree(flex_gd->groups); 258 out2: 259 kfree(flex_gd); 260 out3: 261 return NULL; 262 } 263 264 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) 265 { 266 kfree(flex_gd->bg_flags); 267 kfree(flex_gd->groups); 268 kfree(flex_gd); 269 } 270 271 /* 272 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps 273 * and inode tables for a flex group. 274 * 275 * This function is used by 64bit-resize. Note that this function allocates 276 * group tables from the 1st group of groups contained by @flexgd, which may 277 * be a partial of a flex group. 278 * 279 * @sb: super block of fs to which the groups belongs 280 * 281 * Returns 0 on a successful allocation of the metadata blocks in the 282 * block group. 283 */ 284 static int ext4_alloc_group_tables(struct super_block *sb, 285 struct ext4_new_flex_group_data *flex_gd, 286 int flexbg_size) 287 { 288 struct ext4_new_group_data *group_data = flex_gd->groups; 289 ext4_fsblk_t start_blk; 290 ext4_fsblk_t last_blk; 291 ext4_group_t src_group; 292 ext4_group_t bb_index = 0; 293 ext4_group_t ib_index = 0; 294 ext4_group_t it_index = 0; 295 ext4_group_t group; 296 ext4_group_t last_group; 297 unsigned overhead; 298 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 299 int i; 300 301 BUG_ON(flex_gd->count == 0 || group_data == NULL); 302 303 src_group = group_data[0].group; 304 last_group = src_group + flex_gd->count - 1; 305 306 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != 307 (last_group & ~(flexbg_size - 1)))); 308 next_group: 309 group = group_data[0].group; 310 if (src_group >= group_data[0].group + flex_gd->count) 311 return -ENOSPC; 312 start_blk = ext4_group_first_block_no(sb, src_group); 313 last_blk = start_blk + group_data[src_group - group].blocks_count; 314 315 overhead = ext4_group_overhead_blocks(sb, src_group); 316 317 start_blk += overhead; 318 319 /* We collect contiguous blocks as much as possible. 
*/ 320 src_group++; 321 for (; src_group <= last_group; src_group++) { 322 overhead = ext4_group_overhead_blocks(sb, src_group); 323 if (overhead == 0) 324 last_blk += group_data[src_group - group].blocks_count; 325 else 326 break; 327 } 328 329 /* Allocate block bitmaps */ 330 for (; bb_index < flex_gd->count; bb_index++) { 331 if (start_blk >= last_blk) 332 goto next_group; 333 group_data[bb_index].block_bitmap = start_blk++; 334 group = ext4_get_group_number(sb, start_blk - 1); 335 group -= group_data[0].group; 336 group_data[group].mdata_blocks++; 337 flex_gd->bg_flags[group] &= uninit_mask; 338 } 339 340 /* Allocate inode bitmaps */ 341 for (; ib_index < flex_gd->count; ib_index++) { 342 if (start_blk >= last_blk) 343 goto next_group; 344 group_data[ib_index].inode_bitmap = start_blk++; 345 group = ext4_get_group_number(sb, start_blk - 1); 346 group -= group_data[0].group; 347 group_data[group].mdata_blocks++; 348 flex_gd->bg_flags[group] &= uninit_mask; 349 } 350 351 /* Allocate inode tables */ 352 for (; it_index < flex_gd->count; it_index++) { 353 unsigned int itb = EXT4_SB(sb)->s_itb_per_group; 354 ext4_fsblk_t next_group_start; 355 356 if (start_blk + itb > last_blk) 357 goto next_group; 358 group_data[it_index].inode_table = start_blk; 359 group = ext4_get_group_number(sb, start_blk); 360 next_group_start = ext4_group_first_block_no(sb, group + 1); 361 group -= group_data[0].group; 362 363 if (start_blk + itb > next_group_start) { 364 flex_gd->bg_flags[group + 1] &= uninit_mask; 365 overhead = start_blk + itb - next_group_start; 366 group_data[group + 1].mdata_blocks += overhead; 367 itb -= overhead; 368 } 369 370 group_data[group].mdata_blocks += itb; 371 flex_gd->bg_flags[group] &= uninit_mask; 372 start_blk += EXT4_SB(sb)->s_itb_per_group; 373 } 374 375 /* Update free clusters count to exclude metadata blocks */ 376 for (i = 0; i < flex_gd->count; i++) { 377 group_data[i].free_clusters_count -= 378 EXT4_NUM_B2C(EXT4_SB(sb), 379 group_data[i].mdata_blocks); 380 } 381 382 if (test_opt(sb, DEBUG)) { 383 int i; 384 group = group_data[0].group; 385 386 printk(KERN_DEBUG "EXT4-fs: adding a flex group with " 387 "%d groups, flexbg size is %d:\n", flex_gd->count, 388 flexbg_size); 389 390 for (i = 0; i < flex_gd->count; i++) { 391 ext4_debug( 392 "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n", 393 ext4_bg_has_super(sb, group + i) ? "normal" : 394 "no-super", group + i, 395 group_data[i].blocks_count, 396 group_data[i].free_clusters_count, 397 group_data[i].mdata_blocks); 398 } 399 } 400 return 0; 401 } 402 403 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, 404 ext4_fsblk_t blk) 405 { 406 struct buffer_head *bh; 407 int err; 408 409 bh = sb_getblk(sb, blk); 410 if (unlikely(!bh)) 411 return ERR_PTR(-ENOMEM); 412 BUFFER_TRACE(bh, "get_write_access"); 413 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 414 if (err) { 415 brelse(bh); 416 bh = ERR_PTR(err); 417 } else { 418 memset(bh->b_data, 0, sb->s_blocksize); 419 set_buffer_uptodate(bh); 420 } 421 422 return bh; 423 } 424 425 static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits) 426 { 427 return ext4_journal_ensure_credits_fn(handle, credits, 428 EXT4_MAX_TRANS_DATA, 0, 0); 429 } 430 431 /* 432 * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used. 433 * 434 * Helper function for ext4_setup_new_group_blocks() which set . 
435 * 436 * @sb: super block 437 * @handle: journal handle 438 * @flex_gd: flex group data 439 */ 440 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, 441 struct ext4_new_flex_group_data *flex_gd, 442 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster) 443 { 444 struct ext4_sb_info *sbi = EXT4_SB(sb); 445 ext4_group_t count = last_cluster - first_cluster + 1; 446 ext4_group_t count2; 447 448 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster, 449 last_cluster); 450 for (; count > 0; count -= count2, first_cluster += count2) { 451 ext4_fsblk_t start; 452 struct buffer_head *bh; 453 ext4_group_t group; 454 int err; 455 456 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster)); 457 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group)); 458 group -= flex_gd->groups[0].group; 459 460 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start); 461 if (count2 > count) 462 count2 = count; 463 464 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { 465 BUG_ON(flex_gd->count > 1); 466 continue; 467 } 468 469 err = ext4_resize_ensure_credits_batch(handle, 1); 470 if (err < 0) 471 return err; 472 473 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); 474 if (unlikely(!bh)) 475 return -ENOMEM; 476 477 BUFFER_TRACE(bh, "get_write_access"); 478 err = ext4_journal_get_write_access(handle, sb, bh, 479 EXT4_JTR_NONE); 480 if (err) { 481 brelse(bh); 482 return err; 483 } 484 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", 485 first_cluster, first_cluster - start, count2); 486 mb_set_bits(bh->b_data, first_cluster - start, count2); 487 488 err = ext4_handle_dirty_metadata(handle, NULL, bh); 489 brelse(bh); 490 if (unlikely(err)) 491 return err; 492 } 493 494 return 0; 495 } 496 497 /* 498 * Set up the block and inode bitmaps, and the inode table for the new groups. 499 * This doesn't need to be part of the main transaction, since we are only 500 * changing blocks outside the actual filesystem. We still do journaling to 501 * ensure the recovery is correct in case of a failure just after resize. 502 * If any part of this fails, we simply abort the resize. 503 * 504 * setup_new_flex_group_blocks handles a flex group as follow: 505 * 1. copy super block and GDT, and initialize group tables if necessary. 506 * In this step, we only set bits in blocks bitmaps for blocks taken by 507 * super block and GDT. 508 * 2. allocate group tables in block bitmaps, that is, set bits in block 509 * bitmap for blocks taken by group tables. 
510 */ 511 static int setup_new_flex_group_blocks(struct super_block *sb, 512 struct ext4_new_flex_group_data *flex_gd) 513 { 514 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group}; 515 ext4_fsblk_t start; 516 ext4_fsblk_t block; 517 struct ext4_sb_info *sbi = EXT4_SB(sb); 518 struct ext4_super_block *es = sbi->s_es; 519 struct ext4_new_group_data *group_data = flex_gd->groups; 520 __u16 *bg_flags = flex_gd->bg_flags; 521 handle_t *handle; 522 ext4_group_t group, count; 523 struct buffer_head *bh = NULL; 524 int reserved_gdb, i, j, err = 0, err2; 525 int meta_bg; 526 527 BUG_ON(!flex_gd->count || !group_data || 528 group_data[0].group != sbi->s_groups_count); 529 530 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 531 meta_bg = ext4_has_feature_meta_bg(sb); 532 533 /* This transaction may be extended/restarted along the way */ 534 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 535 if (IS_ERR(handle)) 536 return PTR_ERR(handle); 537 538 group = group_data[0].group; 539 for (i = 0; i < flex_gd->count; i++, group++) { 540 unsigned long gdblocks; 541 ext4_grpblk_t overhead; 542 543 gdblocks = ext4_bg_num_gdb(sb, group); 544 start = ext4_group_first_block_no(sb, group); 545 546 if (meta_bg == 0 && !ext4_bg_has_super(sb, group)) 547 goto handle_itb; 548 549 if (meta_bg == 1) 550 goto handle_itb; 551 552 block = start + ext4_bg_has_super(sb, group); 553 /* Copy all of the GDT blocks into the backup in this group */ 554 for (j = 0; j < gdblocks; j++, block++) { 555 struct buffer_head *gdb; 556 557 ext4_debug("update backup group %#04llx\n", block); 558 err = ext4_resize_ensure_credits_batch(handle, 1); 559 if (err < 0) 560 goto out; 561 562 gdb = sb_getblk(sb, block); 563 if (unlikely(!gdb)) { 564 err = -ENOMEM; 565 goto out; 566 } 567 568 BUFFER_TRACE(gdb, "get_write_access"); 569 err = ext4_journal_get_write_access(handle, sb, gdb, 570 EXT4_JTR_NONE); 571 if (err) { 572 brelse(gdb); 573 goto out; 574 } 575 memcpy(gdb->b_data, sbi_array_rcu_deref(sbi, 576 s_group_desc, j)->b_data, gdb->b_size); 577 set_buffer_uptodate(gdb); 578 579 err = ext4_handle_dirty_metadata(handle, NULL, gdb); 580 if (unlikely(err)) { 581 brelse(gdb); 582 goto out; 583 } 584 brelse(gdb); 585 } 586 587 /* Zero out all of the reserved backup group descriptor 588 * table blocks 589 */ 590 if (ext4_bg_has_super(sb, group)) { 591 err = sb_issue_zeroout(sb, gdblocks + start + 1, 592 reserved_gdb, GFP_NOFS); 593 if (err) 594 goto out; 595 } 596 597 handle_itb: 598 /* Initialize group tables of the group @group */ 599 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) 600 goto handle_bb; 601 602 /* Zero out all of the inode table blocks */ 603 block = group_data[i].inode_table; 604 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", 605 block, sbi->s_itb_per_group); 606 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, 607 GFP_NOFS); 608 if (err) 609 goto out; 610 611 handle_bb: 612 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT) 613 goto handle_ib; 614 615 /* Initialize block bitmap of the @group */ 616 block = group_data[i].block_bitmap; 617 err = ext4_resize_ensure_credits_batch(handle, 1); 618 if (err < 0) 619 goto out; 620 621 bh = bclean(handle, sb, block); 622 if (IS_ERR(bh)) { 623 err = PTR_ERR(bh); 624 goto out; 625 } 626 overhead = ext4_group_overhead_blocks(sb, group); 627 if (overhead != 0) { 628 ext4_debug("mark backup superblock %#04llx (+0)\n", 629 start); 630 mb_set_bits(bh->b_data, 0, 631 EXT4_NUM_B2C(sbi, overhead)); 632 } 633 ext4_mark_bitmap_end(EXT4_B2C(sbi, 
group_data[i].blocks_count), 634 sb->s_blocksize * 8, bh->b_data); 635 err = ext4_handle_dirty_metadata(handle, NULL, bh); 636 brelse(bh); 637 if (err) 638 goto out; 639 640 handle_ib: 641 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 642 continue; 643 644 /* Initialize inode bitmap of the @group */ 645 block = group_data[i].inode_bitmap; 646 err = ext4_resize_ensure_credits_batch(handle, 1); 647 if (err < 0) 648 goto out; 649 /* Mark unused entries in inode bitmap used */ 650 bh = bclean(handle, sb, block); 651 if (IS_ERR(bh)) { 652 err = PTR_ERR(bh); 653 goto out; 654 } 655 656 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 657 sb->s_blocksize * 8, bh->b_data); 658 err = ext4_handle_dirty_metadata(handle, NULL, bh); 659 brelse(bh); 660 if (err) 661 goto out; 662 } 663 664 /* Mark group tables in block bitmap */ 665 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 666 count = group_table_count[j]; 667 start = (&group_data[0].block_bitmap)[j]; 668 block = start; 669 for (i = 1; i < flex_gd->count; i++) { 670 block += group_table_count[j]; 671 if (block == (&group_data[i].block_bitmap)[j]) { 672 count += group_table_count[j]; 673 continue; 674 } 675 err = set_flexbg_block_bitmap(sb, handle, 676 flex_gd, 677 EXT4_B2C(sbi, start), 678 EXT4_B2C(sbi, 679 start + count 680 - 1)); 681 if (err) 682 goto out; 683 count = group_table_count[j]; 684 start = (&group_data[i].block_bitmap)[j]; 685 block = start; 686 } 687 688 err = set_flexbg_block_bitmap(sb, handle, 689 flex_gd, 690 EXT4_B2C(sbi, start), 691 EXT4_B2C(sbi, 692 start + count 693 - 1)); 694 if (err) 695 goto out; 696 } 697 698 out: 699 err2 = ext4_journal_stop(handle); 700 if (err2 && !err) 701 err = err2; 702 703 return err; 704 } 705 706 /* 707 * Iterate through the groups which hold BACKUP superblock/GDT copies in an 708 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before 709 * calling this for the first time. In a sparse filesystem it will be the 710 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 711 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 712 */ 713 unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three, 714 unsigned int *five, unsigned int *seven) 715 { 716 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 717 unsigned int *min = three; 718 int mult = 3; 719 unsigned int ret; 720 721 if (ext4_has_feature_sparse_super2(sb)) { 722 do { 723 if (*min > 2) 724 return UINT_MAX; 725 ret = le32_to_cpu(es->s_backup_bgs[*min - 1]); 726 *min += 1; 727 } while (!ret); 728 return ret; 729 } 730 731 if (!ext4_has_feature_sparse_super(sb)) { 732 ret = *min; 733 *min += 1; 734 return ret; 735 } 736 737 if (*five < *min) { 738 min = five; 739 mult = 5; 740 } 741 if (*seven < *min) { 742 min = seven; 743 mult = 7; 744 } 745 746 ret = *min; 747 *min *= mult; 748 749 return ret; 750 } 751 752 /* 753 * Check that all of the backup GDT blocks are held in the primary GDT block. 754 * It is assumed that they are stored in group order. Returns the number of 755 * groups in current filesystem that have BACKUPS, or -ve error code. 
756 */ 757 static int verify_reserved_gdb(struct super_block *sb, 758 ext4_group_t end, 759 struct buffer_head *primary) 760 { 761 const ext4_fsblk_t blk = primary->b_blocknr; 762 unsigned three = 1; 763 unsigned five = 5; 764 unsigned seven = 7; 765 unsigned grp; 766 __le32 *p = (__le32 *)primary->b_data; 767 int gdbackups = 0; 768 769 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { 770 if (le32_to_cpu(*p++) != 771 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ 772 ext4_warning(sb, "reserved GDT %llu" 773 " missing grp %d (%llu)", 774 blk, grp, 775 grp * 776 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + 777 blk); 778 return -EINVAL; 779 } 780 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) 781 return -EFBIG; 782 } 783 784 return gdbackups; 785 } 786 787 /* 788 * Called when we need to bring a reserved group descriptor table block into 789 * use from the resize inode. The primary copy of the new GDT block currently 790 * is an indirect block (under the double indirect block in the resize inode). 791 * The new backup GDT blocks will be stored as leaf blocks in this indirect 792 * block, in group order. Even though we know all the block numbers we need, 793 * we check to ensure that the resize inode has actually reserved these blocks. 794 * 795 * Don't need to update the block bitmaps because the blocks are still in use. 796 * 797 * We get all of the error cases out of the way, so that we are sure to not 798 * fail once we start modifying the data on disk, because JBD has no rollback. 799 */ 800 static int add_new_gdb(handle_t *handle, struct inode *inode, 801 ext4_group_t group) 802 { 803 struct super_block *sb = inode->i_sb; 804 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 805 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 806 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; 807 struct buffer_head **o_group_desc, **n_group_desc = NULL; 808 struct buffer_head *dind = NULL; 809 struct buffer_head *gdb_bh = NULL; 810 int gdbackups; 811 struct ext4_iloc iloc = { .bh = NULL }; 812 __le32 *data; 813 int err; 814 815 if (test_opt(sb, DEBUG)) 816 printk(KERN_DEBUG 817 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 818 gdb_num); 819 820 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 821 if (IS_ERR(gdb_bh)) 822 return PTR_ERR(gdb_bh); 823 824 gdbackups = verify_reserved_gdb(sb, group, gdb_bh); 825 if (gdbackups < 0) { 826 err = gdbackups; 827 goto errout; 828 } 829 830 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 831 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 832 if (IS_ERR(dind)) { 833 err = PTR_ERR(dind); 834 dind = NULL; 835 goto errout; 836 } 837 838 data = (__le32 *)dind->b_data; 839 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { 840 ext4_warning(sb, "new group %u GDT block %llu not reserved", 841 group, gdblock); 842 err = -EINVAL; 843 goto errout; 844 } 845 846 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 847 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 848 EXT4_JTR_NONE); 849 if (unlikely(err)) 850 goto errout; 851 852 BUFFER_TRACE(gdb_bh, "get_write_access"); 853 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 854 if (unlikely(err)) 855 goto errout; 856 857 BUFFER_TRACE(dind, "get_write_access"); 858 err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE); 859 if (unlikely(err)) { 860 ext4_std_error(sb, err); 861 goto errout; 862 } 863 864 /* ext4_reserve_inode_write() gets a reference on the iloc */ 865 err = ext4_reserve_inode_write(handle, inode, 
&iloc); 866 if (unlikely(err)) 867 goto errout; 868 869 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 870 GFP_KERNEL); 871 if (!n_group_desc) { 872 err = -ENOMEM; 873 ext4_warning(sb, "not enough memory for %lu groups", 874 gdb_num + 1); 875 goto errout; 876 } 877 878 /* 879 * Finally, we have all of the possible failures behind us... 880 * 881 * Remove new GDT block from inode double-indirect block and clear out 882 * the new GDT block for use (which also "frees" the backup GDT blocks 883 * from the reserved inode). We don't need to change the bitmaps for 884 * these blocks, because they are marked as in-use from being in the 885 * reserved inode, and will become GDT blocks (primary and backup). 886 */ 887 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 888 err = ext4_handle_dirty_metadata(handle, NULL, dind); 889 if (unlikely(err)) { 890 ext4_std_error(sb, err); 891 goto errout; 892 } 893 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 894 (9 - EXT4_SB(sb)->s_cluster_bits); 895 ext4_mark_iloc_dirty(handle, inode, &iloc); 896 memset(gdb_bh->b_data, 0, sb->s_blocksize); 897 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 898 if (unlikely(err)) { 899 ext4_std_error(sb, err); 900 iloc.bh = NULL; 901 goto errout; 902 } 903 brelse(dind); 904 905 rcu_read_lock(); 906 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 907 memcpy(n_group_desc, o_group_desc, 908 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 909 rcu_read_unlock(); 910 n_group_desc[gdb_num] = gdb_bh; 911 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 912 EXT4_SB(sb)->s_gdb_count++; 913 ext4_kvfree_array_rcu(o_group_desc); 914 915 lock_buffer(EXT4_SB(sb)->s_sbh); 916 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 917 ext4_superblock_csum_set(sb); 918 unlock_buffer(EXT4_SB(sb)->s_sbh); 919 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 920 if (err) 921 ext4_std_error(sb, err); 922 return err; 923 errout: 924 kvfree(n_group_desc); 925 brelse(iloc.bh); 926 brelse(dind); 927 brelse(gdb_bh); 928 929 ext4_debug("leaving with error %d\n", err); 930 return err; 931 } 932 933 /* 934 * If there is no available space in the existing block group descriptors for 935 * the new block group and there are no reserved block group descriptors, then 936 * the meta_bg feature will get enabled, and es->s_first_meta_bg will get set 937 * to the first block group that is managed using meta_bg and s_first_meta_bg 938 * must be a multiple of EXT4_DESC_PER_BLOCK(sb). 939 * This function will be called when first group of meta_bg is added to bring 940 * new group descriptors block of new added meta_bg. 
941 */ 942 static int add_new_gdb_meta_bg(struct super_block *sb, 943 handle_t *handle, ext4_group_t group) { 944 ext4_fsblk_t gdblock; 945 struct buffer_head *gdb_bh; 946 struct buffer_head **o_group_desc, **n_group_desc; 947 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 948 int err; 949 950 gdblock = ext4_group_first_block_no(sb, group) + 951 ext4_bg_has_super(sb, group); 952 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 953 if (IS_ERR(gdb_bh)) 954 return PTR_ERR(gdb_bh); 955 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 956 GFP_KERNEL); 957 if (!n_group_desc) { 958 brelse(gdb_bh); 959 err = -ENOMEM; 960 ext4_warning(sb, "not enough memory for %lu groups", 961 gdb_num + 1); 962 return err; 963 } 964 965 rcu_read_lock(); 966 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 967 memcpy(n_group_desc, o_group_desc, 968 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 969 rcu_read_unlock(); 970 n_group_desc[gdb_num] = gdb_bh; 971 972 BUFFER_TRACE(gdb_bh, "get_write_access"); 973 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 974 if (err) { 975 kvfree(n_group_desc); 976 brelse(gdb_bh); 977 return err; 978 } 979 980 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 981 EXT4_SB(sb)->s_gdb_count++; 982 ext4_kvfree_array_rcu(o_group_desc); 983 return err; 984 } 985 986 /* 987 * Called when we are adding a new group which has a backup copy of each of 988 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 989 * We need to add these reserved backup GDT blocks to the resize inode, so 990 * that they are kept for future resizing and not allocated to files. 991 * 992 * Each reserved backup GDT block will go into a different indirect block. 993 * The indirect blocks are actually the primary reserved GDT blocks, 994 * so we know in advance what their block numbers are. We only get the 995 * double-indirect block to verify it is pointing to the primary reserved 996 * GDT blocks so we don't overwrite a data block by accident. The reserved 997 * backup GDT blocks are stored in their reserved primary GDT block. 
998 */ 999 static int reserve_backup_gdb(handle_t *handle, struct inode *inode, 1000 ext4_group_t group) 1001 { 1002 struct super_block *sb = inode->i_sb; 1003 int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 1004 int cluster_bits = EXT4_SB(sb)->s_cluster_bits; 1005 struct buffer_head **primary; 1006 struct buffer_head *dind; 1007 struct ext4_iloc iloc; 1008 ext4_fsblk_t blk; 1009 __le32 *data, *end; 1010 int gdbackups = 0; 1011 int res, i; 1012 int err; 1013 1014 primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS); 1015 if (!primary) 1016 return -ENOMEM; 1017 1018 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 1019 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 1020 if (IS_ERR(dind)) { 1021 err = PTR_ERR(dind); 1022 dind = NULL; 1023 goto exit_free; 1024 } 1025 1026 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; 1027 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % 1028 EXT4_ADDR_PER_BLOCK(sb)); 1029 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); 1030 1031 /* Get each reserved primary GDT block and verify it holds backups */ 1032 for (res = 0; res < reserved_gdb; res++, blk++) { 1033 if (le32_to_cpu(*data) != blk) { 1034 ext4_warning(sb, "reserved block %llu" 1035 " not at offset %ld", 1036 blk, 1037 (long)(data - (__le32 *)dind->b_data)); 1038 err = -EINVAL; 1039 goto exit_bh; 1040 } 1041 primary[res] = ext4_sb_bread(sb, blk, 0); 1042 if (IS_ERR(primary[res])) { 1043 err = PTR_ERR(primary[res]); 1044 primary[res] = NULL; 1045 goto exit_bh; 1046 } 1047 gdbackups = verify_reserved_gdb(sb, group, primary[res]); 1048 if (gdbackups < 0) { 1049 brelse(primary[res]); 1050 err = gdbackups; 1051 goto exit_bh; 1052 } 1053 if (++data >= end) 1054 data = (__le32 *)dind->b_data; 1055 } 1056 1057 for (i = 0; i < reserved_gdb; i++) { 1058 BUFFER_TRACE(primary[i], "get_write_access"); 1059 if ((err = ext4_journal_get_write_access(handle, sb, primary[i], 1060 EXT4_JTR_NONE))) 1061 goto exit_bh; 1062 } 1063 1064 if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) 1065 goto exit_bh; 1066 1067 /* 1068 * Finally we can add each of the reserved backup GDT blocks from 1069 * the new group to its reserved primary GDT block. 1070 */ 1071 blk = group * EXT4_BLOCKS_PER_GROUP(sb); 1072 for (i = 0; i < reserved_gdb; i++) { 1073 int err2; 1074 data = (__le32 *)primary[i]->b_data; 1075 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); 1076 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); 1077 if (!err) 1078 err = err2; 1079 } 1080 1081 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits); 1082 ext4_mark_iloc_dirty(handle, inode, &iloc); 1083 1084 exit_bh: 1085 while (--res >= 0) 1086 brelse(primary[res]); 1087 brelse(dind); 1088 1089 exit_free: 1090 kfree(primary); 1091 1092 return err; 1093 } 1094 1095 static inline void ext4_set_block_group_nr(struct super_block *sb, char *data, 1096 ext4_group_t group) 1097 { 1098 struct ext4_super_block *es = (struct ext4_super_block *) data; 1099 1100 es->s_block_group_nr = cpu_to_le16(group); 1101 if (ext4_has_metadata_csum(sb)) 1102 es->s_checksum = ext4_superblock_csum(sb, es); 1103 } 1104 1105 /* 1106 * Update the backup copies of the ext4 metadata. These don't need to be part 1107 * of the main resize transaction, because e2fsck will re-write them if there 1108 * is a problem (basically only OOM will cause a problem). 
However, we 1109 * _should_ update the backups if possible, in case the primary gets trashed 1110 * for some reason and we need to run e2fsck from a backup superblock. The 1111 * important part is that the new block and inode counts are in the backup 1112 * superblocks, and the location of the new group metadata in the GDT backups. 1113 * 1114 * We do not need take the s_resize_lock for this, because these 1115 * blocks are not otherwise touched by the filesystem code when it is 1116 * mounted. We don't need to worry about last changing from 1117 * sbi->s_groups_count, because the worst that can happen is that we 1118 * do not copy the full number of backups at this time. The resize 1119 * which changed s_groups_count will backup again. 1120 */ 1121 static void update_backups(struct super_block *sb, sector_t blk_off, char *data, 1122 int size, int meta_bg) 1123 { 1124 struct ext4_sb_info *sbi = EXT4_SB(sb); 1125 ext4_group_t last; 1126 const int bpg = EXT4_BLOCKS_PER_GROUP(sb); 1127 unsigned three = 1; 1128 unsigned five = 5; 1129 unsigned seven = 7; 1130 ext4_group_t group = 0; 1131 int rest = sb->s_blocksize - size; 1132 handle_t *handle; 1133 int err = 0, err2; 1134 1135 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 1136 if (IS_ERR(handle)) { 1137 group = 1; 1138 err = PTR_ERR(handle); 1139 goto exit_err; 1140 } 1141 1142 if (meta_bg == 0) { 1143 group = ext4_list_backups(sb, &three, &five, &seven); 1144 last = sbi->s_groups_count; 1145 } else { 1146 group = ext4_get_group_number(sb, blk_off) + 1; 1147 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); 1148 } 1149 1150 while (group < sbi->s_groups_count) { 1151 struct buffer_head *bh; 1152 ext4_fsblk_t backup_block; 1153 int has_super = ext4_bg_has_super(sb, group); 1154 ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group); 1155 1156 /* Out of journal space, and can't get more - abort - so sad */ 1157 err = ext4_resize_ensure_credits_batch(handle, 1); 1158 if (err < 0) 1159 break; 1160 1161 if (meta_bg == 0) 1162 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; 1163 else 1164 backup_block = first_block + has_super; 1165 1166 bh = sb_getblk(sb, backup_block); 1167 if (unlikely(!bh)) { 1168 err = -ENOMEM; 1169 break; 1170 } 1171 ext4_debug("update metadata backup %llu(+%llu)\n", 1172 backup_block, backup_block - 1173 ext4_group_first_block_no(sb, group)); 1174 BUFFER_TRACE(bh, "get_write_access"); 1175 if ((err = ext4_journal_get_write_access(handle, sb, bh, 1176 EXT4_JTR_NONE))) { 1177 brelse(bh); 1178 break; 1179 } 1180 lock_buffer(bh); 1181 memcpy(bh->b_data, data, size); 1182 if (rest) 1183 memset(bh->b_data + size, 0, rest); 1184 if (has_super && (backup_block == first_block)) 1185 ext4_set_block_group_nr(sb, bh->b_data, group); 1186 set_buffer_uptodate(bh); 1187 unlock_buffer(bh); 1188 err = ext4_handle_dirty_metadata(handle, NULL, bh); 1189 if (unlikely(err)) 1190 ext4_std_error(sb, err); 1191 brelse(bh); 1192 1193 if (meta_bg == 0) 1194 group = ext4_list_backups(sb, &three, &five, &seven); 1195 else if (group == last) 1196 break; 1197 else 1198 group = last; 1199 } 1200 if ((err2 = ext4_journal_stop(handle)) && !err) 1201 err = err2; 1202 1203 /* 1204 * Ugh! Need to have e2fsck write the backup copies. It is too 1205 * late to revert the resize, we shouldn't fail just because of 1206 * the backup copies (they are only needed in case of corruption). 
1207 * 1208 * However, if we got here we have a journal problem too, so we 1209 * can't really start a transaction to mark the superblock. 1210 * Chicken out and just set the flag on the hope it will be written 1211 * to disk, and if not - we will simply wait until next fsck. 1212 */ 1213 exit_err: 1214 if (err) { 1215 ext4_warning(sb, "can't update backup for group %u (err %d), " 1216 "forcing fsck on next reboot", group, err); 1217 sbi->s_mount_state &= ~EXT4_VALID_FS; 1218 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 1219 mark_buffer_dirty(sbi->s_sbh); 1220 } 1221 } 1222 1223 /* 1224 * ext4_add_new_descs() adds @count group descriptor of groups 1225 * starting at @group 1226 * 1227 * @handle: journal handle 1228 * @sb: super block 1229 * @group: the group no. of the first group desc to be added 1230 * @resize_inode: the resize inode 1231 * @count: number of group descriptors to be added 1232 */ 1233 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, 1234 ext4_group_t group, struct inode *resize_inode, 1235 ext4_group_t count) 1236 { 1237 struct ext4_sb_info *sbi = EXT4_SB(sb); 1238 struct ext4_super_block *es = sbi->s_es; 1239 struct buffer_head *gdb_bh; 1240 int i, gdb_off, gdb_num, err = 0; 1241 int meta_bg; 1242 1243 meta_bg = ext4_has_feature_meta_bg(sb); 1244 for (i = 0; i < count; i++, group++) { 1245 int reserved_gdb = ext4_bg_has_super(sb, group) ? 1246 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1247 1248 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1249 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1250 1251 /* 1252 * We will only either add reserved group blocks to a backup group 1253 * or remove reserved blocks for the first group in a new group block. 1254 * Doing both would be mean more complex code, and sane people don't 1255 * use non-sparse filesystems anymore. This is already checked above. 
1256 */ 1257 if (gdb_off) { 1258 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1259 gdb_num); 1260 BUFFER_TRACE(gdb_bh, "get_write_access"); 1261 err = ext4_journal_get_write_access(handle, sb, gdb_bh, 1262 EXT4_JTR_NONE); 1263 1264 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) 1265 err = reserve_backup_gdb(handle, resize_inode, group); 1266 } else if (meta_bg != 0) { 1267 err = add_new_gdb_meta_bg(sb, handle, group); 1268 } else { 1269 err = add_new_gdb(handle, resize_inode, group); 1270 } 1271 if (err) 1272 break; 1273 } 1274 return err; 1275 } 1276 1277 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) 1278 { 1279 struct buffer_head *bh = sb_getblk(sb, block); 1280 if (unlikely(!bh)) 1281 return NULL; 1282 if (!bh_uptodate_or_lock(bh)) { 1283 if (ext4_read_bh(bh, 0, NULL) < 0) { 1284 brelse(bh); 1285 return NULL; 1286 } 1287 } 1288 1289 return bh; 1290 } 1291 1292 static int ext4_set_bitmap_checksums(struct super_block *sb, 1293 struct ext4_group_desc *gdp, 1294 struct ext4_new_group_data *group_data) 1295 { 1296 struct buffer_head *bh; 1297 1298 if (!ext4_has_metadata_csum(sb)) 1299 return 0; 1300 1301 bh = ext4_get_bitmap(sb, group_data->inode_bitmap); 1302 if (!bh) 1303 return -EIO; 1304 ext4_inode_bitmap_csum_set(sb, gdp, bh, 1305 EXT4_INODES_PER_GROUP(sb) / 8); 1306 brelse(bh); 1307 1308 bh = ext4_get_bitmap(sb, group_data->block_bitmap); 1309 if (!bh) 1310 return -EIO; 1311 ext4_block_bitmap_csum_set(sb, gdp, bh); 1312 brelse(bh); 1313 1314 return 0; 1315 } 1316 1317 /* 1318 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg 1319 */ 1320 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, 1321 struct ext4_new_flex_group_data *flex_gd) 1322 { 1323 struct ext4_new_group_data *group_data = flex_gd->groups; 1324 struct ext4_group_desc *gdp; 1325 struct ext4_sb_info *sbi = EXT4_SB(sb); 1326 struct buffer_head *gdb_bh; 1327 ext4_group_t group; 1328 __u16 *bg_flags = flex_gd->bg_flags; 1329 int i, gdb_off, gdb_num, err = 0; 1330 1331 1332 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { 1333 group = group_data->group; 1334 1335 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1336 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1337 1338 /* 1339 * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). 
1340 */ 1341 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num); 1342 /* Update group descriptor block for new group */ 1343 gdp = (struct ext4_group_desc *)(gdb_bh->b_data + 1344 gdb_off * EXT4_DESC_SIZE(sb)); 1345 1346 memset(gdp, 0, EXT4_DESC_SIZE(sb)); 1347 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); 1348 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); 1349 err = ext4_set_bitmap_checksums(sb, gdp, group_data); 1350 if (err) { 1351 ext4_std_error(sb, err); 1352 break; 1353 } 1354 1355 ext4_inode_table_set(sb, gdp, group_data->inode_table); 1356 ext4_free_group_clusters_set(sb, gdp, 1357 group_data->free_clusters_count); 1358 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); 1359 if (ext4_has_group_desc_csum(sb)) 1360 ext4_itable_unused_set(sb, gdp, 1361 EXT4_INODES_PER_GROUP(sb)); 1362 gdp->bg_flags = cpu_to_le16(*bg_flags); 1363 ext4_group_desc_csum_set(sb, group, gdp); 1364 1365 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 1366 if (unlikely(err)) { 1367 ext4_std_error(sb, err); 1368 break; 1369 } 1370 1371 /* 1372 * We can allocate memory for mb_alloc based on the new group 1373 * descriptor 1374 */ 1375 err = ext4_mb_add_groupinfo(sb, group, gdp); 1376 if (err) 1377 break; 1378 } 1379 return err; 1380 } 1381 1382 static void ext4_add_overhead(struct super_block *sb, 1383 const ext4_fsblk_t overhead) 1384 { 1385 struct ext4_sb_info *sbi = EXT4_SB(sb); 1386 struct ext4_super_block *es = sbi->s_es; 1387 1388 sbi->s_overhead += overhead; 1389 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1390 smp_wmb(); 1391 } 1392 1393 /* 1394 * ext4_update_super() updates the super block so that the newly added 1395 * groups can be seen by the filesystem. 1396 * 1397 * @sb: super block 1398 * @flex_gd: new added groups 1399 */ 1400 static void ext4_update_super(struct super_block *sb, 1401 struct ext4_new_flex_group_data *flex_gd) 1402 { 1403 ext4_fsblk_t blocks_count = 0; 1404 ext4_fsblk_t free_blocks = 0; 1405 ext4_fsblk_t reserved_blocks = 0; 1406 struct ext4_new_group_data *group_data = flex_gd->groups; 1407 struct ext4_sb_info *sbi = EXT4_SB(sb); 1408 struct ext4_super_block *es = sbi->s_es; 1409 int i; 1410 1411 BUG_ON(flex_gd->count == 0 || group_data == NULL); 1412 /* 1413 * Make the new blocks and inodes valid next. We do this before 1414 * increasing the group count so that once the group is enabled, 1415 * all of its blocks and inodes are already valid. 1416 * 1417 * We always allocate group-by-group, then block-by-block or 1418 * inode-by-inode within a group, so enabling these 1419 * blocks/inodes before the group is live won't actually let us 1420 * allocate the new space yet. 
1421 */ 1422 for (i = 0; i < flex_gd->count; i++) { 1423 blocks_count += group_data[i].blocks_count; 1424 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count); 1425 } 1426 1427 reserved_blocks = ext4_r_blocks_count(es) * 100; 1428 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es)); 1429 reserved_blocks *= blocks_count; 1430 do_div(reserved_blocks, 100); 1431 1432 lock_buffer(sbi->s_sbh); 1433 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); 1434 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); 1435 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1436 flex_gd->count); 1437 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1438 flex_gd->count); 1439 1440 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); 1441 /* 1442 * We need to protect s_groups_count against other CPUs seeing 1443 * inconsistent state in the superblock. 1444 * 1445 * The precise rules we use are: 1446 * 1447 * * Writers must perform a smp_wmb() after updating all 1448 * dependent data and before modifying the groups count 1449 * 1450 * * Readers must perform an smp_rmb() after reading the groups 1451 * count and before reading any dependent data. 1452 * 1453 * NB. These rules can be relaxed when checking the group count 1454 * while freeing data, as we can only allocate from a block 1455 * group after serialising against the group count, and we can 1456 * only then free after serialising in turn against that 1457 * allocation. 1458 */ 1459 smp_wmb(); 1460 1461 /* Update the global fs size fields */ 1462 sbi->s_groups_count += flex_gd->count; 1463 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 1464 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 1465 1466 /* Update the reserved block counts only once the new group is 1467 * active. */ 1468 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + 1469 reserved_blocks); 1470 1471 /* Update the free space counts */ 1472 percpu_counter_add(&sbi->s_freeclusters_counter, 1473 EXT4_NUM_B2C(sbi, free_blocks)); 1474 percpu_counter_add(&sbi->s_freeinodes_counter, 1475 EXT4_INODES_PER_GROUP(sb) * flex_gd->count); 1476 1477 ext4_debug("free blocks count %llu", 1478 percpu_counter_read(&sbi->s_freeclusters_counter)); 1479 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { 1480 ext4_group_t flex_group; 1481 struct flex_groups *fg; 1482 1483 flex_group = ext4_flex_group(sbi, group_data[0].group); 1484 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 1485 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), 1486 &fg->free_clusters); 1487 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, 1488 &fg->free_inodes); 1489 } 1490 1491 /* 1492 * Update the fs overhead information. 1493 * 1494 * For bigalloc, if the superblock already has a properly calculated 1495 * overhead, update it with a value based on numbers already computed 1496 * above for the newly allocated capacity. 1497 */ 1498 if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0)) 1499 ext4_add_overhead(sb, 1500 EXT4_NUM_B2C(sbi, blocks_count - free_blocks)); 1501 else 1502 ext4_calculate_overhead(sb); 1503 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1504 1505 ext4_superblock_csum_set(sb); 1506 unlock_buffer(sbi->s_sbh); 1507 if (test_opt(sb, DEBUG)) 1508 printk(KERN_DEBUG "EXT4-fs: added group %u:" 1509 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, 1510 blocks_count, free_blocks, reserved_blocks); 1511 } 1512 1513 /* Add a flex group to an fs. 
Ensure we handle all possible error conditions 1514 * _before_ we start modifying the filesystem, because we cannot abort the 1515 * transaction and not have it write the data to disk. 1516 */ 1517 static int ext4_flex_group_add(struct super_block *sb, 1518 struct inode *resize_inode, 1519 struct ext4_new_flex_group_data *flex_gd) 1520 { 1521 struct ext4_sb_info *sbi = EXT4_SB(sb); 1522 struct ext4_super_block *es = sbi->s_es; 1523 ext4_fsblk_t o_blocks_count; 1524 ext4_grpblk_t last; 1525 ext4_group_t group; 1526 handle_t *handle; 1527 unsigned reserved_gdb; 1528 int err = 0, err2 = 0, credit; 1529 1530 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); 1531 1532 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 1533 o_blocks_count = ext4_blocks_count(es); 1534 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1535 BUG_ON(last); 1536 1537 err = setup_new_flex_group_blocks(sb, flex_gd); 1538 if (err) 1539 goto exit; 1540 /* 1541 * We will always be modifying at least the superblock and GDT 1542 * blocks. If we are adding a group past the last current GDT block, 1543 * we will also modify the inode and the dindirect block. If we 1544 * are adding a group with superblock/GDT backups we will also 1545 * modify each of the reserved GDT dindirect blocks. 1546 */ 1547 credit = 3; /* sb, resize inode, resize inode dindirect */ 1548 /* GDT blocks */ 1549 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); 1550 credit += reserved_gdb; /* Reserved GDT dindirect blocks */ 1551 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1552 if (IS_ERR(handle)) { 1553 err = PTR_ERR(handle); 1554 goto exit; 1555 } 1556 1557 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1558 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, 1559 EXT4_JTR_NONE); 1560 if (err) 1561 goto exit_journal; 1562 1563 group = flex_gd->groups[0].group; 1564 BUG_ON(group != sbi->s_groups_count); 1565 err = ext4_add_new_descs(handle, sb, group, 1566 resize_inode, flex_gd->count); 1567 if (err) 1568 goto exit_journal; 1569 1570 err = ext4_setup_new_descs(handle, sb, flex_gd); 1571 if (err) 1572 goto exit_journal; 1573 1574 ext4_update_super(sb, flex_gd); 1575 1576 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 1577 1578 exit_journal: 1579 err2 = ext4_journal_stop(handle); 1580 if (!err) 1581 err = err2; 1582 1583 if (!err) { 1584 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1585 int gdb_num_end = ((group + flex_gd->count - 1) / 1586 EXT4_DESC_PER_BLOCK(sb)); 1587 int meta_bg = ext4_has_feature_meta_bg(sb); 1588 sector_t padding_blocks = meta_bg ? 
0 : sbi->s_sbh->b_blocknr - 1589 ext4_group_first_block_no(sb, 0); 1590 1591 update_backups(sb, ext4_group_first_block_no(sb, 0), 1592 (char *)es, sizeof(struct ext4_super_block), 0); 1593 for (; gdb_num <= gdb_num_end; gdb_num++) { 1594 struct buffer_head *gdb_bh; 1595 1596 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1597 gdb_num); 1598 update_backups(sb, gdb_bh->b_blocknr - padding_blocks, 1599 gdb_bh->b_data, gdb_bh->b_size, meta_bg); 1600 } 1601 } 1602 exit: 1603 return err; 1604 } 1605 1606 static int ext4_setup_next_flex_gd(struct super_block *sb, 1607 struct ext4_new_flex_group_data *flex_gd, 1608 ext4_fsblk_t n_blocks_count, 1609 unsigned long flexbg_size) 1610 { 1611 struct ext4_sb_info *sbi = EXT4_SB(sb); 1612 struct ext4_super_block *es = sbi->s_es; 1613 struct ext4_new_group_data *group_data = flex_gd->groups; 1614 ext4_fsblk_t o_blocks_count; 1615 ext4_group_t n_group; 1616 ext4_group_t group; 1617 ext4_group_t last_group; 1618 ext4_grpblk_t last; 1619 ext4_grpblk_t clusters_per_group; 1620 unsigned long i; 1621 1622 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb); 1623 1624 o_blocks_count = ext4_blocks_count(es); 1625 1626 if (o_blocks_count == n_blocks_count) 1627 return 0; 1628 1629 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1630 BUG_ON(last); 1631 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last); 1632 1633 last_group = group | (flexbg_size - 1); 1634 if (last_group > n_group) 1635 last_group = n_group; 1636 1637 flex_gd->count = last_group - group + 1; 1638 1639 for (i = 0; i < flex_gd->count; i++) { 1640 int overhead; 1641 1642 group_data[i].group = group + i; 1643 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb); 1644 overhead = ext4_group_overhead_blocks(sb, group + i); 1645 group_data[i].mdata_blocks = overhead; 1646 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb); 1647 if (ext4_has_group_desc_csum(sb)) { 1648 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | 1649 EXT4_BG_INODE_UNINIT; 1650 if (!test_opt(sb, INIT_INODE_TABLE)) 1651 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED; 1652 } else 1653 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; 1654 } 1655 1656 if (last_group == n_group && ext4_has_group_desc_csum(sb)) 1657 /* We need to initialize block bitmap of last group. */ 1658 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; 1659 1660 if ((last_group == n_group) && (last != clusters_per_group - 1)) { 1661 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1); 1662 group_data[i - 1].free_clusters_count -= clusters_per_group - 1663 last - 1; 1664 } 1665 1666 return 1; 1667 } 1668 1669 /* Add group descriptor data to an existing or new group descriptor block. 1670 * Ensure we handle all possible error conditions _before_ we start modifying 1671 * the filesystem, because we cannot abort the transaction and not have it 1672 * write the data to disk. 1673 * 1674 * If we are on a GDT block boundary, we need to get the reserved GDT block. 1675 * Otherwise, we may need to add backup GDT blocks for a sparse group. 1676 * 1677 * We only need to hold the superblock lock while we are actually adding 1678 * in the new group's counts to the superblock. Prior to that we have 1679 * not really "added" the group at all. We re-check that we are still 1680 * adding in the last group in case things have changed since verifying. 
1681 */ 1682 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) 1683 { 1684 struct ext4_new_flex_group_data flex_gd; 1685 struct ext4_sb_info *sbi = EXT4_SB(sb); 1686 struct ext4_super_block *es = sbi->s_es; 1687 int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 1688 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1689 struct inode *inode = NULL; 1690 int gdb_off; 1691 int err; 1692 __u16 bg_flags = 0; 1693 1694 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); 1695 1696 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) { 1697 ext4_warning(sb, "Can't resize non-sparse filesystem further"); 1698 return -EPERM; 1699 } 1700 1701 if (ext4_blocks_count(es) + input->blocks_count < 1702 ext4_blocks_count(es)) { 1703 ext4_warning(sb, "blocks_count overflow"); 1704 return -EINVAL; 1705 } 1706 1707 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 1708 le32_to_cpu(es->s_inodes_count)) { 1709 ext4_warning(sb, "inodes_count overflow"); 1710 return -EINVAL; 1711 } 1712 1713 if (reserved_gdb || gdb_off == 0) { 1714 if (!ext4_has_feature_resize_inode(sb) || 1715 !le16_to_cpu(es->s_reserved_gdt_blocks)) { 1716 ext4_warning(sb, 1717 "No reserved GDT blocks, can't resize"); 1718 return -EPERM; 1719 } 1720 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL); 1721 if (IS_ERR(inode)) { 1722 ext4_warning(sb, "Error opening resize inode"); 1723 return PTR_ERR(inode); 1724 } 1725 } 1726 1727 1728 err = verify_group_input(sb, input); 1729 if (err) 1730 goto out; 1731 1732 err = ext4_alloc_flex_bg_array(sb, input->group + 1); 1733 if (err) 1734 goto out; 1735 1736 err = ext4_mb_alloc_groupinfo(sb, input->group + 1); 1737 if (err) 1738 goto out; 1739 1740 flex_gd.count = 1; 1741 flex_gd.groups = input; 1742 flex_gd.bg_flags = &bg_flags; 1743 err = ext4_flex_group_add(sb, inode, &flex_gd); 1744 out: 1745 iput(inode); 1746 return err; 1747 } /* ext4_group_add */ 1748 1749 /* 1750 * extend a group without checking assuming that checking has been done. 1751 */ 1752 static int ext4_group_extend_no_check(struct super_block *sb, 1753 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) 1754 { 1755 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1756 handle_t *handle; 1757 int err = 0, err2; 1758 1759 /* We will update the superblock, one block bitmap, and 1760 * one group descriptor via ext4_group_add_blocks(). 
1761 */ 1762 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3); 1763 if (IS_ERR(handle)) { 1764 err = PTR_ERR(handle); 1765 ext4_warning(sb, "error %d on journal start", err); 1766 return err; 1767 } 1768 1769 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 1770 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 1771 EXT4_JTR_NONE); 1772 if (err) { 1773 ext4_warning(sb, "error %d on journal write access", err); 1774 goto errout; 1775 } 1776 1777 lock_buffer(EXT4_SB(sb)->s_sbh); 1778 ext4_blocks_count_set(es, o_blocks_count + add); 1779 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add); 1780 ext4_superblock_csum_set(sb); 1781 unlock_buffer(EXT4_SB(sb)->s_sbh); 1782 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, 1783 o_blocks_count + add); 1784 /* We add the blocks to the bitmap and set the group need init bit */ 1785 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add); 1786 if (err) 1787 goto errout; 1788 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 1789 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, 1790 o_blocks_count + add); 1791 errout: 1792 err2 = ext4_journal_stop(handle); 1793 if (err2 && !err) 1794 err = err2; 1795 1796 if (!err) { 1797 if (test_opt(sb, DEBUG)) 1798 printk(KERN_DEBUG "EXT4-fs: extended group to %llu " 1799 "blocks\n", ext4_blocks_count(es)); 1800 update_backups(sb, ext4_group_first_block_no(sb, 0), 1801 (char *)es, sizeof(struct ext4_super_block), 0); 1802 } 1803 return err; 1804 } 1805 1806 /* 1807 * Extend the filesystem to the new number of blocks specified. This entry 1808 * point is only used to extend the current filesystem to the end of the last 1809 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" 1810 * for emergencies (because it has no dependencies on reserved blocks). 1811 * 1812 * If we _really_ wanted, we could use default values to call ext4_group_add() 1813 * allow the "remount" trick to work for arbitrary resizing, assuming enough 1814 * GDT blocks are reserved to grow to the desired size. 1815 */ 1816 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, 1817 ext4_fsblk_t n_blocks_count) 1818 { 1819 ext4_fsblk_t o_blocks_count; 1820 ext4_grpblk_t last; 1821 ext4_grpblk_t add; 1822 struct buffer_head *bh; 1823 ext4_group_t group; 1824 1825 o_blocks_count = ext4_blocks_count(es); 1826 1827 if (test_opt(sb, DEBUG)) 1828 ext4_msg(sb, KERN_DEBUG, 1829 "extending last group from %llu to %llu blocks", 1830 o_blocks_count, n_blocks_count); 1831 1832 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) 1833 return 0; 1834 1835 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { 1836 ext4_msg(sb, KERN_ERR, 1837 "filesystem too large to resize to %llu blocks safely", 1838 n_blocks_count); 1839 return -EINVAL; 1840 } 1841 1842 if (n_blocks_count < o_blocks_count) { 1843 ext4_warning(sb, "can't shrink FS - resize aborted"); 1844 return -EINVAL; 1845 } 1846 1847 /* Handle the remaining blocks in the last group only. 

/*
 * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */


static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}
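
/*
 * Worked example (illustrative): with 4KiB blocks and 64-byte group
 * descriptors (the 64bit feature), one descriptor block holds
 * EXT4_DESC_PER_BLOCK(sb) == 64 descriptors, so
 * num_desc_blocks(sb, 200) == (200 + 63) / 64 == 4.  ext4_resize_fs()
 * below compares this value for the old and new group counts to work out
 * how many additional group-descriptor blocks a resize will need.
 */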

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ? err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}
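
/*
 * Note (illustrative): after the conversion above, s_first_meta_bg holds
 * the number of descriptor blocks already in use, so the existing group
 * descriptors keep their traditional layout while any groups added from
 * now on are described with the meta_bg scheme, where each meta block
 * group of EXT4_DESC_PER_BLOCK(sb) groups keeps its single descriptor
 * block (plus backups) inside the meta block group itself rather than in
 * the ever-growing descriptor array behind the superblock.
 */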

/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
	int meta_bg;

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group is large enough to hold all of the
	 * metadata blocks that it might need to contain.  (We might not
	 * need to store the inode table blocks in the last block group,
	 * but there will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}
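
/*
 * For illustration: a full online grow via the EXT4_IOC_RESIZE_FS ioctl
 * ends up as roughly the following sequence on the ioctl side, with error
 * handling and journal flushing omitted:
 *
 *	err = ext4_resize_begin(sb);		// take the resizing bit
 *	if (!err) {
 *		err = ext4_resize_fs(sb, n_blocks_count);
 *		err2 = ext4_resize_end(sb, true);	// refresh overhead
 *	}
 *
 * ext4_resize_fs() itself first fills out the tail of the last existing
 * group via ext4_group_extend_no_check() and then adds whole flex groups
 * with ext4_flex_group_add() until the requested block count is reached.
 * This is only a sketch; the ioctl-side details differ between kernel
 * versions, so see the EXT4_IOC_RESIZE_FS handling in fs/ext4/ioctl.c
 * for the authoritative code.
 */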