xref: /linux/fs/ext4/resize.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  *  linux/fs/ext4/resize.c
3  *
4  * Support for resizing an ext4 filesystem while it is mounted.
5  *
6  * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
7  *
8  * This could probably be made into a module, because it is not often in use.
9  */
10 
11 
12 #define EXT4FS_DEBUG
13 
14 #include <linux/errno.h>
15 #include <linux/slab.h>
16 
17 #include "ext4_jbd2.h"
18 
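/*
 * ext4_resize_begin() checks that the caller is privileged, that we are
 * running on the primary superblock (not a backup), and that the filesystem
 * is not marked with errors, then takes the EXT4_RESIZING bit so that only
 * one resize can run at a time.  ext4_resize_end() releases that bit.
 */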
19 int ext4_resize_begin(struct super_block *sb)
20 {
21 	int ret = 0;
22 
23 	if (!capable(CAP_SYS_RESOURCE))
24 		return -EPERM;
25 
26 	/*
27 	 * If we are not using the primary superblock/GDT copy, don't resize,
28 	 * because the user tools have no way of handling this.  Probably a
29 	 * bad time to do it anyway.
30 	 */
31 	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
32 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
33 		ext4_warning(sb, "won't resize using backup superblock at %llu",
34 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
35 		return -EPERM;
36 	}
37 
38 	/*
39 	 * We are not allowed to do online resizing on a filesystem mounted
40 	 * with errors, because that can easily destroy the filesystem.
41 	 */
42 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
43 		ext4_warning(sb, "There are errors in the filesystem, "
44 			     "so online resizing is not allowed");
45 		return -EPERM;
46 	}
47 
48 	if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags))
49 		ret = -EBUSY;
50 
51 	return ret;
52 }
53 
54 void ext4_resize_end(struct super_block *sb)
55 {
56 	clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
57 	smp_mb__after_atomic();
58 }
59 
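/*
 * With META_BG, group descriptors are clustered in units of
 * EXT4_DESC_PER_BLOCK(sb) groups (a meta block group).  These helpers round
 * @group down to the first group of its meta block group, and to the first
 * block of that group.
 */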
60 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
61 					     ext4_group_t group) {
62 	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
63 	       EXT4_DESC_PER_BLOCK_BITS(sb);
64 }
65 
66 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
67 					     ext4_group_t group) {
68 	group = ext4_meta_bg_first_group(sb, group);
69 	return ext4_group_first_block_no(sb, group);
70 }
71 
72 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
73 						ext4_group_t group) {
74 	ext4_grpblk_t overhead;
75 	overhead = ext4_bg_num_gdb(sb, group);
76 	if (ext4_bg_has_super(sb, group))
77 		overhead += 1 +
78 			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
79 	return overhead;
80 }
81 
82 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
83 #define inside(b, first, last)	((b) >= (first) && (b) < (last))
84 
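/*
 * Sanity-check the layout of a single new group passed in by the resize
 * ioctl: the group must be appended right at the current end of the
 * filesystem, its last block must be readable, and the block/inode bitmaps
 * and inode table must lie inside the group without overlapping each other
 * or the superblock/GDT area at the start of the group.
 */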
85 static int verify_group_input(struct super_block *sb,
86 			      struct ext4_new_group_data *input)
87 {
88 	struct ext4_sb_info *sbi = EXT4_SB(sb);
89 	struct ext4_super_block *es = sbi->s_es;
90 	ext4_fsblk_t start = ext4_blocks_count(es);
91 	ext4_fsblk_t end = start + input->blocks_count;
92 	ext4_group_t group = input->group;
93 	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
94 	unsigned overhead;
95 	ext4_fsblk_t metaend;
96 	struct buffer_head *bh = NULL;
97 	ext4_grpblk_t free_blocks_count, offset;
98 	int err = -EINVAL;
99 
100 	if (group != sbi->s_groups_count) {
101 		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
102 			     input->group, sbi->s_groups_count);
103 		return -EINVAL;
104 	}
105 
106 	overhead = ext4_group_overhead_blocks(sb, group);
107 	metaend = start + overhead;
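	/* 2 = block bitmap + inode bitmap of the new group */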
108 	input->free_blocks_count = free_blocks_count =
109 		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
110 
111 	if (test_opt(sb, DEBUG))
112 		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
113 		       "(%d free, %u reserved)\n",
114 		       ext4_bg_has_super(sb, input->group) ? "normal" :
115 		       "no-super", input->group, input->blocks_count,
116 		       free_blocks_count, input->reserved_blocks);
117 
118 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
119 	if (offset != 0)
120 		ext4_warning(sb, "Last group not full");
121 	else if (input->reserved_blocks > input->blocks_count / 5)
122 		ext4_warning(sb, "Reserved blocks too high (%u)",
123 			     input->reserved_blocks);
124 	else if (free_blocks_count < 0)
125 		ext4_warning(sb, "Bad blocks count %u",
126 			     input->blocks_count);
127 	else if (!(bh = sb_bread(sb, end - 1)))
128 		ext4_warning(sb, "Cannot read last block (%llu)",
129 			     end - 1);
130 	else if (outside(input->block_bitmap, start, end))
131 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
132 			     (unsigned long long)input->block_bitmap);
133 	else if (outside(input->inode_bitmap, start, end))
134 		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
135 			     (unsigned long long)input->inode_bitmap);
136 	else if (outside(input->inode_table, start, end) ||
137 		 outside(itend - 1, start, end))
138 		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
139 			     (unsigned long long)input->inode_table, itend - 1);
140 	else if (input->inode_bitmap == input->block_bitmap)
141 		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
142 			     (unsigned long long)input->block_bitmap);
143 	else if (inside(input->block_bitmap, input->inode_table, itend))
144 		ext4_warning(sb, "Block bitmap (%llu) in inode table "
145 			     "(%llu-%llu)",
146 			     (unsigned long long)input->block_bitmap,
147 			     (unsigned long long)input->inode_table, itend - 1);
148 	else if (inside(input->inode_bitmap, input->inode_table, itend))
149 		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
150 			     "(%llu-%llu)",
151 			     (unsigned long long)input->inode_bitmap,
152 			     (unsigned long long)input->inode_table, itend - 1);
153 	else if (inside(input->block_bitmap, start, metaend))
154 		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
155 			     (unsigned long long)input->block_bitmap,
156 			     start, metaend - 1);
157 	else if (inside(input->inode_bitmap, start, metaend))
158 		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
159 			     (unsigned long long)input->inode_bitmap,
160 			     start, metaend - 1);
161 	else if (inside(input->inode_table, start, metaend) ||
162 		 inside(itend - 1, start, metaend))
163 		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
164 			     "(%llu-%llu)",
165 			     (unsigned long long)input->inode_table,
166 			     itend - 1, start, metaend - 1);
167 	else
168 		err = 0;
169 	brelse(bh);
170 
171 	return err;
172 }
173 
174 /*
175  * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
176  * flex group at a time.
177  */
178 struct ext4_new_flex_group_data {
179 	struct ext4_new_group_data *groups;	/* new_group_data for groups
180 						   in the flex group */
181 	__u16 *bg_flags;			/* block group flags of groups
182 						   in @groups */
183 	ext4_group_t count;			/* number of groups in @groups
184 						 */
185 };
186 
187 /*
188  * alloc_flex_gd() allocates an ext4_new_flex_group_data large enough to
189  * describe @flexbg_size groups.
190  *
191  * Returns NULL on failure, otherwise the address of the allocated structure.
192  */
193 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
194 {
195 	struct ext4_new_flex_group_data *flex_gd;
196 
197 	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
198 	if (flex_gd == NULL)
199 		goto out3;
200 
201 	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
202 		goto out2;
203 	flex_gd->count = flexbg_size;
204 
205 	flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
206 				  flexbg_size, GFP_NOFS);
207 	if (flex_gd->groups == NULL)
208 		goto out2;
209 
210 	flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS);
211 	if (flex_gd->bg_flags == NULL)
212 		goto out1;
213 
214 	return flex_gd;
215 
216 out1:
217 	kfree(flex_gd->groups);
218 out2:
219 	kfree(flex_gd);
220 out3:
221 	return NULL;
222 }
223 
224 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
225 {
226 	kfree(flex_gd->bg_flags);
227 	kfree(flex_gd->groups);
228 	kfree(flex_gd);
229 }
230 
231 /*
232  * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
233  * and inode tables for a flex group.
234  *
235  * This function is used by 64bit-resize.  Note that this function allocates
236  * group tables from the 1st group of the groups contained by @flexgd, which
237  * may be only part of a flex group.
238  *
239  * @sb: super block of fs to which the groups belong
240  *
241  * Returns 0 on a successful allocation of the metadata blocks in the
242  * block group.
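 *
 * The bitmaps and inode tables are packed together at the start of the flex
 * group, skipping each group's own overhead (superblock/GDT backups), so
 * that the data blocks that remain in the flex group stay contiguous.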
243  */
244 static int ext4_alloc_group_tables(struct super_block *sb,
245 				struct ext4_new_flex_group_data *flex_gd,
246 				int flexbg_size)
247 {
248 	struct ext4_new_group_data *group_data = flex_gd->groups;
249 	ext4_fsblk_t start_blk;
250 	ext4_fsblk_t last_blk;
251 	ext4_group_t src_group;
252 	ext4_group_t bb_index = 0;
253 	ext4_group_t ib_index = 0;
254 	ext4_group_t it_index = 0;
255 	ext4_group_t group;
256 	ext4_group_t last_group;
257 	unsigned overhead;
258 	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
259 
260 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
261 
262 	src_group = group_data[0].group;
263 	last_group  = src_group + flex_gd->count - 1;
264 
265 	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
266 	       (last_group & ~(flexbg_size - 1))));
267 next_group:
268 	group = group_data[0].group;
269 	if (src_group >= group_data[0].group + flex_gd->count)
270 		return -ENOSPC;
271 	start_blk = ext4_group_first_block_no(sb, src_group);
272 	last_blk = start_blk + group_data[src_group - group].blocks_count;
273 
274 	overhead = ext4_group_overhead_blocks(sb, src_group);
275 
276 	start_blk += overhead;
277 
278 	/* We collect as many contiguous blocks as possible. */
279 	src_group++;
280 	for (; src_group <= last_group; src_group++) {
281 		overhead = ext4_group_overhead_blocks(sb, src_group);
282 		if (overhead == 0)
283 			last_blk += group_data[src_group - group].blocks_count;
284 		else
285 			break;
286 	}
287 
288 	/* Allocate block bitmaps */
289 	for (; bb_index < flex_gd->count; bb_index++) {
290 		if (start_blk >= last_blk)
291 			goto next_group;
292 		group_data[bb_index].block_bitmap = start_blk++;
293 		group = ext4_get_group_number(sb, start_blk - 1);
294 		group -= group_data[0].group;
295 		group_data[group].free_blocks_count--;
296 		flex_gd->bg_flags[group] &= uninit_mask;
297 	}
298 
299 	/* Allocate inode bitmaps */
300 	for (; ib_index < flex_gd->count; ib_index++) {
301 		if (start_blk >= last_blk)
302 			goto next_group;
303 		group_data[ib_index].inode_bitmap = start_blk++;
304 		group = ext4_get_group_number(sb, start_blk - 1);
305 		group -= group_data[0].group;
306 		group_data[group].free_blocks_count--;
307 		flex_gd->bg_flags[group] &= uninit_mask;
308 	}
309 
310 	/* Allocate inode tables */
311 	for (; it_index < flex_gd->count; it_index++) {
312 		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
313 		ext4_fsblk_t next_group_start;
314 
315 		if (start_blk + itb > last_blk)
316 			goto next_group;
317 		group_data[it_index].inode_table = start_blk;
318 		group = ext4_get_group_number(sb, start_blk);
319 		next_group_start = ext4_group_first_block_no(sb, group + 1);
320 		group -= group_data[0].group;
321 
322 		if (start_blk + itb > next_group_start) {
323 			flex_gd->bg_flags[group + 1] &= uninit_mask;
324 			overhead = start_blk + itb - next_group_start;
325 			group_data[group + 1].free_blocks_count -= overhead;
326 			itb -= overhead;
327 		}
328 
329 		group_data[group].free_blocks_count -= itb;
330 		flex_gd->bg_flags[group] &= uninit_mask;
331 		start_blk += EXT4_SB(sb)->s_itb_per_group;
332 	}
333 
334 	if (test_opt(sb, DEBUG)) {
335 		int i;
336 		group = group_data[0].group;
337 
338 		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
339 		       "%d groups, flexbg size is %d:\n", flex_gd->count,
340 		       flexbg_size);
341 
342 		for (i = 0; i < flex_gd->count; i++) {
343 			printk(KERN_DEBUG "adding %s group %u: %u "
344 			       "blocks (%d free)\n",
345 			       ext4_bg_has_super(sb, group + i) ? "normal" :
346 			       "no-super", group + i,
347 			       group_data[i].blocks_count,
348 			       group_data[i].free_blocks_count);
349 		}
350 	}
351 	return 0;
352 }
353 
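/*
 * bclean() returns a zeroed buffer head for block @blk with journal write
 * access already obtained, or an ERR_PTR() on failure.
 */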
354 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
355 				  ext4_fsblk_t blk)
356 {
357 	struct buffer_head *bh;
358 	int err;
359 
360 	bh = sb_getblk(sb, blk);
361 	if (unlikely(!bh))
362 		return ERR_PTR(-ENOMEM);
363 	BUFFER_TRACE(bh, "get_write_access");
364 	if ((err = ext4_journal_get_write_access(handle, bh))) {
365 		brelse(bh);
366 		bh = ERR_PTR(err);
367 	} else {
368 		memset(bh->b_data, 0, sb->s_blocksize);
369 		set_buffer_uptodate(bh);
370 	}
371 
372 	return bh;
373 }
374 
375 /*
376  * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
377  * If that fails, restart the transaction & regain write access for the
378  * buffer head which is used for block_bitmap modifications.
379  */
380 static int extend_or_restart_transaction(handle_t *handle, int thresh)
381 {
382 	int err;
383 
384 	if (ext4_handle_has_enough_credits(handle, thresh))
385 		return 0;
386 
387 	err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
388 	if (err < 0)
389 		return err;
390 	if (err) {
391 		err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
392 		if (err)
393 			return err;
394 	}
395 
396 	return 0;
397 }
398 
399 /*
400  * set_flexbg_block_bitmap() marks @count blocks starting from @block used.
401  * Helper function for setup_new_flex_group_blocks(); it sets the bits for
402  * the group-table blocks in the block bitmaps of a new flex group.
403  *
404  * @sb: super block
405  * @handle: journal handle
406  * @flex_gd: flex group data
407  */
408 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
409 			struct ext4_new_flex_group_data *flex_gd,
410 			ext4_fsblk_t block, ext4_group_t count)
411 {
412 	ext4_group_t count2;
413 
414 	ext4_debug("mark blocks [%llu/%u] used\n", block, count);
415 	for (count2 = count; count > 0; count -= count2, block += count2) {
416 		ext4_fsblk_t start;
417 		struct buffer_head *bh;
418 		ext4_group_t group;
419 		int err;
420 
421 		group = ext4_get_group_number(sb, block);
422 		start = ext4_group_first_block_no(sb, group);
423 		group -= flex_gd->groups[0].group;
424 
425 		count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
426 		if (count2 > count)
427 			count2 = count;
428 
429 		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
430 			BUG_ON(flex_gd->count > 1);
431 			continue;
432 		}
433 
434 		err = extend_or_restart_transaction(handle, 1);
435 		if (err)
436 			return err;
437 
438 		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
439 		if (unlikely(!bh))
440 			return -ENOMEM;
441 
442 		BUFFER_TRACE(bh, "get_write_access");
443 		err = ext4_journal_get_write_access(handle, bh);
444 		if (err)
445 			return err;
446 		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
447 			   block - start, count2);
448 		ext4_set_bits(bh->b_data, block - start, count2);
449 
450 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
451 		if (unlikely(err))
452 			return err;
453 		brelse(bh);
454 	}
455 
456 	return 0;
457 }
458 
459 /*
460  * Set up the block and inode bitmaps, and the inode table for the new groups.
461  * This doesn't need to be part of the main transaction, since we are only
462  * changing blocks outside the actual filesystem.  We still do journaling to
463  * ensure the recovery is correct in case of a failure just after resize.
464  * If any part of this fails, we simply abort the resize.
465  *
466  * setup_new_flex_group_blocks handles a flex group as follows:
467  *  1. copy super block and GDT, and initialize group tables if necessary.
468  *     In this step, we only set bits in block bitmaps for blocks taken by
469  *     super block and GDT.
470  *  2. allocate group tables in block bitmaps, that is, set bits in block
471  *     bitmap for blocks taken by group tables.
472  */
473 static int setup_new_flex_group_blocks(struct super_block *sb,
474 				struct ext4_new_flex_group_data *flex_gd)
475 {
476 	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
477 	ext4_fsblk_t start;
478 	ext4_fsblk_t block;
479 	struct ext4_sb_info *sbi = EXT4_SB(sb);
480 	struct ext4_super_block *es = sbi->s_es;
481 	struct ext4_new_group_data *group_data = flex_gd->groups;
482 	__u16 *bg_flags = flex_gd->bg_flags;
483 	handle_t *handle;
484 	ext4_group_t group, count;
485 	struct buffer_head *bh = NULL;
486 	int reserved_gdb, i, j, err = 0, err2;
487 	int meta_bg;
488 
489 	BUG_ON(!flex_gd->count || !group_data ||
490 	       group_data[0].group != sbi->s_groups_count);
491 
492 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
493 	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
494 
495 	/* This transaction may be extended/restarted along the way */
496 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
497 	if (IS_ERR(handle))
498 		return PTR_ERR(handle);
499 
500 	group = group_data[0].group;
501 	for (i = 0; i < flex_gd->count; i++, group++) {
502 		unsigned long gdblocks;
503 		ext4_grpblk_t overhead;
504 
505 		gdblocks = ext4_bg_num_gdb(sb, group);
506 		start = ext4_group_first_block_no(sb, group);
507 
508 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
509 			goto handle_itb;
510 
511 		if (meta_bg == 1) {
512 			ext4_group_t first_group;
513 			first_group = ext4_meta_bg_first_group(sb, group);
514 			if (first_group != group + 1 &&
515 			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
516 				goto handle_itb;
517 		}
518 
519 		block = start + ext4_bg_has_super(sb, group);
520 		/* Copy all of the GDT blocks into the backup in this group */
521 		for (j = 0; j < gdblocks; j++, block++) {
522 			struct buffer_head *gdb;
523 
524 			ext4_debug("update backup group %#04llx\n", block);
525 			err = extend_or_restart_transaction(handle, 1);
526 			if (err)
527 				goto out;
528 
529 			gdb = sb_getblk(sb, block);
530 			if (unlikely(!gdb)) {
531 				err = -ENOMEM;
532 				goto out;
533 			}
534 
535 			BUFFER_TRACE(gdb, "get_write_access");
536 			err = ext4_journal_get_write_access(handle, gdb);
537 			if (err) {
538 				brelse(gdb);
539 				goto out;
540 			}
541 			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
542 			       gdb->b_size);
543 			set_buffer_uptodate(gdb);
544 
545 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
546 			if (unlikely(err)) {
547 				brelse(gdb);
548 				goto out;
549 			}
550 			brelse(gdb);
551 		}
552 
553 		/* Zero out all of the reserved backup group descriptor
554 		 * table blocks
555 		 */
556 		if (ext4_bg_has_super(sb, group)) {
557 			err = sb_issue_zeroout(sb, gdblocks + start + 1,
558 					reserved_gdb, GFP_NOFS);
559 			if (err)
560 				goto out;
561 		}
562 
563 handle_itb:
564 		/* Initialize group tables of the group @group */
565 		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
566 			goto handle_bb;
567 
568 		/* Zero out all of the inode table blocks */
569 		block = group_data[i].inode_table;
570 		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
571 			   block, sbi->s_itb_per_group);
572 		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
573 				       GFP_NOFS);
574 		if (err)
575 			goto out;
576 
577 handle_bb:
578 		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
579 			goto handle_ib;
580 
581 		/* Initialize block bitmap of the @group */
582 		block = group_data[i].block_bitmap;
583 		err = extend_or_restart_transaction(handle, 1);
584 		if (err)
585 			goto out;
586 
587 		bh = bclean(handle, sb, block);
588 		if (IS_ERR(bh)) {
589 			err = PTR_ERR(bh);
590 			bh = NULL;
591 			goto out;
592 		}
593 		overhead = ext4_group_overhead_blocks(sb, group);
594 		if (overhead != 0) {
595 			ext4_debug("mark backup superblock %#04llx (+0)\n",
596 				   start);
597 			ext4_set_bits(bh->b_data, 0, overhead);
598 		}
599 		ext4_mark_bitmap_end(group_data[i].blocks_count,
600 				     sb->s_blocksize * 8, bh->b_data);
601 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
602 		if (err)
603 			goto out;
604 		brelse(bh);
605 
606 handle_ib:
607 		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
608 			continue;
609 
610 		/* Initialize inode bitmap of the @group */
611 		block = group_data[i].inode_bitmap;
612 		err = extend_or_restart_transaction(handle, 1);
613 		if (err)
614 			goto out;
615 		/* Mark unused entries in inode bitmap used */
616 		bh = bclean(handle, sb, block);
617 		if (IS_ERR(bh)) {
618 			err = PTR_ERR(bh);
619 			bh = NULL;
620 			goto out;
621 		}
622 
623 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
624 				     sb->s_blocksize * 8, bh->b_data);
625 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
626 		if (err)
627 			goto out;
628 		brelse(bh);
629 	}
630 	bh = NULL;
631 
632 	/* Mark group tables in block bitmap */
633 	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
634 		count = group_table_count[j];
635 		start = (&group_data[0].block_bitmap)[j];
636 		block = start;
637 		for (i = 1; i < flex_gd->count; i++) {
638 			block += group_table_count[j];
639 			if (block == (&group_data[i].block_bitmap)[j]) {
640 				count += group_table_count[j];
641 				continue;
642 			}
643 			err = set_flexbg_block_bitmap(sb, handle,
644 						flex_gd, start, count);
645 			if (err)
646 				goto out;
647 			count = group_table_count[j];
648 			start = (&group_data[i].block_bitmap)[j];
649 			block = start;
650 		}
651 
652 		if (count) {
653 			err = set_flexbg_block_bitmap(sb, handle,
654 						flex_gd, start, count);
655 			if (err)
656 				goto out;
657 		}
658 	}
659 
660 out:
661 	brelse(bh);
662 	err2 = ext4_journal_stop(handle);
663 	if (err2 && !err)
664 		err = err2;
665 
666 	return err;
667 }
668 
669 /*
670  * Iterate through the groups which hold BACKUP superblock/GDT copies in an
671  * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
672  * calling this for the first time.  In a sparse filesystem it will be the
673  * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
674  * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
675  */
676 static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
677 				  unsigned *five, unsigned *seven)
678 {
679 	unsigned *min = three;
680 	int mult = 3;
681 	unsigned ret;
682 
683 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
684 					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
685 		ret = *min;
686 		*min += 1;
687 		return ret;
688 	}
689 
690 	if (*five < *min) {
691 		min = five;
692 		mult = 5;
693 	}
694 	if (*seven < *min) {
695 		min = seven;
696 		mult = 7;
697 	}
698 
699 	ret = *min;
700 	*min *= mult;
701 
702 	return ret;
703 }
704 
705 /*
706  * Check that all of the backup GDT blocks are held in the primary GDT block.
707  * It is assumed that they are stored in group order.  Returns the number of
708  * groups in the current filesystem that have BACKUPS, or -ve error code.
709  */
710 static int verify_reserved_gdb(struct super_block *sb,
711 			       ext4_group_t end,
712 			       struct buffer_head *primary)
713 {
714 	const ext4_fsblk_t blk = primary->b_blocknr;
715 	unsigned three = 1;
716 	unsigned five = 5;
717 	unsigned seven = 7;
718 	unsigned grp;
719 	__le32 *p = (__le32 *)primary->b_data;
720 	int gdbackups = 0;
721 
722 	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
723 		if (le32_to_cpu(*p++) !=
724 		    grp * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + blk) {
725 			ext4_warning(sb, "reserved GDT %llu"
726 				     " missing grp %d (%llu)",
727 				     blk, grp,
728 				     grp *
729 				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
730 				     blk);
731 			return -EINVAL;
732 		}
733 		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
734 			return -EFBIG;
735 	}
736 
737 	return gdbackups;
738 }
739 
740 /*
741  * Called when we need to bring a reserved group descriptor table block into
742  * use from the resize inode.  The primary copy of the new GDT block currently
743  * is an indirect block (under the double indirect block in the resize inode).
744  * The new backup GDT blocks will be stored as leaf blocks in this indirect
745  * block, in group order.  Even though we know all the block numbers we need,
746  * we check to ensure that the resize inode has actually reserved these blocks.
747  *
748  * Don't need to update the block bitmaps because the blocks are still in use.
749  *
750  * We get all of the error cases out of the way, so that we are sure to not
751  * fail once we start modifying the data on disk, because JBD has no rollback.
752  */
753 static int add_new_gdb(handle_t *handle, struct inode *inode,
754 		       ext4_group_t group)
755 {
756 	struct super_block *sb = inode->i_sb;
757 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
758 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
759 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
760 	struct buffer_head **o_group_desc, **n_group_desc;
761 	struct buffer_head *dind;
762 	struct buffer_head *gdb_bh;
763 	int gdbackups;
764 	struct ext4_iloc iloc;
765 	__le32 *data;
766 	int err;
767 
768 	if (test_opt(sb, DEBUG))
769 		printk(KERN_DEBUG
770 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
771 		       gdb_num);
772 
773 	gdb_bh = sb_bread(sb, gdblock);
774 	if (!gdb_bh)
775 		return -EIO;
776 
777 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
778 	if (gdbackups < 0) {
779 		err = gdbackups;
780 		goto exit_bh;
781 	}
782 
783 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
784 	dind = sb_bread(sb, le32_to_cpu(*data));
785 	if (!dind) {
786 		err = -EIO;
787 		goto exit_bh;
788 	}
789 
790 	data = (__le32 *)dind->b_data;
791 	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
792 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
793 			     group, gdblock);
794 		err = -EINVAL;
795 		goto exit_dind;
796 	}
797 
798 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
799 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
800 	if (unlikely(err))
801 		goto exit_dind;
802 
803 	BUFFER_TRACE(gdb_bh, "get_write_access");
804 	err = ext4_journal_get_write_access(handle, gdb_bh);
805 	if (unlikely(err))
806 		goto exit_dind;
807 
808 	BUFFER_TRACE(dind, "get_write_access");
809 	err = ext4_journal_get_write_access(handle, dind);
810 	if (unlikely(err))
811 		goto exit_dind;
812 
813 	/* ext4_reserve_inode_write() gets a reference on the iloc */
814 	err = ext4_reserve_inode_write(handle, inode, &iloc);
815 	if (unlikely(err))
816 		goto exit_dind;
817 
818 	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
819 				     sizeof(struct buffer_head *),
820 				     GFP_NOFS);
821 	if (!n_group_desc) {
822 		err = -ENOMEM;
823 		ext4_warning(sb, "not enough memory for %lu groups",
824 			     gdb_num + 1);
825 		goto exit_inode;
826 	}
827 
828 	/*
829 	 * Finally, we have all of the possible failures behind us...
830 	 *
831 	 * Remove new GDT block from inode double-indirect block and clear out
832 	 * the new GDT block for use (which also "frees" the backup GDT blocks
833 	 * from the reserved inode).  We don't need to change the bitmaps for
834 	 * these blocks, because they are marked as in-use from being in the
835 	 * reserved inode, and will become GDT blocks (primary and backup).
836 	 */
837 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
838 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
839 	if (unlikely(err)) {
840 		ext4_std_error(sb, err);
841 		goto exit_inode;
842 	}
843 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
844 	ext4_mark_iloc_dirty(handle, inode, &iloc);
845 	memset(gdb_bh->b_data, 0, sb->s_blocksize);
846 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
847 	if (unlikely(err)) {
848 		ext4_std_error(sb, err);
849 		goto exit_inode;
850 	}
851 	brelse(dind);
852 
853 	o_group_desc = EXT4_SB(sb)->s_group_desc;
854 	memcpy(n_group_desc, o_group_desc,
855 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
856 	n_group_desc[gdb_num] = gdb_bh;
857 	EXT4_SB(sb)->s_group_desc = n_group_desc;
858 	EXT4_SB(sb)->s_gdb_count++;
859 	kvfree(o_group_desc);
860 
861 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
862 	err = ext4_handle_dirty_super(handle, sb);
863 	if (err)
864 		ext4_std_error(sb, err);
865 
866 	return err;
867 
868 exit_inode:
869 	kvfree(n_group_desc);
870 	brelse(iloc.bh);
871 exit_dind:
872 	brelse(dind);
873 exit_bh:
874 	brelse(gdb_bh);
875 
876 	ext4_debug("leaving with error %d\n", err);
877 	return err;
878 }
879 
880 /*
881  * add_new_gdb_meta_bg is the sister of add_new_gdb.
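 * For a meta_bg filesystem the new group descriptor block is not taken from
 * the resize inode; it lives in the new group itself, right after the backup
 * superblock (if any), so it only has to be read and hooked into
 * s_group_desc.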
882  */
883 static int add_new_gdb_meta_bg(struct super_block *sb,
884 			       handle_t *handle, ext4_group_t group) {
885 	ext4_fsblk_t gdblock;
886 	struct buffer_head *gdb_bh;
887 	struct buffer_head **o_group_desc, **n_group_desc;
888 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
889 	int err;
890 
891 	gdblock = ext4_meta_bg_first_block_no(sb, group) +
892 		   ext4_bg_has_super(sb, group);
893 	gdb_bh = sb_bread(sb, gdblock);
894 	if (!gdb_bh)
895 		return -EIO;
896 	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
897 				     sizeof(struct buffer_head *),
898 				     GFP_NOFS);
899 	if (!n_group_desc) {
900 		err = -ENOMEM;
901 		ext4_warning(sb, "not enough memory for %lu groups",
902 			     gdb_num + 1);
903 		return err;
904 	}
905 
906 	o_group_desc = EXT4_SB(sb)->s_group_desc;
907 	memcpy(n_group_desc, o_group_desc,
908 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
909 	n_group_desc[gdb_num] = gdb_bh;
910 	EXT4_SB(sb)->s_group_desc = n_group_desc;
911 	EXT4_SB(sb)->s_gdb_count++;
912 	kvfree(o_group_desc);
913 	BUFFER_TRACE(gdb_bh, "get_write_access");
914 	err = ext4_journal_get_write_access(handle, gdb_bh);
915 	if (unlikely(err))
916 		brelse(gdb_bh);
917 	return err;
918 }
919 
920 /*
921  * Called when we are adding a new group which has a backup copy of each of
922  * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
923  * We need to add these reserved backup GDT blocks to the resize inode, so
924  * that they are kept for future resizing and not allocated to files.
925  *
926  * Each reserved backup GDT block will go into a different indirect block.
927  * The indirect blocks are actually the primary reserved GDT blocks,
928  * so we know in advance what their block numbers are.  We only get the
929  * double-indirect block to verify it is pointing to the primary reserved
930  * GDT blocks so we don't overwrite a data block by accident.  The reserved
931  * backup GDT blocks are stored in their reserved primary GDT block.
932  */
933 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
934 			      ext4_group_t group)
935 {
936 	struct super_block *sb = inode->i_sb;
937 	int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
938 	struct buffer_head **primary;
939 	struct buffer_head *dind;
940 	struct ext4_iloc iloc;
941 	ext4_fsblk_t blk;
942 	__le32 *data, *end;
943 	int gdbackups = 0;
944 	int res, i;
945 	int err;
946 
947 	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
948 	if (!primary)
949 		return -ENOMEM;
950 
951 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
952 	dind = sb_bread(sb, le32_to_cpu(*data));
953 	if (!dind) {
954 		err = -EIO;
955 		goto exit_free;
956 	}
957 
958 	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
959 	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
960 					 EXT4_ADDR_PER_BLOCK(sb));
961 	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
962 
963 	/* Get each reserved primary GDT block and verify it holds backups */
964 	for (res = 0; res < reserved_gdb; res++, blk++) {
965 		if (le32_to_cpu(*data) != blk) {
966 			ext4_warning(sb, "reserved block %llu"
967 				     " not at offset %ld",
968 				     blk,
969 				     (long)(data - (__le32 *)dind->b_data));
970 			err = -EINVAL;
971 			goto exit_bh;
972 		}
973 		primary[res] = sb_bread(sb, blk);
974 		if (!primary[res]) {
975 			err = -EIO;
976 			goto exit_bh;
977 		}
978 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
979 		if (gdbackups < 0) {
980 			brelse(primary[res]);
981 			err = gdbackups;
982 			goto exit_bh;
983 		}
984 		if (++data >= end)
985 			data = (__le32 *)dind->b_data;
986 	}
987 
988 	for (i = 0; i < reserved_gdb; i++) {
989 		BUFFER_TRACE(primary[i], "get_write_access");
990 		if ((err = ext4_journal_get_write_access(handle, primary[i])))
991 			goto exit_bh;
992 	}
993 
994 	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
995 		goto exit_bh;
996 
997 	/*
998 	 * Finally we can add each of the reserved backup GDT blocks from
999 	 * the new group to its reserved primary GDT block.
1000 	 */
1001 	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1002 	for (i = 0; i < reserved_gdb; i++) {
1003 		int err2;
1004 		data = (__le32 *)primary[i]->b_data;
1005 		/* printk("reserving backup %lu[%u] = %lu\n",
1006 		       primary[i]->b_blocknr, gdbackups,
1007 		       blk + primary[i]->b_blocknr); */
1008 		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1009 		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1010 		if (!err)
1011 			err = err2;
1012 	}
1013 	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
1014 	ext4_mark_iloc_dirty(handle, inode, &iloc);
1015 
1016 exit_bh:
1017 	while (--res >= 0)
1018 		brelse(primary[res]);
1019 	brelse(dind);
1020 
1021 exit_free:
1022 	kfree(primary);
1023 
1024 	return err;
1025 }
1026 
1027 /*
1028  * Update the backup copies of the ext4 metadata.  These don't need to be part
1029  * of the main resize transaction, because e2fsck will re-write them if there
1030  * is a problem (basically only OOM will cause a problem).  However, we
1031  * _should_ update the backups if possible, in case the primary gets trashed
1032  * for some reason and we need to run e2fsck from a backup superblock.  The
1033  * important part is that the new block and inode counts are in the backup
1034  * superblocks, and the location of the new group metadata in the GDT backups.
1035  *
1036  * We do not need to take the s_resize_lock for this, because these
1037  * blocks are not otherwise touched by the filesystem code when it is
1038  * mounted.  We don't need to worry about last changing from
1039  * sbi->s_groups_count, because the worst that can happen is that we
1040  * do not copy the full number of backups at this time.  The resize
1041  * which changed s_groups_count will back them up again.
1042  */
1043 static void update_backups(struct super_block *sb, int blk_off, char *data,
1044 			   int size, int meta_bg)
1045 {
1046 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1047 	ext4_group_t last;
1048 	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1049 	unsigned three = 1;
1050 	unsigned five = 5;
1051 	unsigned seven = 7;
1052 	ext4_group_t group = 0;
1053 	int rest = sb->s_blocksize - size;
1054 	handle_t *handle;
1055 	int err = 0, err2;
1056 
1057 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1058 	if (IS_ERR(handle)) {
1059 		group = 1;
1060 		err = PTR_ERR(handle);
1061 		goto exit_err;
1062 	}
1063 
1064 	if (meta_bg == 0) {
1065 		group = ext4_list_backups(sb, &three, &five, &seven);
1066 		last = sbi->s_groups_count;
1067 	} else {
1068 		group = ext4_meta_bg_first_group(sb, group) + 1;
1069 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1070 	}
1071 
1072 	while (group < sbi->s_groups_count) {
1073 		struct buffer_head *bh;
1074 		ext4_fsblk_t backup_block;
1075 
1076 		/* Out of journal space, and can't get more - abort - so sad */
1077 		if (ext4_handle_valid(handle) &&
1078 		    handle->h_buffer_credits == 0 &&
1079 		    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
1080 		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
1081 			break;
1082 
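		/*
		 * Without meta_bg the backup lives at a fixed offset (@blk_off)
		 * from the start of each backup group; with meta_bg it sits
		 * right after the group's backup superblock, if any.
		 */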
1083 		if (meta_bg == 0)
1084 			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1085 		else
1086 			backup_block = (ext4_group_first_block_no(sb, group) +
1087 					ext4_bg_has_super(sb, group));
1088 
1089 		bh = sb_getblk(sb, backup_block);
1090 		if (unlikely(!bh)) {
1091 			err = -ENOMEM;
1092 			break;
1093 		}
1094 		ext4_debug("update metadata backup %llu(+%llu)\n",
1095 			   backup_block, backup_block -
1096 			   ext4_group_first_block_no(sb, group));
1097 		BUFFER_TRACE(bh, "get_write_access");
1098 		if ((err = ext4_journal_get_write_access(handle, bh)))
1099 			break;
1100 		lock_buffer(bh);
1101 		memcpy(bh->b_data, data, size);
1102 		if (rest)
1103 			memset(bh->b_data + size, 0, rest);
1104 		set_buffer_uptodate(bh);
1105 		unlock_buffer(bh);
1106 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
1107 		if (unlikely(err))
1108 			ext4_std_error(sb, err);
1109 		brelse(bh);
1110 
1111 		if (meta_bg == 0)
1112 			group = ext4_list_backups(sb, &three, &five, &seven);
1113 		else if (group == last)
1114 			break;
1115 		else
1116 			group = last;
1117 	}
1118 	if ((err2 = ext4_journal_stop(handle)) && !err)
1119 		err = err2;
1120 
1121 	/*
1122 	 * Ugh! Need to have e2fsck write the backup copies.  It is too
1123 	 * late to revert the resize, we shouldn't fail just because of
1124 	 * the backup copies (they are only needed in case of corruption).
1125 	 *
1126 	 * However, if we got here we have a journal problem too, so we
1127 	 * can't really start a transaction to mark the superblock.
1128 	 * Chicken out and just set the flag on the hope it will be written
1129 	 * to disk, and if not - we will simply wait until next fsck.
1130 	 */
1131 exit_err:
1132 	if (err) {
1133 		ext4_warning(sb, "can't update backup for group %u (err %d), "
1134 			     "forcing fsck on next reboot", group, err);
1135 		sbi->s_mount_state &= ~EXT4_VALID_FS;
1136 		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1137 		mark_buffer_dirty(sbi->s_sbh);
1138 	}
1139 }
1140 
1141 /*
1142  * ext4_add_new_descs() adds the group descriptors of @count groups
1143  * starting at @group.
1144  *
1145  * @handle: journal handle
1146  * @sb: super block
1147  * @group: the group no. of the first group desc to be added
1148  * @resize_inode: the resize inode
1149  * @count: number of group descriptors to be added
1150  */
1151 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1152 			      ext4_group_t group, struct inode *resize_inode,
1153 			      ext4_group_t count)
1154 {
1155 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1156 	struct ext4_super_block *es = sbi->s_es;
1157 	struct buffer_head *gdb_bh;
1158 	int i, gdb_off, gdb_num, err = 0;
1159 	int meta_bg;
1160 
1161 	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
1162 	for (i = 0; i < count; i++, group++) {
1163 		int reserved_gdb = ext4_bg_has_super(sb, group) ?
1164 			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1165 
1166 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1167 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1168 
1169 		/*
1170 		 * We will only either add reserved group blocks to a backup group
1171 		 * or remove reserved blocks for the first group in a new group block.
1172 		 * Doing both would mean more complex code, and sane people don't
1173 		 * use non-sparse filesystems anymore.  This is already checked above.
1174 		 */
1175 		if (gdb_off) {
1176 			gdb_bh = sbi->s_group_desc[gdb_num];
1177 			BUFFER_TRACE(gdb_bh, "get_write_access");
1178 			err = ext4_journal_get_write_access(handle, gdb_bh);
1179 
1180 			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1181 				err = reserve_backup_gdb(handle, resize_inode, group);
1182 		} else if (meta_bg != 0) {
1183 			err = add_new_gdb_meta_bg(sb, handle, group);
1184 		} else {
1185 			err = add_new_gdb(handle, resize_inode, group);
1186 		}
1187 		if (err)
1188 			break;
1189 	}
1190 	return err;
1191 }
1192 
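/*
 * ext4_get_bitmap() reads the bitmap block @block and returns its buffer
 * head, or NULL if the block cannot be read.
 */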
1193 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1194 {
1195 	struct buffer_head *bh = sb_getblk(sb, block);
1196 	if (unlikely(!bh))
1197 		return NULL;
1198 	if (!bh_uptodate_or_lock(bh)) {
1199 		if (bh_submit_read(bh) < 0) {
1200 			brelse(bh);
1201 			return NULL;
1202 		}
1203 	}
1204 
1205 	return bh;
1206 }
1207 
1208 static int ext4_set_bitmap_checksums(struct super_block *sb,
1209 				     ext4_group_t group,
1210 				     struct ext4_group_desc *gdp,
1211 				     struct ext4_new_group_data *group_data)
1212 {
1213 	struct buffer_head *bh;
1214 
1215 	if (!ext4_has_metadata_csum(sb))
1216 		return 0;
1217 
1218 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1219 	if (!bh)
1220 		return -EIO;
1221 	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1222 				   EXT4_INODES_PER_GROUP(sb) / 8);
1223 	brelse(bh);
1224 
1225 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1226 	if (!bh)
1227 		return -EIO;
1228 	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1229 	brelse(bh);
1230 
1231 	return 0;
1232 }
1233 
1234 /*
1235  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1236  */
1237 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1238 				struct ext4_new_flex_group_data *flex_gd)
1239 {
1240 	struct ext4_new_group_data	*group_data = flex_gd->groups;
1241 	struct ext4_group_desc		*gdp;
1242 	struct ext4_sb_info		*sbi = EXT4_SB(sb);
1243 	struct buffer_head		*gdb_bh;
1244 	ext4_group_t			group;
1245 	__u16				*bg_flags = flex_gd->bg_flags;
1246 	int				i, gdb_off, gdb_num, err = 0;
1247 
1248 
1249 	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1250 		group = group_data->group;
1251 
1252 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1253 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1254 
1255 		/*
1256 		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1257 		 */
1258 		gdb_bh = sbi->s_group_desc[gdb_num];
1259 		/* Update group descriptor block for new group */
1260 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1261 						 gdb_off * EXT4_DESC_SIZE(sb));
1262 
1263 		memset(gdp, 0, EXT4_DESC_SIZE(sb));
1264 		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1265 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1266 		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1267 		if (err) {
1268 			ext4_std_error(sb, err);
1269 			break;
1270 		}
1271 
1272 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
1273 		ext4_free_group_clusters_set(sb, gdp,
1274 			EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
1275 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1276 		if (ext4_has_group_desc_csum(sb))
1277 			ext4_itable_unused_set(sb, gdp,
1278 					       EXT4_INODES_PER_GROUP(sb));
1279 		gdp->bg_flags = cpu_to_le16(*bg_flags);
1280 		ext4_group_desc_csum_set(sb, group, gdp);
1281 
1282 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1283 		if (unlikely(err)) {
1284 			ext4_std_error(sb, err);
1285 			break;
1286 		}
1287 
1288 		/*
1289 		 * We can allocate memory for mb_alloc based on the new group
1290 		 * descriptor
1291 		 */
1292 		err = ext4_mb_add_groupinfo(sb, group, gdp);
1293 		if (err)
1294 			break;
1295 	}
1296 	return err;
1297 }
1298 
1299 /*
1300  * ext4_update_super() updates the super block so that the newly added
1301  * groups can be seen by the filesystem.
1302  *
1303  * @sb: super block
1304  * @flex_gd: newly added groups
1305  */
1306 static void ext4_update_super(struct super_block *sb,
1307 			     struct ext4_new_flex_group_data *flex_gd)
1308 {
1309 	ext4_fsblk_t blocks_count = 0;
1310 	ext4_fsblk_t free_blocks = 0;
1311 	ext4_fsblk_t reserved_blocks = 0;
1312 	struct ext4_new_group_data *group_data = flex_gd->groups;
1313 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1314 	struct ext4_super_block *es = sbi->s_es;
1315 	int i;
1316 
1317 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
1318 	/*
1319 	 * Make the new blocks and inodes valid next.  We do this before
1320 	 * increasing the group count so that once the group is enabled,
1321 	 * all of its blocks and inodes are already valid.
1322 	 *
1323 	 * We always allocate group-by-group, then block-by-block or
1324 	 * inode-by-inode within a group, so enabling these
1325 	 * blocks/inodes before the group is live won't actually let us
1326 	 * allocate the new space yet.
1327 	 */
1328 	for (i = 0; i < flex_gd->count; i++) {
1329 		blocks_count += group_data[i].blocks_count;
1330 		free_blocks += group_data[i].free_blocks_count;
1331 	}
1332 
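	/*
	 * Keep the same reserved-block percentage for the added space: e.g.
	 * with 5% reserved, adding 1000 blocks reserves about 50 of them.
	 * reserved = added_blocks * (r_blocks_count * 100 / blocks_count) / 100
	 */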
1333 	reserved_blocks = ext4_r_blocks_count(es) * 100;
1334 	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1335 	reserved_blocks *= blocks_count;
1336 	do_div(reserved_blocks, 100);
1337 
1338 	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1339 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1340 	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1341 		     flex_gd->count);
1342 	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1343 		     flex_gd->count);
1344 
1345 	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1346 	/*
1347 	 * We need to protect s_groups_count against other CPUs seeing
1348 	 * inconsistent state in the superblock.
1349 	 *
1350 	 * The precise rules we use are:
1351 	 *
1352 	 * * Writers must perform a smp_wmb() after updating all
1353 	 *   dependent data and before modifying the groups count
1354 	 *
1355 	 * * Readers must perform an smp_rmb() after reading the groups
1356 	 *   count and before reading any dependent data.
1357 	 *
1358 	 * NB. These rules can be relaxed when checking the group count
1359 	 * while freeing data, as we can only allocate from a block
1360 	 * group after serialising against the group count, and we can
1361 	 * only then free after serialising in turn against that
1362 	 * allocation.
1363 	 */
1364 	smp_wmb();
1365 
1366 	/* Update the global fs size fields */
1367 	sbi->s_groups_count += flex_gd->count;
1368 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1369 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1370 
1371 	/* Update the reserved block counts only once the new group is
1372 	 * active. */
1373 	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1374 				reserved_blocks);
1375 
1376 	/* Update the free space counts */
1377 	percpu_counter_add(&sbi->s_freeclusters_counter,
1378 			   EXT4_NUM_B2C(sbi, free_blocks));
1379 	percpu_counter_add(&sbi->s_freeinodes_counter,
1380 			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1381 
1382 	ext4_debug("free blocks count %llu",
1383 		   percpu_counter_read(&sbi->s_freeclusters_counter));
1384 	if (EXT4_HAS_INCOMPAT_FEATURE(sb,
1385 				      EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
1386 	    sbi->s_log_groups_per_flex) {
1387 		ext4_group_t flex_group;
1388 		flex_group = ext4_flex_group(sbi, group_data[0].group);
1389 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1390 			     &sbi->s_flex_groups[flex_group].free_clusters);
1391 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1392 			   &sbi->s_flex_groups[flex_group].free_inodes);
1393 	}
1394 
1395 	/*
1396 	 * Update the fs overhead information
1397 	 */
1398 	ext4_calculate_overhead(sb);
1399 
1400 	if (test_opt(sb, DEBUG))
1401 		printk(KERN_DEBUG "EXT4-fs: added %u groups: "
1402 		       "%llu blocks (%llu free, %llu reserved)\n", flex_gd->count,
1403 		       blocks_count, free_blocks, reserved_blocks);
1404 }
1405 
1406 /* Add a flex group to an fs. Ensure we handle all possible error conditions
1407  * _before_ we start modifying the filesystem, because we cannot abort the
1408  * transaction and not have it write the data to disk.
1409  */
1410 static int ext4_flex_group_add(struct super_block *sb,
1411 			       struct inode *resize_inode,
1412 			       struct ext4_new_flex_group_data *flex_gd)
1413 {
1414 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1415 	struct ext4_super_block *es = sbi->s_es;
1416 	ext4_fsblk_t o_blocks_count;
1417 	ext4_grpblk_t last;
1418 	ext4_group_t group;
1419 	handle_t *handle;
1420 	unsigned reserved_gdb;
1421 	int err = 0, err2 = 0, credit;
1422 
1423 	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1424 
1425 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1426 	o_blocks_count = ext4_blocks_count(es);
1427 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1428 	BUG_ON(last);
1429 
1430 	err = setup_new_flex_group_blocks(sb, flex_gd);
1431 	if (err)
1432 		goto exit;
1433 	/*
1434 	 * We will always be modifying at least the superblock and GDT
1435 	 * blocks.  If we are adding a group past the last current GDT block,
1436 	 * we will also modify the inode and the dindirect block.  If we
1437 	 * are adding a group with superblock/GDT backups, we will also
1438 	 * modify each of the reserved GDT dindirect blocks.
1439 	 */
1440 	credit = 3;	/* sb, resize inode, resize inode dindirect */
1441 	/* GDT blocks */
1442 	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1443 	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
1444 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1445 	if (IS_ERR(handle)) {
1446 		err = PTR_ERR(handle);
1447 		goto exit;
1448 	}
1449 
1450 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1451 	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1452 	if (err)
1453 		goto exit_journal;
1454 
1455 	group = flex_gd->groups[0].group;
1456 	BUG_ON(group != EXT4_SB(sb)->s_groups_count);
1457 	err = ext4_add_new_descs(handle, sb, group,
1458 				resize_inode, flex_gd->count);
1459 	if (err)
1460 		goto exit_journal;
1461 
1462 	err = ext4_setup_new_descs(handle, sb, flex_gd);
1463 	if (err)
1464 		goto exit_journal;
1465 
1466 	ext4_update_super(sb, flex_gd);
1467 
1468 	err = ext4_handle_dirty_super(handle, sb);
1469 
1470 exit_journal:
1471 	err2 = ext4_journal_stop(handle);
1472 	if (!err)
1473 		err = err2;
1474 
1475 	if (!err) {
1476 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1477 		int gdb_num_end = ((group + flex_gd->count - 1) /
1478 				   EXT4_DESC_PER_BLOCK(sb));
1479 		int meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb,
1480 				EXT4_FEATURE_INCOMPAT_META_BG);
1481 		sector_t old_gdb = 0;
1482 
1483 		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
1484 			       sizeof(struct ext4_super_block), 0);
1485 		for (; gdb_num <= gdb_num_end; gdb_num++) {
1486 			struct buffer_head *gdb_bh;
1487 
1488 			gdb_bh = sbi->s_group_desc[gdb_num];
1489 			if (old_gdb == gdb_bh->b_blocknr)
1490 				continue;
1491 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
1492 				       gdb_bh->b_size, meta_bg);
1493 			old_gdb = gdb_bh->b_blocknr;
1494 		}
1495 	}
1496 exit:
1497 	return err;
1498 }
1499 
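/*
 * ext4_setup_next_flex_gd() fills @flex_gd with the next batch of new groups
 * (at most @flexbg_size of them, never crossing a flex group boundary)
 * needed to grow the filesystem to @n_blocks_count.  Returns 0 when there is
 * nothing left to add, 1 otherwise.
 */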
1500 static int ext4_setup_next_flex_gd(struct super_block *sb,
1501 				    struct ext4_new_flex_group_data *flex_gd,
1502 				    ext4_fsblk_t n_blocks_count,
1503 				    unsigned long flexbg_size)
1504 {
1505 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1506 	struct ext4_new_group_data *group_data = flex_gd->groups;
1507 	ext4_fsblk_t o_blocks_count;
1508 	ext4_group_t n_group;
1509 	ext4_group_t group;
1510 	ext4_group_t last_group;
1511 	ext4_grpblk_t last;
1512 	ext4_grpblk_t blocks_per_group;
1513 	unsigned long i;
1514 
1515 	blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
1516 
1517 	o_blocks_count = ext4_blocks_count(es);
1518 
1519 	if (o_blocks_count == n_blocks_count)
1520 		return 0;
1521 
1522 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1523 	BUG_ON(last);
1524 	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1525 
1526 	last_group = group | (flexbg_size - 1);
1527 	if (last_group > n_group)
1528 		last_group = n_group;
1529 
1530 	flex_gd->count = last_group - group + 1;
1531 
1532 	for (i = 0; i < flex_gd->count; i++) {
1533 		int overhead;
1534 
1535 		group_data[i].group = group + i;
1536 		group_data[i].blocks_count = blocks_per_group;
1537 		overhead = ext4_group_overhead_blocks(sb, group + i);
1538 		group_data[i].free_blocks_count = blocks_per_group - overhead;
1539 		if (ext4_has_group_desc_csum(sb)) {
1540 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1541 					       EXT4_BG_INODE_UNINIT;
1542 			if (!test_opt(sb, INIT_INODE_TABLE))
1543 				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1544 		} else
1545 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1546 	}
1547 
1548 	if (last_group == n_group && ext4_has_group_desc_csum(sb))
1549 		/* We need to initialize block bitmap of last group. */
1550 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1551 
1552 	if ((last_group == n_group) && (last != blocks_per_group - 1)) {
1553 		group_data[i - 1].blocks_count = last + 1;
1554 		group_data[i - 1].free_blocks_count -= blocks_per_group -
1555 					last - 1;
1556 	}
1557 
1558 	return 1;
1559 }
1560 
1561 /* Add group descriptor data to an existing or new group descriptor block.
1562  * Ensure we handle all possible error conditions _before_ we start modifying
1563  * the filesystem, because we cannot abort the transaction and not have it
1564  * write the data to disk.
1565  *
1566  * If we are on a GDT block boundary, we need to get the reserved GDT block.
1567  * Otherwise, we may need to add backup GDT blocks for a sparse group.
1568  *
1569  * We only need to hold the superblock lock while we are actually adding
1570  * in the new group's counts to the superblock.  Prior to that we have
1571  * not really "added" the group at all.  We re-check that we are still
1572  * adding in the last group in case things have changed since verifying.
1573  */
1574 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1575 {
1576 	struct ext4_new_flex_group_data flex_gd;
1577 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1578 	struct ext4_super_block *es = sbi->s_es;
1579 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1580 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1581 	struct inode *inode = NULL;
1582 	int gdb_off;
1583 	int err;
1584 	__u16 bg_flags = 0;
1585 
1586 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1587 
1588 	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
1589 					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
1590 		ext4_warning(sb, "Can't resize non-sparse filesystem further");
1591 		return -EPERM;
1592 	}
1593 
1594 	if (ext4_blocks_count(es) + input->blocks_count <
1595 	    ext4_blocks_count(es)) {
1596 		ext4_warning(sb, "blocks_count overflow");
1597 		return -EINVAL;
1598 	}
1599 
1600 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1601 	    le32_to_cpu(es->s_inodes_count)) {
1602 		ext4_warning(sb, "inodes_count overflow");
1603 		return -EINVAL;
1604 	}
1605 
1606 	if (reserved_gdb || gdb_off == 0) {
1607 		if (!EXT4_HAS_COMPAT_FEATURE(sb,
1608 					     EXT4_FEATURE_COMPAT_RESIZE_INODE)
1609 		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1610 			ext4_warning(sb,
1611 				     "No reserved GDT blocks, can't resize");
1612 			return -EPERM;
1613 		}
1614 		inode = ext4_iget(sb, EXT4_RESIZE_INO);
1615 		if (IS_ERR(inode)) {
1616 			ext4_warning(sb, "Error opening resize inode");
1617 			return PTR_ERR(inode);
1618 		}
1619 	}
1620 
1621 
1622 	err = verify_group_input(sb, input);
1623 	if (err)
1624 		goto out;
1625 
1626 	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1627 	if (err)
1628 		goto out;
1629 
1630 	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1631 	if (err)
1632 		goto out;
1633 
1634 	flex_gd.count = 1;
1635 	flex_gd.groups = input;
1636 	flex_gd.bg_flags = &bg_flags;
1637 	err = ext4_flex_group_add(sb, inode, &flex_gd);
1638 out:
1639 	iput(inode);
1640 	return err;
1641 } /* ext4_group_add */
1642 
1643 /*
1644  * Extend a group without checking, assuming that checking has been done.
1645  */
1646 static int ext4_group_extend_no_check(struct super_block *sb,
1647 				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1648 {
1649 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1650 	handle_t *handle;
1651 	int err = 0, err2;
1652 
1653 	/* We will update the superblock, one block bitmap, and
1654 	 * one group descriptor via ext4_group_add_blocks().
1655 	 */
1656 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1657 	if (IS_ERR(handle)) {
1658 		err = PTR_ERR(handle);
1659 		ext4_warning(sb, "error %d on journal start", err);
1660 		return err;
1661 	}
1662 
1663 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1664 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
1665 	if (err) {
1666 		ext4_warning(sb, "error %d on journal write access", err);
1667 		goto errout;
1668 	}
1669 
1670 	ext4_blocks_count_set(es, o_blocks_count + add);
1671 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1672 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1673 		   o_blocks_count + add);
1674 	/* We add the blocks to the bitmap and set the group need init bit */
1675 	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1676 	if (err)
1677 		goto errout;
1678 	ext4_handle_dirty_super(handle, sb);
1679 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1680 		   o_blocks_count + add);
1681 errout:
1682 	err2 = ext4_journal_stop(handle);
1683 	if (err2 && !err)
1684 		err = err2;
1685 
1686 	if (!err) {
1687 		if (test_opt(sb, DEBUG))
1688 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1689 			       "blocks\n", ext4_blocks_count(es));
1690 		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1691 			       (char *)es, sizeof(struct ext4_super_block), 0);
1692 	}
1693 	return err;
1694 }
1695 
1696 /*
1697  * Extend the filesystem to the new number of blocks specified.  This entry
1698  * point is only used to extend the current filesystem to the end of the last
1699  * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
1700  * for emergencies (because it has no dependencies on reserved blocks).
1701  *
1702  * If we _really_ wanted, we could use default values to call ext4_group_add(),
1703  * allowing the "remount" trick to work for arbitrary resizing, assuming enough
1704  * GDT blocks are reserved to grow to the desired size.
1705  */
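/*
 * A hypothetical userspace sketch of the ioctl path mentioned above (not
 * part of this file, error handling omitted); the argument is the desired
 * total size of the filesystem in blocks:
 *
 *	__u32 n_blocks = new_total_block_count;
 *	ioctl(fd, EXT4_IOC_GROUP_EXTEND, &n_blocks);
 */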
1706 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1707 		      ext4_fsblk_t n_blocks_count)
1708 {
1709 	ext4_fsblk_t o_blocks_count;
1710 	ext4_grpblk_t last;
1711 	ext4_grpblk_t add;
1712 	struct buffer_head *bh;
1713 	int err;
1714 	ext4_group_t group;
1715 
1716 	o_blocks_count = ext4_blocks_count(es);
1717 
1718 	if (test_opt(sb, DEBUG))
1719 		ext4_msg(sb, KERN_DEBUG,
1720 			 "extending last group from %llu to %llu blocks",
1721 			 o_blocks_count, n_blocks_count);
1722 
1723 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1724 		return 0;
1725 
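	/*
	 * One filesystem block covers 2^(blocksize_bits - 9) 512-byte
	 * sectors, so the block layer cannot address blocks beyond the
	 * largest sector_t value shifted down by that amount.
	 */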
1726 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1727 		ext4_msg(sb, KERN_ERR,
1728 			 "filesystem too large to resize to %llu blocks safely",
1729 			 n_blocks_count);
1730 		if (sizeof(sector_t) < 8)
1731 			ext4_warning(sb, "CONFIG_LBDAF not enabled");
1732 		return -EINVAL;
1733 	}
1734 
1735 	if (n_blocks_count < o_blocks_count) {
1736 		ext4_warning(sb, "can't shrink FS - resize aborted");
1737 		return -EINVAL;
1738 	}
1739 
1740 	/* Handle the remaining blocks in the last group only. */
1741 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1742 
1743 	if (last == 0) {
1744 		ext4_warning(sb, "need to use ext2online to resize further");
1745 		return -EPERM;
1746 	}
1747 
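	/*
	 * Worked example (assuming 4KiB blocks, i.e. 32768 blocks per group,
	 * and s_first_data_block == 0): with o_blocks_count == 100000,
	 * last == 100000 % 32768 == 1696, so at most 32768 - 1696 == 31072
	 * blocks can be added before the last group is full.
	 */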
1748 	add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1749 
1750 	if (o_blocks_count + add < o_blocks_count) {
1751 		ext4_warning(sb, "blocks_count overflow");
1752 		return -EINVAL;
1753 	}
1754 
1755 	if (o_blocks_count + add > n_blocks_count)
1756 		add = n_blocks_count - o_blocks_count;
1757 
1758 	if (o_blocks_count + add < n_blocks_count)
1759 		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1760 			     o_blocks_count + add, add);
1761 
1762 	/* See if the device is actually as big as what was requested */
1763 	bh = sb_bread(sb, o_blocks_count + add - 1);
1764 	if (!bh) {
1765 		ext4_warning(sb, "can't read last block, resize aborted");
1766 		return -ENOSPC;
1767 	}
1768 	brelse(bh);
1769 
1770 	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1771 	return err;
1772 } /* ext4_group_extend */
1773 
1774 
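/*
 * Number of group descriptor blocks needed to describe @groups block
 * groups, i.e. @groups divided by EXT4_DESC_PER_BLOCK(sb), rounded up.
 */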
1775 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1776 {
1777 	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1778 }
1779 
1780 /*
1781  * Release the resize inode and drop the resize_inode feature if there
1782  * are no more reserved gdt blocks, and then convert the file system
1783  * to enable meta_bg
1784  */
1785 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1786 {
1787 	handle_t *handle;
1788 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1789 	struct ext4_super_block *es = sbi->s_es;
1790 	struct ext4_inode_info *ei = EXT4_I(inode);
1791 	ext4_fsblk_t nr;
1792 	int i, ret, err = 0;
1793 	int credits = 1;
1794 
1795 	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1796 	if (inode) {
1797 		if (es->s_reserved_gdt_blocks) {
1798 			ext4_error(sb, "Unexpected non-zero "
1799 				   "s_reserved_gdt_blocks");
1800 			return -EPERM;
1801 		}
1802 
1803 		/* Do a quick sanity check of the resize inode */
1804 		if (inode->i_blocks != 1 << (inode->i_blkbits - 9))
1805 			goto invalid_resize_inode;
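		/*
		 * A sane resize inode owns exactly one block: the double
		 * indirect block at i_data[EXT4_DIND_BLOCK], which in turn
		 * points at the reserved GDT blocks.  Every other i_data
		 * slot must be empty.
		 */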
1806 		for (i = 0; i < EXT4_N_BLOCKS; i++) {
1807 			if (i == EXT4_DIND_BLOCK) {
1808 				if (ei->i_data[i])
1809 					continue;
1810 				else
1811 					goto invalid_resize_inode;
1812 			}
1813 			if (ei->i_data[i])
1814 				goto invalid_resize_inode;
1815 		}
1816 		credits += 3;	/* block bitmap, bg descriptor, resize inode */
1817 	}
1818 
1819 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1820 	if (IS_ERR(handle))
1821 		return PTR_ERR(handle);
1822 
1823 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1824 	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1825 	if (err)
1826 		goto errout;
1827 
1828 	EXT4_CLEAR_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE);
1829 	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
1830 	sbi->s_es->s_first_meta_bg =
1831 		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1832 
1833 	err = ext4_handle_dirty_super(handle, sb);
1834 	if (err) {
1835 		ext4_std_error(sb, err);
1836 		goto errout;
1837 	}
1838 
1839 	if (inode) {
1840 		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1841 		ext4_free_blocks(handle, inode, NULL, nr, 1,
1842 				 EXT4_FREE_BLOCKS_METADATA |
1843 				 EXT4_FREE_BLOCKS_FORGET);
1844 		ei->i_data[EXT4_DIND_BLOCK] = 0;
1845 		inode->i_blocks = 0;
1846 
1847 		err = ext4_mark_inode_dirty(handle, inode);
1848 		if (err)
1849 			ext4_std_error(sb, err);
1850 	}
1851 
1852 errout:
1853 	ret = ext4_journal_stop(handle);
1854 	if (!err)
1855 		err = ret;
1856 	return err;
1857 
1858 invalid_resize_inode:
1859 	ext4_error(sb, "corrupted/inconsistent resize inode");
1860 	return -EINVAL;
1861 }
1862 
1863 /*
1864  * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1865  *
1866  * @sb: super block of the fs to be resized
1867  * @n_blocks_count: the number of blocks in the resized fs
1868  */
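/*
 * Rough flow, as implemented below: any tail of the current last group is
 * filled in place first, then whole (flex) block groups are added until
 * n_blocks_count is reached.  If the resize_inode's reserved GDT blocks
 * cannot cover the whole request, the target is clamped to what they allow
 * and the remainder is retried in a second pass; a filesystem with neither
 * resize_inode nor meta_bg enabled is converted to meta_bg first.
 */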
1869 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1870 {
1871 	struct ext4_new_flex_group_data *flex_gd = NULL;
1872 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1873 	struct ext4_super_block *es = sbi->s_es;
1874 	struct buffer_head *bh;
1875 	struct inode *resize_inode = NULL;
1876 	ext4_grpblk_t add, offset;
1877 	unsigned long n_desc_blocks;
1878 	unsigned long o_desc_blocks;
1879 	ext4_group_t o_group;
1880 	ext4_group_t n_group;
1881 	ext4_fsblk_t o_blocks_count;
1882 	ext4_fsblk_t n_blocks_count_retry = 0;
1883 	unsigned long last_update_time = 0;
1884 	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
1885 	int meta_bg;
1886 
1887 	/* See if the device is actually as big as what was requested */
1888 	bh = sb_bread(sb, n_blocks_count - 1);
1889 	if (!bh) {
1890 		ext4_warning(sb, "can't read last block, resize aborted");
1891 		return -ENOSPC;
1892 	}
1893 	brelse(bh);
1894 
1895 retry:
1896 	o_blocks_count = ext4_blocks_count(es);
1897 
1898 	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
1899 		 "to %llu blocks", o_blocks_count, n_blocks_count);
1900 
1901 	if (n_blocks_count < o_blocks_count) {
1902 		/* On-line shrinking not supported */
1903 		ext4_warning(sb, "can't shrink FS - resize aborted");
1904 		return -EINVAL;
1905 	}
1906 
1907 	if (n_blocks_count == o_blocks_count)
1908 		/* Nothing to do */
1909 		return 0;
1910 
1911 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
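	/*
	 * s_inodes_count is a 32-bit field and the resized filesystem will
	 * hold roughly (n_group + 1) * EXT4_INODES_PER_GROUP(sb) inodes, so
	 * refuse sizes for which that product cannot fit.
	 */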
1912 	if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1913 		ext4_warning(sb, "resize would cause inodes_count overflow");
1914 		return -EINVAL;
1915 	}
1916 	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
1917 
1918 	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
1919 	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
1920 
1921 	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
1922 
1923 	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE)) {
1924 		if (meta_bg) {
1925 			ext4_error(sb, "resize_inode and meta_bg enabled "
1926 				   "simultaneously");
1927 			return -EINVAL;
1928 		}
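		/*
		 * The resize inode can provide at most s_reserved_gdt_blocks
		 * additional descriptor blocks.  If the request needs more,
		 * clamp it to that limit for now and stash the real target
		 * in n_blocks_count_retry so the rest can be retried later.
		 */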
1929 		if (n_desc_blocks > o_desc_blocks +
1930 		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
1931 			n_blocks_count_retry = n_blocks_count;
1932 			n_desc_blocks = o_desc_blocks +
1933 				le16_to_cpu(es->s_reserved_gdt_blocks);
1934 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
1935 			n_blocks_count = (ext4_fsblk_t)n_group * EXT4_BLOCKS_PER_GROUP(sb);
1936 			n_group--; /* set to last group number */
1937 		}
1938 
1939 		if (!resize_inode)
1940 			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
1941 		if (IS_ERR(resize_inode)) {
1942 			ext4_warning(sb, "Error opening resize inode");
1943 			return PTR_ERR(resize_inode);
1944 		}
1945 	}
1946 
1947 	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
1948 		err = ext4_convert_meta_bg(sb, resize_inode);
1949 		if (err)
1950 			goto out;
1951 		if (resize_inode) {
1952 			iput(resize_inode);
1953 			resize_inode = NULL;
1954 		}
1955 		if (n_blocks_count_retry) {
1956 			n_blocks_count = n_blocks_count_retry;
1957 			n_blocks_count_retry = 0;
1958 			goto retry;
1959 		}
1960 	}
1961 
1962 	/* extend the last group */
1963 	if (n_group == o_group)
1964 		add = n_blocks_count - o_blocks_count;
1965 	else
1966 		add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1);
1967 	if (add > 0) {
1968 		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1969 		if (err)
1970 			goto out;
1971 	}
1972 
1973 	if (ext4_blocks_count(es) == n_blocks_count)
1974 		goto out;
1975 
1976 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
1977 	if (err)
1978 		goto out;
1979 
1980 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
1981 	if (err)
1982 		goto out;
1983 
1984 	flex_gd = alloc_flex_gd(flexbg_size);
1985 	if (flex_gd == NULL) {
1986 		err = -ENOMEM;
1987 		goto out;
1988 	}
1989 
1990 	/* Add flex groups. Note that a regular group is a
1991 	 * flex group with 1 group.
1992 	 */
1993 	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
1994 					      flexbg_size)) {
1995 		if (jiffies - last_update_time > HZ * 10) {
1996 			if (last_update_time)
1997 				ext4_msg(sb, KERN_INFO,
1998 					 "resized to %llu blocks",
1999 					 ext4_blocks_count(es));
2000 			last_update_time = jiffies;
2001 		}
2002 		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2003 			break;
2004 		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2005 		if (unlikely(err))
2006 			break;
2007 	}
2008 
2009 	if (!err && n_blocks_count_retry) {
2010 		n_blocks_count = n_blocks_count_retry;
2011 		n_blocks_count_retry = 0;
2012 		free_flex_gd(flex_gd);
2013 		flex_gd = NULL;
2014 		goto retry;
2015 	}
2016 
2017 out:
2018 	if (flex_gd)
2019 		free_flex_gd(flex_gd);
2020 	if (resize_inode != NULL)
2021 		iput(resize_inode);
2022 	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", ext4_blocks_count(es));
2023 	return err;
2024 }
2025