xref: /linux/fs/ext4/resize.c (revision 1fd02f6605b855b4af2883f29a2abc88bdf17857)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/resize.c
4  *
5  * Support for resizing an ext4 filesystem while it is mounted.
6  *
7  * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8  *
9  * This could probably be made into a module, because it is not often used.
10  */
11 
12 
13 #define EXT4FS_DEBUG
14 
15 #include <linux/errno.h>
16 #include <linux/slab.h>
17 #include <linux/jiffies.h>
18 
19 #include "ext4_jbd2.h"
20 
21 struct ext4_rcu_ptr {
22 	struct rcu_head rcu;
23 	void *ptr;
24 };
25 
26 static void ext4_rcu_ptr_callback(struct rcu_head *head)
27 {
28 	struct ext4_rcu_ptr *ptr;
29 
30 	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
31 	kvfree(ptr->ptr);
32 	kfree(ptr);
33 }
34 
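/*
 * Free a kvmalloc'ed array once the current RCU readers are done with it.
 * If allocating the wrapper fails, fall back to waiting for a grace period
 * synchronously and then freeing the array directly.
 */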
35 void ext4_kvfree_array_rcu(void *to_free)
36 {
37 	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
38 
39 	if (ptr) {
40 		ptr->ptr = to_free;
41 		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
42 		return;
43 	}
44 	synchronize_rcu();
45 	kvfree(to_free);
46 }
47 
48 int ext4_resize_begin(struct super_block *sb)
49 {
50 	struct ext4_sb_info *sbi = EXT4_SB(sb);
51 	int ret = 0;
52 
53 	if (!capable(CAP_SYS_RESOURCE))
54 		return -EPERM;
55 
56 	/*
57 	 * If we are not using the primary superblock/GDT copy, don't resize,
58 	 * because the user tools have no way of handling this.  Probably a
59 	 * bad time to do it anyway.
60 	 */
61 	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
62 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
63 		ext4_warning(sb, "won't resize using backup superblock at %llu",
64 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
65 		return -EPERM;
66 	}
67 
68 	/*
69 	 * We are not allowed to do online-resizing on a filesystem mounted
70 	 * with errors, because it can destroy the filesystem easily.
71 	 */
72 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
73 		ext4_warning(sb, "There are errors in the filesystem, "
74 			     "so online resizing is not allowed");
75 		return -EPERM;
76 	}
77 
78 	if (ext4_has_feature_sparse_super2(sb)) {
79 		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
80 		return -EOPNOTSUPP;
81 	}
82 
83 	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
84 				  &EXT4_SB(sb)->s_ext4_flags))
85 		ret = -EBUSY;
86 
87 	return ret;
88 }
89 
90 void ext4_resize_end(struct super_block *sb)
91 {
92 	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
93 	smp_mb__after_atomic();
94 }
95 
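/*
 * Return the first group of the meta block group containing @group, i.e.
 * @group rounded down to a group-descriptor-block boundary.
 */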
96 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
97 					     ext4_group_t group) {
98 	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
99 	       EXT4_DESC_PER_BLOCK_BITS(sb);
100 }
101 
102 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
103 					     ext4_group_t group) {
104 	group = ext4_meta_bg_first_group(sb, group);
105 	return ext4_group_first_block_no(sb, group);
106 }
107 
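/*
 * Return the number of blocks at the start of @group occupied by the backup
 * superblock, group descriptor blocks and reserved GDT blocks, if any.
 */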
108 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
109 						ext4_group_t group) {
110 	ext4_grpblk_t overhead;
111 	overhead = ext4_bg_num_gdb(sb, group);
112 	if (ext4_bg_has_super(sb, group))
113 		overhead += 1 +
114 			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
115 	return overhead;
116 }
117 
118 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
119 #define inside(b, first, last)	((b) >= (first) && (b) < (last))
120 
121 static int verify_group_input(struct super_block *sb,
122 			      struct ext4_new_group_data *input)
123 {
124 	struct ext4_sb_info *sbi = EXT4_SB(sb);
125 	struct ext4_super_block *es = sbi->s_es;
126 	ext4_fsblk_t start = ext4_blocks_count(es);
127 	ext4_fsblk_t end = start + input->blocks_count;
128 	ext4_group_t group = input->group;
129 	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
130 	unsigned overhead;
131 	ext4_fsblk_t metaend;
132 	struct buffer_head *bh = NULL;
133 	ext4_grpblk_t free_blocks_count, offset;
134 	int err = -EINVAL;
135 
136 	if (group != sbi->s_groups_count) {
137 		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
138 			     input->group, sbi->s_groups_count);
139 		return -EINVAL;
140 	}
141 
142 	overhead = ext4_group_overhead_blocks(sb, group);
143 	metaend = start + overhead;
144 	input->free_clusters_count = free_blocks_count =
145 		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
146 
147 	if (test_opt(sb, DEBUG))
148 		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
149 		       "(%d free, %u reserved)\n",
150 		       ext4_bg_has_super(sb, input->group) ? "normal" :
151 		       "no-super", input->group, input->blocks_count,
152 		       free_blocks_count, input->reserved_blocks);
153 
154 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
155 	if (offset != 0)
156 		ext4_warning(sb, "Last group not full");
157 	else if (input->reserved_blocks > input->blocks_count / 5)
158 		ext4_warning(sb, "Reserved blocks too high (%u)",
159 			     input->reserved_blocks);
160 	else if (free_blocks_count < 0)
161 		ext4_warning(sb, "Bad blocks count %u",
162 			     input->blocks_count);
163 	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
164 		err = PTR_ERR(bh);
165 		bh = NULL;
166 		ext4_warning(sb, "Cannot read last block (%llu)",
167 			     end - 1);
168 	} else if (outside(input->block_bitmap, start, end))
169 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
170 			     (unsigned long long)input->block_bitmap);
171 	else if (outside(input->inode_bitmap, start, end))
172 		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
173 			     (unsigned long long)input->inode_bitmap);
174 	else if (outside(input->inode_table, start, end) ||
175 		 outside(itend - 1, start, end))
176 		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
177 			     (unsigned long long)input->inode_table, itend - 1);
178 	else if (input->inode_bitmap == input->block_bitmap)
179 		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
180 			     (unsigned long long)input->block_bitmap);
181 	else if (inside(input->block_bitmap, input->inode_table, itend))
182 		ext4_warning(sb, "Block bitmap (%llu) in inode table "
183 			     "(%llu-%llu)",
184 			     (unsigned long long)input->block_bitmap,
185 			     (unsigned long long)input->inode_table, itend - 1);
186 	else if (inside(input->inode_bitmap, input->inode_table, itend))
187 		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
188 			     "(%llu-%llu)",
189 			     (unsigned long long)input->inode_bitmap,
190 			     (unsigned long long)input->inode_table, itend - 1);
191 	else if (inside(input->block_bitmap, start, metaend))
192 		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
193 			     (unsigned long long)input->block_bitmap,
194 			     start, metaend - 1);
195 	else if (inside(input->inode_bitmap, start, metaend))
196 		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
197 			     (unsigned long long)input->inode_bitmap,
198 			     start, metaend - 1);
199 	else if (inside(input->inode_table, start, metaend) ||
200 		 inside(itend - 1, start, metaend))
201 		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
202 			     "(%llu-%llu)",
203 			     (unsigned long long)input->inode_table,
204 			     itend - 1, start, metaend - 1);
205 	else
206 		err = 0;
207 	brelse(bh);
208 
209 	return err;
210 }
211 
212 /*
213  * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
214  * flex group at a time.
215  */
216 struct ext4_new_flex_group_data {
217 	struct ext4_new_group_data *groups;	/* new_group_data for groups
218 						   in the flex group */
219 	__u16 *bg_flags;			/* block group flags of groups
220 						   in @groups */
221 	ext4_group_t count;			/* number of groups in @groups
222 						 */
223 };
224 
225 /*
226  * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
227  * @flexbg_size.
228  *
229  * Returns NULL on failure, otherwise the address of the allocated structure.
230  */
231 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
232 {
233 	struct ext4_new_flex_group_data *flex_gd;
234 
235 	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
236 	if (flex_gd == NULL)
237 		goto out3;
238 
239 	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
240 		goto out2;
241 	flex_gd->count = flexbg_size;
242 
243 	flex_gd->groups = kmalloc_array(flexbg_size,
244 					sizeof(struct ext4_new_group_data),
245 					GFP_NOFS);
246 	if (flex_gd->groups == NULL)
247 		goto out2;
248 
249 	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
250 					  GFP_NOFS);
251 	if (flex_gd->bg_flags == NULL)
252 		goto out1;
253 
254 	return flex_gd;
255 
256 out1:
257 	kfree(flex_gd->groups);
258 out2:
259 	kfree(flex_gd);
260 out3:
261 	return NULL;
262 }
263 
264 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
265 {
266 	kfree(flex_gd->bg_flags);
267 	kfree(flex_gd->groups);
268 	kfree(flex_gd);
269 }
270 
271 /*
272  * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
273  * and inode tables for a flex group.
274  *
275  * This function is used by 64bit-resize.  Note that this function allocates
276  * group tables from the 1st group of groups contained by @flex_gd, which may
277  * be a partial flex group.
278  *
279  * @sb: super block of the fs to which the groups belong
280  *
281  * Returns 0 on a successful allocation of the metadata blocks in the
282  * block group.
283  */
284 static int ext4_alloc_group_tables(struct super_block *sb,
285 				struct ext4_new_flex_group_data *flex_gd,
286 				int flexbg_size)
287 {
288 	struct ext4_new_group_data *group_data = flex_gd->groups;
289 	ext4_fsblk_t start_blk;
290 	ext4_fsblk_t last_blk;
291 	ext4_group_t src_group;
292 	ext4_group_t bb_index = 0;
293 	ext4_group_t ib_index = 0;
294 	ext4_group_t it_index = 0;
295 	ext4_group_t group;
296 	ext4_group_t last_group;
297 	unsigned overhead;
298 	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
299 	int i;
300 
301 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
302 
303 	src_group = group_data[0].group;
304 	last_group  = src_group + flex_gd->count - 1;
305 
306 	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
307 	       (last_group & ~(flexbg_size - 1))));
308 next_group:
309 	group = group_data[0].group;
310 	if (src_group >= group_data[0].group + flex_gd->count)
311 		return -ENOSPC;
312 	start_blk = ext4_group_first_block_no(sb, src_group);
313 	last_blk = start_blk + group_data[src_group - group].blocks_count;
314 
315 	overhead = ext4_group_overhead_blocks(sb, src_group);
316 
317 	start_blk += overhead;
318 
319 	/* We collect as many contiguous blocks as possible. */
320 	src_group++;
321 	for (; src_group <= last_group; src_group++) {
322 		overhead = ext4_group_overhead_blocks(sb, src_group);
323 		if (overhead == 0)
324 			last_blk += group_data[src_group - group].blocks_count;
325 		else
326 			break;
327 	}
328 
329 	/* Allocate block bitmaps */
330 	for (; bb_index < flex_gd->count; bb_index++) {
331 		if (start_blk >= last_blk)
332 			goto next_group;
333 		group_data[bb_index].block_bitmap = start_blk++;
334 		group = ext4_get_group_number(sb, start_blk - 1);
335 		group -= group_data[0].group;
336 		group_data[group].mdata_blocks++;
337 		flex_gd->bg_flags[group] &= uninit_mask;
338 	}
339 
340 	/* Allocate inode bitmaps */
341 	for (; ib_index < flex_gd->count; ib_index++) {
342 		if (start_blk >= last_blk)
343 			goto next_group;
344 		group_data[ib_index].inode_bitmap = start_blk++;
345 		group = ext4_get_group_number(sb, start_blk - 1);
346 		group -= group_data[0].group;
347 		group_data[group].mdata_blocks++;
348 		flex_gd->bg_flags[group] &= uninit_mask;
349 	}
350 
351 	/* Allocate inode tables */
352 	for (; it_index < flex_gd->count; it_index++) {
353 		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
354 		ext4_fsblk_t next_group_start;
355 
356 		if (start_blk + itb > last_blk)
357 			goto next_group;
358 		group_data[it_index].inode_table = start_blk;
359 		group = ext4_get_group_number(sb, start_blk);
360 		next_group_start = ext4_group_first_block_no(sb, group + 1);
361 		group -= group_data[0].group;
362 
363 		if (start_blk + itb > next_group_start) {
364 			flex_gd->bg_flags[group + 1] &= uninit_mask;
365 			overhead = start_blk + itb - next_group_start;
366 			group_data[group + 1].mdata_blocks += overhead;
367 			itb -= overhead;
368 		}
369 
370 		group_data[group].mdata_blocks += itb;
371 		flex_gd->bg_flags[group] &= uninit_mask;
372 		start_blk += EXT4_SB(sb)->s_itb_per_group;
373 	}
374 
375 	/* Update free clusters count to exclude metadata blocks */
376 	for (i = 0; i < flex_gd->count; i++) {
377 		group_data[i].free_clusters_count -=
378 				EXT4_NUM_B2C(EXT4_SB(sb),
379 					     group_data[i].mdata_blocks);
380 	}
381 
382 	if (test_opt(sb, DEBUG)) {
383 		int i;
384 		group = group_data[0].group;
385 
386 		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
387 		       "%d groups, flexbg size is %d:\n", flex_gd->count,
388 		       flexbg_size);
389 
390 		for (i = 0; i < flex_gd->count; i++) {
391 			ext4_debug(
392 			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
393 			       ext4_bg_has_super(sb, group + i) ? "normal" :
394 			       "no-super", group + i,
395 			       group_data[i].blocks_count,
396 			       group_data[i].free_clusters_count,
397 			       group_data[i].mdata_blocks);
398 		}
399 	}
400 	return 0;
401 }
402 
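/*
 * Get the buffer for block @blk, get journal write access to it and zero it
 * out, ready for use as a freshly initialized bitmap block.
 */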
403 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
404 				  ext4_fsblk_t blk)
405 {
406 	struct buffer_head *bh;
407 	int err;
408 
409 	bh = sb_getblk(sb, blk);
410 	if (unlikely(!bh))
411 		return ERR_PTR(-ENOMEM);
412 	BUFFER_TRACE(bh, "get_write_access");
413 	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
414 	if (err) {
415 		brelse(bh);
416 		bh = ERR_PTR(err);
417 	} else {
418 		memset(bh->b_data, 0, sb->s_blocksize);
419 		set_buffer_uptodate(bh);
420 	}
421 
422 	return bh;
423 }
424 
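/*
 * Make sure @handle has at least @credits journal credits left, extending or
 * restarting the transaction if necessary.  A positive return value means
 * the transaction had to be restarted, a negative one means an error.
 */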
425 static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
426 {
427 	return ext4_journal_ensure_credits_fn(handle, credits,
428 		EXT4_MAX_TRANS_DATA, 0, 0);
429 }
430 
431 /*
432  * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] used.
433  *
434  * Helper for setup_new_flex_group_blocks() to mark group table blocks as in use.
435  *
436  * @sb: super block
437  * @handle: journal handle
438  * @flex_gd: flex group data
439  */
440 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
441 			struct ext4_new_flex_group_data *flex_gd,
442 			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
443 {
444 	struct ext4_sb_info *sbi = EXT4_SB(sb);
445 	ext4_group_t count = last_cluster - first_cluster + 1;
446 	ext4_group_t count2;
447 
448 	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
449 		   last_cluster);
450 	for (count2 = count; count > 0;
451 	     count -= count2, first_cluster += count2) {
452 		ext4_fsblk_t start;
453 		struct buffer_head *bh;
454 		ext4_group_t group;
455 		int err;
456 
457 		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
458 		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
459 		group -= flex_gd->groups[0].group;
460 
461 		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
462 		if (count2 > count)
463 			count2 = count;
464 
465 		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
466 			BUG_ON(flex_gd->count > 1);
467 			continue;
468 		}
469 
470 		err = ext4_resize_ensure_credits_batch(handle, 1);
471 		if (err < 0)
472 			return err;
473 
474 		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
475 		if (unlikely(!bh))
476 			return -ENOMEM;
477 
478 		BUFFER_TRACE(bh, "get_write_access");
479 		err = ext4_journal_get_write_access(handle, sb, bh,
480 						    EXT4_JTR_NONE);
481 		if (err) {
482 			brelse(bh);
483 			return err;
484 		}
485 		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
486 			   first_cluster, first_cluster - start, count2);
487 		mb_set_bits(bh->b_data, first_cluster - start, count2);
488 
489 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
490 		brelse(bh);
491 		if (unlikely(err))
492 			return err;
493 	}
494 
495 	return 0;
496 }
497 
498 /*
499  * Set up the block and inode bitmaps, and the inode table for the new groups.
500  * This doesn't need to be part of the main transaction, since we are only
501  * changing blocks outside the actual filesystem.  We still do journaling to
502  * ensure the recovery is correct in case of a failure just after resize.
503  * If any part of this fails, we simply abort the resize.
504  *
505  * setup_new_flex_group_blocks handles a flex group as follows:
506  *  1. copy super block and GDT, and initialize group tables if necessary.
507  *     In this step, we only set bits in blocks bitmaps for blocks taken by
508  *     super block and GDT.
509  *  2. allocate group tables in block bitmaps, that is, set bits in block
510  *     bitmap for blocks taken by group tables.
511  */
512 static int setup_new_flex_group_blocks(struct super_block *sb,
513 				struct ext4_new_flex_group_data *flex_gd)
514 {
515 	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
516 	ext4_fsblk_t start;
517 	ext4_fsblk_t block;
518 	struct ext4_sb_info *sbi = EXT4_SB(sb);
519 	struct ext4_super_block *es = sbi->s_es;
520 	struct ext4_new_group_data *group_data = flex_gd->groups;
521 	__u16 *bg_flags = flex_gd->bg_flags;
522 	handle_t *handle;
523 	ext4_group_t group, count;
524 	struct buffer_head *bh = NULL;
525 	int reserved_gdb, i, j, err = 0, err2;
526 	int meta_bg;
527 
528 	BUG_ON(!flex_gd->count || !group_data ||
529 	       group_data[0].group != sbi->s_groups_count);
530 
531 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
532 	meta_bg = ext4_has_feature_meta_bg(sb);
533 
534 	/* This transaction may be extended/restarted along the way */
535 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
536 	if (IS_ERR(handle))
537 		return PTR_ERR(handle);
538 
539 	group = group_data[0].group;
540 	for (i = 0; i < flex_gd->count; i++, group++) {
541 		unsigned long gdblocks;
542 		ext4_grpblk_t overhead;
543 
544 		gdblocks = ext4_bg_num_gdb(sb, group);
545 		start = ext4_group_first_block_no(sb, group);
546 
547 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
548 			goto handle_itb;
549 
550 		if (meta_bg == 1) {
551 			ext4_group_t first_group;
552 			first_group = ext4_meta_bg_first_group(sb, group);
553 			if (first_group != group + 1 &&
554 			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
555 				goto handle_itb;
556 		}
557 
558 		block = start + ext4_bg_has_super(sb, group);
559 		/* Copy all of the GDT blocks into the backup in this group */
560 		for (j = 0; j < gdblocks; j++, block++) {
561 			struct buffer_head *gdb;
562 
563 			ext4_debug("update backup group %#04llx\n", block);
564 			err = ext4_resize_ensure_credits_batch(handle, 1);
565 			if (err < 0)
566 				goto out;
567 
568 			gdb = sb_getblk(sb, block);
569 			if (unlikely(!gdb)) {
570 				err = -ENOMEM;
571 				goto out;
572 			}
573 
574 			BUFFER_TRACE(gdb, "get_write_access");
575 			err = ext4_journal_get_write_access(handle, sb, gdb,
576 							    EXT4_JTR_NONE);
577 			if (err) {
578 				brelse(gdb);
579 				goto out;
580 			}
581 			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
582 				s_group_desc, j)->b_data, gdb->b_size);
583 			set_buffer_uptodate(gdb);
584 
585 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
586 			if (unlikely(err)) {
587 				brelse(gdb);
588 				goto out;
589 			}
590 			brelse(gdb);
591 		}
592 
593 		/* Zero out all of the reserved backup group descriptor
594 		 * table blocks
595 		 */
596 		if (ext4_bg_has_super(sb, group)) {
597 			err = sb_issue_zeroout(sb, gdblocks + start + 1,
598 					reserved_gdb, GFP_NOFS);
599 			if (err)
600 				goto out;
601 		}
602 
603 handle_itb:
604 		/* Initialize group tables of the group @group */
605 		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
606 			goto handle_bb;
607 
608 		/* Zero out all of the inode table blocks */
609 		block = group_data[i].inode_table;
610 		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
611 			   block, sbi->s_itb_per_group);
612 		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
613 				       GFP_NOFS);
614 		if (err)
615 			goto out;
616 
617 handle_bb:
618 		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
619 			goto handle_ib;
620 
621 		/* Initialize block bitmap of the @group */
622 		block = group_data[i].block_bitmap;
623 		err = ext4_resize_ensure_credits_batch(handle, 1);
624 		if (err < 0)
625 			goto out;
626 
627 		bh = bclean(handle, sb, block);
628 		if (IS_ERR(bh)) {
629 			err = PTR_ERR(bh);
630 			goto out;
631 		}
632 		overhead = ext4_group_overhead_blocks(sb, group);
633 		if (overhead != 0) {
634 			ext4_debug("mark backup superblock %#04llx (+0)\n",
635 				   start);
636 			mb_set_bits(bh->b_data, 0,
637 				      EXT4_NUM_B2C(sbi, overhead));
638 		}
639 		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
640 				     sb->s_blocksize * 8, bh->b_data);
641 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
642 		brelse(bh);
643 		if (err)
644 			goto out;
645 
646 handle_ib:
647 		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
648 			continue;
649 
650 		/* Initialize inode bitmap of the @group */
651 		block = group_data[i].inode_bitmap;
652 		err = ext4_resize_ensure_credits_batch(handle, 1);
653 		if (err < 0)
654 			goto out;
655 		/* Mark unused entries in inode bitmap used */
656 		bh = bclean(handle, sb, block);
657 		if (IS_ERR(bh)) {
658 			err = PTR_ERR(bh);
659 			goto out;
660 		}
661 
662 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
663 				     sb->s_blocksize * 8, bh->b_data);
664 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
665 		brelse(bh);
666 		if (err)
667 			goto out;
668 	}
669 
670 	/* Mark group tables in block bitmap */
671 	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
672 		count = group_table_count[j];
673 		start = (&group_data[0].block_bitmap)[j];
674 		block = start;
675 		for (i = 1; i < flex_gd->count; i++) {
676 			block += group_table_count[j];
677 			if (block == (&group_data[i].block_bitmap)[j]) {
678 				count += group_table_count[j];
679 				continue;
680 			}
681 			err = set_flexbg_block_bitmap(sb, handle,
682 						      flex_gd,
683 						      EXT4_B2C(sbi, start),
684 						      EXT4_B2C(sbi,
685 							       start + count
686 							       - 1));
687 			if (err)
688 				goto out;
689 			count = group_table_count[j];
690 			start = (&group_data[i].block_bitmap)[j];
691 			block = start;
692 		}
693 
694 		if (count) {
695 			err = set_flexbg_block_bitmap(sb, handle,
696 						      flex_gd,
697 						      EXT4_B2C(sbi, start),
698 						      EXT4_B2C(sbi,
699 							       start + count
700 							       - 1));
701 			if (err)
702 				goto out;
703 		}
704 	}
705 
706 out:
707 	err2 = ext4_journal_stop(handle);
708 	if (err2 && !err)
709 		err = err2;
710 
711 	return err;
712 }
713 
714 /*
715  * Iterate through the groups which hold BACKUP superblock/GDT copies in an
716  * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
717  * calling this for the first time.  In a sparse filesystem it will be the
718  * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
719  * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
720  */
721 unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
722 			       unsigned int *five, unsigned int *seven)
723 {
724 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
725 	unsigned int *min = three;
726 	int mult = 3;
727 	unsigned int ret;
728 
729 	if (ext4_has_feature_sparse_super2(sb)) {
730 		do {
731 			if (*min > 2)
732 				return UINT_MAX;
733 			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
734 			*min += 1;
735 		} while (!ret);
736 		return ret;
737 	}
738 
739 	if (!ext4_has_feature_sparse_super(sb)) {
740 		ret = *min;
741 		*min += 1;
742 		return ret;
743 	}
744 
745 	if (*five < *min) {
746 		min = five;
747 		mult = 5;
748 	}
749 	if (*seven < *min) {
750 		min = seven;
751 		mult = 7;
752 	}
753 
754 	ret = *min;
755 	*min *= mult;
756 
757 	return ret;
758 }
759 
760 /*
761  * Check that all of the backup GDT blocks are held in the primary GDT block.
762  * It is assumed that they are stored in group order.  Returns the number of
763  * groups in the current filesystem that have BACKUPS, or a negative error code.
764  */
765 static int verify_reserved_gdb(struct super_block *sb,
766 			       ext4_group_t end,
767 			       struct buffer_head *primary)
768 {
769 	const ext4_fsblk_t blk = primary->b_blocknr;
770 	unsigned three = 1;
771 	unsigned five = 5;
772 	unsigned seven = 7;
773 	unsigned grp;
774 	__le32 *p = (__le32 *)primary->b_data;
775 	int gdbackups = 0;
776 
777 	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
778 		if (le32_to_cpu(*p++) !=
779 		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
780 			ext4_warning(sb, "reserved GDT %llu"
781 				     " missing grp %d (%llu)",
782 				     blk, grp,
783 				     grp *
784 				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
785 				     blk);
786 			return -EINVAL;
787 		}
788 		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
789 			return -EFBIG;
790 	}
791 
792 	return gdbackups;
793 }
794 
795 /*
796  * Called when we need to bring a reserved group descriptor table block into
797  * use from the resize inode.  The primary copy of the new GDT block currently
798  * is an indirect block (under the double indirect block in the resize inode).
799  * The new backup GDT blocks will be stored as leaf blocks in this indirect
800  * block, in group order.  Even though we know all the block numbers we need,
801  * we check to ensure that the resize inode has actually reserved these blocks.
802  *
803  * Don't need to update the block bitmaps because the blocks are still in use.
804  *
805  * We get all of the error cases out of the way, so that we are sure to not
806  * fail once we start modifying the data on disk, because JBD has no rollback.
807  */
808 static int add_new_gdb(handle_t *handle, struct inode *inode,
809 		       ext4_group_t group)
810 {
811 	struct super_block *sb = inode->i_sb;
812 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
813 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
814 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
815 	struct buffer_head **o_group_desc, **n_group_desc = NULL;
816 	struct buffer_head *dind = NULL;
817 	struct buffer_head *gdb_bh = NULL;
818 	int gdbackups;
819 	struct ext4_iloc iloc = { .bh = NULL };
820 	__le32 *data;
821 	int err;
822 
823 	if (test_opt(sb, DEBUG))
824 		printk(KERN_DEBUG
825 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
826 		       gdb_num);
827 
828 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
829 	if (IS_ERR(gdb_bh))
830 		return PTR_ERR(gdb_bh);
831 
832 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
833 	if (gdbackups < 0) {
834 		err = gdbackups;
835 		goto errout;
836 	}
837 
838 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
839 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
840 	if (IS_ERR(dind)) {
841 		err = PTR_ERR(dind);
842 		dind = NULL;
843 		goto errout;
844 	}
845 
846 	data = (__le32 *)dind->b_data;
847 	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
848 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
849 			     group, gdblock);
850 		err = -EINVAL;
851 		goto errout;
852 	}
853 
854 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
855 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
856 					    EXT4_JTR_NONE);
857 	if (unlikely(err))
858 		goto errout;
859 
860 	BUFFER_TRACE(gdb_bh, "get_write_access");
861 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
862 	if (unlikely(err))
863 		goto errout;
864 
865 	BUFFER_TRACE(dind, "get_write_access");
866 	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
867 	if (unlikely(err)) {
868 		ext4_std_error(sb, err);
869 		goto errout;
870 	}
871 
872 	/* ext4_reserve_inode_write() gets a reference on the iloc */
873 	err = ext4_reserve_inode_write(handle, inode, &iloc);
874 	if (unlikely(err))
875 		goto errout;
876 
877 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
878 				GFP_KERNEL);
879 	if (!n_group_desc) {
880 		err = -ENOMEM;
881 		ext4_warning(sb, "not enough memory for %lu groups",
882 			     gdb_num + 1);
883 		goto errout;
884 	}
885 
886 	/*
887 	 * Finally, we have all of the possible failures behind us...
888 	 *
889 	 * Remove new GDT block from inode double-indirect block and clear out
890 	 * the new GDT block for use (which also "frees" the backup GDT blocks
891 	 * from the reserved inode).  We don't need to change the bitmaps for
892 	 * these blocks, because they are marked as in-use from being in the
893 	 * reserved inode, and will become GDT blocks (primary and backup).
894 	 */
895 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
896 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
897 	if (unlikely(err)) {
898 		ext4_std_error(sb, err);
899 		goto errout;
900 	}
901 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
902 			   (9 - EXT4_SB(sb)->s_cluster_bits);
903 	ext4_mark_iloc_dirty(handle, inode, &iloc);
904 	memset(gdb_bh->b_data, 0, sb->s_blocksize);
905 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
906 	if (unlikely(err)) {
907 		ext4_std_error(sb, err);
908 		iloc.bh = NULL;
909 		goto errout;
910 	}
911 	brelse(dind);
912 
913 	rcu_read_lock();
914 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
915 	memcpy(n_group_desc, o_group_desc,
916 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
917 	rcu_read_unlock();
918 	n_group_desc[gdb_num] = gdb_bh;
919 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
920 	EXT4_SB(sb)->s_gdb_count++;
921 	ext4_kvfree_array_rcu(o_group_desc);
922 
923 	lock_buffer(EXT4_SB(sb)->s_sbh);
924 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
925 	ext4_superblock_csum_set(sb);
926 	unlock_buffer(EXT4_SB(sb)->s_sbh);
927 	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
928 	if (err)
929 		ext4_std_error(sb, err);
930 	return err;
931 errout:
932 	kvfree(n_group_desc);
933 	brelse(iloc.bh);
934 	brelse(dind);
935 	brelse(gdb_bh);
936 
937 	ext4_debug("leaving with error %d\n", err);
938 	return err;
939 }
940 
941 /*
942  * add_new_gdb_meta_bg is the sister of add_new_gdb.
943  */
944 static int add_new_gdb_meta_bg(struct super_block *sb,
945 			       handle_t *handle, ext4_group_t group) {
946 	ext4_fsblk_t gdblock;
947 	struct buffer_head *gdb_bh;
948 	struct buffer_head **o_group_desc, **n_group_desc;
949 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
950 	int err;
951 
952 	gdblock = ext4_meta_bg_first_block_no(sb, group) +
953 		   ext4_bg_has_super(sb, group);
954 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
955 	if (IS_ERR(gdb_bh))
956 		return PTR_ERR(gdb_bh);
957 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
958 				GFP_KERNEL);
959 	if (!n_group_desc) {
960 		brelse(gdb_bh);
961 		err = -ENOMEM;
962 		ext4_warning(sb, "not enough memory for %lu groups",
963 			     gdb_num + 1);
964 		return err;
965 	}
966 
967 	rcu_read_lock();
968 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
969 	memcpy(n_group_desc, o_group_desc,
970 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
971 	rcu_read_unlock();
972 	n_group_desc[gdb_num] = gdb_bh;
973 
974 	BUFFER_TRACE(gdb_bh, "get_write_access");
975 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
976 	if (err) {
977 		kvfree(n_group_desc);
978 		brelse(gdb_bh);
979 		return err;
980 	}
981 
982 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
983 	EXT4_SB(sb)->s_gdb_count++;
984 	ext4_kvfree_array_rcu(o_group_desc);
985 	return err;
986 }
987 
988 /*
989  * Called when we are adding a new group which has a backup copy of each of
990  * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
991  * We need to add these reserved backup GDT blocks to the resize inode, so
992  * that they are kept for future resizing and not allocated to files.
993  *
994  * Each reserved backup GDT block will go into a different indirect block.
995  * The indirect blocks are actually the primary reserved GDT blocks,
996  * so we know in advance what their block numbers are.  We only get the
997  * double-indirect block to verify it is pointing to the primary reserved
998  * GDT blocks so we don't overwrite a data block by accident.  The reserved
999  * backup GDT blocks are stored in their reserved primary GDT block.
1000  */
1001 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
1002 			      ext4_group_t group)
1003 {
1004 	struct super_block *sb = inode->i_sb;
1005 	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
1006 	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
1007 	struct buffer_head **primary;
1008 	struct buffer_head *dind;
1009 	struct ext4_iloc iloc;
1010 	ext4_fsblk_t blk;
1011 	__le32 *data, *end;
1012 	int gdbackups = 0;
1013 	int res, i;
1014 	int err;
1015 
1016 	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
1017 	if (!primary)
1018 		return -ENOMEM;
1019 
1020 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
1021 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
1022 	if (IS_ERR(dind)) {
1023 		err = PTR_ERR(dind);
1024 		dind = NULL;
1025 		goto exit_free;
1026 	}
1027 
1028 	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
1029 	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
1030 					 EXT4_ADDR_PER_BLOCK(sb));
1031 	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
1032 
1033 	/* Get each reserved primary GDT block and verify it holds backups */
1034 	for (res = 0; res < reserved_gdb; res++, blk++) {
1035 		if (le32_to_cpu(*data) != blk) {
1036 			ext4_warning(sb, "reserved block %llu"
1037 				     " not at offset %ld",
1038 				     blk,
1039 				     (long)(data - (__le32 *)dind->b_data));
1040 			err = -EINVAL;
1041 			goto exit_bh;
1042 		}
1043 		primary[res] = ext4_sb_bread(sb, blk, 0);
1044 		if (IS_ERR(primary[res])) {
1045 			err = PTR_ERR(primary[res]);
1046 			primary[res] = NULL;
1047 			goto exit_bh;
1048 		}
1049 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1050 		if (gdbackups < 0) {
1051 			brelse(primary[res]);
1052 			err = gdbackups;
1053 			goto exit_bh;
1054 		}
1055 		if (++data >= end)
1056 			data = (__le32 *)dind->b_data;
1057 	}
1058 
1059 	for (i = 0; i < reserved_gdb; i++) {
1060 		BUFFER_TRACE(primary[i], "get_write_access");
1061 		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
1062 							 EXT4_JTR_NONE)))
1063 			goto exit_bh;
1064 	}
1065 
1066 	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1067 		goto exit_bh;
1068 
1069 	/*
1070 	 * Finally we can add each of the reserved backup GDT blocks from
1071 	 * the new group to its reserved primary GDT block.
1072 	 */
1073 	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1074 	for (i = 0; i < reserved_gdb; i++) {
1075 		int err2;
1076 		data = (__le32 *)primary[i]->b_data;
1077 		/* printk("reserving backup %lu[%u] = %lu\n",
1078 		       primary[i]->b_blocknr, gdbackups,
1079 		       blk + primary[i]->b_blocknr); */
1080 		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1081 		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1082 		if (!err)
1083 			err = err2;
1084 	}
1085 
1086 	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1087 	ext4_mark_iloc_dirty(handle, inode, &iloc);
1088 
1089 exit_bh:
1090 	while (--res >= 0)
1091 		brelse(primary[res]);
1092 	brelse(dind);
1093 
1094 exit_free:
1095 	kfree(primary);
1096 
1097 	return err;
1098 }
1099 
1100 /*
1101  * Update the backup copies of the ext4 metadata.  These don't need to be part
1102  * of the main resize transaction, because e2fsck will re-write them if there
1103  * is a problem (basically only OOM will cause a problem).  However, we
1104  * _should_ update the backups if possible, in case the primary gets trashed
1105  * for some reason and we need to run e2fsck from a backup superblock.  The
1106  * important part is that the new block and inode counts are in the backup
1107  * superblocks, and the location of the new group metadata in the GDT backups.
1108  *
1109  * We do not need to take the s_resize_lock for this, because these
1110  * blocks are not otherwise touched by the filesystem code when it is
1111  * mounted.  We don't need to worry about last changing from
1112  * sbi->s_groups_count, because the worst that can happen is that we
1113  * do not copy the full number of backups at this time.  The resize
1114  * which changed s_groups_count will update the backups again.
1115  */
1116 static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1117 			   int size, int meta_bg)
1118 {
1119 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1120 	ext4_group_t last;
1121 	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1122 	unsigned three = 1;
1123 	unsigned five = 5;
1124 	unsigned seven = 7;
1125 	ext4_group_t group = 0;
1126 	int rest = sb->s_blocksize - size;
1127 	handle_t *handle;
1128 	int err = 0, err2;
1129 
1130 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1131 	if (IS_ERR(handle)) {
1132 		group = 1;
1133 		err = PTR_ERR(handle);
1134 		goto exit_err;
1135 	}
1136 
1137 	if (meta_bg == 0) {
1138 		group = ext4_list_backups(sb, &three, &five, &seven);
1139 		last = sbi->s_groups_count;
1140 	} else {
1141 		group = ext4_get_group_number(sb, blk_off) + 1;
1142 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1143 	}
1144 
1145 	while (group < sbi->s_groups_count) {
1146 		struct buffer_head *bh;
1147 		ext4_fsblk_t backup_block;
1148 
1149 		/* Out of journal space, and can't get more - abort - so sad */
1150 		err = ext4_resize_ensure_credits_batch(handle, 1);
1151 		if (err < 0)
1152 			break;
1153 
1154 		if (meta_bg == 0)
1155 			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1156 		else
1157 			backup_block = (ext4_group_first_block_no(sb, group) +
1158 					ext4_bg_has_super(sb, group));
1159 
1160 		bh = sb_getblk(sb, backup_block);
1161 		if (unlikely(!bh)) {
1162 			err = -ENOMEM;
1163 			break;
1164 		}
1165 		ext4_debug("update metadata backup %llu(+%llu)\n",
1166 			   backup_block, backup_block -
1167 			   ext4_group_first_block_no(sb, group));
1168 		BUFFER_TRACE(bh, "get_write_access");
1169 		if ((err = ext4_journal_get_write_access(handle, sb, bh,
1170 							 EXT4_JTR_NONE)))
1171 			break;
1172 		lock_buffer(bh);
1173 		memcpy(bh->b_data, data, size);
1174 		if (rest)
1175 			memset(bh->b_data + size, 0, rest);
1176 		set_buffer_uptodate(bh);
1177 		unlock_buffer(bh);
1178 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
1179 		if (unlikely(err))
1180 			ext4_std_error(sb, err);
1181 		brelse(bh);
1182 
1183 		if (meta_bg == 0)
1184 			group = ext4_list_backups(sb, &three, &five, &seven);
1185 		else if (group == last)
1186 			break;
1187 		else
1188 			group = last;
1189 	}
1190 	if ((err2 = ext4_journal_stop(handle)) && !err)
1191 		err = err2;
1192 
1193 	/*
1194 	 * Ugh! Need to have e2fsck write the backup copies.  It is too
1195 	 * late to revert the resize, and we shouldn't fail just because of
1196 	 * the backup copies (they are only needed in case of corruption).
1197 	 *
1198 	 * However, if we got here we have a journal problem too, so we
1199 	 * can't really start a transaction to mark the superblock.
1200 	 * Chicken out and just set the flag in the hope that it will be written
1201 	 * to disk, and if not - we will simply wait until the next fsck.
1202 	 */
1203 exit_err:
1204 	if (err) {
1205 		ext4_warning(sb, "can't update backup for group %u (err %d), "
1206 			     "forcing fsck on next reboot", group, err);
1207 		sbi->s_mount_state &= ~EXT4_VALID_FS;
1208 		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1209 		mark_buffer_dirty(sbi->s_sbh);
1210 	}
1211 }
1212 
1213 /*
1214  * ext4_add_new_descs() adds @count group descriptors for the groups
1215  * starting at @group
1216  *
1217  * @handle: journal handle
1218  * @sb: super block
1219  * @group: the group no. of the first group desc to be added
1220  * @resize_inode: the resize inode
1221  * @count: number of group descriptors to be added
1222  */
1223 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1224 			      ext4_group_t group, struct inode *resize_inode,
1225 			      ext4_group_t count)
1226 {
1227 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1228 	struct ext4_super_block *es = sbi->s_es;
1229 	struct buffer_head *gdb_bh;
1230 	int i, gdb_off, gdb_num, err = 0;
1231 	int meta_bg;
1232 
1233 	meta_bg = ext4_has_feature_meta_bg(sb);
1234 	for (i = 0; i < count; i++, group++) {
1235 		int reserved_gdb = ext4_bg_has_super(sb, group) ?
1236 			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1237 
1238 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1239 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1240 
1241 		/*
1242 		 * We will only either add reserved group blocks to a backup group
1243 		 * or remove reserved blocks for the first group in a new group block.
1244 		 * Doing both would mean more complex code, and sane people don't
1245 		 * use non-sparse filesystems anymore.  This is already checked above.
1246 		 */
1247 		if (gdb_off) {
1248 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1249 						     gdb_num);
1250 			BUFFER_TRACE(gdb_bh, "get_write_access");
1251 			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
1252 							    EXT4_JTR_NONE);
1253 
1254 			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1255 				err = reserve_backup_gdb(handle, resize_inode, group);
1256 		} else if (meta_bg != 0) {
1257 			err = add_new_gdb_meta_bg(sb, handle, group);
1258 		} else {
1259 			err = add_new_gdb(handle, resize_inode, group);
1260 		}
1261 		if (err)
1262 			break;
1263 	}
1264 	return err;
1265 }
1266 
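/* Read the bitmap block at @block, returning NULL on failure. */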
1267 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1268 {
1269 	struct buffer_head *bh = sb_getblk(sb, block);
1270 	if (unlikely(!bh))
1271 		return NULL;
1272 	if (!bh_uptodate_or_lock(bh)) {
1273 		if (ext4_read_bh(bh, 0, NULL) < 0) {
1274 			brelse(bh);
1275 			return NULL;
1276 		}
1277 	}
1278 
1279 	return bh;
1280 }
1281 
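/*
 * Compute and store the inode and block bitmap checksums of the new group
 * in @gdp.  This is a no-op if metadata checksums are not enabled.
 */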
1282 static int ext4_set_bitmap_checksums(struct super_block *sb,
1283 				     ext4_group_t group,
1284 				     struct ext4_group_desc *gdp,
1285 				     struct ext4_new_group_data *group_data)
1286 {
1287 	struct buffer_head *bh;
1288 
1289 	if (!ext4_has_metadata_csum(sb))
1290 		return 0;
1291 
1292 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1293 	if (!bh)
1294 		return -EIO;
1295 	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1296 				   EXT4_INODES_PER_GROUP(sb) / 8);
1297 	brelse(bh);
1298 
1299 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1300 	if (!bh)
1301 		return -EIO;
1302 	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1303 	brelse(bh);
1304 
1305 	return 0;
1306 }
1307 
1308 /*
1309  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1310  */
1311 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1312 				struct ext4_new_flex_group_data *flex_gd)
1313 {
1314 	struct ext4_new_group_data	*group_data = flex_gd->groups;
1315 	struct ext4_group_desc		*gdp;
1316 	struct ext4_sb_info		*sbi = EXT4_SB(sb);
1317 	struct buffer_head		*gdb_bh;
1318 	ext4_group_t			group;
1319 	__u16				*bg_flags = flex_gd->bg_flags;
1320 	int				i, gdb_off, gdb_num, err = 0;
1321 
1322 
1323 	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1324 		group = group_data->group;
1325 
1326 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1327 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1328 
1329 		/*
1330 		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1331 		 */
1332 		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1333 		/* Update group descriptor block for new group */
1334 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1335 						 gdb_off * EXT4_DESC_SIZE(sb));
1336 
1337 		memset(gdp, 0, EXT4_DESC_SIZE(sb));
1338 		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1339 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1340 		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1341 		if (err) {
1342 			ext4_std_error(sb, err);
1343 			break;
1344 		}
1345 
1346 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
1347 		ext4_free_group_clusters_set(sb, gdp,
1348 					     group_data->free_clusters_count);
1349 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1350 		if (ext4_has_group_desc_csum(sb))
1351 			ext4_itable_unused_set(sb, gdp,
1352 					       EXT4_INODES_PER_GROUP(sb));
1353 		gdp->bg_flags = cpu_to_le16(*bg_flags);
1354 		ext4_group_desc_csum_set(sb, group, gdp);
1355 
1356 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1357 		if (unlikely(err)) {
1358 			ext4_std_error(sb, err);
1359 			break;
1360 		}
1361 
1362 		/*
1363 		 * We can allocate memory for mb_alloc based on the new group
1364 		 * descriptor
1365 		 */
1366 		err = ext4_mb_add_groupinfo(sb, group, gdp);
1367 		if (err)
1368 			break;
1369 	}
1370 	return err;
1371 }
1372 
1373 /*
1374  * ext4_update_super() updates the super block so that the newly added
1375  * groups can be seen by the filesystem.
1376  *
1377  * @sb: super block
1378  * @flex_gd: newly added groups
1379  */
1380 static void ext4_update_super(struct super_block *sb,
1381 			     struct ext4_new_flex_group_data *flex_gd)
1382 {
1383 	ext4_fsblk_t blocks_count = 0;
1384 	ext4_fsblk_t free_blocks = 0;
1385 	ext4_fsblk_t reserved_blocks = 0;
1386 	struct ext4_new_group_data *group_data = flex_gd->groups;
1387 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1388 	struct ext4_super_block *es = sbi->s_es;
1389 	int i;
1390 
1391 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
1392 	/*
1393 	 * Make the new blocks and inodes valid next.  We do this before
1394 	 * increasing the group count so that once the group is enabled,
1395 	 * all of its blocks and inodes are already valid.
1396 	 *
1397 	 * We always allocate group-by-group, then block-by-block or
1398 	 * inode-by-inode within a group, so enabling these
1399 	 * blocks/inodes before the group is live won't actually let us
1400 	 * allocate the new space yet.
1401 	 */
1402 	for (i = 0; i < flex_gd->count; i++) {
1403 		blocks_count += group_data[i].blocks_count;
1404 		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1405 	}
1406 
1407 	reserved_blocks = ext4_r_blocks_count(es) * 100;
1408 	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1409 	reserved_blocks *= blocks_count;
1410 	do_div(reserved_blocks, 100);
1411 
1412 	lock_buffer(sbi->s_sbh);
1413 	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1414 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1415 	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1416 		     flex_gd->count);
1417 	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1418 		     flex_gd->count);
1419 
1420 	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1421 	/*
1422 	 * We need to protect s_groups_count against other CPUs seeing
1423 	 * inconsistent state in the superblock.
1424 	 *
1425 	 * The precise rules we use are:
1426 	 *
1427 	 * * Writers must perform a smp_wmb() after updating all
1428 	 *   dependent data and before modifying the groups count
1429 	 *
1430 	 * * Readers must perform an smp_rmb() after reading the groups
1431 	 *   count and before reading any dependent data.
1432 	 *
1433 	 * NB. These rules can be relaxed when checking the group count
1434 	 * while freeing data, as we can only allocate from a block
1435 	 * group after serialising against the group count, and we can
1436 	 * only then free after serialising in turn against that
1437 	 * allocation.
1438 	 */
1439 	smp_wmb();
1440 
1441 	/* Update the global fs size fields */
1442 	sbi->s_groups_count += flex_gd->count;
1443 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1444 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1445 
1446 	/* Update the reserved block counts only once the new group is
1447 	 * active. */
1448 	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1449 				reserved_blocks);
1450 	ext4_superblock_csum_set(sb);
1451 	unlock_buffer(sbi->s_sbh);
1452 
1453 	/* Update the free space counts */
1454 	percpu_counter_add(&sbi->s_freeclusters_counter,
1455 			   EXT4_NUM_B2C(sbi, free_blocks));
1456 	percpu_counter_add(&sbi->s_freeinodes_counter,
1457 			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1458 
1459 	ext4_debug("free blocks count %llu",
1460 		   percpu_counter_read(&sbi->s_freeclusters_counter));
1461 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1462 		ext4_group_t flex_group;
1463 		struct flex_groups *fg;
1464 
1465 		flex_group = ext4_flex_group(sbi, group_data[0].group);
1466 		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1467 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1468 			     &fg->free_clusters);
1469 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1470 			   &fg->free_inodes);
1471 	}
1472 
1473 	/*
1474 	 * Update the fs overhead information
1475 	 */
1476 	ext4_calculate_overhead(sb);
1477 
1478 	if (test_opt(sb, DEBUG))
1479 		printk(KERN_DEBUG "EXT4-fs: added %u groups: "
1480 		       "%llu blocks (%llu free, %llu reserved)\n", flex_gd->count,
1481 		       blocks_count, free_blocks, reserved_blocks);
1482 }
1483 
1484 /* Add a flex group to an fs. Ensure we handle all possible error conditions
1485  * _before_ we start modifying the filesystem, because we cannot abort the
1486  * transaction and not have it write the data to disk.
1487  */
1488 static int ext4_flex_group_add(struct super_block *sb,
1489 			       struct inode *resize_inode,
1490 			       struct ext4_new_flex_group_data *flex_gd)
1491 {
1492 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1493 	struct ext4_super_block *es = sbi->s_es;
1494 	ext4_fsblk_t o_blocks_count;
1495 	ext4_grpblk_t last;
1496 	ext4_group_t group;
1497 	handle_t *handle;
1498 	unsigned reserved_gdb;
1499 	int err = 0, err2 = 0, credit;
1500 
1501 	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1502 
1503 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1504 	o_blocks_count = ext4_blocks_count(es);
1505 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1506 	BUG_ON(last);
1507 
1508 	err = setup_new_flex_group_blocks(sb, flex_gd);
1509 	if (err)
1510 		goto exit;
1511 	/*
1512 	 * We will always be modifying at least the superblock and GDT
1513 	 * blocks.  If we are adding a group past the last current GDT block,
1514 	 * we will also modify the inode and the dindirect block.  If we
1515 	 * are adding a group with superblock/GDT backups, we will also
1516 	 * modify each of the reserved GDT dindirect blocks.
1517 	 */
1518 	credit = 3;	/* sb, resize inode, resize inode dindirect */
1519 	/* GDT blocks */
1520 	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1521 	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
1522 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1523 	if (IS_ERR(handle)) {
1524 		err = PTR_ERR(handle);
1525 		goto exit;
1526 	}
1527 
1528 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1529 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1530 					    EXT4_JTR_NONE);
1531 	if (err)
1532 		goto exit_journal;
1533 
1534 	group = flex_gd->groups[0].group;
1535 	BUG_ON(group != sbi->s_groups_count);
1536 	err = ext4_add_new_descs(handle, sb, group,
1537 				resize_inode, flex_gd->count);
1538 	if (err)
1539 		goto exit_journal;
1540 
1541 	err = ext4_setup_new_descs(handle, sb, flex_gd);
1542 	if (err)
1543 		goto exit_journal;
1544 
1545 	ext4_update_super(sb, flex_gd);
1546 
1547 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1548 
1549 exit_journal:
1550 	err2 = ext4_journal_stop(handle);
1551 	if (!err)
1552 		err = err2;
1553 
1554 	if (!err) {
1555 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1556 		int gdb_num_end = ((group + flex_gd->count - 1) /
1557 				   EXT4_DESC_PER_BLOCK(sb));
1558 		int meta_bg = ext4_has_feature_meta_bg(sb);
1559 		sector_t old_gdb = 0;
1560 
1561 		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
1562 			       sizeof(struct ext4_super_block), 0);
1563 		for (; gdb_num <= gdb_num_end; gdb_num++) {
1564 			struct buffer_head *gdb_bh;
1565 
1566 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1567 						     gdb_num);
1568 			if (old_gdb == gdb_bh->b_blocknr)
1569 				continue;
1570 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
1571 				       gdb_bh->b_size, meta_bg);
1572 			old_gdb = gdb_bh->b_blocknr;
1573 		}
1574 	}
1575 exit:
1576 	return err;
1577 }
1578 
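/*
 * ext4_setup_next_flex_gd() fills @flex_gd with the next batch of groups to
 * be added, starting right after the current end of the filesystem and
 * stopping at the end of the flex group or at @n_blocks_count, whichever
 * comes first.  Returns 0 if the filesystem already has @n_blocks_count
 * blocks and there is nothing to do, 1 otherwise.
 */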
1579 static int ext4_setup_next_flex_gd(struct super_block *sb,
1580 				    struct ext4_new_flex_group_data *flex_gd,
1581 				    ext4_fsblk_t n_blocks_count,
1582 				    unsigned long flexbg_size)
1583 {
1584 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1585 	struct ext4_super_block *es = sbi->s_es;
1586 	struct ext4_new_group_data *group_data = flex_gd->groups;
1587 	ext4_fsblk_t o_blocks_count;
1588 	ext4_group_t n_group;
1589 	ext4_group_t group;
1590 	ext4_group_t last_group;
1591 	ext4_grpblk_t last;
1592 	ext4_grpblk_t clusters_per_group;
1593 	unsigned long i;
1594 
1595 	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1596 
1597 	o_blocks_count = ext4_blocks_count(es);
1598 
1599 	if (o_blocks_count == n_blocks_count)
1600 		return 0;
1601 
1602 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1603 	BUG_ON(last);
1604 	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1605 
1606 	last_group = group | (flexbg_size - 1);
1607 	if (last_group > n_group)
1608 		last_group = n_group;
1609 
1610 	flex_gd->count = last_group - group + 1;
1611 
1612 	for (i = 0; i < flex_gd->count; i++) {
1613 		int overhead;
1614 
1615 		group_data[i].group = group + i;
1616 		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1617 		overhead = ext4_group_overhead_blocks(sb, group + i);
1618 		group_data[i].mdata_blocks = overhead;
1619 		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1620 		if (ext4_has_group_desc_csum(sb)) {
1621 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1622 					       EXT4_BG_INODE_UNINIT;
1623 			if (!test_opt(sb, INIT_INODE_TABLE))
1624 				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1625 		} else
1626 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1627 	}
1628 
1629 	if (last_group == n_group && ext4_has_group_desc_csum(sb))
1630 		/* We need to initialize block bitmap of last group. */
1631 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1632 
1633 	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1634 		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1635 		group_data[i - 1].free_clusters_count -= clusters_per_group -
1636 						       last - 1;
1637 	}
1638 
1639 	return 1;
1640 }
1641 
1642 /* Add group descriptor data to an existing or new group descriptor block.
1643  * Ensure we handle all possible error conditions _before_ we start modifying
1644  * the filesystem, because we cannot abort the transaction and not have it
1645  * write the data to disk.
1646  *
1647  * If we are on a GDT block boundary, we need to get the reserved GDT block.
1648  * Otherwise, we may need to add backup GDT blocks for a sparse group.
1649  *
1650  * We only need to hold the superblock lock while we are actually adding
1651  * in the new group's counts to the superblock.  Prior to that we have
1652  * not really "added" the group at all.  We re-check that we are still
1653  * adding in the last group in case things have changed since verifying.
1654  */
1655 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1656 {
1657 	struct ext4_new_flex_group_data flex_gd;
1658 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1659 	struct ext4_super_block *es = sbi->s_es;
1660 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1661 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1662 	struct inode *inode = NULL;
1663 	int gdb_off;
1664 	int err;
1665 	__u16 bg_flags = 0;
1666 
1667 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
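	/*
	 * Illustrative example (hypothetical numbers): with 128 descriptors
	 * per block, adding group 256 gives gdb_off = 0, i.e. the new group
	 * would start a new GDT block, which the checks below only allow on a
	 * sparse_super filesystem with reserved GDT blocks (via the resize
	 * inode).
	 */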
1668 
1669 	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1670 		ext4_warning(sb, "Can't resize non-sparse filesystem further");
1671 		return -EPERM;
1672 	}
1673 
1674 	if (ext4_blocks_count(es) + input->blocks_count <
1675 	    ext4_blocks_count(es)) {
1676 		ext4_warning(sb, "blocks_count overflow");
1677 		return -EINVAL;
1678 	}
1679 
1680 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1681 	    le32_to_cpu(es->s_inodes_count)) {
1682 		ext4_warning(sb, "inodes_count overflow");
1683 		return -EINVAL;
1684 	}
1685 
1686 	if (reserved_gdb || gdb_off == 0) {
1687 		if (!ext4_has_feature_resize_inode(sb) ||
1688 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1689 			ext4_warning(sb,
1690 				     "No reserved GDT blocks, can't resize");
1691 			return -EPERM;
1692 		}
1693 		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1694 		if (IS_ERR(inode)) {
1695 			ext4_warning(sb, "Error opening resize inode");
1696 			return PTR_ERR(inode);
1697 		}
1698 	}
1699 
1700 
1701 	err = verify_group_input(sb, input);
1702 	if (err)
1703 		goto out;
1704 
1705 	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1706 	if (err)
1707 		goto out;
1708 
1709 	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1710 	if (err)
1711 		goto out;
1712 
1713 	flex_gd.count = 1;
1714 	flex_gd.groups = input;
1715 	flex_gd.bg_flags = &bg_flags;
1716 	err = ext4_flex_group_add(sb, inode, &flex_gd);
1717 out:
1718 	iput(inode);
1719 	return err;
1720 } /* ext4_group_add */
1721 
1722 /*
1723  * Extend a group without checking, assuming that checking has already been done.
1724  */
1725 static int ext4_group_extend_no_check(struct super_block *sb,
1726 				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1727 {
1728 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1729 	handle_t *handle;
1730 	int err = 0, err2;
1731 
1732 	/* We will update the superblock, one block bitmap, and
1733 	 * one group descriptor via ext4_group_add_blocks().
1734 	 */
1735 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1736 	if (IS_ERR(handle)) {
1737 		err = PTR_ERR(handle);
1738 		ext4_warning(sb, "error %d on journal start", err);
1739 		return err;
1740 	}
1741 
1742 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1743 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
1744 					    EXT4_JTR_NONE);
1745 	if (err) {
1746 		ext4_warning(sb, "error %d on journal write access", err);
1747 		goto errout;
1748 	}
1749 
1750 	lock_buffer(EXT4_SB(sb)->s_sbh);
1751 	ext4_blocks_count_set(es, o_blocks_count + add);
1752 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1753 	ext4_superblock_csum_set(sb);
1754 	unlock_buffer(EXT4_SB(sb)->s_sbh);
1755 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1756 		   o_blocks_count + add);
1757 	/* We add the blocks to the bitmap and set the group's need-init bit */
1758 	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1759 	if (err)
1760 		goto errout;
1761 	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1762 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1763 		   o_blocks_count + add);
1764 errout:
1765 	err2 = ext4_journal_stop(handle);
1766 	if (err2 && !err)
1767 		err = err2;
1768 
1769 	if (!err) {
1770 		if (test_opt(sb, DEBUG))
1771 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1772 			       "blocks\n", ext4_blocks_count(es));
1773 		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1774 			       (char *)es, sizeof(struct ext4_super_block), 0);
1775 	}
1776 	return err;
1777 }
1778 
1779 /*
1780  * Extend the filesystem to the new number of blocks specified.  This entry
1781  * point is only used to extend the current filesystem to the end of the last
1782  * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
1783  * for emergencies (because it has no dependencies on reserved blocks).
1784  *
1785  * If we _really_ wanted, we could use default values to call ext4_group_add()
1786  * to allow the "remount" trick to work for arbitrary resizing, assuming enough
1787  * GDT blocks are reserved to grow to the desired size.
1788  */
1789 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1790 		      ext4_fsblk_t n_blocks_count)
1791 {
1792 	ext4_fsblk_t o_blocks_count;
1793 	ext4_grpblk_t last;
1794 	ext4_grpblk_t add;
1795 	struct buffer_head *bh;
1796 	int err;
1797 	ext4_group_t group;
1798 
1799 	o_blocks_count = ext4_blocks_count(es);
1800 
1801 	if (test_opt(sb, DEBUG))
1802 		ext4_msg(sb, KERN_DEBUG,
1803 			 "extending last group from %llu to %llu blocks",
1804 			 o_blocks_count, n_blocks_count);
1805 
1806 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1807 		return 0;
1808 
1809 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1810 		ext4_msg(sb, KERN_ERR,
1811 			 "filesystem too large to resize to %llu blocks safely",
1812 			 n_blocks_count);
1813 		return -EINVAL;
1814 	}
1815 
1816 	if (n_blocks_count < o_blocks_count) {
1817 		ext4_warning(sb, "can't shrink FS - resize aborted");
1818 		return -EINVAL;
1819 	}
1820 
1821 	/* Handle the remaining blocks in the last group only. */
1822 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1823 
1824 	if (last == 0) {
1825 		ext4_warning(sb, "need to use ext2online to resize further");
1826 		return -EPERM;
1827 	}
1828 
1829 	add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1830 
1831 	if (o_blocks_count + add < o_blocks_count) {
1832 		ext4_warning(sb, "blocks_count overflow");
1833 		return -EINVAL;
1834 	}
1835 
1836 	if (o_blocks_count + add > n_blocks_count)
1837 		add = n_blocks_count - o_blocks_count;
1838 
1839 	if (o_blocks_count + add < n_blocks_count)
1840 		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1841 			     o_blocks_count + add, add);
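	/*
	 * Worked example (hypothetical numbers): with 32768 blocks per group
	 * and last = 100, add starts at 32668.  If the request ends inside
	 * the last group, add is clipped to n_blocks_count - o_blocks_count;
	 * if it reaches beyond the group, the warning above notes that only
	 * the current group is finished here.
	 */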
1842 
1843 	/* See if the device is actually as big as what was requested */
1844 	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1845 	if (IS_ERR(bh)) {
1846 		ext4_warning(sb, "can't read last block, resize aborted");
1847 		return -ENOSPC;
1848 	}
1849 	brelse(bh);
1850 
1851 	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1852 	return err;
1853 } /* ext4_group_extend */
1854 
1855 
1856 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1857 {
1858 	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1859 }
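/*
 * Illustrative example (hypothetical numbers): with 4KiB blocks and 32-byte
 * group descriptors, EXT4_DESC_PER_BLOCK(sb) is 128, so a filesystem with
 * 300 groups needs num_desc_blocks() = DIV_ROUND_UP(300, 128) = 3 descriptor
 * blocks; with the 64bit feature's 64-byte descriptors the per-block count
 * is halved.
 */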
1860 
1861 /*
1862  * Release the resize inode and drop the resize_inode feature if there
1863  * are no more reserved gdt blocks, and then convert the file system
1864  * to enable meta_bg
1865  */
1866 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1867 {
1868 	handle_t *handle;
1869 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1870 	struct ext4_super_block *es = sbi->s_es;
1871 	struct ext4_inode_info *ei = EXT4_I(inode);
1872 	ext4_fsblk_t nr;
1873 	int i, ret, err = 0;
1874 	int credits = 1;
1875 
1876 	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1877 	if (inode) {
1878 		if (es->s_reserved_gdt_blocks) {
1879 			ext4_error(sb, "Unexpected non-zero "
1880 				   "s_reserved_gdt_blocks");
1881 			return -EPERM;
1882 		}
1883 
1884 		/* Do a quick sanity check of the resize inode */
1885 		if (inode->i_blocks != 1 << (inode->i_blkbits -
1886 					     (9 - sbi->s_cluster_bits)))
1887 			goto invalid_resize_inode;
1888 		for (i = 0; i < EXT4_N_BLOCKS; i++) {
1889 			if (i == EXT4_DIND_BLOCK) {
1890 				if (ei->i_data[i])
1891 					continue;
1892 				else
1893 					goto invalid_resize_inode;
1894 			}
1895 			if (ei->i_data[i])
1896 				goto invalid_resize_inode;
1897 		}
1898 		credits += 3;	/* block bitmap, bg descriptor, resize inode */
1899 	}
1900 
1901 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1902 	if (IS_ERR(handle))
1903 		return PTR_ERR(handle);
1904 
1905 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1906 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1907 					    EXT4_JTR_NONE);
1908 	if (err)
1909 		goto errout;
1910 
1911 	lock_buffer(sbi->s_sbh);
1912 	ext4_clear_feature_resize_inode(sb);
1913 	ext4_set_feature_meta_bg(sb);
1914 	sbi->s_es->s_first_meta_bg =
1915 		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
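	/*
	 * Illustrative example (hypothetical numbers): with 128 descriptors
	 * per block and 200 existing groups, s_first_meta_bg becomes 2, i.e.
	 * descriptor blocks 0 and 1 keep the traditional layout while any
	 * descriptor blocks added from now on live inside their own meta
	 * block groups.
	 */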
1916 	ext4_superblock_csum_set(sb);
1917 	unlock_buffer(sbi->s_sbh);
1918 
1919 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1920 	if (err) {
1921 		ext4_std_error(sb, err);
1922 		goto errout;
1923 	}
1924 
1925 	if (inode) {
1926 		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1927 		ext4_free_blocks(handle, inode, NULL, nr, 1,
1928 				 EXT4_FREE_BLOCKS_METADATA |
1929 				 EXT4_FREE_BLOCKS_FORGET);
1930 		ei->i_data[EXT4_DIND_BLOCK] = 0;
1931 		inode->i_blocks = 0;
1932 
1933 		err = ext4_mark_inode_dirty(handle, inode);
1934 		if (err)
1935 			ext4_std_error(sb, err);
1936 	}
1937 
1938 errout:
1939 	ret = ext4_journal_stop(handle);
1940 	if (!err)
1941 		err = ret;
1942 	return err;
1943 
1944 invalid_resize_inode:
1945 	ext4_error(sb, "corrupted/inconsistent resize inode");
1946 	return -EINVAL;
1947 }
1948 
1949 /*
1950  * ext4_resize_fs() resizes a filesystem to the new size given by @n_blocks_count
1951  *
1952  * @sb: super block of the fs to be resized
1953  * @n_blocks_count: the number of blocks in the resized fs
1954  */
1955 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1956 {
1957 	struct ext4_new_flex_group_data *flex_gd = NULL;
1958 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1959 	struct ext4_super_block *es = sbi->s_es;
1960 	struct buffer_head *bh;
1961 	struct inode *resize_inode = NULL;
1962 	ext4_grpblk_t add, offset;
1963 	unsigned long n_desc_blocks;
1964 	unsigned long o_desc_blocks;
1965 	ext4_group_t o_group;
1966 	ext4_group_t n_group;
1967 	ext4_fsblk_t o_blocks_count;
1968 	ext4_fsblk_t n_blocks_count_retry = 0;
1969 	unsigned long last_update_time = 0;
1970 	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
1971 	int meta_bg;
1972 
1973 	/* See if the device is actually as big as what was requested */
1974 	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
1975 	if (IS_ERR(bh)) {
1976 		ext4_warning(sb, "can't read last block, resize aborted");
1977 		return -ENOSPC;
1978 	}
1979 	brelse(bh);
1980 
1981 retry:
1982 	o_blocks_count = ext4_blocks_count(es);
1983 
1984 	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
1985 		 "to %llu blocks", o_blocks_count, n_blocks_count);
1986 
1987 	if (n_blocks_count < o_blocks_count) {
1988 		/* On-line shrinking not supported */
1989 		ext4_warning(sb, "can't shrink FS - resize aborted");
1990 		return -EINVAL;
1991 	}
1992 
1993 	if (n_blocks_count == o_blocks_count)
1994 		/* Nothing to do */
1995 		return 0;
1996 
1997 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
1998 	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1999 		ext4_warning(sb, "resize would cause inodes_count overflow");
2000 		return -EINVAL;
2001 	}
2002 	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
2003 
2004 	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
2005 	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
2006 
2007 	meta_bg = ext4_has_feature_meta_bg(sb);
2008 
2009 	if (ext4_has_feature_resize_inode(sb)) {
2010 		if (meta_bg) {
2011 			ext4_error(sb, "resize_inode and meta_bg enabled "
2012 				   "simultaneously");
2013 			return -EINVAL;
2014 		}
2015 		if (n_desc_blocks > o_desc_blocks +
2016 		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
2017 			n_blocks_count_retry = n_blocks_count;
2018 			n_desc_blocks = o_desc_blocks +
2019 				le16_to_cpu(es->s_reserved_gdt_blocks);
2020 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2021 			n_blocks_count = (ext4_fsblk_t)n_group *
2022 				EXT4_BLOCKS_PER_GROUP(sb) +
2023 				le32_to_cpu(es->s_first_data_block);
2024 			n_group--; /* set to last group number */
2025 		}
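		/*
		 * Illustrative example (hypothetical numbers): with 128
		 * descriptors per block and the reserved GDT blocks allowing
		 * at most 4 descriptor blocks in total, growth is first
		 * capped at 4 * 128 = 512 groups; the original target is kept
		 * in n_blocks_count_retry and the resize is retried from
		 * "retry:" once this capped pass has been applied.
		 */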
2026 
2027 		if (!resize_inode)
2028 			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2029 						 EXT4_IGET_SPECIAL);
2030 		if (IS_ERR(resize_inode)) {
2031 			ext4_warning(sb, "Error opening resize inode");
2032 			return PTR_ERR(resize_inode);
2033 		}
2034 	}
2035 
2036 	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
2037 		err = ext4_convert_meta_bg(sb, resize_inode);
2038 		if (err)
2039 			goto out;
2040 		if (resize_inode) {
2041 			iput(resize_inode);
2042 			resize_inode = NULL;
2043 		}
2044 		if (n_blocks_count_retry) {
2045 			n_blocks_count = n_blocks_count_retry;
2046 			n_blocks_count_retry = 0;
2047 			goto retry;
2048 		}
2049 	}
2050 
2051 	/*
2052 	 * Make sure the last group has enough space so that it's
2053 	 * guaranteed to have enough space for all metadata blocks
2054 	 * that it might need to hold.  (We might not need to store
2055 	 * the inode table blocks in the last block group, but there
2056 	 * will be cases where this might be needed.)
2057 	 */
2058 	if ((ext4_group_first_block_no(sb, n_group) +
2059 	     ext4_group_overhead_blocks(sb, n_group) + 2 +
2060 	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2061 		n_blocks_count = ext4_group_first_block_no(sb, n_group);
2062 		n_group--;
2063 		n_blocks_count_retry = 0;
2064 		if (resize_inode) {
2065 			iput(resize_inode);
2066 			resize_inode = NULL;
2067 		}
2068 		goto retry;
2069 	}
2070 
2071 	/* extend the last group */
2072 	if (n_group == o_group)
2073 		add = n_blocks_count - o_blocks_count;
2074 	else
2075 		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2076 	if (add > 0) {
2077 		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2078 		if (err)
2079 			goto out;
2080 	}
2081 
2082 	if (ext4_blocks_count(es) == n_blocks_count)
2083 		goto out;
2084 
2085 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2086 	if (err)
2087 		goto out;
2088 
2089 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2090 	if (err)
2091 		goto out;
2092 
2093 	flex_gd = alloc_flex_gd(flexbg_size);
2094 	if (flex_gd == NULL) {
2095 		err = -ENOMEM;
2096 		goto out;
2097 	}
2098 
2099 	/* Add flex groups. Note that a regular group is a
2100 	 * flex group with 1 group.
2101 	 */
2102 	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2103 					      flexbg_size)) {
2104 		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
2105 			if (last_update_time)
2106 				ext4_msg(sb, KERN_INFO,
2107 					 "resized to %llu blocks",
2108 					 ext4_blocks_count(es));
2109 			last_update_time = jiffies;
2110 		}
2111 		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2112 			break;
2113 		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2114 		if (unlikely(err))
2115 			break;
2116 	}
2117 
2118 	if (!err && n_blocks_count_retry) {
2119 		n_blocks_count = n_blocks_count_retry;
2120 		n_blocks_count_retry = 0;
2121 		free_flex_gd(flex_gd);
2122 		flex_gd = NULL;
2123 		if (resize_inode) {
2124 			iput(resize_inode);
2125 			resize_inode = NULL;
2126 		}
2127 		goto retry;
2128 	}
2129 
2130 out:
2131 	if (flex_gd)
2132 		free_flex_gd(flex_gd);
2133 	if (resize_inode != NULL)
2134 		iput(resize_inode);
2135 	if (err)
2136 		ext4_warning(sb, "error (%d) occurred during "
2137 			     "file system resize", err);
2138 	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2139 		 ext4_blocks_count(es));
2140 	return err;
2141 }
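/*
 * Rough userspace sketch (not part of this file, illustrative only) of how
 * ext4_resize_fs() is normally reached: resize2fs, or a similar tool, issues
 * EXT4_IOC_RESIZE_FS on a file descriptor that lives on the mounted
 * filesystem, passing the desired size in filesystem blocks; the ioctl
 * handler then calls into the resize code above.  The ioctl number is
 * _IOW('f', 16, __u64); depending on the installed headers it may need to be
 * defined locally, as below.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/types.h>
 *
 *	#define EXT4_IOC_RESIZE_FS	_IOW('f', 16, __u64)
 *
 *	__u64 new_blocks = 26214400;		// e.g. 100GiB of 4KiB blocks
 *	int fd = open("/mnt/point", O_RDONLY);	// hypothetical mount point
 *	if (fd < 0 || ioctl(fd, EXT4_IOC_RESIZE_FS, &new_blocks) < 0)
 *		perror("EXT4_IOC_RESIZE_FS");
 */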
2142