xref: /linux/fs/ext4/resize.c (revision 056f8c437dc33e9e8e64b9344e816d7d46c06c16)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/resize.c
4  *
5  * Support for resizing an ext4 filesystem while it is mounted.
6  *
7  * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8  *
9  * This could probably be made into a module, because it is not often used.
10  */
11 
12 
13 #include <linux/errno.h>
14 #include <linux/slab.h>
15 #include <linux/jiffies.h>
16 
17 #include "ext4_jbd2.h"
18 
19 struct ext4_rcu_ptr {
20 	struct rcu_head rcu;
21 	void *ptr;
22 };
23 
24 static void ext4_rcu_ptr_callback(struct rcu_head *head)
25 {
26 	struct ext4_rcu_ptr *ptr;
27 
28 	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
29 	kvfree(ptr->ptr);
30 	kfree(ptr);
31 }
32 
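/*
 * Free a kvmalloc()'ed array after an RCU grace period.  If even the small
 * wrapper cannot be allocated, fall back to waiting for the grace period
 * synchronously and then freeing the array directly.
 */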
33 void ext4_kvfree_array_rcu(void *to_free)
34 {
35 	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
36 
37 	if (ptr) {
38 		ptr->ptr = to_free;
39 		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
40 		return;
41 	}
42 	synchronize_rcu();
43 	kvfree(to_free);
44 }
45 
46 int ext4_resize_begin(struct super_block *sb)
47 {
48 	struct ext4_sb_info *sbi = EXT4_SB(sb);
49 	int ret = 0;
50 
51 	if (!capable(CAP_SYS_RESOURCE))
52 		return -EPERM;
53 
54 	/*
55 	 * If the number of reserved GDT blocks is non-zero, the resize_inode feature
56 	 * should always be set.
57 	 */
58 	if (sbi->s_es->s_reserved_gdt_blocks &&
59 	    !ext4_has_feature_resize_inode(sb)) {
60 		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
61 		return -EFSCORRUPTED;
62 	}
63 
64 	/*
65 	 * If we are not using the primary superblock/GDT copy, don't resize,
66 	 * because the user tools have no way of handling this.  Probably a
67 	 * bad time to do it anyway.
68 	 */
69 	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
70 	    le32_to_cpu(sbi->s_es->s_first_data_block)) {
71 		ext4_warning(sb, "won't resize using backup superblock at %llu",
72 			(unsigned long long)sbi->s_sbh->b_blocknr);
73 		return -EPERM;
74 	}
75 
76 	/*
77 	 * We are not allowed to do online-resizing on a filesystem mounted
78 	 * with errors, because it can easily destroy the filesystem.
79 	 */
80 	if (sbi->s_mount_state & EXT4_ERROR_FS) {
81 		ext4_warning(sb, "There are errors in the filesystem, "
82 			     "so online resizing is not allowed");
83 		return -EPERM;
84 	}
85 
86 	if (ext4_has_feature_sparse_super2(sb)) {
87 		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
88 		return -EOPNOTSUPP;
89 	}
90 
91 	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
92 				  &sbi->s_ext4_flags))
93 		ret = -EBUSY;
94 
95 	return ret;
96 }
97 
98 int ext4_resize_end(struct super_block *sb, bool update_backups)
99 {
100 	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
101 	smp_mb__after_atomic();
102 	if (update_backups)
103 		return ext4_update_overhead(sb, true);
104 	return 0;
105 }
106 
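/*
 * Per-group metadata overhead, in blocks: the group descriptor blocks plus,
 * in groups that carry a superblock backup, the superblock itself and the
 * reserved GDT blocks.
 */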
107 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
108 						ext4_group_t group) {
109 	ext4_grpblk_t overhead;
110 	overhead = ext4_bg_num_gdb(sb, group);
111 	if (ext4_bg_has_super(sb, group))
112 		overhead += 1 +
113 			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
114 	return overhead;
115 }
116 
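/* Half-open range checks: "last" is exclusive in both macros below. */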
117 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
118 #define inside(b, first, last)	((b) >= (first) && (b) < (last))
119 
120 static int verify_group_input(struct super_block *sb,
121 			      struct ext4_new_group_data *input)
122 {
123 	struct ext4_sb_info *sbi = EXT4_SB(sb);
124 	struct ext4_super_block *es = sbi->s_es;
125 	ext4_fsblk_t start = ext4_blocks_count(es);
126 	ext4_fsblk_t end = start + input->blocks_count;
127 	ext4_group_t group = input->group;
128 	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
129 	unsigned overhead;
130 	ext4_fsblk_t metaend;
131 	struct buffer_head *bh = NULL;
132 	ext4_grpblk_t free_blocks_count, offset;
133 	int err = -EINVAL;
134 
135 	if (group != sbi->s_groups_count) {
136 		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
137 			     input->group, sbi->s_groups_count);
138 		return -EINVAL;
139 	}
140 
141 	overhead = ext4_group_overhead_blocks(sb, group);
142 	metaend = start + overhead;
143 	free_blocks_count = input->blocks_count - 2 - overhead -
144 			    sbi->s_itb_per_group;
145 	input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count);
146 
147 	if (test_opt(sb, DEBUG))
148 		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
149 		       "(%d free, %u reserved)\n",
150 		       ext4_bg_has_super(sb, input->group) ? "normal" :
151 		       "no-super", input->group, input->blocks_count,
152 		       free_blocks_count, input->reserved_blocks);
153 
154 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
155 	if (offset != 0)
156 		ext4_warning(sb, "Last group not full");
157 	else if (input->reserved_blocks > input->blocks_count / 5)
158 		ext4_warning(sb, "Reserved blocks too high (%u)",
159 			     input->reserved_blocks);
160 	else if (free_blocks_count < 0)
161 		ext4_warning(sb, "Bad blocks count %u",
162 			     input->blocks_count);
163 	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
164 		err = PTR_ERR(bh);
165 		bh = NULL;
166 		ext4_warning(sb, "Cannot read last block (%llu)",
167 			     end - 1);
168 	} else if (outside(input->block_bitmap, start, end))
169 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
170 			     (unsigned long long)input->block_bitmap);
171 	else if (outside(input->inode_bitmap, start, end))
172 		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
173 			     (unsigned long long)input->inode_bitmap);
174 	else if (outside(input->inode_table, start, end) ||
175 		 outside(itend - 1, start, end))
176 		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
177 			     (unsigned long long)input->inode_table, itend - 1);
178 	else if (input->inode_bitmap == input->block_bitmap)
179 		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
180 			     (unsigned long long)input->block_bitmap);
181 	else if (inside(input->block_bitmap, input->inode_table, itend))
182 		ext4_warning(sb, "Block bitmap (%llu) in inode table "
183 			     "(%llu-%llu)",
184 			     (unsigned long long)input->block_bitmap,
185 			     (unsigned long long)input->inode_table, itend - 1);
186 	else if (inside(input->inode_bitmap, input->inode_table, itend))
187 		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
188 			     "(%llu-%llu)",
189 			     (unsigned long long)input->inode_bitmap,
190 			     (unsigned long long)input->inode_table, itend - 1);
191 	else if (inside(input->block_bitmap, start, metaend))
192 		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
193 			     (unsigned long long)input->block_bitmap,
194 			     start, metaend - 1);
195 	else if (inside(input->inode_bitmap, start, metaend))
196 		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
197 			     (unsigned long long)input->inode_bitmap,
198 			     start, metaend - 1);
199 	else if (inside(input->inode_table, start, metaend) ||
200 		 inside(itend - 1, start, metaend))
201 		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
202 			     "(%llu-%llu)",
203 			     (unsigned long long)input->inode_table,
204 			     itend - 1, start, metaend - 1);
205 	else
206 		err = 0;
207 	brelse(bh);
208 
209 	return err;
210 }
211 
212 /*
213  * ext4_new_flex_group_data is used by the 64bit-resize interface to add
214  * one flex group at a time.
215  */
216 struct ext4_new_flex_group_data {
217 	struct ext4_new_group_data *groups;	/* new_group_data for groups
218 						   in the flex group */
219 	__u16 *bg_flags;			/* block group flags of groups
220 						   in @groups */
221 	ext4_group_t resize_bg;			/* number of allocated
222 						   new_group_data entries */
223 	ext4_group_t count;			/* number of groups in @groups
224 						 */
225 };
226 
227 /*
228  * Cap the number of groups added at once to avoid memory allocation failures.
229  */
230 #define MAX_RESIZE_BG				16384
231 
232 /*
233  * alloc_flex_gd() allocates an ext4_new_flex_group_data sized for a flex
234  * group of @flexbg_size groups.
235  *
236  * Returns NULL on failure, otherwise the address of the allocated structure.
237  */
238 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size,
239 				ext4_group_t o_group, ext4_group_t n_group)
240 {
241 	ext4_group_t last_group;
242 	struct ext4_new_flex_group_data *flex_gd;
243 
244 	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
245 	if (flex_gd == NULL)
246 		goto out3;
247 
248 	if (unlikely(flexbg_size > MAX_RESIZE_BG))
249 		flex_gd->resize_bg = MAX_RESIZE_BG;
250 	else
251 		flex_gd->resize_bg = flexbg_size;
252 
253 	/* Avoid allocating large 'groups' array if not needed */
254 	last_group = o_group | (flex_gd->resize_bg - 1);
255 	if (n_group <= last_group)
256 		flex_gd->resize_bg = 1 << fls(n_group - o_group + 1);
257 	else if (n_group - last_group < flex_gd->resize_bg)
258 		flex_gd->resize_bg = 1 << max(fls(last_group - o_group + 1),
259 					      fls(n_group - last_group));
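	/*
	 * Worked example (illustrative): with flexbg_size = 16, o_group = 5
	 * and n_group = 7, last_group = 5 | 15 = 15 >= n_group, so resize_bg
	 * becomes 1 << fls(7 - 5 + 1) = 4: enough slots for groups 5..7
	 * without allocating the full 16-entry arrays.
	 */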
260 
261 	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
262 					sizeof(struct ext4_new_group_data),
263 					GFP_NOFS);
264 	if (flex_gd->groups == NULL)
265 		goto out2;
266 
267 	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
268 					  GFP_NOFS);
269 	if (flex_gd->bg_flags == NULL)
270 		goto out1;
271 
272 	return flex_gd;
273 
274 out1:
275 	kfree(flex_gd->groups);
276 out2:
277 	kfree(flex_gd);
278 out3:
279 	return NULL;
280 }
281 
282 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
283 {
284 	kfree(flex_gd->bg_flags);
285 	kfree(flex_gd->groups);
286 	kfree(flex_gd);
287 }
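
/*
 * Illustrative sketch of how the helpers above fit together (error handling
 * omitted; the real driver of this loop is ext4_resize_fs() later in this
 * file):
 *
 *	flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
 *	if (flex_gd == NULL)
 *		return -ENOMEM;
 *	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count))
 *		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
 *	free_flex_gd(flex_gd);
 */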
288 
289 /*
290  * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
291  * and inode tables for a flex group.
292  *
293  * This function is used by 64bit-resize.  Note that this function allocates
294  * group tables from the 1st group of groups contained by @flexgd, which may
295  * be a partial flex group.
296  *
297  * @sb: super block of the fs to which the groups belong
298  *
299  * Returns 0 on a successful allocation of the metadata blocks in the
300  * block group.
301  */
302 static int ext4_alloc_group_tables(struct super_block *sb,
303 				struct ext4_new_flex_group_data *flex_gd,
304 				unsigned int flexbg_size)
305 {
306 	struct ext4_new_group_data *group_data = flex_gd->groups;
307 	ext4_fsblk_t start_blk;
308 	ext4_fsblk_t last_blk;
309 	ext4_group_t src_group;
310 	ext4_group_t bb_index = 0;
311 	ext4_group_t ib_index = 0;
312 	ext4_group_t it_index = 0;
313 	ext4_group_t group;
314 	ext4_group_t last_group;
315 	unsigned overhead;
316 	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
317 	int i;
318 
319 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
320 
321 	src_group = group_data[0].group;
322 	last_group  = src_group + flex_gd->count - 1;
323 
324 	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
325 	       (last_group & ~(flexbg_size - 1))));
326 next_group:
327 	group = group_data[0].group;
328 	if (src_group >= group_data[0].group + flex_gd->count)
329 		return -ENOSPC;
330 	start_blk = ext4_group_first_block_no(sb, src_group);
331 	last_blk = start_blk + group_data[src_group - group].blocks_count;
332 
333 	overhead = ext4_group_overhead_blocks(sb, src_group);
334 
335 	start_blk += overhead;
336 
337 	/* Collect as many contiguous blocks as possible. */
338 	src_group++;
339 	for (; src_group <= last_group; src_group++) {
340 		overhead = ext4_group_overhead_blocks(sb, src_group);
341 		if (overhead == 0)
342 			last_blk += group_data[src_group - group].blocks_count;
343 		else
344 			break;
345 	}
346 
347 	/* Allocate block bitmaps */
348 	for (; bb_index < flex_gd->count; bb_index++) {
349 		if (start_blk >= last_blk)
350 			goto next_group;
351 		group_data[bb_index].block_bitmap = start_blk++;
352 		group = ext4_get_group_number(sb, start_blk - 1);
353 		group -= group_data[0].group;
354 		group_data[group].mdata_blocks++;
355 		flex_gd->bg_flags[group] &= uninit_mask;
356 	}
357 
358 	/* Allocate inode bitmaps */
359 	for (; ib_index < flex_gd->count; ib_index++) {
360 		if (start_blk >= last_blk)
361 			goto next_group;
362 		group_data[ib_index].inode_bitmap = start_blk++;
363 		group = ext4_get_group_number(sb, start_blk - 1);
364 		group -= group_data[0].group;
365 		group_data[group].mdata_blocks++;
366 		flex_gd->bg_flags[group] &= uninit_mask;
367 	}
368 
369 	/* Allocate inode tables */
370 	for (; it_index < flex_gd->count; it_index++) {
371 		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
372 		ext4_fsblk_t next_group_start;
373 
374 		if (start_blk + itb > last_blk)
375 			goto next_group;
376 		group_data[it_index].inode_table = start_blk;
377 		group = ext4_get_group_number(sb, start_blk);
378 		next_group_start = ext4_group_first_block_no(sb, group + 1);
379 		group -= group_data[0].group;
380 
381 		if (start_blk + itb > next_group_start) {
382 			flex_gd->bg_flags[group + 1] &= uninit_mask;
383 			overhead = start_blk + itb - next_group_start;
384 			group_data[group + 1].mdata_blocks += overhead;
385 			itb -= overhead;
386 		}
387 
388 		group_data[group].mdata_blocks += itb;
389 		flex_gd->bg_flags[group] &= uninit_mask;
390 		start_blk += EXT4_SB(sb)->s_itb_per_group;
391 	}
392 
393 	/* Update free clusters count to exclude metadata blocks */
394 	for (i = 0; i < flex_gd->count; i++) {
395 		group_data[i].free_clusters_count -=
396 				EXT4_NUM_B2C(EXT4_SB(sb),
397 					     group_data[i].mdata_blocks);
398 	}
399 
400 	if (test_opt(sb, DEBUG)) {
401 		int i;
402 		group = group_data[0].group;
403 
404 		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
405 		       "%u groups, flexbg size is %u:\n", flex_gd->count,
406 		       flexbg_size);
407 
408 		for (i = 0; i < flex_gd->count; i++) {
409 			ext4_debug(
410 			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
411 			       ext4_bg_has_super(sb, group + i) ? "normal" :
412 			       "no-super", group + i,
413 			       group_data[i].blocks_count,
414 			       group_data[i].free_clusters_count,
415 			       group_data[i].mdata_blocks);
416 		}
417 	}
418 	return 0;
419 }
420 
421 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
422 				  ext4_fsblk_t blk)
423 {
424 	struct buffer_head *bh;
425 	int err;
426 
427 	bh = sb_getblk(sb, blk);
428 	if (unlikely(!bh))
429 		return ERR_PTR(-ENOMEM);
430 	BUFFER_TRACE(bh, "get_write_access");
431 	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
432 	if (err) {
433 		brelse(bh);
434 		bh = ERR_PTR(err);
435 	} else {
436 		memset(bh->b_data, 0, sb->s_blocksize);
437 		set_buffer_uptodate(bh);
438 	}
439 
440 	return bh;
441 }
442 
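/*
 * Ensure @handle has at least @credits journal credits available, extending
 * or restarting the transaction (with room for up to EXT4_MAX_TRANS_DATA
 * credits) when it does not.  Callers treat only a negative return as fatal.
 */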
443 static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
444 {
445 	return ext4_journal_ensure_credits_fn(handle, credits,
446 		EXT4_MAX_TRANS_DATA, 0, 0);
447 }
448 
449 /*
450  * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] used.
451  *
452  * Helper function for setup_new_flex_group_blocks().
453  *
454  * @sb: super block
455  * @handle: journal handle
456  * @flex_gd: flex group data
457  */
458 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
459 			struct ext4_new_flex_group_data *flex_gd,
460 			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
461 {
462 	struct ext4_sb_info *sbi = EXT4_SB(sb);
463 	ext4_group_t count = last_cluster - first_cluster + 1;
464 	ext4_group_t count2;
465 
466 	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
467 		   last_cluster);
468 	for (; count > 0; count -= count2, first_cluster += count2) {
469 		ext4_fsblk_t start;
470 		struct buffer_head *bh;
471 		ext4_group_t group;
472 		int err;
473 
474 		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
475 		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
476 		group -= flex_gd->groups[0].group;
477 
478 		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
479 		if (count2 > count)
480 			count2 = count;
481 
482 		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
483 			BUG_ON(flex_gd->count > 1);
484 			continue;
485 		}
486 
487 		err = ext4_resize_ensure_credits_batch(handle, 1);
488 		if (err < 0)
489 			return err;
490 
491 		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
492 		if (unlikely(!bh))
493 			return -ENOMEM;
494 
495 		BUFFER_TRACE(bh, "get_write_access");
496 		err = ext4_journal_get_write_access(handle, sb, bh,
497 						    EXT4_JTR_NONE);
498 		if (err) {
499 			brelse(bh);
500 			return err;
501 		}
502 		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
503 			   first_cluster, first_cluster - start, count2);
504 		mb_set_bits(bh->b_data, first_cluster - start, count2);
505 
506 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
507 		brelse(bh);
508 		if (unlikely(err))
509 			return err;
510 	}
511 
512 	return 0;
513 }
514 
515 /*
516  * Set up the block and inode bitmaps, and the inode table for the new groups.
517  * This doesn't need to be part of the main transaction, since we are only
518  * changing blocks outside the actual filesystem.  We still do journaling to
519  * ensure the recovery is correct in case of a failure just after resize.
520  * If any part of this fails, we simply abort the resize.
521  *
522  * setup_new_flex_group_blocks handles a flex group as follow:
523  *  1. copy super block and GDT, and initialize group tables if necessary.
524  *     In this step, we only set bits in blocks bitmaps for blocks taken by
525  *     super block and GDT.
526  *  2. allocate group tables in block bitmaps, that is, set bits in block
527  *     bitmap for blocks taken by group tables.
528  */
529 static int setup_new_flex_group_blocks(struct super_block *sb,
530 				struct ext4_new_flex_group_data *flex_gd)
531 {
532 	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
533 	ext4_fsblk_t start;
534 	ext4_fsblk_t block;
535 	struct ext4_sb_info *sbi = EXT4_SB(sb);
536 	struct ext4_super_block *es = sbi->s_es;
537 	struct ext4_new_group_data *group_data = flex_gd->groups;
538 	__u16 *bg_flags = flex_gd->bg_flags;
539 	handle_t *handle;
540 	ext4_group_t group, count;
541 	struct buffer_head *bh = NULL;
542 	int reserved_gdb, i, j, err = 0, err2;
543 	int meta_bg;
544 
545 	BUG_ON(!flex_gd->count || !group_data ||
546 	       group_data[0].group != sbi->s_groups_count);
547 
548 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
549 	meta_bg = ext4_has_feature_meta_bg(sb);
550 
551 	/* This transaction may be extended/restarted along the way */
552 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
553 	if (IS_ERR(handle))
554 		return PTR_ERR(handle);
555 
556 	group = group_data[0].group;
557 	for (i = 0; i < flex_gd->count; i++, group++) {
558 		unsigned long gdblocks;
559 		ext4_grpblk_t overhead;
560 
561 		gdblocks = ext4_bg_num_gdb(sb, group);
562 		start = ext4_group_first_block_no(sb, group);
563 
564 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
565 			goto handle_itb;
566 
567 		if (meta_bg == 1)
568 			goto handle_itb;
569 
570 		block = start + ext4_bg_has_super(sb, group);
571 		/* Copy all of the GDT blocks into the backup in this group */
572 		for (j = 0; j < gdblocks; j++, block++) {
573 			struct buffer_head *gdb;
574 
575 			ext4_debug("update backup group %#04llx\n", block);
576 			err = ext4_resize_ensure_credits_batch(handle, 1);
577 			if (err < 0)
578 				goto out;
579 
580 			gdb = sb_getblk(sb, block);
581 			if (unlikely(!gdb)) {
582 				err = -ENOMEM;
583 				goto out;
584 			}
585 
586 			BUFFER_TRACE(gdb, "get_write_access");
587 			err = ext4_journal_get_write_access(handle, sb, gdb,
588 							    EXT4_JTR_NONE);
589 			if (err) {
590 				brelse(gdb);
591 				goto out;
592 			}
593 			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
594 				s_group_desc, j)->b_data, gdb->b_size);
595 			set_buffer_uptodate(gdb);
596 
597 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
598 			if (unlikely(err)) {
599 				brelse(gdb);
600 				goto out;
601 			}
602 			brelse(gdb);
603 		}
604 
605 		/* Zero out all of the reserved backup group descriptor
606 		 * table blocks
607 		 */
608 		if (ext4_bg_has_super(sb, group)) {
609 			err = sb_issue_zeroout(sb, gdblocks + start + 1,
610 					reserved_gdb, GFP_NOFS);
611 			if (err)
612 				goto out;
613 		}
614 
615 handle_itb:
616 		/* Initialize group tables of the group @group */
617 		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
618 			goto handle_bb;
619 
620 		/* Zero out all of the inode table blocks */
621 		block = group_data[i].inode_table;
622 		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
623 			   block, sbi->s_itb_per_group);
624 		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
625 				       GFP_NOFS);
626 		if (err)
627 			goto out;
628 
629 handle_bb:
630 		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
631 			goto handle_ib;
632 
633 		/* Initialize block bitmap of the @group */
634 		block = group_data[i].block_bitmap;
635 		err = ext4_resize_ensure_credits_batch(handle, 1);
636 		if (err < 0)
637 			goto out;
638 
639 		bh = bclean(handle, sb, block);
640 		if (IS_ERR(bh)) {
641 			err = PTR_ERR(bh);
642 			goto out;
643 		}
644 		overhead = ext4_group_overhead_blocks(sb, group);
645 		if (overhead != 0) {
646 			ext4_debug("mark backup superblock %#04llx (+0)\n",
647 				   start);
648 			mb_set_bits(bh->b_data, 0,
649 				      EXT4_NUM_B2C(sbi, overhead));
650 		}
651 		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
652 				     sb->s_blocksize * 8, bh->b_data);
653 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
654 		brelse(bh);
655 		if (err)
656 			goto out;
657 
658 handle_ib:
659 		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
660 			continue;
661 
662 		/* Initialize inode bitmap of the @group */
663 		block = group_data[i].inode_bitmap;
664 		err = ext4_resize_ensure_credits_batch(handle, 1);
665 		if (err < 0)
666 			goto out;
667 		/* Mark unused entries in inode bitmap used */
668 		bh = bclean(handle, sb, block);
669 		if (IS_ERR(bh)) {
670 			err = PTR_ERR(bh);
671 			goto out;
672 		}
673 
674 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
675 				     sb->s_blocksize * 8, bh->b_data);
676 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
677 		brelse(bh);
678 		if (err)
679 			goto out;
680 	}
681 
682 	/* Mark group tables in block bitmap */
683 	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
684 		count = group_table_count[j];
685 		start = (&group_data[0].block_bitmap)[j];
686 		block = start;
687 		for (i = 1; i < flex_gd->count; i++) {
688 			block += group_table_count[j];
689 			if (block == (&group_data[i].block_bitmap)[j]) {
690 				count += group_table_count[j];
691 				continue;
692 			}
693 			err = set_flexbg_block_bitmap(sb, handle,
694 						      flex_gd,
695 						      EXT4_B2C(sbi, start),
696 						      EXT4_B2C(sbi,
697 							       start + count
698 							       - 1));
699 			if (err)
700 				goto out;
701 			count = group_table_count[j];
702 			start = (&group_data[i].block_bitmap)[j];
703 			block = start;
704 		}
705 
706 		err = set_flexbg_block_bitmap(sb, handle,
707 				flex_gd,
708 				EXT4_B2C(sbi, start),
709 				EXT4_B2C(sbi,
710 					start + count
711 					- 1));
712 		if (err)
713 			goto out;
714 	}
715 
716 out:
717 	err2 = ext4_journal_stop(handle);
718 	if (err2 && !err)
719 		err = err2;
720 
721 	return err;
722 }
723 
724 /*
725  * Iterate through the groups which hold BACKUP superblock/GDT copies in an
726  * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
727  * calling this for the first time.  In a sparse filesystem it will be the
728  * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
729  * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
730  */
731 unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
732 			       unsigned int *five, unsigned int *seven)
733 {
734 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
735 	unsigned int *min = three;
736 	int mult = 3;
737 	unsigned int ret;
738 
739 	if (ext4_has_feature_sparse_super2(sb)) {
740 		do {
741 			if (*min > 2)
742 				return UINT_MAX;
743 			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
744 			*min += 1;
745 		} while (!ret);
746 		return ret;
747 	}
748 
749 	if (!ext4_has_feature_sparse_super(sb)) {
750 		ret = *min;
751 		*min += 1;
752 		return ret;
753 	}
754 
755 	if (*five < *min) {
756 		min = five;
757 		mult = 5;
758 	}
759 	if (*seven < *min) {
760 		min = seven;
761 		mult = 7;
762 	}
763 
764 	ret = *min;
765 	*min *= mult;
766 
767 	return ret;
768 }
769 
770 /*
771  * Check that all of the backup GDT blocks are held in the primary GDT block.
772  * It is assumed that they are stored in group order.  Returns the number of
773  * groups in the current filesystem that have BACKUPS, or a negative error code.
774  */
775 static int verify_reserved_gdb(struct super_block *sb,
776 			       ext4_group_t end,
777 			       struct buffer_head *primary)
778 {
779 	const ext4_fsblk_t blk = primary->b_blocknr;
780 	unsigned three = 1;
781 	unsigned five = 5;
782 	unsigned seven = 7;
783 	unsigned grp;
784 	__le32 *p = (__le32 *)primary->b_data;
785 	int gdbackups = 0;
786 
787 	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
788 		if (le32_to_cpu(*p++) !=
789 		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
790 			ext4_warning(sb, "reserved GDT %llu"
791 				     " missing grp %d (%llu)",
792 				     blk, grp,
793 				     grp *
794 				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
795 				     blk);
796 			return -EINVAL;
797 		}
798 		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
799 			return -EFBIG;
800 	}
801 
802 	return gdbackups;
803 }
804 
805 /*
806  * Called when we need to bring a reserved group descriptor table block into
807  * use from the resize inode.  The primary copy of the new GDT block currently
808  * is an indirect block (under the double indirect block in the resize inode).
809  * The new backup GDT blocks will be stored as leaf blocks in this indirect
810  * block, in group order.  Even though we know all the block numbers we need,
811  * we check to ensure that the resize inode has actually reserved these blocks.
812  *
813  * Don't need to update the block bitmaps because the blocks are still in use.
814  *
815  * We get all of the error cases out of the way, so that we are sure to not
816  * fail once we start modifying the data on disk, because JBD has no rollback.
817  */
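
/*
 * Sketch of the resize inode layout assumed below (see also
 * reserve_backup_gdb()):
 *
 *	EXT4_I(inode)->i_data[EXT4_DIND_BLOCK]
 *	  -> dind block: one __le32 slot per group descriptor block, where
 *	     slot[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] holds the block number
 *	     of the primary copy of GDT block gdb_num.
 */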
818 static int add_new_gdb(handle_t *handle, struct inode *inode,
819 		       ext4_group_t group)
820 {
821 	struct super_block *sb = inode->i_sb;
822 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
823 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
824 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
825 	struct buffer_head **o_group_desc, **n_group_desc = NULL;
826 	struct buffer_head *dind = NULL;
827 	struct buffer_head *gdb_bh = NULL;
828 	int gdbackups;
829 	struct ext4_iloc iloc = { .bh = NULL };
830 	__le32 *data;
831 	int err;
832 
833 	if (test_opt(sb, DEBUG))
834 		printk(KERN_DEBUG
835 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
836 		       gdb_num);
837 
838 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
839 	if (IS_ERR(gdb_bh))
840 		return PTR_ERR(gdb_bh);
841 
842 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
843 	if (gdbackups < 0) {
844 		err = gdbackups;
845 		goto errout;
846 	}
847 
848 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
849 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
850 	if (IS_ERR(dind)) {
851 		err = PTR_ERR(dind);
852 		dind = NULL;
853 		goto errout;
854 	}
855 
856 	data = (__le32 *)dind->b_data;
857 	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
858 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
859 			     group, gdblock);
860 		err = -EINVAL;
861 		goto errout;
862 	}
863 
864 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
865 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
866 					    EXT4_JTR_NONE);
867 	if (unlikely(err))
868 		goto errout;
869 
870 	BUFFER_TRACE(gdb_bh, "get_write_access");
871 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
872 	if (unlikely(err))
873 		goto errout;
874 
875 	BUFFER_TRACE(dind, "get_write_access");
876 	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
877 	if (unlikely(err)) {
878 		ext4_std_error(sb, err);
879 		goto errout;
880 	}
881 
882 	/* ext4_reserve_inode_write() gets a reference on the iloc */
883 	err = ext4_reserve_inode_write(handle, inode, &iloc);
884 	if (unlikely(err))
885 		goto errout;
886 
887 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
888 				GFP_KERNEL);
889 	if (!n_group_desc) {
890 		err = -ENOMEM;
891 		ext4_warning(sb, "not enough memory for %lu groups",
892 			     gdb_num + 1);
893 		goto errout;
894 	}
895 
896 	/*
897 	 * Finally, we have all of the possible failures behind us...
898 	 *
899 	 * Remove new GDT block from inode double-indirect block and clear out
900 	 * the new GDT block for use (which also "frees" the backup GDT blocks
901 	 * from the reserved inode).  We don't need to change the bitmaps for
902 	 * these blocks, because they are marked as in-use from being in the
903 	 * reserved inode, and will become GDT blocks (primary and backup).
904 	 */
905 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
906 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
907 	if (unlikely(err)) {
908 		ext4_std_error(sb, err);
909 		goto errout;
910 	}
911 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
912 			   (9 - EXT4_SB(sb)->s_cluster_bits);
913 	ext4_mark_iloc_dirty(handle, inode, &iloc);
914 	memset(gdb_bh->b_data, 0, sb->s_blocksize);
915 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
916 	if (unlikely(err)) {
917 		ext4_std_error(sb, err);
918 		iloc.bh = NULL;
919 		goto errout;
920 	}
921 	brelse(dind);
922 
923 	rcu_read_lock();
924 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
925 	memcpy(n_group_desc, o_group_desc,
926 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
927 	rcu_read_unlock();
928 	n_group_desc[gdb_num] = gdb_bh;
929 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
930 	EXT4_SB(sb)->s_gdb_count++;
931 	ext4_kvfree_array_rcu(o_group_desc);
932 
933 	lock_buffer(EXT4_SB(sb)->s_sbh);
934 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
935 	ext4_superblock_csum_set(sb);
936 	unlock_buffer(EXT4_SB(sb)->s_sbh);
937 	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
938 	if (err)
939 		ext4_std_error(sb, err);
940 	return err;
941 errout:
942 	kvfree(n_group_desc);
943 	brelse(iloc.bh);
944 	brelse(dind);
945 	brelse(gdb_bh);
946 
947 	ext4_debug("leaving with error %d\n", err);
948 	return err;
949 }
950 
951 /*
952  * If there is no available space in the existing block group descriptors for
953  * the new block group and there are no reserved block group descriptors, then
954  * the meta_bg feature will get enabled, and es->s_first_meta_bg will get set
955  * to the first block group that is managed using meta_bg; s_first_meta_bg
956  * must be a multiple of EXT4_DESC_PER_BLOCK(sb).
957  * This function is called when the first group of a meta_bg is added, to
958  * bring in the group descriptor block of the newly added meta_bg.
959  */
960 static int add_new_gdb_meta_bg(struct super_block *sb,
961 			       handle_t *handle, ext4_group_t group) {
962 	ext4_fsblk_t gdblock;
963 	struct buffer_head *gdb_bh;
964 	struct buffer_head **o_group_desc, **n_group_desc;
965 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
966 	int err;
967 
968 	gdblock = ext4_group_first_block_no(sb, group) +
969 		  ext4_bg_has_super(sb, group);
970 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
971 	if (IS_ERR(gdb_bh))
972 		return PTR_ERR(gdb_bh);
973 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
974 				GFP_KERNEL);
975 	if (!n_group_desc) {
976 		brelse(gdb_bh);
977 		err = -ENOMEM;
978 		ext4_warning(sb, "not enough memory for %lu groups",
979 			     gdb_num + 1);
980 		return err;
981 	}
982 
983 	rcu_read_lock();
984 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
985 	memcpy(n_group_desc, o_group_desc,
986 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
987 	rcu_read_unlock();
988 	n_group_desc[gdb_num] = gdb_bh;
989 
990 	BUFFER_TRACE(gdb_bh, "get_write_access");
991 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
992 	if (err) {
993 		kvfree(n_group_desc);
994 		brelse(gdb_bh);
995 		return err;
996 	}
997 
998 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
999 	EXT4_SB(sb)->s_gdb_count++;
1000 	ext4_kvfree_array_rcu(o_group_desc);
1001 	return err;
1002 }
1003 
1004 /*
1005  * Called when we are adding a new group which has a backup copy of each of
1006  * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
1007  * We need to add these reserved backup GDT blocks to the resize inode, so
1008  * that they are kept for future resizing and not allocated to files.
1009  *
1010  * Each reserved backup GDT block will go into a different indirect block.
1011  * The indirect blocks are actually the primary reserved GDT blocks,
1012  * so we know in advance what their block numbers are.  We only get the
1013  * double-indirect block to verify it is pointing to the primary reserved
1014  * GDT blocks so we don't overwrite a data block by accident.  The reserved
1015  * backup GDT blocks are stored in their reserved primary GDT block.
1016  */
1017 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
1018 			      ext4_group_t group)
1019 {
1020 	struct super_block *sb = inode->i_sb;
1021 	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
1022 	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
1023 	struct buffer_head **primary;
1024 	struct buffer_head *dind;
1025 	struct ext4_iloc iloc;
1026 	ext4_fsblk_t blk;
1027 	__le32 *data, *end;
1028 	int gdbackups = 0;
1029 	int res, i;
1030 	int err;
1031 
1032 	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
1033 	if (!primary)
1034 		return -ENOMEM;
1035 
1036 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
1037 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
1038 	if (IS_ERR(dind)) {
1039 		err = PTR_ERR(dind);
1040 		dind = NULL;
1041 		goto exit_free;
1042 	}
1043 
1044 	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
1045 	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
1046 					 EXT4_ADDR_PER_BLOCK(sb));
1047 	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
1048 
1049 	/* Get each reserved primary GDT block and verify it holds backups */
1050 	for (res = 0; res < reserved_gdb; res++, blk++) {
1051 		if (le32_to_cpu(*data) != blk) {
1052 			ext4_warning(sb, "reserved block %llu"
1053 				     " not at offset %ld",
1054 				     blk,
1055 				     (long)(data - (__le32 *)dind->b_data));
1056 			err = -EINVAL;
1057 			goto exit_bh;
1058 		}
1059 		primary[res] = ext4_sb_bread(sb, blk, 0);
1060 		if (IS_ERR(primary[res])) {
1061 			err = PTR_ERR(primary[res]);
1062 			primary[res] = NULL;
1063 			goto exit_bh;
1064 		}
1065 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1066 		if (gdbackups < 0) {
1067 			brelse(primary[res]);
1068 			err = gdbackups;
1069 			goto exit_bh;
1070 		}
1071 		if (++data >= end)
1072 			data = (__le32 *)dind->b_data;
1073 	}
1074 
1075 	for (i = 0; i < reserved_gdb; i++) {
1076 		BUFFER_TRACE(primary[i], "get_write_access");
1077 		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
1078 							 EXT4_JTR_NONE)))
1079 			goto exit_bh;
1080 	}
1081 
1082 	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1083 		goto exit_bh;
1084 
1085 	/*
1086 	 * Finally we can add each of the reserved backup GDT blocks from
1087 	 * the new group to its reserved primary GDT block.
1088 	 */
1089 	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1090 	for (i = 0; i < reserved_gdb; i++) {
1091 		int err2;
1092 		data = (__le32 *)primary[i]->b_data;
1093 		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1094 		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1095 		if (!err)
1096 			err = err2;
1097 	}
1098 
1099 	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1100 	ext4_mark_iloc_dirty(handle, inode, &iloc);
1101 
1102 exit_bh:
1103 	while (--res >= 0)
1104 		brelse(primary[res]);
1105 	brelse(dind);
1106 
1107 exit_free:
1108 	kfree(primary);
1109 
1110 	return err;
1111 }
1112 
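/*
 * Stamp @group into an on-disk superblock image (@data) and refresh its
 * checksum, so each backup records which block group it lives in.
 */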
1113 static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
1114 					   ext4_group_t group)
1115 {
1116 	struct ext4_super_block *es = (struct ext4_super_block *) data;
1117 
1118 	es->s_block_group_nr = cpu_to_le16(group);
1119 	if (ext4_has_metadata_csum(sb))
1120 		es->s_checksum = ext4_superblock_csum(sb, es);
1121 }
1122 
1123 /*
1124  * Update the backup copies of the ext4 metadata.  These don't need to be part
1125  * of the main resize transaction, because e2fsck will re-write them if there
1126  * is a problem (basically only OOM will cause a problem).  However, we
1127  * _should_ update the backups if possible, in case the primary gets trashed
1128  * for some reason and we need to run e2fsck from a backup superblock.  The
1129  * important part is that the new block and inode counts are in the backup
1130  * superblocks, and the location of the new group metadata in the GDT backups.
1131  *
1132  * We do not need to take the s_resize_lock for this, because these
1133  * blocks are not otherwise touched by the filesystem code when it is
1134  * mounted.  We don't need to worry about last changing from
1135  * sbi->s_groups_count, because the worst that can happen is that we
1136  * do not copy the full number of backups at this time.  The resize
1137  * which changed s_groups_count will backup again.
1138  */
1139 static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1140 			   int size, int meta_bg)
1141 {
1142 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1143 	ext4_group_t last;
1144 	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1145 	unsigned three = 1;
1146 	unsigned five = 5;
1147 	unsigned seven = 7;
1148 	ext4_group_t group = 0;
1149 	int rest = sb->s_blocksize - size;
1150 	handle_t *handle;
1151 	int err = 0, err2;
1152 
1153 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1154 	if (IS_ERR(handle)) {
1155 		group = 1;
1156 		err = PTR_ERR(handle);
1157 		goto exit_err;
1158 	}
1159 
1160 	if (meta_bg == 0) {
1161 		group = ext4_list_backups(sb, &three, &five, &seven);
1162 		last = sbi->s_groups_count;
1163 	} else {
1164 		group = ext4_get_group_number(sb, blk_off) + 1;
1165 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1166 	}
1167 
1168 	while (group < sbi->s_groups_count) {
1169 		struct buffer_head *bh;
1170 		ext4_fsblk_t backup_block;
1171 		int has_super = ext4_bg_has_super(sb, group);
1172 		ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);
1173 
1174 		/* Out of journal space, and can't get more - abort - so sad */
1175 		err = ext4_resize_ensure_credits_batch(handle, 1);
1176 		if (err < 0)
1177 			break;
1178 
1179 		if (meta_bg == 0)
1180 			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1181 		else
1182 			backup_block = first_block + has_super;
1183 
1184 		bh = sb_getblk(sb, backup_block);
1185 		if (unlikely(!bh)) {
1186 			err = -ENOMEM;
1187 			break;
1188 		}
1189 		ext4_debug("update metadata backup %llu(+%llu)\n",
1190 			   backup_block, backup_block -
1191 			   ext4_group_first_block_no(sb, group));
1192 		BUFFER_TRACE(bh, "get_write_access");
1193 		if ((err = ext4_journal_get_write_access(handle, sb, bh,
1194 							 EXT4_JTR_NONE))) {
1195 			brelse(bh);
1196 			break;
1197 		}
1198 		lock_buffer(bh);
1199 		memcpy(bh->b_data, data, size);
1200 		if (rest)
1201 			memset(bh->b_data + size, 0, rest);
1202 		if (has_super && (backup_block == first_block))
1203 			ext4_set_block_group_nr(sb, bh->b_data, group);
1204 		set_buffer_uptodate(bh);
1205 		unlock_buffer(bh);
1206 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
1207 		if (unlikely(err))
1208 			ext4_std_error(sb, err);
1209 		brelse(bh);
1210 
1211 		if (meta_bg == 0)
1212 			group = ext4_list_backups(sb, &three, &five, &seven);
1213 		else if (group == last)
1214 			break;
1215 		else
1216 			group = last;
1217 	}
1218 	if ((err2 = ext4_journal_stop(handle)) && !err)
1219 		err = err2;
1220 
1221 	/*
1222 	 * Ugh! Need to have e2fsck write the backup copies.  It is too
1223 	 * late to revert the resize, we shouldn't fail just because of
1224 	 * the backup copies (they are only needed in case of corruption).
1225 	 *
1226 	 * However, if we got here we have a journal problem too, so we
1227 	 * can't really start a transaction to mark the superblock.
1228 	 * Chicken out and just set the flag in the hope it will be written
1229 	 * to disk, and if not - we will simply wait until next fsck.
1230 	 */
1231 exit_err:
1232 	if (err) {
1233 		ext4_warning(sb, "can't update backup for group %u (err %d), "
1234 			     "forcing fsck on next reboot", group, err);
1235 		sbi->s_mount_state &= ~EXT4_VALID_FS;
1236 		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1237 		mark_buffer_dirty(sbi->s_sbh);
1238 	}
1239 }
1240 
1241 /*
1242  * ext4_add_new_descs() adds @count group descriptors for groups
1243  * starting at @group
1244  *
1245  * @handle: journal handle
1246  * @sb: super block
1247  * @group: the group no. of the first group desc to be added
1248  * @resize_inode: the resize inode
1249  * @count: number of group descriptors to be added
1250  */
1251 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1252 			      ext4_group_t group, struct inode *resize_inode,
1253 			      ext4_group_t count)
1254 {
1255 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1256 	struct ext4_super_block *es = sbi->s_es;
1257 	struct buffer_head *gdb_bh;
1258 	int i, gdb_off, gdb_num, err = 0;
1259 	int meta_bg;
1260 
1261 	meta_bg = ext4_has_feature_meta_bg(sb);
1262 	for (i = 0; i < count; i++, group++) {
1263 		int reserved_gdb = ext4_bg_has_super(sb, group) ?
1264 			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1265 
1266 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1267 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1268 
1269 		/*
1270 		 * We will only either add reserved group blocks to a backup group
1271 		 * or remove reserved blocks for the first group in a new group block.
1272 		 * Doing both would mean more complex code, and sane people don't
1273 		 * use non-sparse filesystems anymore.  This is already checked above.
1274 		 */
1275 		if (gdb_off) {
1276 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1277 						     gdb_num);
1278 			BUFFER_TRACE(gdb_bh, "get_write_access");
1279 			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
1280 							    EXT4_JTR_NONE);
1281 
1282 			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1283 				err = reserve_backup_gdb(handle, resize_inode, group);
1284 		} else if (meta_bg != 0) {
1285 			err = add_new_gdb_meta_bg(sb, handle, group);
1286 		} else {
1287 			err = add_new_gdb(handle, resize_inode, group);
1288 		}
1289 		if (err)
1290 			break;
1291 	}
1292 	return err;
1293 }
1294 
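/* Read the bitmap buffer at @block; returns NULL on allocation or I/O error. */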
1295 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1296 {
1297 	struct buffer_head *bh = sb_getblk(sb, block);
1298 	if (unlikely(!bh))
1299 		return NULL;
1300 	if (!bh_uptodate_or_lock(bh)) {
1301 		if (ext4_read_bh(bh, 0, NULL) < 0) {
1302 			brelse(bh);
1303 			return NULL;
1304 		}
1305 	}
1306 
1307 	return bh;
1308 }
1309 
1310 static int ext4_set_bitmap_checksums(struct super_block *sb,
1311 				     struct ext4_group_desc *gdp,
1312 				     struct ext4_new_group_data *group_data)
1313 {
1314 	struct buffer_head *bh;
1315 
1316 	if (!ext4_has_metadata_csum(sb))
1317 		return 0;
1318 
1319 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1320 	if (!bh)
1321 		return -EIO;
1322 	ext4_inode_bitmap_csum_set(sb, gdp, bh);
1323 	brelse(bh);
1324 
1325 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1326 	if (!bh)
1327 		return -EIO;
1328 	ext4_block_bitmap_csum_set(sb, gdp, bh);
1329 	brelse(bh);
1330 
1331 	return 0;
1332 }
1333 
1334 /*
1335  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1336  */
1337 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1338 				struct ext4_new_flex_group_data *flex_gd)
1339 {
1340 	struct ext4_new_group_data	*group_data = flex_gd->groups;
1341 	struct ext4_group_desc		*gdp;
1342 	struct ext4_sb_info		*sbi = EXT4_SB(sb);
1343 	struct buffer_head		*gdb_bh;
1344 	ext4_group_t			group;
1345 	__u16				*bg_flags = flex_gd->bg_flags;
1346 	int				i, gdb_off, gdb_num, err = 0;
1347 
1349 	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1350 		group = group_data->group;
1351 
1352 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1353 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1354 
1355 		/*
1356 		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1357 		 */
1358 		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1359 		/* Update group descriptor block for new group */
1360 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1361 						 gdb_off * EXT4_DESC_SIZE(sb));
1362 
1363 		memset(gdp, 0, EXT4_DESC_SIZE(sb));
1364 		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1365 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1366 		err = ext4_set_bitmap_checksums(sb, gdp, group_data);
1367 		if (err) {
1368 			ext4_std_error(sb, err);
1369 			break;
1370 		}
1371 
1372 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
1373 		ext4_free_group_clusters_set(sb, gdp,
1374 					     group_data->free_clusters_count);
1375 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1376 		if (ext4_has_group_desc_csum(sb))
1377 			ext4_itable_unused_set(sb, gdp,
1378 					       EXT4_INODES_PER_GROUP(sb));
1379 		gdp->bg_flags = cpu_to_le16(*bg_flags);
1380 		ext4_group_desc_csum_set(sb, group, gdp);
1381 
1382 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1383 		if (unlikely(err)) {
1384 			ext4_std_error(sb, err);
1385 			break;
1386 		}
1387 
1388 		/*
1389 		 * We can allocate memory for mb_alloc based on the new group
1390 		 * descriptor
1391 		 */
1392 		err = ext4_mb_add_groupinfo(sb, group, gdp);
1393 		if (err)
1394 			break;
1395 	}
1396 	return err;
1397 }
1398 
1399 static void ext4_add_overhead(struct super_block *sb,
1400                               const ext4_fsblk_t overhead)
1401 {
1402 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1403 	struct ext4_super_block *es = sbi->s_es;
1404 
1405 	sbi->s_overhead += overhead;
1406 	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
1407 	smp_wmb();
1408 }
1409 
1410 /*
1411  * ext4_update_super() updates the super block so that the newly added
1412  * groups can be seen by the filesystem.
1413  *
1414  * @sb: super block
1415  * @flex_gd: new added groups
1416  */
1417 static void ext4_update_super(struct super_block *sb,
1418 			     struct ext4_new_flex_group_data *flex_gd)
1419 {
1420 	ext4_fsblk_t blocks_count = 0;
1421 	ext4_fsblk_t free_blocks = 0;
1422 	ext4_fsblk_t reserved_blocks = 0;
1423 	struct ext4_new_group_data *group_data = flex_gd->groups;
1424 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1425 	struct ext4_super_block *es = sbi->s_es;
1426 	int i;
1427 
1428 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
1429 	/*
1430 	 * Make the new blocks and inodes valid next.  We do this before
1431 	 * increasing the group count so that once the group is enabled,
1432 	 * all of its blocks and inodes are already valid.
1433 	 *
1434 	 * We always allocate group-by-group, then block-by-block or
1435 	 * inode-by-inode within a group, so enabling these
1436 	 * blocks/inodes before the group is live won't actually let us
1437 	 * allocate the new space yet.
1438 	 */
1439 	for (i = 0; i < flex_gd->count; i++) {
1440 		blocks_count += group_data[i].blocks_count;
1441 		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1442 	}
1443 
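	/*
	 * Carry the existing reserved-blocks ratio over to the newly added
	 * blocks: compute the percentage of the old size first, then apply
	 * it to the new blocks_count.
	 */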
1444 	reserved_blocks = ext4_r_blocks_count(es) * 100;
1445 	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1446 	reserved_blocks *= blocks_count;
1447 	do_div(reserved_blocks, 100);
1448 
1449 	lock_buffer(sbi->s_sbh);
1450 	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1451 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1452 	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1453 		     flex_gd->count);
1454 	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1455 		     flex_gd->count);
1456 
1457 	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1458 	/*
1459 	 * We need to protect s_groups_count against other CPUs seeing
1460 	 * inconsistent state in the superblock.
1461 	 *
1462 	 * The precise rules we use are:
1463 	 *
1464 	 * * Writers must perform a smp_wmb() after updating all
1465 	 *   dependent data and before modifying the groups count
1466 	 *
1467 	 * * Readers must perform an smp_rmb() after reading the groups
1468 	 *   count and before reading any dependent data.
1469 	 *
1470 	 * NB. These rules can be relaxed when checking the group count
1471 	 * while freeing data, as we can only allocate from a block
1472 	 * group after serialising against the group count, and we can
1473 	 * only then free after serialising in turn against that
1474 	 * allocation.
1475 	 */
1476 	smp_wmb();
1477 
1478 	/* Update the global fs size fields */
1479 	sbi->s_groups_count += flex_gd->count;
1480 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1481 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1482 
1483 	/* Update the reserved block counts only once the new group is
1484 	 * active. */
1485 	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1486 				reserved_blocks);
1487 
1488 	/* Update the free space counts */
1489 	percpu_counter_add(&sbi->s_freeclusters_counter,
1490 			   EXT4_NUM_B2C(sbi, free_blocks));
1491 	percpu_counter_add(&sbi->s_freeinodes_counter,
1492 			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1493 
1494 	ext4_debug("free blocks count %llu",
1495 		   percpu_counter_read(&sbi->s_freeclusters_counter));
1496 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1497 		ext4_group_t flex_group;
1498 		struct flex_groups *fg;
1499 
1500 		flex_group = ext4_flex_group(sbi, group_data[0].group);
1501 		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1502 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1503 			     &fg->free_clusters);
1504 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1505 			   &fg->free_inodes);
1506 	}
1507 
1508 	/*
1509 	 * Update the fs overhead information.
1510 	 *
1511 	 * For bigalloc, if the superblock already has a properly calculated
1512 	 * overhead, update it with a value based on numbers already computed
1513 	 * above for the newly allocated capacity.
1514 	 */
1515 	if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
1516 		ext4_add_overhead(sb,
1517 			EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
1518 	else
1519 		ext4_calculate_overhead(sb);
1520 	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
1521 
1522 	ext4_superblock_csum_set(sb);
1523 	unlock_buffer(sbi->s_sbh);
1524 	if (test_opt(sb, DEBUG))
1525 		printk(KERN_DEBUG "EXT4-fs: added %u groups: "
1526 		       "%llu blocks (%llu free, %llu reserved)\n", flex_gd->count,
1527 		       blocks_count, free_blocks, reserved_blocks);
1528 }
1529 
1530 /* Add a flex group to an fs. Ensure we handle all possible error conditions
1531  * _before_ we start modifying the filesystem, because we cannot abort the
1532  * transaction and not have it write the data to disk.
1533  */
1534 static int ext4_flex_group_add(struct super_block *sb,
1535 			       struct inode *resize_inode,
1536 			       struct ext4_new_flex_group_data *flex_gd)
1537 {
1538 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1539 	struct ext4_super_block *es = sbi->s_es;
1540 	ext4_fsblk_t o_blocks_count;
1541 	ext4_grpblk_t last;
1542 	ext4_group_t group;
1543 	handle_t *handle;
1544 	unsigned reserved_gdb;
1545 	int err = 0, err2 = 0, credit;
1546 
1547 	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1548 
1549 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1550 	o_blocks_count = ext4_blocks_count(es);
1551 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1552 	BUG_ON(last);
1553 
1554 	err = setup_new_flex_group_blocks(sb, flex_gd);
1555 	if (err)
1556 		goto exit;
1557 	/*
1558 	 * We will always be modifying at least the superblock and GDT
1559 	 * blocks.  If we are adding a group past the last current GDT block,
1560 	 * we will also modify the inode and the dindirect block.  If we
1561 	 * are adding a group with superblock/GDT backups, we will also
1562 	 * modify each of the reserved GDT dindirect blocks.
1563 	 */
1564 	credit = 3;	/* sb, resize inode, resize inode dindirect */
1565 	/* GDT blocks */
1566 	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1567 	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
1568 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1569 	if (IS_ERR(handle)) {
1570 		err = PTR_ERR(handle);
1571 		goto exit;
1572 	}
1573 
1574 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1575 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1576 					    EXT4_JTR_NONE);
1577 	if (err)
1578 		goto exit_journal;
1579 
1580 	group = flex_gd->groups[0].group;
1581 	BUG_ON(group != sbi->s_groups_count);
1582 	err = ext4_add_new_descs(handle, sb, group,
1583 				resize_inode, flex_gd->count);
1584 	if (err)
1585 		goto exit_journal;
1586 
1587 	err = ext4_setup_new_descs(handle, sb, flex_gd);
1588 	if (err)
1589 		goto exit_journal;
1590 
1591 	ext4_update_super(sb, flex_gd);
1592 
1593 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1594 
1595 exit_journal:
1596 	err2 = ext4_journal_stop(handle);
1597 	if (!err)
1598 		err = err2;
1599 
1600 	if (!err) {
1601 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1602 		int gdb_num_end = ((group + flex_gd->count - 1) /
1603 				   EXT4_DESC_PER_BLOCK(sb));
1604 		int meta_bg = ext4_has_feature_meta_bg(sb) &&
1605 			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
1606 		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
1607 					 ext4_group_first_block_no(sb, 0);
1608 
1609 		update_backups(sb, ext4_group_first_block_no(sb, 0),
1610 			       (char *)es, sizeof(struct ext4_super_block), 0);
1611 		for (; gdb_num <= gdb_num_end; gdb_num++) {
1612 			struct buffer_head *gdb_bh;
1613 
1614 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1615 						     gdb_num);
1616 			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
1617 				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
1618 		}
1619 	}
1620 exit:
1621 	return err;
1622 }
1623 
1624 static int ext4_setup_next_flex_gd(struct super_block *sb,
1625 				    struct ext4_new_flex_group_data *flex_gd,
1626 				    ext4_fsblk_t n_blocks_count)
1627 {
1628 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1629 	struct ext4_super_block *es = sbi->s_es;
1630 	struct ext4_new_group_data *group_data = flex_gd->groups;
1631 	ext4_fsblk_t o_blocks_count;
1632 	ext4_group_t n_group;
1633 	ext4_group_t group;
1634 	ext4_group_t last_group;
1635 	ext4_grpblk_t last;
1636 	ext4_grpblk_t clusters_per_group;
1637 	unsigned long i;
1638 
1639 	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1640 
1641 	o_blocks_count = ext4_blocks_count(es);
1642 
1643 	if (o_blocks_count == n_blocks_count)
1644 		return 0;
1645 
1646 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1647 	BUG_ON(last);
1648 	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1649 
1650 	last_group = group | (flex_gd->resize_bg - 1);
1651 	if (last_group > n_group)
1652 		last_group = n_group;
1653 
1654 	flex_gd->count = last_group - group + 1;
1655 
1656 	for (i = 0; i < flex_gd->count; i++) {
1657 		int overhead;
1658 
1659 		group_data[i].group = group + i;
1660 		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1661 		overhead = ext4_group_overhead_blocks(sb, group + i);
1662 		group_data[i].mdata_blocks = overhead;
1663 		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1664 		if (ext4_has_group_desc_csum(sb)) {
1665 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1666 					       EXT4_BG_INODE_UNINIT;
1667 			if (!test_opt(sb, INIT_INODE_TABLE))
1668 				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1669 		} else
1670 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1671 	}
1672 
1673 	if (last_group == n_group && ext4_has_group_desc_csum(sb))
1674 		/* We need to initialize the block bitmap of the last group. */
1675 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1676 
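	/*
	 * If the new size ends partway through the last group, trim that
	 * group below.  Illustration (assumed numbers, cluster ratio 1):
	 * with 32768 clusters per group and last == 9999, the final group
	 * keeps 10000 blocks and its free count drops by
	 * 32768 - 9999 - 1 == 22768 clusters.
	 */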
1677 	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1678 		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1679 		group_data[i - 1].free_clusters_count -= clusters_per_group -
1680 						       last - 1;
1681 	}
1682 
1683 	return 1;
1684 }
1685 
1686 /* Add group descriptor data to an existing or new group descriptor block.
1687  * Ensure we handle all possible error conditions _before_ we start modifying
1688  * the filesystem, because aborting the transaction afterwards would not
1689  * prevent the modified data from being written to disk.
1690  *
1691  * If we are on a GDT block boundary, we need to get the reserved GDT block.
1692  * Otherwise, we may need to add backup GDT blocks for a sparse group.
1693  *
1694  * We only need to hold the superblock lock while we are actually adding
1695  * in the new group's counts to the superblock.  Prior to that we have
1696  * not really "added" the group at all.  We re-check that we are still
1697  * adding in the last group in case things have changed since verifying.
1698  */
1699 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1700 {
1701 	struct ext4_new_flex_group_data flex_gd;
1702 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1703 	struct ext4_super_block *es = sbi->s_es;
1704 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1705 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1706 	struct inode *inode = NULL;
1707 	int gdb_off;
1708 	int err;
1709 	__u16 bg_flags = 0;
1710 
1711 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1712 
1713 	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1714 		ext4_warning(sb, "Can't resize non-sparse filesystem further");
1715 		return -EPERM;
1716 	}
1717 
1718 	if (ext4_blocks_count(es) + input->blocks_count <
1719 	    ext4_blocks_count(es)) {
1720 		ext4_warning(sb, "blocks_count overflow");
1721 		return -EINVAL;
1722 	}
1723 
1724 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1725 	    le32_to_cpu(es->s_inodes_count)) {
1726 		ext4_warning(sb, "inodes_count overflow");
1727 		return -EINVAL;
1728 	}
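	/*
	 * Both rejections above use the unsigned wraparound idiom: for
	 * unsigned values of the same width, a + b < a holds exactly when
	 * the addition overflowed, e.g. (~0ULL - 100) + 200 wraps to 99.
	 */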
1729 
1730 	if (reserved_gdb || gdb_off == 0) {
1731 		if (!ext4_has_feature_resize_inode(sb) ||
1732 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1733 			ext4_warning(sb,
1734 				     "No reserved GDT blocks, can't resize");
1735 			return -EPERM;
1736 		}
1737 		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1738 		if (IS_ERR(inode)) {
1739 			ext4_warning(sb, "Error opening resize inode");
1740 			return PTR_ERR(inode);
1741 		}
1742 	}
1743 
1744 
1745 	err = verify_group_input(sb, input);
1746 	if (err)
1747 		goto out;
1748 
1749 	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1750 	if (err)
1751 		goto out;
1752 
1753 	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1754 	if (err)
1755 		goto out;
1756 
1757 	flex_gd.count = 1;
1758 	flex_gd.groups = input;
1759 	flex_gd.bg_flags = &bg_flags;
1760 	err = ext4_flex_group_add(sb, inode, &flex_gd);
1761 out:
1762 	iput(inode);
1763 	return err;
1764 } /* ext4_group_add */
1765 
1766 /*
1767  * Extend a group without checking, assuming the checks have already been done.
1768  */
1769 static int ext4_group_extend_no_check(struct super_block *sb,
1770 				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1771 {
1772 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1773 	handle_t *handle;
1774 	int err = 0, err2;
1775 
1776 	/* We will update the superblock, one block bitmap, and
1777 	 * one group descriptor via ext4_group_add_blocks().
1778 	 */
1779 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1780 	if (IS_ERR(handle)) {
1781 		err = PTR_ERR(handle);
1782 		ext4_warning(sb, "error %d on journal start", err);
1783 		return err;
1784 	}
1785 
1786 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1787 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
1788 					    EXT4_JTR_NONE);
1789 	if (err) {
1790 		ext4_warning(sb, "error %d on journal write access", err);
1791 		goto errout;
1792 	}
1793 
1794 	lock_buffer(EXT4_SB(sb)->s_sbh);
1795 	ext4_blocks_count_set(es, o_blocks_count + add);
1796 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1797 	ext4_superblock_csum_set(sb);
1798 	unlock_buffer(EXT4_SB(sb)->s_sbh);
1799 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1800 		   o_blocks_count + add);
1801 	/* We add the blocks to the bitmap and set the group's need-init bit */
1802 	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1803 	if (err)
1804 		goto errout;
1805 	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1806 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1807 		   o_blocks_count + add);
1808 errout:
1809 	err2 = ext4_journal_stop(handle);
1810 	if (err2 && !err)
1811 		err = err2;
1812 
1813 	if (!err) {
1814 		if (test_opt(sb, DEBUG))
1815 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1816 			       "blocks\n", ext4_blocks_count(es));
1817 		update_backups(sb, ext4_group_first_block_no(sb, 0),
1818 			       (char *)es, sizeof(struct ext4_super_block), 0);
1819 	}
1820 	return err;
1821 }
1822 
1823 /*
1824  * Extend the filesystem to the new number of blocks specified.  This entry
1825  * point is only used to extend the current filesystem to the end of the last
1826  * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
1827  * for emergencies (because it has no dependencies on reserved blocks).
1828  *
1829  * If we _really_ wanted, we could use default values to call ext4_group_add()
1830  * and allow the "remount" trick to work for arbitrary resizing, assuming enough
1831  * GDT blocks are reserved to grow to the desired size.
1832  */
1833 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1834 		      ext4_fsblk_t n_blocks_count)
1835 {
1836 	ext4_fsblk_t o_blocks_count;
1837 	ext4_grpblk_t last;
1838 	ext4_grpblk_t add;
1839 	struct buffer_head *bh;
1840 	ext4_group_t group;
1841 
1842 	o_blocks_count = ext4_blocks_count(es);
1843 
1844 	if (test_opt(sb, DEBUG))
1845 		ext4_msg(sb, KERN_DEBUG,
1846 			 "extending last group from %llu to %llu blocks",
1847 			 o_blocks_count, n_blocks_count);
1848 
1849 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1850 		return 0;
1851 
1852 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1853 		ext4_msg(sb, KERN_ERR,
1854 			 "filesystem too large to resize to %llu blocks safely",
1855 			 n_blocks_count);
1856 		return -EINVAL;
1857 	}
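	/*
	 * Illustration of the cap above (assuming a 64-bit sector_t): with
	 * 4KiB blocks, s_blocksize_bits - 9 == 3, so anything at or below
	 * ~0ULL >> 3 (just under 2^61 blocks) can still be expressed in
	 * 512-byte sectors without overflowing sector_t.
	 */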
1858 
1859 	if (n_blocks_count < o_blocks_count) {
1860 		ext4_warning(sb, "can't shrink FS - resize aborted");
1861 		return -EINVAL;
1862 	}
1863 
1864 	/* Handle the remaining blocks in the last group only. */
1865 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1866 
1867 	if (last == 0) {
1868 		ext4_warning(sb, "need to use ext2online to resize further");
1869 		return -EPERM;
1870 	}
1871 
1872 	add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1873 
1874 	if (o_blocks_count + add < o_blocks_count) {
1875 		ext4_warning(sb, "blocks_count overflow");
1876 		return -EINVAL;
1877 	}
1878 
1879 	if (o_blocks_count + add > n_blocks_count)
1880 		add = n_blocks_count - o_blocks_count;
1881 
1882 	if (o_blocks_count + add < n_blocks_count)
1883 		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1884 			     o_blocks_count + add, add);
1885 
1886 	/* See if the device is actually as big as what was requested */
1887 	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1888 	if (IS_ERR(bh)) {
1889 		ext4_warning(sb, "can't read last block, resize aborted");
1890 		return -ENOSPC;
1891 	}
1892 	brelse(bh);
1893 
1894 	return ext4_group_extend_no_check(sb, o_blocks_count, add);
1895 } /* ext4_group_extend */
1896 
1897 
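/*
 * Round-up division: equivalent to DIV_ROUND_UP(groups,
 * EXT4_DESC_PER_BLOCK(sb)).  E.g. (illustrative) with 64 descriptors per
 * block, 130 groups need (130 + 63) / 64 == 3 descriptor blocks.
 */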
1898 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1899 {
1900 	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1901 }
1902 
1903 /*
1904  * Release the resize inode and drop the resize_inode feature if there
1905  * are no more reserved GDT blocks, and then convert the file system
1906  * to enable meta_bg.
1907  */
1908 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1909 {
1910 	handle_t *handle;
1911 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1912 	struct ext4_super_block *es = sbi->s_es;
1913 	struct ext4_inode_info *ei = EXT4_I(inode);
1914 	ext4_fsblk_t nr;
1915 	int i, ret, err = 0;
1916 	int credits = 1;
1917 
1918 	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1919 	if (inode) {
1920 		if (es->s_reserved_gdt_blocks) {
1921 			ext4_error(sb, "Unexpected non-zero "
1922 				   "s_reserved_gdt_blocks");
1923 			return -EPERM;
1924 		}
1925 
1926 		/* Do a quick sanity check of the resize inode */
1927 		if (inode->i_blocks != 1 << (inode->i_blkbits -
1928 					     (9 - sbi->s_cluster_bits)))
1929 			goto invalid_resize_inode;
1930 		for (i = 0; i < EXT4_N_BLOCKS; i++) {
1931 			if (i == EXT4_DIND_BLOCK) {
1932 				if (ei->i_data[i])
1933 					continue;
1934 				else
1935 					goto invalid_resize_inode;
1936 			}
1937 			if (ei->i_data[i])
1938 				goto invalid_resize_inode;
1939 		}
1940 		credits += 3;	/* block bitmap, bg descriptor, resize inode */
1941 	}
1942 
1943 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1944 	if (IS_ERR(handle))
1945 		return PTR_ERR(handle);
1946 
1947 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1948 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1949 					    EXT4_JTR_NONE);
1950 	if (err)
1951 		goto errout;
1952 
1953 	lock_buffer(sbi->s_sbh);
1954 	ext4_clear_feature_resize_inode(sb);
1955 	ext4_set_feature_meta_bg(sb);
1956 	sbi->s_es->s_first_meta_bg =
1957 		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1958 	ext4_superblock_csum_set(sb);
1959 	unlock_buffer(sbi->s_sbh);
1960 
1961 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1962 	if (err) {
1963 		ext4_std_error(sb, err);
1964 		goto errout;
1965 	}
1966 
1967 	if (inode) {
1968 		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1969 		ext4_free_blocks(handle, inode, NULL, nr, 1,
1970 				 EXT4_FREE_BLOCKS_METADATA |
1971 				 EXT4_FREE_BLOCKS_FORGET);
1972 		ei->i_data[EXT4_DIND_BLOCK] = 0;
1973 		inode->i_blocks = 0;
1974 
1975 		err = ext4_mark_inode_dirty(handle, inode);
1976 		if (err)
1977 			ext4_std_error(sb, err);
1978 	}
1979 
1980 errout:
1981 	ret = ext4_journal_stop(handle);
1982 	return err ? err : ret;
1983 
1984 invalid_resize_inode:
1985 	ext4_error(sb, "corrupted/inconsistent resize inode");
1986 	return -EINVAL;
1987 }
1988 
1989 /*
1990  * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1991  *
1992  * @sb: super block of the fs to be resized
1993  * @n_blocks_count: the number of blocks the resized fs will contain
1994  */
1995 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1996 {
1997 	struct ext4_new_flex_group_data *flex_gd = NULL;
1998 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1999 	struct ext4_super_block *es = sbi->s_es;
2000 	struct buffer_head *bh;
2001 	struct inode *resize_inode = NULL;
2002 	ext4_grpblk_t add, offset;
2003 	unsigned long n_desc_blocks;
2004 	unsigned long o_desc_blocks;
2005 	ext4_group_t o_group;
2006 	ext4_group_t n_group;
2007 	ext4_fsblk_t o_blocks_count;
2008 	ext4_fsblk_t n_blocks_count_retry = 0;
2009 	unsigned long last_update_time = 0;
2010 	int err = 0;
2011 	int meta_bg;
2012 	unsigned int flexbg_size = ext4_flex_bg_size(sbi);
2013 
2014 	/* See if the device is actually as big as what was requested */
2015 	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
2016 	if (IS_ERR(bh)) {
2017 		ext4_warning(sb, "can't read last block, resize aborted");
2018 		return -ENOSPC;
2019 	}
2020 	brelse(bh);
2021 
2022 	/*
2023 	 * For bigalloc, trim the requested size to the nearest cluster
2024 	 * boundary to avoid creating an unusable filesystem. We do this
2025 	 * silently, instead of returning an error, to avoid breaking
2026 	 * callers that blindly resize the filesystem to the full size of
2027 	 * the underlying block device.
2028 	 */
2029 	if (ext4_has_feature_bigalloc(sb))
2030 		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
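	/* Illustration (assumed numbers): with a 16-block cluster
	 * (EXT4_CLUSTER_BITS(sb) == 4), a request for 1000003 blocks is
	 * silently trimmed down to 1000000, a cluster boundary.
	 */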
2031 
2032 retry:
2033 	o_blocks_count = ext4_blocks_count(es);
2034 
2035 	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
2036 		 "to %llu blocks", o_blocks_count, n_blocks_count);
2037 
2038 	if (n_blocks_count < o_blocks_count) {
2039 		/* On-line shrinking not supported */
2040 		ext4_warning(sb, "can't shrink FS - resize aborted");
2041 		return -EINVAL;
2042 	}
2043 
2044 	if (n_blocks_count == o_blocks_count)
2045 		/* Nothing to do */
2046 		return 0;
2047 
2048 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
2049 	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
2050 		ext4_warning(sb, "resize would cause inodes_count overflow");
2051 		return -EINVAL;
2052 	}
2053 	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
2054 
2055 	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
2056 	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
2057 
2058 	meta_bg = ext4_has_feature_meta_bg(sb);
2059 
2060 	if (ext4_has_feature_resize_inode(sb)) {
2061 		if (meta_bg) {
2062 			ext4_error(sb, "resize_inode and meta_bg enabled "
2063 				   "simultaneously");
2064 			return -EINVAL;
2065 		}
2066 		if (n_desc_blocks > o_desc_blocks +
2067 		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
2068 			n_blocks_count_retry = n_blocks_count;
2069 			n_desc_blocks = o_desc_blocks +
2070 				le16_to_cpu(es->s_reserved_gdt_blocks);
2071 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2072 			n_blocks_count = (ext4_fsblk_t)n_group *
2073 				EXT4_BLOCKS_PER_GROUP(sb) +
2074 				le32_to_cpu(es->s_first_data_block);
2075 			n_group--; /* set to last group number */
2076 		}
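		/*
		 * Illustration of the clamp above (assumed numbers): with
		 * o_desc_blocks == 1, 2 reserved GDT blocks, 64 descriptors
		 * per block and 32768 blocks per group, growth is capped at
		 * n_desc_blocks == 3, i.e. 192 groups (last group 191) and
		 * n_blocks_count == 192 * 32768 + s_first_data_block; the
		 * original target is kept in n_blocks_count_retry and
		 * retried after the meta_bg conversion.
		 */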
2077 
2078 		if (!resize_inode)
2079 			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2080 						 EXT4_IGET_SPECIAL);
2081 		if (IS_ERR(resize_inode)) {
2082 			ext4_warning(sb, "Error opening resize inode");
2083 			return PTR_ERR(resize_inode);
2084 		}
2085 	}
2086 
2087 	if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) ||
	    n_blocks_count == o_blocks_count) {
2088 		err = ext4_convert_meta_bg(sb, resize_inode);
2089 		if (err)
2090 			goto out;
2091 		if (resize_inode) {
2092 			iput(resize_inode);
2093 			resize_inode = NULL;
2094 		}
2095 		if (n_blocks_count_retry) {
2096 			n_blocks_count = n_blocks_count_retry;
2097 			n_blocks_count_retry = 0;
2098 			goto retry;
2099 		}
2100 	}
2101 
2102 	/*
2103 	 * Make sure the last group has enough space for all the metadata
2104 	 * blocks it may need to hold: the superblock/GDT overhead, the block
2105 	 * and inode bitmaps ("+ 2"), the inode table, and up to a cluster of
2106 	 * slack.  (We might not need to store the inode table blocks in the
2107 	 * last block group, but there will be cases where this is needed.)
2108 	 */
2109 	if ((ext4_group_first_block_no(sb, n_group) +
2110 	     ext4_group_overhead_blocks(sb, n_group) + 2 +
2111 	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2112 		n_blocks_count = ext4_group_first_block_no(sb, n_group);
2113 		n_group--;
2114 		n_blocks_count_retry = 0;
2115 		if (resize_inode) {
2116 			iput(resize_inode);
2117 			resize_inode = NULL;
2118 		}
2119 		goto retry;
2120 	}
2121 
2122 	/* extend the last group */
2123 	if (n_group == o_group)
2124 		add = n_blocks_count - o_blocks_count;
2125 	else
2126 		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2127 	if (add > 0) {
2128 		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2129 		if (err)
2130 			goto out;
2131 	}
2132 
2133 	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
2134 		goto out;
2135 
2136 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2137 	if (err)
2138 		goto out;
2139 
2140 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2141 	if (err)
2142 		goto out;
2143 
2144 	flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
2145 	if (flex_gd == NULL) {
2146 		err = -ENOMEM;
2147 		goto out;
2148 	}
2149 
2150 	/* Add flex groups. Note that a regular group is a
2151 	 * flex group with 1 group.
2152 	 */
2153 	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
2154 		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
2155 			if (last_update_time)
2156 				ext4_msg(sb, KERN_INFO,
2157 					 "resized to %llu blocks",
2158 					 ext4_blocks_count(es));
2159 			last_update_time = jiffies;
2160 		}
2161 		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2162 			break;
2163 		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2164 		if (unlikely(err))
2165 			break;
2166 	}
2167 
2168 	if (!err && n_blocks_count_retry) {
2169 		n_blocks_count = n_blocks_count_retry;
2170 		n_blocks_count_retry = 0;
2171 		free_flex_gd(flex_gd);
2172 		flex_gd = NULL;
2173 		if (resize_inode) {
2174 			iput(resize_inode);
2175 			resize_inode = NULL;
2176 		}
2177 		goto retry;
2178 	}
2179 
2180 out:
2181 	if (flex_gd)
2182 		free_flex_gd(flex_gd);
2183 	if (resize_inode != NULL)
2184 		iput(resize_inode);
2185 	if (err)
2186 		ext4_warning(sb, "error (%d) occurred during "
2187 			     "file system resize", err);
2188 	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2189 		 ext4_blocks_count(es));
2190 	return err;
2191 }
2192