xref: /linux/fs/ext4/ialloc.c (revision 367b8112fe2ea5c39a7bb4d263dcdd9b612fae18)
1 /*
2  *  linux/fs/ext4/ialloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  BSD ufs-inspired inode and directory allocation by
10  *  Stephen Tweedie (sct@redhat.com), 1993
11  *  Big-endian to little-endian byte-swapping/bitmaps by
12  *        David S. Miller (davem@caip.rutgers.edu), 1995
13  */
14 
15 #include <linux/time.h>
16 #include <linux/fs.h>
17 #include <linux/jbd2.h>
18 #include <linux/stat.h>
19 #include <linux/string.h>
20 #include <linux/quotaops.h>
21 #include <linux/buffer_head.h>
22 #include <linux/random.h>
23 #include <linux/bitops.h>
24 #include <linux/blkdev.h>
25 #include <asm/byteorder.h>
26 #include "ext4.h"
27 #include "ext4_jbd2.h"
28 #include "xattr.h"
29 #include "acl.h"
30 #include "group.h"
31 
32 /*
33  * ialloc.c contains the inode allocation and deallocation routines
34  */
35 
36 /*
37  * The free inodes are managed by bitmaps.  A file system contains several
38  * block groups.  Each group contains one bitmap block for blocks, one bitmap
39  * block for inodes, N blocks for the inode table, and data blocks.
40  *
41  * The file system contains group descriptors which are located after the
42  * super block.  Each descriptor records the block numbers of the group's
43  * bitmaps and inode table, plus the group's free block and free inode counts.
44  */
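
/*
 * For example, with a 4 KiB block size each bitmap block holds
 * 4096 * 8 = 32768 bits, so one block group can track at most 32768
 * blocks and at most 32768 inodes.
 */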
45 
46 /*
47  * To avoid calling the atomic setbit hundreds or thousands of times, we only
48  * need to use it within a single byte (to ensure we get endianness right).
49  * We can use memset for the rest of the bitmap as there are no other users.
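 * For example, with start_bit == 100 and end_bit == 32768 the loop sets
 * bits 100-103 with the atomic helper (rounding start up to the next byte
 * boundary) and the memset then fills bytes 13 onward, covering bits
 * 104-32767 in one call.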
50  */
51 void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
52 {
53 	int i;
54 
55 	if (start_bit >= end_bit)
56 		return;
57 
58 	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
59 	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
60 		ext4_set_bit(i, bitmap);
61 	if (i < end_bit)
62 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
63 }
64 
65 /* Initializes an uninitialized inode bitmap */
66 unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
67 				ext4_group_t block_group,
68 				struct ext4_group_desc *gdp)
69 {
70 	struct ext4_sb_info *sbi = EXT4_SB(sb);
71 
72 	J_ASSERT_BH(bh, buffer_locked(bh));
73 
74 	/* If the checksum is bad, mark all blocks and inodes in use to prevent
75 	 * allocation, essentially implementing a per-group read-only flag. */
76 	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
77 		ext4_error(sb, __func__, "Checksum bad for group %lu\n",
78 			   block_group);
79 		gdp->bg_free_blocks_count = 0;
80 		gdp->bg_free_inodes_count = 0;
81 		gdp->bg_itable_unused = 0;
82 		memset(bh->b_data, 0xff, sb->s_blocksize);
83 		return 0;
84 	}
85 
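	/*
	 * Clear the bits that correspond to real inodes, then mark the tail
	 * of the bitmap (bits with no inode behind them) as in use so they
	 * can never be allocated.
	 */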
86 	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
87 	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
88 			bh->b_data);
89 
90 	return EXT4_INODES_PER_GROUP(sb);
91 }
92 
93 /*
94  * Read the inode allocation bitmap for a given block_group, initializing
95  * it in memory first if the group is still marked EXT4_BG_INODE_UNINIT.
96  *
97  * Return buffer_head of bitmap on success or NULL.
98  */
99 static struct buffer_head *
100 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
101 {
102 	struct ext4_group_desc *desc;
103 	struct buffer_head *bh = NULL;
104 	ext4_fsblk_t bitmap_blk;
105 
106 	desc = ext4_get_group_desc(sb, block_group, NULL);
107 	if (!desc)
108 		return NULL;
109 	bitmap_blk = ext4_inode_bitmap(sb, desc);
110 	bh = sb_getblk(sb, bitmap_blk);
111 	if (unlikely(!bh)) {
112 		ext4_error(sb, __func__,
113 			    "Cannot read inode bitmap - "
114 			    "block_group = %lu, inode_bitmap = %llu",
115 			    block_group, bitmap_blk);
116 		return NULL;
117 	}
118 	if (buffer_uptodate(bh) &&
119 	    !(desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
120 		return bh;
121 
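	/*
	 * Either construct the bitmap in memory, if the group's inode bitmap
	 * has never been used (EXT4_BG_INODE_UNINIT), or read it from disk.
	 * The flag is re-checked under the group lock so that only one task
	 * initializes the buffer.
	 */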
122 	lock_buffer(bh);
123 	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
124 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
125 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
126 		set_buffer_uptodate(bh);
127 		unlock_buffer(bh);
128 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
129 		return bh;
130 	}
131 	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
132 	if (bh_submit_read(bh) < 0) {
133 		put_bh(bh);
134 		ext4_error(sb, __func__,
135 			    "Cannot read inode bitmap - "
136 			    "block_group = %lu, inode_bitmap = %llu",
137 			    block_group, bitmap_blk);
138 		return NULL;
139 	}
140 	return bh;
141 }
142 
143 /*
144  * NOTE! When we get the inode, we're the only people
145  * that have access to it, and as such there are no
146  * race conditions we have to worry about. The inode
147  * is not on the hash-lists, and it cannot be reached
148  * through the filesystem because the directory entry
149  * has been deleted earlier.
150  *
151  * HOWEVER: we must make sure that we get no aliases,
152  * which means that we have to call "clear_inode()"
153  * _before_ we mark the inode not in use in the inode
154  * bitmaps. Otherwise a newly created file might use
155  * the same inode number (not actually the same pointer
156  * though), and then we'd have two inodes sharing the
157  * same inode number and space on the hard disk.
158  */
159 void ext4_free_inode(handle_t *handle, struct inode *inode)
160 {
161 	struct super_block *sb = inode->i_sb;
162 	int is_directory;
163 	unsigned long ino;
164 	struct buffer_head *bitmap_bh = NULL;
165 	struct buffer_head *bh2;
166 	ext4_group_t block_group;
167 	unsigned long bit;
168 	struct ext4_group_desc *gdp;
169 	struct ext4_super_block *es;
170 	struct ext4_sb_info *sbi;
171 	int fatal = 0, err;
172 	ext4_group_t flex_group;
173 
174 	if (atomic_read(&inode->i_count) > 1) {
175 		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
176 		       atomic_read(&inode->i_count));
177 		return;
178 	}
179 	if (inode->i_nlink) {
180 		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
181 		       inode->i_nlink);
182 		return;
183 	}
184 	if (!sb) {
185 		printk(KERN_ERR "ext4_free_inode: inode on "
186 		       "nonexistent device\n");
187 		return;
188 	}
189 	sbi = EXT4_SB(sb);
190 
191 	ino = inode->i_ino;
192 	ext4_debug("freeing inode %lu\n", ino);
193 
194 	/*
195 	 * Note: we must free any quota before locking the superblock,
196 	 * as writing the quota to disk may need the lock as well.
197 	 */
198 	DQUOT_INIT(inode);
199 	ext4_xattr_delete_inode(handle, inode);
200 	DQUOT_FREE_INODE(inode);
201 	DQUOT_DROP(inode);
202 
203 	is_directory = S_ISDIR(inode->i_mode);
204 
205 	/* Do this BEFORE marking the inode not in use or returning an error */
206 	clear_inode(inode);
207 
208 	es = EXT4_SB(sb)->s_es;
209 	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
210 		ext4_error(sb, "ext4_free_inode",
211 			   "reserved or nonexistent inode %lu", ino);
212 		goto error_return;
213 	}
214 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
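	/*
	 * Inode numbers are 1-based: inode ino lives in block group
	 * (ino - 1) / EXT4_INODES_PER_GROUP(sb), at bit
	 * (ino - 1) % EXT4_INODES_PER_GROUP(sb) of that group's inode bitmap.
	 * With 8192 inodes per group, for example, inode 12345 maps to
	 * group 1, bit 4152.
	 */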
215 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
216 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
217 	if (!bitmap_bh)
218 		goto error_return;
219 
220 	BUFFER_TRACE(bitmap_bh, "get_write_access");
221 	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
222 	if (fatal)
223 		goto error_return;
224 
225 	/* Ok, now we can actually update the inode bitmaps.. */
226 	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
227 					bit, bitmap_bh->b_data))
228 		ext4_error(sb, "ext4_free_inode",
229 			   "bit already cleared for inode %lu", ino);
230 	else {
231 		gdp = ext4_get_group_desc(sb, block_group, &bh2);
232 
233 		BUFFER_TRACE(bh2, "get_write_access");
234 		fatal = ext4_journal_get_write_access(handle, bh2);
235 		if (fatal) goto error_return;
236 
237 		if (gdp) {
238 			spin_lock(sb_bgl_lock(sbi, block_group));
239 			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
240 			if (is_directory)
241 				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
242 			gdp->bg_checksum = ext4_group_desc_csum(sbi,
243 							block_group, gdp);
244 			spin_unlock(sb_bgl_lock(sbi, block_group));
245 			percpu_counter_inc(&sbi->s_freeinodes_counter);
246 			if (is_directory)
247 				percpu_counter_dec(&sbi->s_dirs_counter);
248 
249 			if (sbi->s_log_groups_per_flex) {
250 				flex_group = ext4_flex_group(sbi, block_group);
251 				spin_lock(sb_bgl_lock(sbi, flex_group));
252 				sbi->s_flex_groups[flex_group].free_inodes++;
253 				spin_unlock(sb_bgl_lock(sbi, flex_group));
254 			}
255 		}
256 		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
257 		err = ext4_journal_dirty_metadata(handle, bh2);
258 		if (!fatal) fatal = err;
259 	}
260 	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
261 	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
262 	if (!fatal)
263 		fatal = err;
264 	sb->s_dirt = 1;
265 error_return:
266 	brelse(bitmap_bh);
267 	ext4_std_error(sb, fatal);
268 }
269 
270 /*
271  * There are two policies for allocating an inode.  If the new inode is
272  * a directory, then a forward search is made for a block group with both
273  * free space and a low directory-to-inode ratio; if that fails, then of
274  * the groups with above-average free space, that group with the fewest
275  * directories already is chosen.
276  *
277  * For other inodes, search forward from the parent directory's block
278  * group to find a free inode.
279  */
280 static int find_group_dir(struct super_block *sb, struct inode *parent,
281 				ext4_group_t *best_group)
282 {
283 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
284 	unsigned int freei, avefreei;
285 	struct ext4_group_desc *desc, *best_desc = NULL;
286 	ext4_group_t group;
287 	int ret = -1;
288 
289 	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
290 	avefreei = freei / ngroups;
291 
292 	for (group = 0; group < ngroups; group++) {
293 		desc = ext4_get_group_desc(sb, group, NULL);
294 		if (!desc || !desc->bg_free_inodes_count)
295 			continue;
296 		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
297 			continue;
298 		if (!best_desc ||
299 		    (le16_to_cpu(desc->bg_free_blocks_count) >
300 		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
301 			*best_group = group;
302 			best_desc = desc;
303 			ret = 0;
304 		}
305 	}
306 	return ret;
307 }
308 
309 #define free_block_ratio 10
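
/*
 * find_group_flex() only treats a flex group as a good home for a new
 * inode if the group still has free inodes and at least free_block_ratio
 * percent of its blocks are free.
 */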
310 
311 static int find_group_flex(struct super_block *sb, struct inode *parent,
312 			   ext4_group_t *best_group)
313 {
314 	struct ext4_sb_info *sbi = EXT4_SB(sb);
315 	struct ext4_group_desc *desc;
316 	struct buffer_head *bh;
317 	struct flex_groups *flex_group = sbi->s_flex_groups;
318 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
319 	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
320 	ext4_group_t ngroups = sbi->s_groups_count;
321 	int flex_size = ext4_flex_bg_size(sbi);
322 	ext4_group_t best_flex = parent_fbg_group;
323 	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
324 	int flexbg_free_blocks;
325 	int flex_freeb_ratio;
326 	ext4_group_t n_fbg_groups;
327 	ext4_group_t i;
328 
329 	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
330 		sbi->s_log_groups_per_flex;
331 
332 find_close_to_parent:
333 	flexbg_free_blocks = flex_group[best_flex].free_blocks;
334 	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
335 	if (flex_group[best_flex].free_inodes &&
336 	    flex_freeb_ratio > free_block_ratio)
337 		goto found_flexbg;
338 
339 	if (best_flex && best_flex == parent_fbg_group) {
340 		best_flex--;
341 		goto find_close_to_parent;
342 	}
343 
344 	for (i = 0; i < n_fbg_groups; i++) {
345 		if (i == parent_fbg_group || i == parent_fbg_group - 1)
346 			continue;
347 
348 		flexbg_free_blocks = flex_group[i].free_blocks;
349 		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
350 
351 		if (flex_freeb_ratio > free_block_ratio &&
352 		    flex_group[i].free_inodes) {
353 			best_flex = i;
354 			goto found_flexbg;
355 		}
356 
357 		if (flex_group[best_flex].free_inodes == 0 ||
358 		    (flex_group[i].free_blocks >
359 		     flex_group[best_flex].free_blocks &&
360 		     flex_group[i].free_inodes))
361 			best_flex = i;
362 	}
363 
364 	if (!flex_group[best_flex].free_inodes ||
365 	    !flex_group[best_flex].free_blocks)
366 		return -1;
367 
368 found_flexbg:
369 	for (i = best_flex * flex_size; i < ngroups &&
370 		     i < (best_flex + 1) * flex_size; i++) {
371 		desc = ext4_get_group_desc(sb, i, &bh);
372 		if (le16_to_cpu(desc->bg_free_inodes_count)) {
373 			*best_group = i;
374 			goto out;
375 		}
376 	}
377 
378 	return -1;
379 out:
380 	return 0;
381 }
382 
383 /*
384  * Orlov's allocator for directories.
385  *
386  * We always try to spread first-level directories.
387  *
388  * If there are block groups with both free inode and free block counts
389  * no worse than average, we return the one with the smallest directory count.
390  * Otherwise we simply return a random group.
391  *
392  * For other directories, the rules are as follows:
393  *
394  * It's OK to put a directory into a group unless
395  * it has too many directories already (max_dirs) or
396  * it has too few free inodes left (min_inodes) or
397  * it has too few free blocks left (min_blocks) or
398  * it is already carrying too large a debt (max_debt).
399  * The parent's group is preferred; if it doesn't satisfy these
400  * conditions we search cyclically through the rest.  If none
401  * of the groups look good we just look for a group with more
402  * free inodes than average (starting at parent's group).
403  *
404  * Debt is incremented each time we allocate a directory and decremented
405  * when we allocate an inode, within 0--255.
406  */
407 
408 #define INODE_COST 64
409 #define BLOCK_COST 256
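
/*
 * These weights only enter the max_debt calculation in find_group_orlov():
 * max_debt ends up no larger than roughly EXT4_BLOCKS_PER_GROUP / BLOCK_COST
 * and no larger than inodes_per_group / INODE_COST, clamped to 1-255.
 */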
410 
411 static int find_group_orlov(struct super_block *sb, struct inode *parent,
412 				ext4_group_t *group)
413 {
414 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
415 	struct ext4_sb_info *sbi = EXT4_SB(sb);
416 	struct ext4_super_block *es = sbi->s_es;
417 	ext4_group_t ngroups = sbi->s_groups_count;
418 	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
419 	unsigned int freei, avefreei;
420 	ext4_fsblk_t freeb, avefreeb;
421 	ext4_fsblk_t blocks_per_dir;
422 	unsigned int ndirs;
423 	int max_debt, max_dirs, min_inodes;
424 	ext4_grpblk_t min_blocks;
425 	ext4_group_t i;
426 	struct ext4_group_desc *desc;
427 
428 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
429 	avefreei = freei / ngroups;
430 	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
431 	avefreeb = freeb;
432 	do_div(avefreeb, ngroups);
433 	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
434 
435 	if ((parent == sb->s_root->d_inode) ||
436 	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
437 		int best_ndir = inodes_per_group;
438 		ext4_group_t grp;
439 		int ret = -1;
440 
441 		get_random_bytes(&grp, sizeof(grp));
442 		parent_group = (unsigned)grp % ngroups;
443 		for (i = 0; i < ngroups; i++) {
444 			grp = (parent_group + i) % ngroups;
445 			desc = ext4_get_group_desc(sb, grp, NULL);
446 			if (!desc || !desc->bg_free_inodes_count)
447 				continue;
448 			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
449 				continue;
450 			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
451 				continue;
452 			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
453 				continue;
454 			*group = grp;
455 			ret = 0;
456 			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
457 		}
458 		if (ret == 0)
459 			return ret;
460 		goto fallback;
461 	}
462 
463 	blocks_per_dir = ext4_blocks_count(es) - freeb;
464 	do_div(blocks_per_dir, ndirs);
465 
466 	max_dirs = ndirs / ngroups + inodes_per_group / 16;
467 	min_inodes = avefreei - inodes_per_group / 4;
468 	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;
469 
470 	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
471 	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
472 	if (max_debt * INODE_COST > inodes_per_group)
473 		max_debt = inodes_per_group / INODE_COST;
474 	if (max_debt > 255)
475 		max_debt = 255;
476 	if (max_debt == 0)
477 		max_debt = 1;
478 
479 	for (i = 0; i < ngroups; i++) {
480 		*group = (parent_group + i) % ngroups;
481 		desc = ext4_get_group_desc(sb, *group, NULL);
482 		if (!desc || !desc->bg_free_inodes_count)
483 			continue;
484 		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
485 			continue;
486 		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
487 			continue;
488 		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
489 			continue;
490 		return 0;
491 	}
492 
493 fallback:
494 	for (i = 0; i < ngroups; i++) {
495 		*group = (parent_group + i) % ngroups;
496 		desc = ext4_get_group_desc(sb, *group, NULL);
497 		if (desc && desc->bg_free_inodes_count &&
498 			le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
499 			return 0;
500 	}
501 
502 	if (avefreei) {
503 		/*
504 		 * The free-inodes counter is approximate, and for really small
505 		 * filesystems the above test can fail to find any blockgroups
506 		 */
507 		avefreei = 0;
508 		goto fallback;
509 	}
510 
511 	return -1;
512 }
513 
514 static int find_group_other(struct super_block *sb, struct inode *parent,
515 				ext4_group_t *group)
516 {
517 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
518 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
519 	struct ext4_group_desc *desc;
520 	ext4_group_t i;
521 
522 	/*
523 	 * Try to place the inode in its parent directory
524 	 */
525 	*group = parent_group;
526 	desc = ext4_get_group_desc(sb, *group, NULL);
527 	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
528 			le16_to_cpu(desc->bg_free_blocks_count))
529 		return 0;
530 
531 	/*
532 	 * We're going to place this inode in a different blockgroup from its
533 	 * parent.  We want to cause files in a common directory to all land in
534 	 * the same blockgroup.  But we want files which are in a different
535 	 * directory which shares a blockgroup with our parent to land in a
536 	 * different blockgroup.
537 	 *
538 	 * So add our directory's i_ino into the starting point for the hash.
539 	 */
540 	*group = (*group + parent->i_ino) % ngroups;
541 
542 	/*
543 	 * Use a quadratic hash to find a group with a free inode and some free
544 	 * blocks.
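	 * Starting from the hashed group g, this probes g+1, g+3, g+7,
	 * g+15, ... (mod ngroups), i.e. offsets of 2^n - 1.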
545 	 */
546 	for (i = 1; i < ngroups; i <<= 1) {
547 		*group += i;
548 		if (*group >= ngroups)
549 			*group -= ngroups;
550 		desc = ext4_get_group_desc(sb, *group, NULL);
551 		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
552 				le16_to_cpu(desc->bg_free_blocks_count))
553 			return 0;
554 	}
555 
556 	/*
557 	 * That failed: try linear search for a free inode, even if that group
558 	 * has no free blocks.
559 	 */
560 	*group = parent_group;
561 	for (i = 0; i < ngroups; i++) {
562 		if (++*group >= ngroups)
563 			*group = 0;
564 		desc = ext4_get_group_desc(sb, *group, NULL);
565 		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
566 			return 0;
567 	}
568 
569 	return -1;
570 }
571 
572 /*
573  * There are two policies for allocating an inode.  If the new inode is
574  * a directory, then a forward search is made for a block group with both
575  * free space and a low directory-to-inode ratio; if that fails, then of
576  * the groups with above-average free space, that group with the fewest
577  * directories already is chosen.
578  *
579  * For other inodes, search forward from the parent directory's block
580  * group to find a free inode.
581  */
582 struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
583 {
584 	struct super_block *sb;
585 	struct buffer_head *bitmap_bh = NULL;
586 	struct buffer_head *bh2;
587 	ext4_group_t group = 0;
588 	unsigned long ino = 0;
589 	struct inode *inode;
590 	struct ext4_group_desc *gdp = NULL;
591 	struct ext4_super_block *es;
592 	struct ext4_inode_info *ei;
593 	struct ext4_sb_info *sbi;
594 	int ret2, err = 0;
595 	struct inode *ret;
596 	ext4_group_t i;
597 	int free = 0;
598 	ext4_group_t flex_group;
599 
600 	/* Cannot create files in a deleted directory */
601 	if (!dir || !dir->i_nlink)
602 		return ERR_PTR(-EPERM);
603 
604 	sb = dir->i_sb;
605 	inode = new_inode(sb);
606 	if (!inode)
607 		return ERR_PTR(-ENOMEM);
608 	ei = EXT4_I(inode);
609 
610 	sbi = EXT4_SB(sb);
611 	es = sbi->s_es;
612 
613 	if (sbi->s_log_groups_per_flex) {
614 		ret2 = find_group_flex(sb, dir, &group);
615 		goto got_group;
616 	}
617 
618 	if (S_ISDIR(mode)) {
619 		if (test_opt(sb, OLDALLOC))
620 			ret2 = find_group_dir(sb, dir, &group);
621 		else
622 			ret2 = find_group_orlov(sb, dir, &group);
623 	} else
624 		ret2 = find_group_other(sb, dir, &group);
625 
626 got_group:
627 	err = -ENOSPC;
628 	if (ret2 == -1)
629 		goto out;
630 
631 	for (i = 0; i < sbi->s_groups_count; i++) {
632 		err = -EIO;
633 
634 		gdp = ext4_get_group_desc(sb, group, &bh2);
635 		if (!gdp)
636 			goto fail;
637 
638 		brelse(bitmap_bh);
639 		bitmap_bh = ext4_read_inode_bitmap(sb, group);
640 		if (!bitmap_bh)
641 			goto fail;
642 
643 		ino = 0;
644 
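		/*
		 * Claim an inode without holding a lock across the search:
		 * find a zero bit, then atomically test-and-set it under the
		 * group lock.  If another task set the bit first, release the
		 * journal write access and keep scanning from the next bit in
		 * this group.
		 */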
645 repeat_in_this_group:
646 		ino = ext4_find_next_zero_bit((unsigned long *)
647 				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
648 		if (ino < EXT4_INODES_PER_GROUP(sb)) {
649 
650 			BUFFER_TRACE(bitmap_bh, "get_write_access");
651 			err = ext4_journal_get_write_access(handle, bitmap_bh);
652 			if (err)
653 				goto fail;
654 
655 			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
656 						ino, bitmap_bh->b_data)) {
657 				/* we won it */
658 				BUFFER_TRACE(bitmap_bh,
659 					"call ext4_journal_dirty_metadata");
660 				err = ext4_journal_dirty_metadata(handle,
661 								bitmap_bh);
662 				if (err)
663 					goto fail;
664 				goto got;
665 			}
666 			/* we lost it */
667 			jbd2_journal_release_buffer(handle, bitmap_bh);
668 
669 			if (++ino < EXT4_INODES_PER_GROUP(sb))
670 				goto repeat_in_this_group;
671 		}
672 
673 		/*
674 		 * This case is possible in a concurrent environment.  It is very
675 		 * rare.  We cannot repeat the find_group_xxx() call, because it
676 		 * would simply return the same blockgroup: the group descriptor
677 		 * metadata has not yet been updated.
678 		 * So we just go onto the next blockgroup.
679 		 */
680 		if (++group == sbi->s_groups_count)
681 			group = 0;
682 	}
683 	err = -ENOSPC;
684 	goto out;
685 
686 got:
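	/*
	 * The bit found by ext4_find_next_zero_bit() is 0-based, but inode
	 * numbers are 1-based, hence the increment.
	 */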
687 	ino++;
688 	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
689 	    ino > EXT4_INODES_PER_GROUP(sb)) {
690 		ext4_error(sb, __func__,
691 			   "reserved inode or inode > inodes count - "
692 			   "block_group = %lu, inode=%lu", group,
693 			   ino + group * EXT4_INODES_PER_GROUP(sb));
694 		err = -EIO;
695 		goto fail;
696 	}
697 
698 	BUFFER_TRACE(bh2, "get_write_access");
699 	err = ext4_journal_get_write_access(handle, bh2);
700 	if (err) goto fail;
701 
702 	/* We may have to initialize the block bitmap if it isn't already */
703 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
704 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
705 		struct buffer_head *block_bh = ext4_read_block_bitmap(sb, group);
706 
707 		BUFFER_TRACE(block_bh, "get block bitmap access");
708 		err = ext4_journal_get_write_access(handle, block_bh);
709 		if (err) {
710 			brelse(block_bh);
711 			goto fail;
712 		}
713 
714 		free = 0;
715 		spin_lock(sb_bgl_lock(sbi, group));
716 		/* recheck and clear flag under lock if we still need to */
717 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
718 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
719 			free = ext4_free_blocks_after_init(sb, group, gdp);
720 			gdp->bg_free_blocks_count = cpu_to_le16(free);
721 		}
722 		spin_unlock(sb_bgl_lock(sbi, group));
723 
724 		/* Don't need to dirty bitmap block if we didn't change it */
725 		if (free) {
726 			BUFFER_TRACE(block_bh, "dirty block bitmap");
727 			err = ext4_journal_dirty_metadata(handle, block_bh);
728 		}
729 
730 		brelse(block_bh);
731 		if (err)
732 			goto fail;
733 	}
734 
735 	spin_lock(sb_bgl_lock(sbi, group));
736 	/* If we didn't allocate from within the initialized part of the inode
737 	 * table then we need to initialize up to this inode. */
738 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
739 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
740 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
741 
742 			/* When marking the block group with
743 			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
744 			 * on the value of bg_itable_unused even though
745 			 * mke2fs could have initialized it for us.
746 			 * Instead we calculate the value below.
747 			 */
748 
749 			free = 0;
750 		} else {
751 			free = EXT4_INODES_PER_GROUP(sb) -
752 				le16_to_cpu(gdp->bg_itable_unused);
753 		}
754 
755 		/*
756 		 * Check the relative inode number against the last used
757 		 * relative inode number in this group.  If it is greater,
758 		 * we need to update the bg_itable_unused count.
759 		 *
760 		 */
761 		if (ino > free)
762 			gdp->bg_itable_unused =
763 				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
764 	}
765 
766 	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
767 	if (S_ISDIR(mode)) {
768 		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
769 	}
770 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
771 	spin_unlock(sb_bgl_lock(sbi, group));
772 	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
773 	err = ext4_journal_dirty_metadata(handle, bh2);
774 	if (err) goto fail;
775 
776 	percpu_counter_dec(&sbi->s_freeinodes_counter);
777 	if (S_ISDIR(mode))
778 		percpu_counter_inc(&sbi->s_dirs_counter);
779 	sb->s_dirt = 1;
780 
781 	if (sbi->s_log_groups_per_flex) {
782 		flex_group = ext4_flex_group(sbi, group);
783 		spin_lock(sb_bgl_lock(sbi, flex_group));
784 		sbi->s_flex_groups[flex_group].free_inodes--;
785 		spin_unlock(sb_bgl_lock(sbi, flex_group));
786 	}
787 
788 	inode->i_uid = current->fsuid;
789 	if (test_opt(sb, GRPID))
790 		inode->i_gid = dir->i_gid;
791 	else if (dir->i_mode & S_ISGID) {
792 		inode->i_gid = dir->i_gid;
793 		if (S_ISDIR(mode))
794 			mode |= S_ISGID;
795 	} else
796 		inode->i_gid = current->fsgid;
797 	inode->i_mode = mode;
798 
799 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
800 	/* This is the optimal IO size (for stat), not the fs block size */
801 	inode->i_blocks = 0;
802 	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
803 						       ext4_current_time(inode);
804 
805 	memset(ei->i_data, 0, sizeof(ei->i_data));
806 	ei->i_dir_start_lookup = 0;
807 	ei->i_disksize = 0;
808 
809 	/*
810 	 * Don't inherit the extents flag from the directory.  We set the
811 	 * extents flag on newly created directories and files only if the
812 	 * -o extents mount option is specified.
813 	 */
814 	ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
815 	if (S_ISLNK(mode))
816 		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
817 	/* dirsync only applies to directories */
818 	if (!S_ISDIR(mode))
819 		ei->i_flags &= ~EXT4_DIRSYNC_FL;
820 	ei->i_file_acl = 0;
821 	ei->i_dtime = 0;
822 	ei->i_block_group = group;
823 
824 	ext4_set_inode_flags(inode);
825 	if (IS_DIRSYNC(inode))
826 		handle->h_sync = 1;
827 	insert_inode_hash(inode);
828 	spin_lock(&sbi->s_next_gen_lock);
829 	inode->i_generation = sbi->s_next_generation++;
830 	spin_unlock(&sbi->s_next_gen_lock);
831 
832 	ei->i_state = EXT4_STATE_NEW;
833 
834 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
835 
836 	ret = inode;
837 	if (DQUOT_ALLOC_INODE(inode)) {
838 		err = -EDQUOT;
839 		goto fail_drop;
840 	}
841 
842 	err = ext4_init_acl(handle, inode, dir);
843 	if (err)
844 		goto fail_free_drop;
845 
846 	err = ext4_init_security(handle, inode, dir);
847 	if (err)
848 		goto fail_free_drop;
849 
850 	if (test_opt(sb, EXTENTS)) {
851 		/* set extent flag only for directory, file and normal symlink */
852 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
853 			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
854 			ext4_ext_tree_init(handle, inode);
855 		}
856 	}
857 
858 	err = ext4_mark_inode_dirty(handle, inode);
859 	if (err) {
860 		ext4_std_error(sb, err);
861 		goto fail_free_drop;
862 	}
863 
864 	ext4_debug("allocating inode %lu\n", inode->i_ino);
865 	goto really_out;
866 fail:
867 	ext4_std_error(sb, err);
868 out:
869 	iput(inode);
870 	ret = ERR_PTR(err);
871 really_out:
872 	brelse(bitmap_bh);
873 	return ret;
874 
875 fail_free_drop:
876 	DQUOT_FREE_INODE(inode);
877 
878 fail_drop:
879 	DQUOT_DROP(inode);
880 	inode->i_flags |= S_NOQUOTA;
881 	inode->i_nlink = 0;
882 	iput(inode);
883 	brelse(bitmap_bh);
884 	return ERR_PTR(err);
885 }
886 
887 /* Verify that we are loading a valid orphan from disk */
888 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
889 {
890 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
891 	ext4_group_t block_group;
892 	int bit;
893 	struct buffer_head *bitmap_bh;
894 	struct inode *inode = NULL;
895 	long err = -EIO;
896 
897 	/* Error cases - e2fsck has already cleaned up for us */
898 	if (ino > max_ino) {
899 		ext4_warning(sb, __func__,
900 			     "bad orphan ino %lu!  e2fsck was run?", ino);
901 		goto error;
902 	}
903 
904 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
905 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
906 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
907 	if (!bitmap_bh) {
908 		ext4_warning(sb, __func__,
909 			     "inode bitmap error for orphan %lu", ino);
910 		goto error;
911 	}
912 
913 	/* Having the inode bit set should be a 100% indicator that this
914 	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
915 	 * inodes that were being truncated, so we can't check i_nlink==0.
916 	 */
917 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
918 		goto bad_orphan;
919 
920 	inode = ext4_iget(sb, ino);
921 	if (IS_ERR(inode))
922 		goto iget_failed;
923 
924 	/*
925 	 * If the orphan has i_nlink > 0 then it should be able to be
926 	 * truncated; otherwise it won't be removed from the orphan list
927 	 * during processing and an infinite loop will result.
928 	 */
929 	if (inode->i_nlink && !ext4_can_truncate(inode))
930 		goto bad_orphan;
931 
932 	if (NEXT_ORPHAN(inode) > max_ino)
933 		goto bad_orphan;
934 	brelse(bitmap_bh);
935 	return inode;
936 
937 iget_failed:
938 	err = PTR_ERR(inode);
939 	inode = NULL;
940 bad_orphan:
941 	ext4_warning(sb, __func__,
942 		     "bad orphan inode %lu!  e2fsck was run?", ino);
943 	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
944 	       bit, (unsigned long long)bitmap_bh->b_blocknr,
945 	       ext4_test_bit(bit, bitmap_bh->b_data));
946 	printk(KERN_NOTICE "inode=%p\n", inode);
947 	if (inode) {
948 		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
949 		       is_bad_inode(inode));
950 		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
951 		       NEXT_ORPHAN(inode));
952 		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
953 		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
954 		/* Avoid freeing blocks if we got a bad deleted inode */
955 		if (inode->i_nlink == 0)
956 			inode->i_blocks = 0;
957 		iput(inode);
958 	}
959 	brelse(bitmap_bh);
960 error:
961 	return ERR_PTR(err);
962 }
963 
964 unsigned long ext4_count_free_inodes(struct super_block *sb)
965 {
966 	unsigned long desc_count;
967 	struct ext4_group_desc *gdp;
968 	ext4_group_t i;
969 #ifdef EXT4FS_DEBUG
970 	struct ext4_super_block *es;
971 	unsigned long bitmap_count, x;
972 	struct buffer_head *bitmap_bh = NULL;
973 
974 	es = EXT4_SB(sb)->s_es;
975 	desc_count = 0;
976 	bitmap_count = 0;
977 	gdp = NULL;
978 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
979 		gdp = ext4_get_group_desc(sb, i, NULL);
980 		if (!gdp)
981 			continue;
982 		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
983 		brelse(bitmap_bh);
984 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
985 		if (!bitmap_bh)
986 			continue;
987 
988 		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
989 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
990 			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
991 		bitmap_count += x;
992 	}
993 	brelse(bitmap_bh);
994 	printk(KERN_DEBUG "ext4_count_free_inodes: "
995 	       "stored = %u, computed = %lu, %lu\n",
996 	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
997 	return desc_count;
998 #else
999 	desc_count = 0;
1000 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
1001 		gdp = ext4_get_group_desc(sb, i, NULL);
1002 		if (!gdp)
1003 			continue;
1004 		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
1005 		cond_resched();
1006 	}
1007 	return desc_count;
1008 #endif
1009 }
1010 
1011 /* Called at mount-time, super-block is locked */
1012 unsigned long ext4_count_dirs(struct super_block * sb)
1013 {
1014 	unsigned long count = 0;
1015 	ext4_group_t i;
1016 
1017 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
1018 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1019 		if (!gdp)
1020 			continue;
1021 		count += le16_to_cpu(gdp->bg_used_dirs_count);
1022 	}
1023 	return count;
1024 }
1025 
1026