/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains one bitmap block for blocks, one bitmap
 * block for inodes, N blocks for the inode table, and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor records the locations of the bitmap blocks
 * and the free inode and block counts for its group.
 */
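
/*
 * For illustration (this sketch is not part of the original source): on a
 * filesystem with a 4 KiB block size, a single block group is laid out
 * roughly as
 *
 *	[ block bitmap | inode bitmap | inode table (N blocks) | data ... ]
 *
 * so one inode bitmap block of 4096 * 8 = 32768 bits bounds
 * EXT4_INODES_PER_GROUP(sb) from above.
 */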

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
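
/*
 * Worked example (illustrative): with start_bit = 19 and end_bit = 32768,
 * the loop above sets bits 19..23 individually to finish the partial byte
 * (19 rounded up to the next multiple of 8 is 24), and the memset() then
 * fills bytes 3..4095 with 0xff in one call.  Only the per-bit phase needs
 * the endian-aware ext4_set_bit(); whole bytes look the same on any CPU.
 */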

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %u",
			   block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
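
/*
 * Example (illustrative): with a 4 KiB block size the bitmap holds 32768
 * bits.  If EXT4_INODES_PER_GROUP(sb) is 8192, the memset() zeroes the
 * first 1024 bytes (all inodes free) and mark_bitmap_end() marks bits
 * 8192..32767 in use, so the unused tail of the bitmap can never be
 * handed out by the allocator.
 */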

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
		unlock_buffer(bh);
		return bh;
	}
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * Submit the buffer_head for read.  We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
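
/*
 * Typical caller pattern, as used elsewhere in this file (a sketch, not
 * new API):
 *
 *	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 *	if (!bitmap_bh)
 *		goto error_return;	/* bitmap unreadable: bail out */
 *	...test or update bits in bitmap_bh->b_data...
 *	brelse(bitmap_bh);		/* drop the reference when done */
 */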

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count;
	ext4_group_t flex_group;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_mark(ext4_free_inode,
		   "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu",
		   sb->s_id, inode->i_ino, inode->i_mode,
		   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
		   (unsigned long long) inode->i_blocks);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
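
	/*
	 * Worked example (illustrative): with EXT4_INODES_PER_GROUP(sb)
	 * == 8192, freeing inode 8195 gives block_group = 8194 / 8192 = 1
	 * and bit = 8194 % 8192 = 2, i.e. the third bit of group 1's inode
	 * bitmap.  Inode numbers are 1-based, hence the (ino - 1).
	 */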
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmap. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal)
			goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			count = ext4_free_inodes_count(sb, gdp) + 1;
			ext4_free_inodes_set(sb, gdp, count);
			if (is_directory) {
				count = ext4_used_dirs_count(sb, gdp) - 1;
				ext4_used_dirs_set(sb, gdp, count);
			}
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

			if (sbi->s_log_groups_per_flex) {
				flex_group = ext4_flex_group(sbi, block_group);
				spin_lock(sb_bgl_lock(sbi, flex_group));
				sbi->s_flex_groups[flex_group].free_inodes++;
				spin_unlock(sb_bgl_lock(sbi, flex_group));
			}
		}
		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
		if (!fatal)
			fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the group with the fewest
 * directories already in it is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_free_inodes_count(sb, desc) < avefreei)
			continue;
		if (!best_desc ||
		    (ext4_free_blks_count(sb, desc) >
		     ext4_free_blks_count(sb, best_desc))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}

#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = sbi->s_groups_count;
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = flex_group[best_flex].free_blocks;
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (flex_group[best_flex].free_inodes &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = flex_group[i].free_blocks;
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    flex_group[i].free_inodes) {
			best_flex = i;
			goto found_flexbg;
		}

		if (flex_group[best_flex].free_inodes == 0 ||
		    (flex_group[i].free_blocks >
		     flex_group[best_flex].free_blocks &&
		     flex_group[i].free_inodes))
			best_flex = i;
	}

	if (!flex_group[best_flex].free_inodes ||
	    !flex_group[best_flex].free_blocks)
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, &bh);
		/* ext4_get_group_desc() can return NULL; check before use */
		if (desc && ext4_free_inodes_count(sb, desc)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}
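
/*
 * Worked example (illustrative, assuming typical parameters): with 32768
 * blocks per group and a flex-group size of 16, blocks_per_flex = 524288.
 * A flex group with 60000 free blocks scores 60000 * 100 / 524288 = 11,
 * which clears the free_block_ratio threshold of 10; one with 40000 free
 * blocks scores 7 and is skipped unless nothing better turns up.
 */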

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inode and free block counts
 * not worse than average, we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * For the remaining cases, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it already carries too large a debt (max_debt).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256

static int find_group_orlov(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t grp;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			grp = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, grp, NULL);
			if (!desc || !ext4_free_inodes_count(sb, desc))
				continue;
			if (ext4_used_dirs_count(sb, desc) >= best_ndir)
				continue;
			if (ext4_free_inodes_count(sb, desc) < avefreei)
				continue;
			if (ext4_free_blks_count(sb, desc) < avefreeb)
				continue;
			*group = grp;
			ret = 0;
			best_ndir = ext4_used_dirs_count(sb, desc);
		}
		if (ret == 0)
			return ret;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;
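
	/*
	 * Worked example (illustrative, assuming typical parameters): with
	 * 32768 blocks per group, blocks_per_dir = 1000 gives
	 * max_debt = 32768 / 1000 = 32.  Since 32 * INODE_COST = 2048 does
	 * not exceed an inodes_per_group of 8192, and 32 <= 255, max_debt
	 * stays 32.
	 */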

	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_used_dirs_count(sb, desc) >= max_dirs)
			continue;
		if (ext4_free_inodes_count(sb, desc) < min_inodes)
			continue;
		if (ext4_free_blks_count(sb, desc) < min_blocks)
			continue;
		return 0;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
			ext4_free_inodes_count(sb, desc) >= avefreei)
			return 0;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i;

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
			ext4_free_blks_count(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
				ext4_free_blks_count(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
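
/*
 * Worked example (illustrative): with ngroups = 16, parent group 5 and
 * parent->i_ino = 42, the starting group is (5 + 42) % 16 = 15.  The
 * power-of-two probe then visits offsets 1, 2, 4, 8, i.e. groups 0, 2,
 * 6 and 14, before the linear fallback scans every group starting at 6.
 */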

/*
 * Claim the inode from the inode bitmap.  If the group
 * is uninit we need to take the group's sb_bgl_lock
 * and clear the uninit flag.  The inode bitmap update
 * and group desc uninit flag clear should be done
 * while holding sb_bgl_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode.
 */
static int ext4_claim_inode(struct super_block *sb,
			struct buffer_head *inode_bitmap_bh,
			unsigned long ino, ext4_group_t group, int mode)
{
	int free = 0, retval = 0, count;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

	spin_lock(sb_bgl_lock(sbi, group));
	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
		/* not a free inode */
		retval = 1;
		goto err_ret;
	}
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
			ino > EXT4_INODES_PER_GROUP(sb)) {
		spin_unlock(sb_bgl_lock(sbi, group));
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %u, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		return 1;
	}
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				ext4_itable_unused_count(sb, gdp);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
	}
	count = ext4_free_inodes_count(sb, gdp) - 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (S_ISDIR(mode)) {
		count = ext4_used_dirs_count(sb, gdp) + 1;
		ext4_used_dirs_set(sb, gdp, count);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
	spin_unlock(sb_bgl_lock(sbi, group));
	return retval;
}
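
/*
 * Worked example (illustrative): with EXT4_INODES_PER_GROUP(sb) == 8192
 * and bg_itable_unused == 6500, inodes 1..1692 are known initialized
 * (free = 8192 - 6500).  Claiming relative inode 2000 (> free) shrinks
 * bg_itable_unused to 8192 - 2000 = 6192, extending the initialized
 * region of the inode table.
 */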

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the group with the fewest
 * directories already in it is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
		   dir->i_ino, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (sbi->s_log_groups_per_flex) {
		ret2 = find_group_flex(sb, dir, &group);
		if (ret2 == -1) {
			ret2 = find_group_other(sb, dir, &group);
			if (ret2 == 0 && printk_ratelimit())
				printk(KERN_NOTICE "ext4: find_group_flex "
				       "failed, fallback succeeded dir %lu\n",
				       dir->i_ino);
		}
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group);
	} else
		ret2 = find_group_other(sb, dir, &group);

got_group:
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);

		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    inode_bitmap_bh);
			if (err)
				goto fail;

			BUFFER_TRACE(group_desc_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
								group_desc_bh);
			if (err)
				goto fail;
			if (!ext4_claim_inode(sb, inode_bitmap_bh,
						ino, group, mode)) {
				/* we won it */
				BUFFER_TRACE(inode_bitmap_bh,
					"call ext4_handle_dirty_metadata");
				err = ext4_handle_dirty_metadata(handle,
								 inode,
							inode_bitmap_bh);
				if (err)
					goto fail;
				/* bit 0 is inode number 1 */
				ino++;
				goto got;
			}
			/* we lost it */
			ext4_handle_release_buffer(handle, inode_bitmap_bh);
			ext4_handle_release_buffer(handle, group_desc_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is
		 * very rare.  We cannot repeat the find_group_xxx() call
		 * because that will simply return the same blockgroup: the
		 * group descriptor metadata has not yet been updated.
		 * So we just go on to the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_blks_set(sb, gdp, free);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
			err = ext4_handle_dirty_metadata(handle,
							NULL, block_bitmap_bh);
		}

		brelse(block_bitmap_bh);
		if (err)
			goto fail;
	}
	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_inodes--;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	inode->i_uid = current_fsuid();
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit the extent flag from the directory.  We set the
	 * extent flag on newly created directories and files only if the
	 * -o extents mount option is specified.
	 */
	ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
		   sb->s_id, inode->i_ino, dir->i_ino, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
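
/*
 * Typical caller pattern (a sketch based on the callers in
 * fs/ext4/namei.c; names outside this file are assumptions):
 *
 *	handle = ext4_journal_start(dir, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	inode = ext4_new_inode(handle, dir, mode);
 *	if (IS_ERR(inode)) {
 *		err = PTR_ERR(inode);	|* -ENOSPC, -EDQUOT, ... *|
 *	} else {
 *		...set up the new inode, add the directory entry...
 *	}
 *	ext4_journal_stop(handle);
 */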

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}
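
/*
 * Typical caller pattern (a sketch based on the orphan-list walk at mount
 * time in fs/ext4/super.c; details outside this file are assumptions):
 *
 *	while (es->s_last_orphan) {
 *		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
 *		if (IS_ERR(inode)) {
 *			es->s_last_orphan = 0;	|* list is damaged: stop *|
 *			break;
 *		}
 *		...truncate or delete the orphan, which unlinks it...
 *	}
 */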

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %lu\n",
			i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}