xref: /linux/fs/ext4/ialloc.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
1 /*
2  *  linux/fs/ext4/ialloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  BSD ufs-inspired inode and directory allocation by
10  *  Stephen Tweedie (sct@redhat.com), 1993
11  *  Big-endian to little-endian byte-swapping/bitmaps by
12  *        David S. Miller (davem@caip.rutgers.edu), 1995
13  */
14 
15 #include <linux/time.h>
16 #include <linux/fs.h>
17 #include <linux/jbd2.h>
18 #include <linux/stat.h>
19 #include <linux/string.h>
20 #include <linux/quotaops.h>
21 #include <linux/buffer_head.h>
22 #include <linux/random.h>
23 #include <linux/bitops.h>
24 #include <linux/blkdev.h>
25 #include <asm/byteorder.h>
26 
27 #include "ext4.h"
28 #include "ext4_jbd2.h"
29 #include "xattr.h"
30 #include "acl.h"
31 
32 #include <trace/events/ext4.h>
33 
34 /*
35  * ialloc.c contains the inode allocation and deallocation routines
36  */
37 
38 /*
39  * The free inodes are managed by bitmaps.  A file system contains several
40  * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
41  * block for inodes, N blocks for the inode table and data blocks.
42  *
43  * The file system contains group descriptors which are located after the
44  * super block.  Each descriptor contains the number of the bitmap block and
45  * the free blocks count in the group.
46  */
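/*
 * Illustrative sketch (not part of the original source): with the classic
 * layout, a single block group looks roughly like
 *
 *   [ superblock backup | group descriptors | block bitmap | inode bitmap |
 *     inode table (N blocks) | data blocks ... ]
 *
 * where the superblock/descriptor backups are present only in some groups
 * when the sparse_super feature is enabled, and flex_bg may place the
 * bitmaps and inode tables of several groups next to each other.
 */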
47 
48 /*
49  * To avoid calling the atomic setbit hundreds or thousands of times, we only
50  * need to use it within a single byte (to ensure we get endianness right).
51  * We can use memset for the rest of the bitmap as there are no other users.
52  */
53 void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
54 {
55 	int i;
56 
57 	if (start_bit >= end_bit)
58 		return;
59 
60 	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
61 	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
62 		ext4_set_bit(i, bitmap);
63 	if (i < end_bit)
64 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
65 }
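/*
 * Worked example (illustrative numbers): with a 4 KiB bitmap block
 * (32768 bits) and 8192 inodes per group,
 * ext4_mark_bitmap_end(8192, 32768, bitmap) marks bits 8192..32767 as used,
 * which is how ext4_init_inode_bitmap() below pads the bitmap beyond the
 * last real inode so that the unused tail can never be allocated from.
 */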
66 
67 /* Initializes an uninitialized inode bitmap */
68 static unsigned ext4_init_inode_bitmap(struct super_block *sb,
69 				       struct buffer_head *bh,
70 				       ext4_group_t block_group,
71 				       struct ext4_group_desc *gdp)
72 {
73 	J_ASSERT_BH(bh, buffer_locked(bh));
74 
75 	/* If the checksum is bad, mark all blocks and inodes in use to prevent
76 	 * allocation, essentially implementing a per-group read-only flag. */
77 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
78 		ext4_error(sb, "Checksum bad for group %u", block_group);
79 		ext4_free_group_clusters_set(sb, gdp, 0);
80 		ext4_free_inodes_set(sb, gdp, 0);
81 		ext4_itable_unused_set(sb, gdp, 0);
82 		memset(bh->b_data, 0xff, sb->s_blocksize);
83 		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
84 					   EXT4_INODES_PER_GROUP(sb) / 8);
85 		return 0;
86 	}
87 
88 	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
89 	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
90 			bh->b_data);
91 	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
92 				   EXT4_INODES_PER_GROUP(sb) / 8);
93 	ext4_group_desc_csum_set(sb, block_group, gdp);
94 
95 	return EXT4_INODES_PER_GROUP(sb);
96 }
97 
98 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
99 {
100 	if (uptodate) {
101 		set_buffer_uptodate(bh);
102 		set_bitmap_uptodate(bh);
103 	}
104 	unlock_buffer(bh);
105 	put_bh(bh);
106 }
107 
108 /*
109  * Read the inode allocation bitmap for a given block_group and cache
110  * it in the corresponding buffer_head.
111  *
112  * Return the buffer_head of the bitmap on success, or NULL on failure.
113  */
114 static struct buffer_head *
115 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
116 {
117 	struct ext4_group_desc *desc;
118 	struct buffer_head *bh = NULL;
119 	ext4_fsblk_t bitmap_blk;
120 
121 	desc = ext4_get_group_desc(sb, block_group, NULL);
122 	if (!desc)
123 		return NULL;
124 
125 	bitmap_blk = ext4_inode_bitmap(sb, desc);
126 	bh = sb_getblk(sb, bitmap_blk);
127 	if (unlikely(!bh)) {
128 		ext4_error(sb, "Cannot read inode bitmap - "
129 			    "block_group = %u, inode_bitmap = %llu",
130 			    block_group, bitmap_blk);
131 		return NULL;
132 	}
133 	if (bitmap_uptodate(bh))
134 		goto verify;
135 
136 	lock_buffer(bh);
137 	if (bitmap_uptodate(bh)) {
138 		unlock_buffer(bh);
139 		goto verify;
140 	}
141 
142 	ext4_lock_group(sb, block_group);
143 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
144 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
145 		set_bitmap_uptodate(bh);
146 		set_buffer_uptodate(bh);
147 		set_buffer_verified(bh);
148 		ext4_unlock_group(sb, block_group);
149 		unlock_buffer(bh);
150 		return bh;
151 	}
152 	ext4_unlock_group(sb, block_group);
153 
154 	if (buffer_uptodate(bh)) {
155 		/*
156 		 * if the group is not uninit and bh is uptodate,
157 		 * the bitmap is also uptodate
158 		 */
159 		set_bitmap_uptodate(bh);
160 		unlock_buffer(bh);
161 		goto verify;
162 	}
163 	/*
164 	 * submit the buffer_head for reading
165 	 */
166 	trace_ext4_load_inode_bitmap(sb, block_group);
167 	bh->b_end_io = ext4_end_bitmap_read;
168 	get_bh(bh);
169 	submit_bh(READ, bh);
170 	wait_on_buffer(bh);
171 	if (!buffer_uptodate(bh)) {
172 		put_bh(bh);
173 		ext4_error(sb, "Cannot read inode bitmap - "
174 			   "block_group = %u, inode_bitmap = %llu",
175 			   block_group, bitmap_blk);
176 		return NULL;
177 	}
178 
179 verify:
180 	ext4_lock_group(sb, block_group);
181 	if (!buffer_verified(bh) &&
182 	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
183 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
184 		ext4_unlock_group(sb, block_group);
185 		put_bh(bh);
186 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
187 			   "inode_bitmap = %llu", block_group, bitmap_blk);
188 		return NULL;
189 	}
190 	ext4_unlock_group(sb, block_group);
191 	set_buffer_verified(bh);
192 	return bh;
193 }
194 
195 /*
196  * NOTE! When we get the inode, we're the only people
197  * that have access to it, and as such there are no
198  * race conditions we have to worry about. The inode
199  * is not on the hash-lists, and it cannot be reached
200  * through the filesystem because the directory entry
201  * has been deleted earlier.
202  *
203  * HOWEVER: we must make sure that we get no aliases,
204  * which means that we have to call "clear_inode()"
205  * _before_ we mark the inode not in use in the inode
206  * bitmaps. Otherwise a newly created file might use
207  * the same inode number (not actually the same pointer
208  * though), and then we'd have two inodes sharing the
209  * same inode number and space on the hard disk.
210  */
211 void ext4_free_inode(handle_t *handle, struct inode *inode)
212 {
213 	struct super_block *sb = inode->i_sb;
214 	int is_directory;
215 	unsigned long ino;
216 	struct buffer_head *bitmap_bh = NULL;
217 	struct buffer_head *bh2;
218 	ext4_group_t block_group;
219 	unsigned long bit;
220 	struct ext4_group_desc *gdp;
221 	struct ext4_super_block *es;
222 	struct ext4_sb_info *sbi;
223 	int fatal = 0, err, count, cleared;
224 
225 	if (!sb) {
226 		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
227 		       "nonexistent device\n", __func__, __LINE__);
228 		return;
229 	}
230 	if (atomic_read(&inode->i_count) > 1) {
231 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
232 			 __func__, __LINE__, inode->i_ino,
233 			 atomic_read(&inode->i_count));
234 		return;
235 	}
236 	if (inode->i_nlink) {
237 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
238 			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
239 		return;
240 	}
241 	sbi = EXT4_SB(sb);
242 
243 	ino = inode->i_ino;
244 	ext4_debug("freeing inode %lu\n", ino);
245 	trace_ext4_free_inode(inode);
246 
247 	/*
248 	 * Note: we must free any quota before locking the superblock,
249 	 * as writing the quota to disk may need the lock as well.
250 	 */
251 	dquot_initialize(inode);
252 	ext4_xattr_delete_inode(handle, inode);
253 	dquot_free_inode(inode);
254 	dquot_drop(inode);
255 
256 	is_directory = S_ISDIR(inode->i_mode);
257 
258 	/* Do this BEFORE marking the inode not in use or returning an error */
259 	ext4_clear_inode(inode);
260 
261 	es = EXT4_SB(sb)->s_es;
262 	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
263 		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
264 		goto error_return;
265 	}
266 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
267 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
268 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
269 	if (!bitmap_bh)
270 		goto error_return;
271 
272 	BUFFER_TRACE(bitmap_bh, "get_write_access");
273 	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
274 	if (fatal)
275 		goto error_return;
276 
277 	fatal = -ESRCH;
278 	gdp = ext4_get_group_desc(sb, block_group, &bh2);
279 	if (gdp) {
280 		BUFFER_TRACE(bh2, "get_write_access");
281 		fatal = ext4_journal_get_write_access(handle, bh2);
282 	}
283 	ext4_lock_group(sb, block_group);
284 	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
285 	if (fatal || !cleared) {
286 		ext4_unlock_group(sb, block_group);
287 		goto out;
288 	}
289 
290 	count = ext4_free_inodes_count(sb, gdp) + 1;
291 	ext4_free_inodes_set(sb, gdp, count);
292 	if (is_directory) {
293 		count = ext4_used_dirs_count(sb, gdp) - 1;
294 		ext4_used_dirs_set(sb, gdp, count);
295 		percpu_counter_dec(&sbi->s_dirs_counter);
296 	}
297 	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
298 				   EXT4_INODES_PER_GROUP(sb) / 8);
299 	ext4_group_desc_csum_set(sb, block_group, gdp);
300 	ext4_unlock_group(sb, block_group);
301 
302 	percpu_counter_inc(&sbi->s_freeinodes_counter);
303 	if (sbi->s_log_groups_per_flex) {
304 		ext4_group_t f = ext4_flex_group(sbi, block_group);
305 
306 		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
307 		if (is_directory)
308 			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
309 	}
310 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
311 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
312 out:
313 	if (cleared) {
314 		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
315 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
316 		if (!fatal)
317 			fatal = err;
318 		ext4_mark_super_dirty(sb);
319 	} else
320 		ext4_error(sb, "bit already cleared for inode %lu", ino);
321 
322 error_return:
323 	brelse(bitmap_bh);
324 	ext4_std_error(sb, fatal);
325 }
326 
327 struct orlov_stats {
328 	__u32 free_inodes;
329 	__u32 free_clusters;
330 	__u32 used_dirs;
331 };
332 
333 /*
334  * Helper function for Orlov's allocator; returns critical information
335  * for a particular block group or flex_bg.  If flex_size is 1, then g
336  * is a block group number; otherwise it is a flex_bg number.
337  */
338 static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
339 			    int flex_size, struct orlov_stats *stats)
340 {
341 	struct ext4_group_desc *desc;
342 	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
343 
344 	if (flex_size > 1) {
345 		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
346 		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
347 		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
348 		return;
349 	}
350 
351 	desc = ext4_get_group_desc(sb, g, NULL);
352 	if (desc) {
353 		stats->free_inodes = ext4_free_inodes_count(sb, desc);
354 		stats->free_clusters = ext4_free_group_clusters(sb, desc);
355 		stats->used_dirs = ext4_used_dirs_count(sb, desc);
356 	} else {
357 		stats->free_inodes = 0;
358 		stats->free_clusters = 0;
359 		stats->used_dirs = 0;
360 	}
361 }
362 
363 /*
364  * Orlov's allocator for directories.
365  *
366  * We always try to spread first-level directories.
367  *
368  * If there are block groups with both free inode and free block counts
369  * not worse than average, we return the one with the smallest directory count.
370  * Otherwise we simply return a random group.
371  *
372  * For other directories the rules are as follows:
373  *
374  * It's OK to put a directory into a group unless
375  * it has too many directories already (max_dirs) or
376  * it has too few free inodes left (min_inodes) or
377  * it has too few free blocks left (min_blocks).
378  * The parent's group is preferred; if it doesn't satisfy these
379  * conditions we search cyclically through the rest.  If none
380  * of the groups look good we just look for a group with more
381  * free inodes than average (starting at the parent's group).
382  */
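/*
 * Worked example for the thresholds computed below (illustrative numbers
 * only): with flex_size == 1, 8192 inodes and 32768 clusters per group,
 * 100 groups, 1000 directories, avefreei == 5000 and avefreec == 20000:
 *
 *   max_dirs     = 1000/100 + 8192/16 = 522
 *   min_inodes   = 5000 - 8192/4      = 2952
 *   min_clusters = 20000 - 32768/4    = 11808
 *
 * A group qualifies only if it has fewer than max_dirs directories, and at
 * least min_inodes free inodes and min_clusters free clusters.
 */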
383 
384 static int find_group_orlov(struct super_block *sb, struct inode *parent,
385 			    ext4_group_t *group, umode_t mode,
386 			    const struct qstr *qstr)
387 {
388 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
389 	struct ext4_sb_info *sbi = EXT4_SB(sb);
390 	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
391 	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
392 	unsigned int freei, avefreei, grp_free;
393 	ext4_fsblk_t freeb, avefreec;
394 	unsigned int ndirs;
395 	int max_dirs, min_inodes;
396 	ext4_grpblk_t min_clusters;
397 	ext4_group_t i, grp, g, ngroups;
398 	struct ext4_group_desc *desc;
399 	struct orlov_stats stats;
400 	int flex_size = ext4_flex_bg_size(sbi);
401 	struct dx_hash_info hinfo;
402 
403 	ngroups = real_ngroups;
404 	if (flex_size > 1) {
405 		ngroups = (real_ngroups + flex_size - 1) >>
406 			sbi->s_log_groups_per_flex;
407 		parent_group >>= sbi->s_log_groups_per_flex;
408 	}
409 
410 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
411 	avefreei = freei / ngroups;
412 	freeb = EXT4_C2B(sbi,
413 		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
414 	avefreec = freeb;
415 	do_div(avefreec, ngroups);
416 	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
417 
418 	if (S_ISDIR(mode) &&
419 	    ((parent == sb->s_root->d_inode) ||
420 	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
421 		int best_ndir = inodes_per_group;
422 		int ret = -1;
423 
424 		if (qstr) {
425 			hinfo.hash_version = DX_HASH_HALF_MD4;
426 			hinfo.seed = sbi->s_hash_seed;
427 			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
428 			grp = hinfo.hash;
429 		} else
430 			get_random_bytes(&grp, sizeof(grp));
431 		parent_group = (unsigned)grp % ngroups;
432 		for (i = 0; i < ngroups; i++) {
433 			g = (parent_group + i) % ngroups;
434 			get_orlov_stats(sb, g, flex_size, &stats);
435 			if (!stats.free_inodes)
436 				continue;
437 			if (stats.used_dirs >= best_ndir)
438 				continue;
439 			if (stats.free_inodes < avefreei)
440 				continue;
441 			if (stats.free_clusters < avefreec)
442 				continue;
443 			grp = g;
444 			ret = 0;
445 			best_ndir = stats.used_dirs;
446 		}
447 		if (ret)
448 			goto fallback;
449 	found_flex_bg:
450 		if (flex_size == 1) {
451 			*group = grp;
452 			return 0;
453 		}
454 
455 		/*
456 		 * We pack inodes at the beginning of the flexgroup's
457 		 * inode tables.  Block allocation decisions will do
458 		 * something similar, although regular files will
459 		 * start at 2nd block group of the flexgroup.  See
460 		 * ext4_ext_find_goal() and ext4_find_near().
461 		 */
462 		grp *= flex_size;
463 		for (i = 0; i < flex_size; i++) {
464 			if (grp+i >= real_ngroups)
465 				break;
466 			desc = ext4_get_group_desc(sb, grp+i, NULL);
467 			if (desc && ext4_free_inodes_count(sb, desc)) {
468 				*group = grp+i;
469 				return 0;
470 			}
471 		}
472 		goto fallback;
473 	}
474 
475 	max_dirs = ndirs / ngroups + inodes_per_group / 16;
476 	min_inodes = avefreei - inodes_per_group*flex_size / 4;
477 	if (min_inodes < 1)
478 		min_inodes = 1;
479 	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
480 
481 	/*
482 	 * Start looking in the flex group where we last allocated an
483 	 * inode for this parent directory
484 	 */
485 	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
486 		parent_group = EXT4_I(parent)->i_last_alloc_group;
487 		if (flex_size > 1)
488 			parent_group >>= sbi->s_log_groups_per_flex;
489 	}
490 
491 	for (i = 0; i < ngroups; i++) {
492 		grp = (parent_group + i) % ngroups;
493 		get_orlov_stats(sb, grp, flex_size, &stats);
494 		if (stats.used_dirs >= max_dirs)
495 			continue;
496 		if (stats.free_inodes < min_inodes)
497 			continue;
498 		if (stats.free_clusters < min_clusters)
499 			continue;
500 		goto found_flex_bg;
501 	}
502 
503 fallback:
504 	ngroups = real_ngroups;
505 	avefreei = freei / ngroups;
506 fallback_retry:
507 	parent_group = EXT4_I(parent)->i_block_group;
508 	for (i = 0; i < ngroups; i++) {
509 		grp = (parent_group + i) % ngroups;
510 		desc = ext4_get_group_desc(sb, grp, NULL);
511 		if (desc) {
512 			grp_free = ext4_free_inodes_count(sb, desc);
513 			if (grp_free && grp_free >= avefreei) {
514 				*group = grp;
515 				return 0;
516 			}
517 		}
518 	}
519 
520 	if (avefreei) {
521 		/*
522 		 * The free-inodes counter is approximate, and for really small
523 		 * filesystems the above test can fail to find any blockgroups
524 		 */
525 		avefreei = 0;
526 		goto fallback_retry;
527 	}
528 
529 	return -1;
530 }
531 
532 static int find_group_other(struct super_block *sb, struct inode *parent,
533 			    ext4_group_t *group, umode_t mode)
534 {
535 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
536 	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
537 	struct ext4_group_desc *desc;
538 	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
539 
540 	/*
541 	 * Try to place the inode in the same flex group as its
542 	 * parent.  If we can't find space, use the Orlov algorithm to
543 	 * find another flex group, and store that information in the
544 	 * parent directory's inode so that future allocations use
545 	 * that flex group.
546 	 */
547 	if (flex_size > 1) {
548 		int retry = 0;
549 
550 	try_again:
551 		parent_group &= ~(flex_size-1);
552 		last = parent_group + flex_size;
553 		if (last > ngroups)
554 			last = ngroups;
555 		for  (i = parent_group; i < last; i++) {
556 			desc = ext4_get_group_desc(sb, i, NULL);
557 			if (desc && ext4_free_inodes_count(sb, desc)) {
558 				*group = i;
559 				return 0;
560 			}
561 		}
562 		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
563 			retry = 1;
564 			parent_group = EXT4_I(parent)->i_last_alloc_group;
565 			goto try_again;
566 		}
567 		/*
568 		 * If this didn't work, use the Orlov search algorithm
569 		 * to find a new flex group; we pass in the mode to
570 		 * avoid the topdir algorithms.
571 		 */
572 		*group = parent_group + flex_size;
573 		if (*group > ngroups)
574 			*group = 0;
575 		return find_group_orlov(sb, parent, group, mode, NULL);
576 	}
577 
578 	/*
579 	 * Try to place the inode in its parent directory
580 	 */
581 	*group = parent_group;
582 	desc = ext4_get_group_desc(sb, *group, NULL);
583 	if (desc && ext4_free_inodes_count(sb, desc) &&
584 	    ext4_free_group_clusters(sb, desc))
585 		return 0;
586 
587 	/*
588 	 * We're going to place this inode in a different blockgroup from its
589 	 * parent.  We want to cause files in a common directory to all land in
590 	 * the same blockgroup.  But we want files which are in a different
591 	 * directory which shares a blockgroup with our parent to land in a
592 	 * different blockgroup.
593 	 *
594 	 * So add our directory's i_ino into the starting point for the hash.
595 	 */
596 	*group = (*group + parent->i_ino) % ngroups;
597 
598 	/*
599 	 * Use a quadratic hash to find a group with a free inode and some free
600 	 * blocks.
601 	 */
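	/* e.g. starting from group g this probes g+1, g+3, g+7, g+15, ... (mod ngroups) */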
602 	for (i = 1; i < ngroups; i <<= 1) {
603 		*group += i;
604 		if (*group >= ngroups)
605 			*group -= ngroups;
606 		desc = ext4_get_group_desc(sb, *group, NULL);
607 		if (desc && ext4_free_inodes_count(sb, desc) &&
608 		    ext4_free_group_clusters(sb, desc))
609 			return 0;
610 	}
611 
612 	/*
613 	 * That failed: try linear search for a free inode, even if that group
614 	 * has no free blocks.
615 	 */
616 	*group = parent_group;
617 	for (i = 0; i < ngroups; i++) {
618 		if (++*group >= ngroups)
619 			*group = 0;
620 		desc = ext4_get_group_desc(sb, *group, NULL);
621 		if (desc && ext4_free_inodes_count(sb, desc))
622 			return 0;
623 	}
624 
625 	return -1;
626 }
627 
628 /*
629  * There are two policies for allocating an inode.  If the new inode is
630  * a directory, then a forward search is made for a block group with both
631  * free space and a low directory-to-inode ratio; if that fails, the group
632  * with the fewest directories among those with above-average free space
633  * is chosen.
634  *
635  * For other inodes, search forward from the parent directory's block
636  * group to find a free inode.
637  */
638 struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
639 			     const struct qstr *qstr, __u32 goal, uid_t *owner)
640 {
641 	struct super_block *sb;
642 	struct buffer_head *inode_bitmap_bh = NULL;
643 	struct buffer_head *group_desc_bh;
644 	ext4_group_t ngroups, group = 0;
645 	unsigned long ino = 0;
646 	struct inode *inode;
647 	struct ext4_group_desc *gdp = NULL;
648 	struct ext4_inode_info *ei;
649 	struct ext4_sb_info *sbi;
650 	int ret2, err = 0;
651 	struct inode *ret;
652 	ext4_group_t i;
653 	ext4_group_t flex_group;
654 
655 	/* Cannot create files in a deleted directory */
656 	if (!dir || !dir->i_nlink)
657 		return ERR_PTR(-EPERM);
658 
659 	sb = dir->i_sb;
660 	ngroups = ext4_get_groups_count(sb);
661 	trace_ext4_request_inode(dir, mode);
662 	inode = new_inode(sb);
663 	if (!inode)
664 		return ERR_PTR(-ENOMEM);
665 	ei = EXT4_I(inode);
666 	sbi = EXT4_SB(sb);
667 
668 	if (!goal)
669 		goal = sbi->s_inode_goal;
670 
671 	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
672 		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
673 		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
674 		ret2 = 0;
675 		goto got_group;
676 	}
677 
678 	if (S_ISDIR(mode))
679 		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
680 	else
681 		ret2 = find_group_other(sb, dir, &group, mode);
682 
683 got_group:
684 	EXT4_I(dir)->i_last_alloc_group = group;
685 	err = -ENOSPC;
686 	if (ret2 == -1)
687 		goto out;
688 
689 	/*
690 	 * Normally we will only go through one pass of this loop,
691 	 * unless we get unlucky and it turns out the group we selected
692 	 * had its last inode grabbed by someone else.
693 	 */
694 	for (i = 0; i < ngroups; i++, ino = 0) {
695 		err = -EIO;
696 
697 		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
698 		if (!gdp)
699 			goto fail;
700 
701 		brelse(inode_bitmap_bh);
702 		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
703 		if (!inode_bitmap_bh)
704 			goto fail;
705 
706 repeat_in_this_group:
707 		ino = ext4_find_next_zero_bit((unsigned long *)
708 					      inode_bitmap_bh->b_data,
709 					      EXT4_INODES_PER_GROUP(sb), ino);
710 		if (ino >= EXT4_INODES_PER_GROUP(sb)) {
711 			if (++group == ngroups)
712 				group = 0;
713 			continue;
714 		}
715 		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
716 			ext4_error(sb, "reserved inode found cleared - "
717 				   "inode=%lu", ino + 1);
718 			continue;
719 		}
720 		ext4_lock_group(sb, group);
721 		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
722 		ext4_unlock_group(sb, group);
723 		ino++;		/* the inode bitmap is zero-based */
724 		if (!ret2)
725 			goto got; /* we grabbed the inode! */
726 		if (ino < EXT4_INODES_PER_GROUP(sb))
727 			goto repeat_in_this_group;
728 	}
729 	err = -ENOSPC;
730 	goto out;
731 
732 got:
733 	/* We may have to initialize the block bitmap if it isn't already */
734 	if (ext4_has_group_desc_csum(sb) &&
735 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
736 		struct buffer_head *block_bitmap_bh;
737 
738 		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
739 		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
740 		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
741 		if (err) {
742 			brelse(block_bitmap_bh);
743 			goto fail;
744 		}
745 
746 		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
747 		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
748 		brelse(block_bitmap_bh);
749 
750 		/* recheck and clear flag under lock if we still need to */
751 		ext4_lock_group(sb, group);
752 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
753 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
754 			ext4_free_group_clusters_set(sb, gdp,
755 				ext4_free_clusters_after_init(sb, group, gdp));
756 			ext4_block_bitmap_csum_set(sb, group, gdp,
757 						   block_bitmap_bh,
758 						   EXT4_BLOCKS_PER_GROUP(sb) /
759 						   8);
760 			ext4_group_desc_csum_set(sb, group, gdp);
761 		}
762 		ext4_unlock_group(sb, group);
763 
764 		if (err)
765 			goto fail;
766 	}
767 
768 	BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
769 	err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
770 	if (err)
771 		goto fail;
772 
773 	BUFFER_TRACE(group_desc_bh, "get_write_access");
774 	err = ext4_journal_get_write_access(handle, group_desc_bh);
775 	if (err)
776 		goto fail;
777 
778 	/* Update the relevant bg descriptor fields */
779 	if (ext4_has_group_desc_csum(sb)) {
780 		int free;
781 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
782 
783 		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
784 		ext4_lock_group(sb, group); /* while we modify the bg desc */
785 		free = EXT4_INODES_PER_GROUP(sb) -
786 			ext4_itable_unused_count(sb, gdp);
787 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
788 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
789 			free = 0;
790 		}
791 		/*
792 		 * Check the relative inode number against the last used
793 		 * relative inode number in this group.  If it is greater,
794 		 * we need to update the bg_itable_unused count.
795 		 */
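		/*
		 * e.g. (illustrative) with 8192 inodes per group and
		 * bg_itable_unused == 8000, free == 192; allocating relative
		 * inode 193 lowers bg_itable_unused to 8192 - 193 == 7999.
		 */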
796 		if (ino > free)
797 			ext4_itable_unused_set(sb, gdp,
798 					(EXT4_INODES_PER_GROUP(sb) - ino));
799 		up_read(&grp->alloc_sem);
800 	} else {
801 		ext4_lock_group(sb, group);
802 	}
803 
804 	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
805 	if (S_ISDIR(mode)) {
806 		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
807 		if (sbi->s_log_groups_per_flex) {
808 			ext4_group_t f = ext4_flex_group(sbi, group);
809 
810 			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
811 		}
812 	}
813 	if (ext4_has_group_desc_csum(sb)) {
814 		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
815 					   EXT4_INODES_PER_GROUP(sb) / 8);
816 		ext4_group_desc_csum_set(sb, group, gdp);
817 	}
818 	ext4_unlock_group(sb, group);
819 
820 	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
821 	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
822 	if (err)
823 		goto fail;
824 
825 	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
826 	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
827 	if (err)
828 		goto fail;
829 
830 	percpu_counter_dec(&sbi->s_freeinodes_counter);
831 	if (S_ISDIR(mode))
832 		percpu_counter_inc(&sbi->s_dirs_counter);
833 	ext4_mark_super_dirty(sb);
834 
835 	if (sbi->s_log_groups_per_flex) {
836 		flex_group = ext4_flex_group(sbi, group);
837 		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
838 	}
839 	if (owner) {
840 		inode->i_mode = mode;
841 		i_uid_write(inode, owner[0]);
842 		i_gid_write(inode, owner[1]);
843 	} else if (test_opt(sb, GRPID)) {
844 		inode->i_mode = mode;
845 		inode->i_uid = current_fsuid();
846 		inode->i_gid = dir->i_gid;
847 	} else
848 		inode_init_owner(inode, dir, mode);
849 
850 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
851 	/* This is the optimal IO size (for stat), not the fs block size */
852 	inode->i_blocks = 0;
853 	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
854 						       ext4_current_time(inode);
855 
856 	memset(ei->i_data, 0, sizeof(ei->i_data));
857 	ei->i_dir_start_lookup = 0;
858 	ei->i_disksize = 0;
859 
860 	/* Don't inherit extent flag from directory, amongst others. */
861 	ei->i_flags =
862 		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
863 	ei->i_file_acl = 0;
864 	ei->i_dtime = 0;
865 	ei->i_block_group = group;
866 	ei->i_last_alloc_group = ~0;
867 
868 	ext4_set_inode_flags(inode);
869 	if (IS_DIRSYNC(inode))
870 		ext4_handle_sync(handle);
871 	if (insert_inode_locked(inode) < 0) {
872 		/*
873 		 * Likely a bitmap corruption causing inode to be allocated
874 		 * twice.
875 		 */
876 		err = -EIO;
877 		goto fail;
878 	}
879 	spin_lock(&sbi->s_next_gen_lock);
880 	inode->i_generation = sbi->s_next_generation++;
881 	spin_unlock(&sbi->s_next_gen_lock);
882 
883 	/* Precompute checksum seed for inode metadata */
884 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
885 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
886 		__u32 csum;
887 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
888 		__le32 inum = cpu_to_le32(inode->i_ino);
889 		__le32 gen = cpu_to_le32(inode->i_generation);
890 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
891 				   sizeof(inum));
892 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
893 					      sizeof(gen));
894 	}
895 
896 	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
897 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
898 
899 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
900 
901 	ret = inode;
902 	dquot_initialize(inode);
903 	err = dquot_alloc_inode(inode);
904 	if (err)
905 		goto fail_drop;
906 
907 	err = ext4_init_acl(handle, inode, dir);
908 	if (err)
909 		goto fail_free_drop;
910 
911 	err = ext4_init_security(handle, inode, dir, qstr);
912 	if (err)
913 		goto fail_free_drop;
914 
915 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
916 		/* set extent flag only for directories, regular files and normal symlinks */
917 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
918 			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
919 			ext4_ext_tree_init(handle, inode);
920 		}
921 	}
922 
923 	if (ext4_handle_valid(handle)) {
924 		ei->i_sync_tid = handle->h_transaction->t_tid;
925 		ei->i_datasync_tid = handle->h_transaction->t_tid;
926 	}
927 
928 	err = ext4_mark_inode_dirty(handle, inode);
929 	if (err) {
930 		ext4_std_error(sb, err);
931 		goto fail_free_drop;
932 	}
933 
934 	ext4_debug("allocating inode %lu\n", inode->i_ino);
935 	trace_ext4_allocate_inode(inode, dir, mode);
936 	goto really_out;
937 fail:
938 	ext4_std_error(sb, err);
939 out:
940 	iput(inode);
941 	ret = ERR_PTR(err);
942 really_out:
943 	brelse(inode_bitmap_bh);
944 	return ret;
945 
946 fail_free_drop:
947 	dquot_free_inode(inode);
948 
949 fail_drop:
950 	dquot_drop(inode);
951 	inode->i_flags |= S_NOQUOTA;
952 	clear_nlink(inode);
953 	unlock_new_inode(inode);
954 	iput(inode);
955 	brelse(inode_bitmap_bh);
956 	return ERR_PTR(err);
957 }
958 
959 /* Verify that we are loading a valid orphan from disk */
960 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
961 {
962 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
963 	ext4_group_t block_group;
964 	int bit;
965 	struct buffer_head *bitmap_bh;
966 	struct inode *inode = NULL;
967 	long err = -EIO;
968 
969 	/* Error cases - e2fsck has already cleaned up for us */
970 	if (ino > max_ino) {
971 		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
972 		goto error;
973 	}
974 
975 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
976 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
977 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
978 	if (!bitmap_bh) {
979 		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
980 		goto error;
981 	}
982 
983 	/* Having the inode bit set should be a 100% indicator that this
984 	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
985 	 * inodes that were being truncated, so we can't check i_nlink==0.
986 	 */
987 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
988 		goto bad_orphan;
989 
990 	inode = ext4_iget(sb, ino);
991 	if (IS_ERR(inode))
992 		goto iget_failed;
993 
994 	/*
995 	 * If the orphan has i_nlink > 0 then it should be able to be
996 	 * truncated, otherwise it won't be removed from the orphan list
997 	 * during processing and an infinite loop will result.
998 	 */
999 	if (inode->i_nlink && !ext4_can_truncate(inode))
1000 		goto bad_orphan;
1001 
1002 	if (NEXT_ORPHAN(inode) > max_ino)
1003 		goto bad_orphan;
1004 	brelse(bitmap_bh);
1005 	return inode;
1006 
1007 iget_failed:
1008 	err = PTR_ERR(inode);
1009 	inode = NULL;
1010 bad_orphan:
1011 	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
1012 	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1013 	       bit, (unsigned long long)bitmap_bh->b_blocknr,
1014 	       ext4_test_bit(bit, bitmap_bh->b_data));
1015 	printk(KERN_NOTICE "inode=%p\n", inode);
1016 	if (inode) {
1017 		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
1018 		       is_bad_inode(inode));
1019 		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
1020 		       NEXT_ORPHAN(inode));
1021 		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
1022 		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
1023 		/* Avoid freeing blocks if we got a bad deleted inode */
1024 		if (inode->i_nlink == 0)
1025 			inode->i_blocks = 0;
1026 		iput(inode);
1027 	}
1028 	brelse(bitmap_bh);
1029 error:
1030 	return ERR_PTR(err);
1031 }
1032 
1033 unsigned long ext4_count_free_inodes(struct super_block *sb)
1034 {
1035 	unsigned long desc_count;
1036 	struct ext4_group_desc *gdp;
1037 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1038 #ifdef EXT4FS_DEBUG
1039 	struct ext4_super_block *es;
1040 	unsigned long bitmap_count, x;
1041 	struct buffer_head *bitmap_bh = NULL;
1042 
1043 	es = EXT4_SB(sb)->s_es;
1044 	desc_count = 0;
1045 	bitmap_count = 0;
1046 	gdp = NULL;
1047 	for (i = 0; i < ngroups; i++) {
1048 		gdp = ext4_get_group_desc(sb, i, NULL);
1049 		if (!gdp)
1050 			continue;
1051 		desc_count += ext4_free_inodes_count(sb, gdp);
1052 		brelse(bitmap_bh);
1053 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
1054 		if (!bitmap_bh)
1055 			continue;
1056 
1057 		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
1058 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1059 			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1060 		bitmap_count += x;
1061 	}
1062 	brelse(bitmap_bh);
1063 	printk(KERN_DEBUG "ext4_count_free_inodes: "
1064 	       "stored = %u, computed = %lu, %lu\n",
1065 	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1066 	return desc_count;
1067 #else
1068 	desc_count = 0;
1069 	for (i = 0; i < ngroups; i++) {
1070 		gdp = ext4_get_group_desc(sb, i, NULL);
1071 		if (!gdp)
1072 			continue;
1073 		desc_count += ext4_free_inodes_count(sb, gdp);
1074 		cond_resched();
1075 	}
1076 	return desc_count;
1077 #endif
1078 }
1079 
1080 /* Called at mount-time, super-block is locked */
1081 unsigned long ext4_count_dirs(struct super_block * sb)
1082 {
1083 	unsigned long count = 0;
1084 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1085 
1086 	for (i = 0; i < ngroups; i++) {
1087 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1088 		if (!gdp)
1089 			continue;
1090 		count += ext4_used_dirs_count(sb, gdp);
1091 	}
1092 	return count;
1093 }
1094 
1095 /*
1096  * Zeroes the not-yet-zeroed part of an inode table, i.e. writes zeroes
1097  * through the unused blocks of the inode table. Must be called without any
1098  * spinlock held. The only place it is called from on an active filesystem
1099  * is the ext4lazyinit thread, so we do not need any special locks; however,
1100  * we have to prevent inode allocation from the current group, so we take
1101  * alloc_sem to block ext4_new_inode() until we are finished.
1102  */
1103 int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1104 				 int barrier)
1105 {
1106 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1107 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1108 	struct ext4_group_desc *gdp = NULL;
1109 	struct buffer_head *group_desc_bh;
1110 	handle_t *handle;
1111 	ext4_fsblk_t blk;
1112 	int num, ret = 0, used_blks = 0;
1113 
1114 	/* This should not happen, but just to be sure check this */
1115 	if (sb->s_flags & MS_RDONLY) {
1116 		ret = 1;
1117 		goto out;
1118 	}
1119 
1120 	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1121 	if (!gdp)
1122 		goto out;
1123 
1124 	/*
1125 	 * We do not need to lock this, because we are the only one
1126 	 * handling this flag.
1127 	 */
1128 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1129 		goto out;
1130 
1131 	handle = ext4_journal_start_sb(sb, 1);
1132 	if (IS_ERR(handle)) {
1133 		ret = PTR_ERR(handle);
1134 		goto out;
1135 	}
1136 
1137 	down_write(&grp->alloc_sem);
1138 	/*
1139 	 * If the inode bitmap was already initialized, there may be some
1140 	 * used inodes, so we need to skip the blocks with used inodes in
1141 	 * the inode table.
1142 	 */
1143 	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
1144 		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
1145 			    ext4_itable_unused_count(sb, gdp)),
1146 			    sbi->s_inodes_per_block);
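	/*
	 * e.g. (illustrative) with 8192 inodes per group, bg_itable_unused ==
	 * 8000 and 16 inodes per block: used_blks = DIV_ROUND_UP(192, 16) = 12.
	 */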
1147 
1148 	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
1149 		ext4_error(sb, "Something is wrong with group %u: "
1150 			   "used itable blocks: %d; "
1151 			   "itable unused count: %u",
1152 			   group, used_blks,
1153 			   ext4_itable_unused_count(sb, gdp));
1154 		ret = 1;
1155 		goto err_out;
1156 	}
1157 
1158 	blk = ext4_inode_table(sb, gdp) + used_blks;
1159 	num = sbi->s_itb_per_group - used_blks;
1160 
1161 	BUFFER_TRACE(group_desc_bh, "get_write_access");
1162 	ret = ext4_journal_get_write_access(handle,
1163 					    group_desc_bh);
1164 	if (ret)
1165 		goto err_out;
1166 
1167 	/*
1168 	 * Skip zeroout if the inode table is full. But we set the ZEROED
1169 	 * flag anyway, because obviously, when it is full it does not need
1170 	 * further zeroing.
1171 	 */
1172 	if (unlikely(num == 0))
1173 		goto skip_zeroout;
1174 
1175 	ext4_debug("going to zero out inode table in group %d\n",
1176 		   group);
1177 	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1178 	if (ret < 0)
1179 		goto err_out;
1180 	if (barrier)
1181 		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
1182 
1183 skip_zeroout:
1184 	ext4_lock_group(sb, group);
1185 	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1186 	ext4_group_desc_csum_set(sb, group, gdp);
1187 	ext4_unlock_group(sb, group);
1188 
1189 	BUFFER_TRACE(group_desc_bh,
1190 		     "call ext4_handle_dirty_metadata");
1191 	ret = ext4_handle_dirty_metadata(handle, NULL,
1192 					 group_desc_bh);
1193 
1194 err_out:
1195 	up_write(&grp->alloc_sem);
1196 	ext4_journal_stop(handle);
1197 out:
1198 	return ret;
1199 }
1200