xref: /linux/fs/ext4/mballoc.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  */
6 
7 
8 /*
 * mballoc.c contains the multiblock allocation routines
10  */
11 
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <trace/events/ext4.h>
20 
21 /*
22  * MUSTDO:
23  *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
25  *
26  * TODO v4:
27  *   - normalization should take into account whether file is still open
28  *   - discard preallocations if no free space left (policy?)
29  *   - don't normalize tails
30  *   - quota
31  *   - reservation for superuser
32  *
33  * TODO v3:
34  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
35  *   - track min/max extents in each group for better group selection
36  *   - mb_mark_used() may allocate chunk right after splitting buddy
37  *   - tree of groups sorted by number of free blocks
38  *   - error handling
39  */
40 
/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide whether to
 * use group preallocation or inode preallocation depending on the size
 * of the file. The size of the file is the larger of the resulting file
 * size we would have after allocation and the current file size. If the
 * size is less than sbi->s_mb_stream_request we use group
 * preallocation. The default value of s_mb_stream_request is 16 blocks;
 * it can also be tuned via /sys/fs/ext4/<partition>/mb_stream_req and
 * is expressed in number of blocks.
54  *
 * The main motivation for having small files use group preallocation is
 * to keep small files close together on the disk.
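 *
 * For example (decision rule only, with the values documented here):
 * with s_mb_stream_request = 16, a file whose size works out to 12
 * blocks gets group preallocation, while a 100-block file gets inode
 * preallocation:
 *
 *   size = max(resulting file size, current file size);
 *   if (size < sbi->s_mb_stream_request)
 *           use group preallocation;
 *   else
 *           use inode preallocation;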
57  *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
62  *
63  * pa_lstart -> the logical start block for this prealloc space
64  * pa_pstart -> the physical start block for this prealloc space
65  * pa_len    -> length for this prealloc space (in clusters)
66  * pa_free   ->  free space available in this prealloc space (in clusters)
67  *
 * The inode preallocation space is selected by the _logical_ start
 * block: we consume a particular prealloc space only if the logical
 * file block falls within its range. This makes sure that we have
 * contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we
 * don't modify any of its values except pa_free.
76  *
 * If we are not able to find blocks in the inode prealloc space and we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this
 * point.
 *
 * A locality group prealloc space is used if it has enough free space
 * (pa_free) for the request.
88  *
 * If we can't allocate blocks via the inode prealloc or/and the
 * locality group prealloc then we look at the buddy cache. The buddy
 * cache is represented by ext4_sb_info.s_buddy_cache (a struct inode)
 * whose file offsets get mapped to the buddy and bitmap information of
 * the different groups. The buddy information is attached to the buddy
 * cache inode so that we can access it through the page cache. The
 * information regarding each group, namely its block bitmap and buddy
 * bitmap, is loaded via ext4_mb_load_buddy and stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks, so it holds the information for groups_per_page
 * groups, which is blocks_per_page/2.
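 *
 * For example, the bitmap of group N is block N * 2 in the buddy cache
 * inode and its buddy is block N * 2 + 1 (the same arithmetic used by
 * ext4_mb_get_buddy_page_lock() below):
 *
 *   block = group * 2;                (+ 1 for the buddy block)
 *   pnum  = block / blocks_per_page;  (page index in the inode)
 *   poff  = block % blocks_per_page;  (block offset within the page)
 *
 * so with 1 KiB blocks and 4 KiB pages (blocks_per_page = 4), group 5's
 * bitmap lives in page 2 at in-page block offset 2 and its buddy at
 * offset 3.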
107  *
108  * The buddy cache inode is not stored on disk. The inode is thrown
109  * away when the filesystem is unmounted.
110  *
 * We look for the requested number of blocks in the buddy cache. If we
 * are able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the
 * request. This ensures we ask for more blocks than we actually need.
 * The extra blocks that we get after allocation are added to the
 * respective prealloc list. In the case of inode preallocation we
 * follow a set of heuristics based on file size, which can be found in
 * ext4_mb_normalize_request. If we are doing group preallocation we try
 * to normalize the request to sbi->s_mb_group_prealloc. The default
 * value of s_mb_group_prealloc depends on the cluster size; for
 * non-bigalloc file systems it is 512 blocks. It can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc and is expressed in number
 * of blocks. If we have mounted the file system with the -O
 * stripe=<value> option, the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
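 *
 * For example, with the default mb_group_prealloc of 512 blocks and a
 * stripe size of 96 blocks, a group prealloc request is normalized to
 * 576 blocks (6 * 96), the smallest multiple of the stripe size above
 * the default.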
129  *
 * If the "mb_optimize_scan" mount option is set, we maintain the in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (i.e. the total
 *    number of buddy bitmap orders possible) lists. Group infos are placed
 *    in the appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where the i-th list contains the groups whose
 *    average fragment size falls into the i-th bucket as computed by
 *    mb_avg_fragment_size_order(). The average fragment size is computed as
 *    ext4_group_info->bb_free / ext4_group_info->bb_fragments. Note that we
 *    don't bother with a special list for completely empty groups, so we
 *    only have MB_NUM_ORDERS(sb) lists.
152  *
153  * When "mb_optimize_scan" mount option is set, mballoc consults the above data
154  * structures to decide the order in which groups are to be traversed for
155  * fulfilling an allocation request.
156  *
 * At CR = 0, we look for groups which have the largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform the CR = 0 lookup in O(1)
 * time.
 *
 * At CR = 1, we only consider groups where the average fragment size is at
 * least the request size. So, we look up a group whose average fragment size
 * is just above or equal to the request size using our average fragment size
 * group lists (data structure 2) in O(1) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses the
 * groups in linear order, which requires O(N) search time for each of the
 * CR 0 and CR 1 phases.
170  *
171  * The regular allocator (using the buddy cache) supports a few tunables.
172  *
173  * /sys/fs/ext4/<partition>/mb_min_to_scan
174  * /sys/fs/ext4/<partition>/mb_max_to_scan
175  * /sys/fs/ext4/<partition>/mb_order2_req
176  * /sys/fs/ext4/<partition>/mb_linear_limit
177  *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request length is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best extent and
 * max_to_scan indicates how long mballoc __can__ look for a best extent among
 * the found extents. Searching for blocks starts with the group specified as
 * the goal value in the allocation context via ac_g_ex. Each group is first
 * checked for whether it can be used for allocation; ext4_mb_good_group
 * explains how the groups are checked.
193  *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other, and so the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices it may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups it should search linearly before consulting
 * the above data structures for more efficient lookups. For non-rotational
 * devices, this value defaults to 0 and for rotational devices it is set to
 * MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both preallocation spaces are populated as described above. So the first
 * request will hit the buddy cache, which will result in the prealloc
 * space getting filled. The prealloc space is then used for subsequent
 * requests.
208  */
209 
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks
 *    left unused. so, before taking some block from the descriptor,
 *    one must make sure the corresponding logical block isn't allocated
 *    yet. this also means that freeing any block within the
 *    descriptor's range must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group.
 *    space from this type of preallocation can be used for any inode.
 *    thus it's consumed from the beginning to the end.
231  *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers of
 * blocks: how many blocks are marked used/free in the on-disk bitmap, the
 * buddy and the PA.
245  *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is what the real operation uses, because we can't know the
 *        actually used bits from the PA, only from the on-disk bitmap
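 *
 * a worked example of the above: a new inode PA of N = 8 blocks is
 * created, 3 blocks are then allocated through it, and the PA is
 * discarded:
 *  - new PA:			buddy = 8 busy; on-disk = 0; PA = 8
 *  - use inode PA (3 blocks):	buddy = 8 busy; on-disk = 3; PA = 5
 *  - discard inode PA:		buddy = 3 busy; on-disk = 3; PA = 0
 * the discard frees the 5 blocks not marked in the on-disk bitmap, and
 * 'in-core buddy = on-disk bitmap + PAs' holds at every step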
256  *
 * if we follow this strict logic, then all operations above should be atomic.
 * given that some of them can block, we'd have to use something like
 * semaphores, killing performance on high-end SMP hardware. let's try to
 * relax it using the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set and/or a PA
 *     covers the corresponding block
268  *
269  * so, now we're building a concurrency table:
270  *  - init buddy vs.
271  *    - new PA
272  *      blocks for PA are allocated in the buddy, buddy must be referenced
273  *      until PA is linked to allocation group to avoid concurrent buddy init
274  *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      up-to-date data. given (3), we only care that the PA -= N operation
 *      doesn't interfere with init
277  *    - discard inode PA
278  *      the simplest way would be to have buddy initialized by the discard
279  *    - use locality group PA
280  *      again PA-=N must be serialized with init
281  *    - discard locality group PA
282  *      the simplest way would be to have buddy initialized by the discard
283  *  - new PA vs.
284  *    - use inode PA
285  *      i_data_sem serializes them
286  *    - discard inode PA
287  *      discard process must wait until PA isn't used by another process
288  *    - use locality group PA
289  *      some mutex should serialize them
290  *    - discard locality group PA
291  *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
295  *    - discard inode PA
296  *      discard process must wait until PA isn't used by another process
297  *    - use locality group PA
298  *      nothing wrong here -- they're different PAs covering different blocks
299  *    - discard locality group PA
300  *      discard process must wait until PA isn't used by another process
301  *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
312  *
313  */
314 
/*
 * Logic in a few words:
317  *
318  *  - allocation:
319  *    load group
320  *    find blocks
321  *    mark bits in on-disk bitmap
322  *    release group
323  *
324  *  - use preallocation:
325  *    find proper PA (per-inode or group)
326  *    load group
327  *    mark bits in on-disk bitmap
328  *    release group
329  *    release PA
330  *
331  *  - free:
332  *    load group
333  *    mark bits in on-disk bitmap
334  *    release group
335  *
336  *  - discard preallocations in group:
337  *    mark PAs deleted
338  *    move them onto local list
339  *    load on-disk bitmap
340  *    load group
341  *    remove PA from object (inode or locality group)
342  *    mark free blocks in-core
343  *
344  *  - discard inode's preallocations:
345  */
346 
347 /*
348  * Locking rules
349  *
350  * Locks:
351  *  - bitlock on a group	(group)
352  *  - object (inode/locality)	(object)
353  *  - per-pa lock		(pa)
354  *  - cr0 lists lock		(cr0)
355  *  - cr1 tree lock		(cr1)
356  *
357  * Paths:
358  *  - new pa
359  *    object
360  *    group
361  *
362  *  - find and use pa:
363  *    pa
364  *
365  *  - release consumed pa:
366  *    pa
367  *    group
368  *    object
369  *
370  *  - generate in-core bitmap:
371  *    group
372  *        pa
373  *
374  *  - discard all for given object (inode, locality group):
375  *    object
376  *        pa
377  *    group
378  *
379  *  - discard all for given group:
380  *    group
381  *        pa
382  *    group
383  *        object
384  *
385  *  - allocation path (ext4_mb_regular_allocator)
386  *    group
387  *    cr0/cr1
388  */
389 static struct kmem_cache *ext4_pspace_cachep;
390 static struct kmem_cache *ext4_ac_cachep;
391 static struct kmem_cache *ext4_free_data_cachep;
392 
393 /* We create slab caches for groupinfo data structures based on the
394  * superblock block size.  There will be one per mounted filesystem for
395  * each unique s_blocksize_bits */
396 #define NR_GRPINFO_CACHES 8
397 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
398 
399 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
400 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
401 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
402 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
403 };
404 
405 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
406 					ext4_group_t group);
407 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
408 						ext4_group_t group);
409 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
410 
411 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
412 			       ext4_group_t group, int cr);
413 
414 static int ext4_try_to_trim_range(struct super_block *sb,
415 		struct ext4_buddy *e4b, ext4_grpblk_t start,
416 		ext4_grpblk_t max, ext4_grpblk_t minblocks);
417 
/*
 * The algorithm using this percpu seq counter is as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed to discard
 *    those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks
 * are found do we sample the percpu seq counter for all cpus using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or that the
 * list is empty.
 */
436 static DEFINE_PER_CPU(u64, discard_pa_seq);
437 static inline u64 ext4_get_discard_pa_seq_sum(void)
438 {
439 	int __cpu;
440 	u64 __seq = 0;
441 
442 	for_each_possible_cpu(__cpu)
443 		__seq += per_cpu(discard_pa_seq, __cpu);
444 	return __seq;
445 }
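
/*
 * Illustrative sketch (not compiled): a simplified paraphrase of how the
 * counter is used by the retry logic in ext4_mb_new_blocks() and
 * ext4_mb_discard_preallocations_should_retry(); 'allocation_failed' and
 * 'freed' are placeholder names for this sketch only.
 */
#if 0
	u64 seq, seq_retry;

	seq = this_cpu_read(discard_pa_seq);	/* step 1: cheap local sample */
repeat:
	/* ... attempt the block allocation ... */
	if (allocation_failed && freed == 0) {
		/*
		 * Discarding PAs freed nothing here; retry only if some
		 * other cpu raced with us, i.e. its seq increment is
		 * visible in the global sum but not in our local sample.
		 */
		seq_retry = ext4_get_discard_pa_seq_sum();
		if (seq_retry != seq) {
			seq = seq_retry;
			goto repeat;
		}
	}
#endif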
446 
447 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
448 {
449 #if BITS_PER_LONG == 64
450 	*bit += ((unsigned long) addr & 7UL) << 3;
451 	addr = (void *) ((unsigned long) addr & ~7UL);
452 #elif BITS_PER_LONG == 32
453 	*bit += ((unsigned long) addr & 3UL) << 3;
454 	addr = (void *) ((unsigned long) addr & ~3UL);
455 #else
#error "how many bits are you?!"
457 #endif
458 	return addr;
459 }
460 
461 static inline int mb_test_bit(int bit, void *addr)
462 {
	/*
	 * ext4_test_bit on architectures like powerpc
	 * requires an unsigned long aligned address
	 */
467 	addr = mb_correct_addr_and_bit(&bit, addr);
468 	return ext4_test_bit(bit, addr);
469 }
470 
471 static inline void mb_set_bit(int bit, void *addr)
472 {
473 	addr = mb_correct_addr_and_bit(&bit, addr);
474 	ext4_set_bit(bit, addr);
475 }
476 
477 static inline void mb_clear_bit(int bit, void *addr)
478 {
479 	addr = mb_correct_addr_and_bit(&bit, addr);
480 	ext4_clear_bit(bit, addr);
481 }
482 
483 static inline int mb_test_and_clear_bit(int bit, void *addr)
484 {
485 	addr = mb_correct_addr_and_bit(&bit, addr);
486 	return ext4_test_and_clear_bit(bit, addr);
487 }
488 
489 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
490 {
491 	int fix = 0, ret, tmpmax;
492 	addr = mb_correct_addr_and_bit(&fix, addr);
493 	tmpmax = max + fix;
494 	start += fix;
495 
496 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
497 	if (ret > max)
498 		return max;
499 	return ret;
500 }
501 
502 static inline int mb_find_next_bit(void *addr, int max, int start)
503 {
504 	int fix = 0, ret, tmpmax;
505 	addr = mb_correct_addr_and_bit(&fix, addr);
506 	tmpmax = max + fix;
507 	start += fix;
508 
509 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
510 	if (ret > max)
511 		return max;
512 	return ret;
513 }
514 
515 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
516 {
517 	char *bb;
518 
519 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
520 	BUG_ON(max == NULL);
521 
522 	if (order > e4b->bd_blkbits + 1) {
523 		*max = 0;
524 		return NULL;
525 	}
526 
527 	/* at order 0 we see each particular block */
528 	if (order == 0) {
529 		*max = 1 << (e4b->bd_blkbits + 3);
530 		return e4b->bd_bitmap;
531 	}
532 
533 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
534 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
535 
536 	return bb;
537 }
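
/*
 * For example, with a 4 KiB block size (bd_blkbits = 12), order 0 maps
 * to the block bitmap itself with *max = 1 << (12 + 3) = 32768 bits,
 * while each higher order maps to a smaller bitmap located at the
 * precomputed s_mb_offsets[order] inside the buddy block, with the
 * matching s_mb_maxs[order] bit count.
 */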
538 
539 #ifdef DOUBLE_CHECK
540 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
541 			   int first, int count)
542 {
543 	int i;
544 	struct super_block *sb = e4b->bd_sb;
545 
546 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
547 		return;
548 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
549 	for (i = 0; i < count; i++) {
550 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
551 			ext4_fsblk_t blocknr;
552 
553 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
554 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
555 			ext4_grp_locked_error(sb, e4b->bd_group,
556 					      inode ? inode->i_ino : 0,
557 					      blocknr,
558 					      "freeing block already freed "
559 					      "(bit %u)",
560 					      first + i);
561 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
562 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
563 		}
564 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
565 	}
566 }
567 
568 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
569 {
570 	int i;
571 
572 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
573 		return;
574 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
575 	for (i = 0; i < count; i++) {
576 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
577 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
578 	}
579 }
580 
581 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
582 {
583 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
584 		return;
585 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
586 		unsigned char *b1, *b2;
587 		int i;
588 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
589 		b2 = (unsigned char *) bitmap;
590 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
591 			if (b1[i] != b2[i]) {
592 				ext4_msg(e4b->bd_sb, KERN_ERR,
593 					 "corruption in group %u "
594 					 "at byte %u(%u): %x in copy != %x "
595 					 "on disk/prealloc",
596 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
597 				BUG();
598 			}
599 		}
600 	}
601 }
602 
603 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
604 			struct ext4_group_info *grp, ext4_group_t group)
605 {
606 	struct buffer_head *bh;
607 
608 	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
609 	if (!grp->bb_bitmap)
610 		return;
611 
612 	bh = ext4_read_block_bitmap(sb, group);
613 	if (IS_ERR_OR_NULL(bh)) {
614 		kfree(grp->bb_bitmap);
615 		grp->bb_bitmap = NULL;
616 		return;
617 	}
618 
619 	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
620 	put_bh(bh);
621 }
622 
623 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
624 {
625 	kfree(grp->bb_bitmap);
626 }
627 
628 #else
629 static inline void mb_free_blocks_double(struct inode *inode,
630 				struct ext4_buddy *e4b, int first, int count)
631 {
632 	return;
633 }
634 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
635 						int first, int count)
636 {
637 	return;
638 }
639 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
640 {
641 	return;
642 }
643 
644 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
645 			struct ext4_group_info *grp, ext4_group_t group)
646 {
647 	return;
648 }
649 
650 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
651 {
652 	return;
653 }
654 #endif
655 
656 #ifdef AGGRESSIVE_CHECK
657 
658 #define MB_CHECK_ASSERT(assert)						\
659 do {									\
660 	if (!(assert)) {						\
661 		printk(KERN_EMERG					\
662 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
663 			function, file, line, # assert);		\
664 		BUG();							\
665 	}								\
666 } while (0)
667 
668 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
669 				const char *function, int line)
670 {
671 	struct super_block *sb = e4b->bd_sb;
672 	int order = e4b->bd_blkbits + 1;
673 	int max;
674 	int max2;
675 	int i;
676 	int j;
677 	int k;
678 	int count;
679 	struct ext4_group_info *grp;
680 	int fragments = 0;
681 	int fstart;
682 	struct list_head *cur;
683 	void *buddy;
684 	void *buddy2;
685 
686 	if (e4b->bd_info->bb_check_counter++ % 10)
687 		return 0;
688 
689 	while (order > 1) {
690 		buddy = mb_find_buddy(e4b, order, &max);
691 		MB_CHECK_ASSERT(buddy);
692 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
693 		MB_CHECK_ASSERT(buddy2);
694 		MB_CHECK_ASSERT(buddy != buddy2);
695 		MB_CHECK_ASSERT(max * 2 == max2);
696 
697 		count = 0;
698 		for (i = 0; i < max; i++) {
699 
700 			if (mb_test_bit(i, buddy)) {
701 				/* only single bit in buddy2 may be 0 */
702 				if (!mb_test_bit(i << 1, buddy2)) {
703 					MB_CHECK_ASSERT(
704 						mb_test_bit((i<<1)+1, buddy2));
705 				}
706 				continue;
707 			}
708 
709 			/* both bits in buddy2 must be 1 */
710 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
711 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
712 
713 			for (j = 0; j < (1 << order); j++) {
714 				k = (i * (1 << order)) + j;
715 				MB_CHECK_ASSERT(
716 					!mb_test_bit(k, e4b->bd_bitmap));
717 			}
718 			count++;
719 		}
720 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
721 		order--;
722 	}
723 
724 	fstart = -1;
725 	buddy = mb_find_buddy(e4b, 0, &max);
726 	for (i = 0; i < max; i++) {
727 		if (!mb_test_bit(i, buddy)) {
728 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
729 			if (fstart == -1) {
730 				fragments++;
731 				fstart = i;
732 			}
733 			continue;
734 		}
735 		fstart = -1;
736 		/* check used bits only */
737 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
738 			buddy2 = mb_find_buddy(e4b, j, &max2);
739 			k = i >> j;
740 			MB_CHECK_ASSERT(k < max2);
741 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
742 		}
743 	}
744 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
745 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
746 
747 	grp = ext4_get_group_info(sb, e4b->bd_group);
748 	list_for_each(cur, &grp->bb_prealloc_list) {
749 		ext4_group_t groupnr;
750 		struct ext4_prealloc_space *pa;
751 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
752 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
753 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
754 		for (i = 0; i < pa->pa_len; i++)
755 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
756 	}
757 	return 0;
758 }
759 #undef MB_CHECK_ASSERT
760 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
761 					__FILE__, __func__, __LINE__)
762 #else
763 #define mb_check_buddy(e4b)
764 #endif
765 
/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * For each chunk, increase bb_counters[] for its order and, for orders
 * above 0, clear the bit covering it in the buddy bitmap of that order.
 */
772 static void ext4_mb_mark_free_simple(struct super_block *sb,
773 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
774 					struct ext4_group_info *grp)
775 {
776 	struct ext4_sb_info *sbi = EXT4_SB(sb);
777 	ext4_grpblk_t min;
778 	ext4_grpblk_t max;
779 	ext4_grpblk_t chunk;
780 	unsigned int border;
781 
782 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
783 
784 	border = 2 << sb->s_blocksize_bits;
785 
786 	while (len > 0) {
787 		/* find how many blocks can be covered since this position */
788 		max = ffs(first | border) - 1;
789 
790 		/* find how many blocks of power 2 we need to mark */
791 		min = fls(len) - 1;
792 
793 		if (max < min)
794 			min = max;
795 		chunk = 1 << min;
796 
797 		/* mark multiblock chunks only */
798 		grp->bb_counters[min]++;
799 		if (min > 0)
800 			mb_clear_bit(first >> min,
801 				     buddy + sbi->s_mb_offsets[min]);
802 
803 		len -= chunk;
804 		first += chunk;
805 	}
806 }
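
/*
 * Worked example: a free range with first = 5, len = 13 (clusters 5-17)
 * is split into the power-of-2 aligned chunks [5], [6-7], [8-15] and
 * [16-17]; bb_counters[0] is bumped once, bb_counters[1] twice and
 * bb_counters[3] once, and one buddy bit is cleared for each chunk of
 * order > 0.
 */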
807 
808 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
809 {
810 	int order;
811 
	/*
	 * We don't bother with special lists for groups whose only free
	 * extents are single blocks, or for completely empty groups.
	 */
816 	order = fls(len) - 2;
817 	if (order < 0)
818 		return 0;
819 	if (order == MB_NUM_ORDERS(sb))
820 		order--;
821 	return order;
822 }
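
/*
 * For example, a group with bb_free = 1000 and bb_fragments = 10 has an
 * average fragment size of 100, and fls(100) - 2 = 5, so the group goes
 * on list 5. A len of 0 or 1 yields a negative order, which is clamped
 * to list 0.
 */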
823 
824 /* Move group to appropriate avg_fragment_size list */
825 static void
826 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
827 {
828 	struct ext4_sb_info *sbi = EXT4_SB(sb);
829 	int new_order;
830 
831 	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
832 		return;
833 
834 	new_order = mb_avg_fragment_size_order(sb,
835 					grp->bb_free / grp->bb_fragments);
836 	if (new_order == grp->bb_avg_fragment_size_order)
837 		return;
838 
839 	if (grp->bb_avg_fragment_size_order != -1) {
840 		write_lock(&sbi->s_mb_avg_fragment_size_locks[
841 					grp->bb_avg_fragment_size_order]);
842 		list_del(&grp->bb_avg_fragment_size_node);
843 		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
844 					grp->bb_avg_fragment_size_order]);
845 	}
846 	grp->bb_avg_fragment_size_order = new_order;
847 	write_lock(&sbi->s_mb_avg_fragment_size_locks[
848 					grp->bb_avg_fragment_size_order]);
849 	list_add_tail(&grp->bb_avg_fragment_size_node,
850 		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
851 	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
852 					grp->bb_avg_fragment_size_order]);
853 }
854 
855 /*
856  * Choose next group by traversing largest_free_order lists. Updates *new_cr if
857  * cr level needs an update.
858  */
859 static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
860 			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
861 {
862 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
863 	struct ext4_group_info *iter, *grp;
864 	int i;
865 
866 	if (ac->ac_status == AC_STATUS_FOUND)
867 		return;
868 
869 	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
870 		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
871 
872 	grp = NULL;
873 	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
874 		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
875 			continue;
876 		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
877 		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
878 			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
879 			continue;
880 		}
881 		grp = NULL;
882 		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
883 				    bb_largest_free_order_node) {
884 			if (sbi->s_mb_stats)
885 				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
886 			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
887 				grp = iter;
888 				break;
889 			}
890 		}
891 		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
892 		if (grp)
893 			break;
894 	}
895 
896 	if (!grp) {
897 		/* Increment cr and search again */
898 		*new_cr = 1;
899 	} else {
900 		*group = grp->bb_group;
901 		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
902 	}
903 }
904 
905 /*
906  * Choose next group by traversing average fragment size list of suitable
907  * order. Updates *new_cr if cr level needs an update.
908  */
909 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
910 		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
911 {
912 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
913 	struct ext4_group_info *grp, *iter;
914 	int i;
915 
916 	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
917 		if (sbi->s_mb_stats)
918 			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
919 	}
920 
921 	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
922 	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
923 		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
924 			continue;
925 		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
926 		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
927 			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
928 			continue;
929 		}
930 		grp = NULL;
931 		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
932 				    bb_avg_fragment_size_node) {
933 			if (sbi->s_mb_stats)
934 				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
935 			if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
936 				grp = iter;
937 				break;
938 			}
939 		}
940 		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
941 		if (grp)
942 			break;
943 	}
944 
945 	if (grp) {
946 		*group = grp->bb_group;
947 		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
948 	} else {
949 		*new_cr = 2;
950 	}
951 }
952 
953 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
954 {
955 	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
956 		return 0;
957 	if (ac->ac_criteria >= 2)
958 		return 0;
959 	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
960 		return 0;
961 	return 1;
962 }
963 
964 /*
965  * Return next linear group for allocation. If linear traversal should not be
966  * performed, this function just returns the same group
967  */
968 static int
969 next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
970 {
971 	if (!should_optimize_scan(ac))
972 		goto inc_and_return;
973 
974 	if (ac->ac_groups_linear_remaining) {
975 		ac->ac_groups_linear_remaining--;
976 		goto inc_and_return;
977 	}
978 
979 	return group;
980 inc_and_return:
981 	/*
982 	 * Artificially restricted ngroups for non-extent
983 	 * files makes group > ngroups possible on first loop.
984 	 */
985 	return group + 1 >= ngroups ? 0 : group + 1;
986 }
987 
988 /*
989  * ext4_mb_choose_next_group: choose next group for allocation.
990  *
991  * @ac        Allocation Context
992  * @new_cr    This is an output parameter. If the there is no good group
993  *            available at current CR level, this field is updated to indicate
994  *            the new cr level that should be used.
995  * @group     This is an input / output parameter. As an input it indicates the
996  *            next group that the allocator intends to use for allocation. As
997  *            output, this field indicates the next group that should be used as
998  *            determined by the optimization functions.
999  * @ngroups   Total number of groups
1000  */
1001 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1002 		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1003 {
1004 	*new_cr = ac->ac_criteria;
1005 
1006 	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1007 		*group = next_linear_group(ac, *group, ngroups);
1008 		return;
1009 	}
1010 
1011 	if (*new_cr == 0) {
1012 		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1013 	} else if (*new_cr == 1) {
1014 		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1015 	} else {
1016 		/*
1017 		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1018 		 * bb_free. But until that happens, we should never come here.
1019 		 */
1020 		WARN_ON(1);
1021 	}
1022 }
1023 
1024 /*
1025  * Cache the order of the largest free extent we have available in this block
1026  * group.
1027  */
1028 static void
1029 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1030 {
1031 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1032 	int i;
1033 
1034 	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1035 		if (grp->bb_counters[i] > 0)
1036 			break;
1037 	/* No need to move between order lists? */
1038 	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1039 	    i == grp->bb_largest_free_order) {
1040 		grp->bb_largest_free_order = i;
1041 		return;
1042 	}
1043 
1044 	if (grp->bb_largest_free_order >= 0) {
1045 		write_lock(&sbi->s_mb_largest_free_orders_locks[
1046 					      grp->bb_largest_free_order]);
1047 		list_del_init(&grp->bb_largest_free_order_node);
1048 		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1049 					      grp->bb_largest_free_order]);
1050 	}
1051 	grp->bb_largest_free_order = i;
1052 	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1053 		write_lock(&sbi->s_mb_largest_free_orders_locks[
1054 					      grp->bb_largest_free_order]);
1055 		list_add_tail(&grp->bb_largest_free_order_node,
1056 		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1057 		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1058 					      grp->bb_largest_free_order]);
1059 	}
1060 }
1061 
1062 static noinline_for_stack
1063 void ext4_mb_generate_buddy(struct super_block *sb,
1064 				void *buddy, void *bitmap, ext4_group_t group)
1065 {
1066 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1067 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1068 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1069 	ext4_grpblk_t i = 0;
1070 	ext4_grpblk_t first;
1071 	ext4_grpblk_t len;
1072 	unsigned free = 0;
1073 	unsigned fragments = 0;
1074 	unsigned long long period = get_cycles();
1075 
	/* initialize buddy from the bitmap which is an aggregation
	 * of the on-disk bitmap and preallocations */
1078 	i = mb_find_next_zero_bit(bitmap, max, 0);
1079 	grp->bb_first_free = i;
1080 	while (i < max) {
1081 		fragments++;
1082 		first = i;
1083 		i = mb_find_next_bit(bitmap, max, i);
1084 		len = i - first;
1085 		free += len;
1086 		if (len > 1)
1087 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1088 		else
1089 			grp->bb_counters[0]++;
1090 		if (i < max)
1091 			i = mb_find_next_zero_bit(bitmap, max, i);
1092 	}
1093 	grp->bb_fragments = fragments;
1094 
1095 	if (free != grp->bb_free) {
1096 		ext4_grp_locked_error(sb, group, 0, 0,
1097 				      "block bitmap and bg descriptor "
1098 				      "inconsistent: %u vs %u free clusters",
1099 				      free, grp->bb_free);
1100 		/*
1101 		 * If we intend to continue, we consider group descriptor
1102 		 * corrupt and update bb_free using bitmap value
1103 		 */
1104 		grp->bb_free = free;
1105 		ext4_mark_group_bitmap_corrupted(sb, group,
1106 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1107 	}
1108 	mb_set_largest_free_order(sb, grp);
1109 	mb_update_avg_fragment_size(sb, grp);
1110 
1111 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1112 
1113 	period = get_cycles() - period;
1114 	atomic_inc(&sbi->s_mb_buddies_generated);
1115 	atomic64_add(period, &sbi->s_mb_generation_time);
1116 }
1117 
/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involved is the
 * block bitmap and the buddy bitmap. It is stored in the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can hold the information for groups_per_page groups,
 * which is blocks_per_page/2.
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
1137 
1138 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1139 {
1140 	ext4_group_t ngroups;
1141 	int blocksize;
1142 	int blocks_per_page;
1143 	int groups_per_page;
1144 	int err = 0;
1145 	int i;
1146 	ext4_group_t first_group, group;
1147 	int first_block;
1148 	struct super_block *sb;
1149 	struct buffer_head *bhs;
1150 	struct buffer_head **bh = NULL;
1151 	struct inode *inode;
1152 	char *data;
1153 	char *bitmap;
1154 	struct ext4_group_info *grinfo;
1155 
1156 	inode = page->mapping->host;
1157 	sb = inode->i_sb;
1158 	ngroups = ext4_get_groups_count(sb);
1159 	blocksize = i_blocksize(inode);
1160 	blocks_per_page = PAGE_SIZE / blocksize;
1161 
1162 	mb_debug(sb, "init page %lu\n", page->index);
1163 
1164 	groups_per_page = blocks_per_page >> 1;
1165 	if (groups_per_page == 0)
1166 		groups_per_page = 1;
1167 
1168 	/* allocate buffer_heads to read bitmaps */
1169 	if (groups_per_page > 1) {
1170 		i = sizeof(struct buffer_head *) * groups_per_page;
1171 		bh = kzalloc(i, gfp);
1172 		if (bh == NULL) {
1173 			err = -ENOMEM;
1174 			goto out;
1175 		}
1176 	} else
1177 		bh = &bhs;
1178 
1179 	first_group = page->index * blocks_per_page / 2;
1180 
1181 	/* read all groups the page covers into the cache */
1182 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1183 		if (group >= ngroups)
1184 			break;
1185 
1186 		grinfo = ext4_get_group_info(sb, group);
1187 		/*
1188 		 * If page is uptodate then we came here after online resize
1189 		 * which added some new uninitialized group info structs, so
1190 		 * we must skip all initialized uptodate buddies on the page,
1191 		 * which may be currently in use by an allocating task.
1192 		 */
1193 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1194 			bh[i] = NULL;
1195 			continue;
1196 		}
1197 		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1198 		if (IS_ERR(bh[i])) {
1199 			err = PTR_ERR(bh[i]);
1200 			bh[i] = NULL;
1201 			goto out;
1202 		}
1203 		mb_debug(sb, "read bitmap for group %u\n", group);
1204 	}
1205 
1206 	/* wait for I/O completion */
1207 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1208 		int err2;
1209 
1210 		if (!bh[i])
1211 			continue;
1212 		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1213 		if (!err)
1214 			err = err2;
1215 	}
1216 
1217 	first_block = page->index * blocks_per_page;
1218 	for (i = 0; i < blocks_per_page; i++) {
1219 		group = (first_block + i) >> 1;
1220 		if (group >= ngroups)
1221 			break;
1222 
1223 		if (!bh[group - first_group])
1224 			/* skip initialized uptodate buddy */
1225 			continue;
1226 
1227 		if (!buffer_verified(bh[group - first_group]))
1228 			/* Skip faulty bitmaps */
1229 			continue;
1230 		err = 0;
1231 
1232 		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
1236 		 *
1237 		 */
1238 		data = page_address(page) + (i * blocksize);
1239 		bitmap = bh[group - first_group]->b_data;
1240 
1241 		/*
1242 		 * We place the buddy block and bitmap block
1243 		 * close together
1244 		 */
1245 		if ((first_block + i) & 1) {
1246 			/* this is block of buddy */
1247 			BUG_ON(incore == NULL);
1248 			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1249 				group, page->index, i * blocksize);
1250 			trace_ext4_mb_buddy_bitmap_load(sb, group);
1251 			grinfo = ext4_get_group_info(sb, group);
1252 			grinfo->bb_fragments = 0;
1253 			memset(grinfo->bb_counters, 0,
1254 			       sizeof(*grinfo->bb_counters) *
1255 			       (MB_NUM_ORDERS(sb)));
1256 			/*
1257 			 * incore got set to the group block bitmap below
1258 			 */
1259 			ext4_lock_group(sb, group);
1260 			/* init the buddy */
1261 			memset(data, 0xff, blocksize);
1262 			ext4_mb_generate_buddy(sb, data, incore, group);
1263 			ext4_unlock_group(sb, group);
1264 			incore = NULL;
1265 		} else {
1266 			/* this is block of bitmap */
1267 			BUG_ON(incore != NULL);
1268 			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1269 				group, page->index, i * blocksize);
1270 			trace_ext4_mb_bitmap_load(sb, group);
1271 
1272 			/* see comments in ext4_mb_put_pa() */
1273 			ext4_lock_group(sb, group);
1274 			memcpy(data, bitmap, blocksize);
1275 
1276 			/* mark all preallocated blks used in in-core bitmap */
1277 			ext4_mb_generate_from_pa(sb, data, group);
1278 			ext4_mb_generate_from_freelist(sb, data, group);
1279 			ext4_unlock_group(sb, group);
1280 
1281 			/* set incore so that the buddy information can be
1282 			 * generated using this
1283 			 */
1284 			incore = data;
1285 		}
1286 	}
1287 	SetPageUptodate(page);
1288 
1289 out:
1290 	if (bh) {
1291 		for (i = 0; i < groups_per_page; i++)
1292 			brelse(bh[i]);
1293 		if (bh != &bhs)
1294 			kfree(bh);
1295 	}
1296 	return err;
1297 }
1298 
/*
 * Lock the buddy and bitmap pages. This makes sure that other parallel
 * init_group on the same buddy page doesn't happen while holding the
 * buddy page lock. Return the locked buddy and bitmap pages in the e4b
 * struct. If the buddy and bitmap are on the same page,
 * e4b->bd_buddy_page is NULL and the return value is 0.
 */
1305 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1306 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1307 {
1308 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1309 	int block, pnum, poff;
1310 	int blocks_per_page;
1311 	struct page *page;
1312 
1313 	e4b->bd_buddy_page = NULL;
1314 	e4b->bd_bitmap_page = NULL;
1315 
1316 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1317 	/*
1318 	 * the buddy cache inode stores the block bitmap
1319 	 * and buddy information in consecutive blocks.
1320 	 * So for each group we need two blocks.
1321 	 */
1322 	block = group * 2;
1323 	pnum = block / blocks_per_page;
1324 	poff = block % blocks_per_page;
1325 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1326 	if (!page)
1327 		return -ENOMEM;
1328 	BUG_ON(page->mapping != inode->i_mapping);
1329 	e4b->bd_bitmap_page = page;
1330 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1331 
1332 	if (blocks_per_page >= 2) {
1333 		/* buddy and bitmap are on the same page */
1334 		return 0;
1335 	}
1336 
1337 	block++;
1338 	pnum = block / blocks_per_page;
1339 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1340 	if (!page)
1341 		return -ENOMEM;
1342 	BUG_ON(page->mapping != inode->i_mapping);
1343 	e4b->bd_buddy_page = page;
1344 	return 0;
1345 }
1346 
1347 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1348 {
1349 	if (e4b->bd_bitmap_page) {
1350 		unlock_page(e4b->bd_bitmap_page);
1351 		put_page(e4b->bd_bitmap_page);
1352 	}
1353 	if (e4b->bd_buddy_page) {
1354 		unlock_page(e4b->bd_buddy_page);
1355 		put_page(e4b->bd_buddy_page);
1356 	}
1357 }
1358 
1359 /*
1360  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1361  * block group lock of all groups for this page; do not hold the BG lock when
1362  * calling this routine!
1363  */
1364 static noinline_for_stack
1365 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1366 {
1367 
1368 	struct ext4_group_info *this_grp;
1369 	struct ext4_buddy e4b;
1370 	struct page *page;
1371 	int ret = 0;
1372 
1373 	might_sleep();
1374 	mb_debug(sb, "init group %u\n", group);
1375 	this_grp = ext4_get_group_info(sb, group);
1376 	/*
1377 	 * This ensures that we don't reinit the buddy cache
1378 	 * page which map to the group from which we are already
1379 	 * allocating. If we are looking at the buddy cache we would
1380 	 * have taken a reference using ext4_mb_load_buddy and that
1381 	 * would have pinned buddy page to page cache.
1382 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1383 	 * page accessed.
1384 	 */
1385 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1386 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1387 		/*
1388 		 * somebody initialized the group
1389 		 * return without doing anything
1390 		 */
1391 		goto err;
1392 	}
1393 
1394 	page = e4b.bd_bitmap_page;
1395 	ret = ext4_mb_init_cache(page, NULL, gfp);
1396 	if (ret)
1397 		goto err;
1398 	if (!PageUptodate(page)) {
1399 		ret = -EIO;
1400 		goto err;
1401 	}
1402 
1403 	if (e4b.bd_buddy_page == NULL) {
1404 		/*
1405 		 * If both the bitmap and buddy are in
1406 		 * the same page we don't need to force
1407 		 * init the buddy
1408 		 */
1409 		ret = 0;
1410 		goto err;
1411 	}
1412 	/* init buddy cache */
1413 	page = e4b.bd_buddy_page;
1414 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1415 	if (ret)
1416 		goto err;
1417 	if (!PageUptodate(page)) {
1418 		ret = -EIO;
1419 		goto err;
1420 	}
1421 err:
1422 	ext4_mb_put_buddy_page_lock(&e4b);
1423 	return ret;
1424 }
1425 
1426 /*
1427  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1428  * block group lock of all groups for this page; do not hold the BG lock when
1429  * calling this routine!
1430  */
1431 static noinline_for_stack int
1432 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1433 		       struct ext4_buddy *e4b, gfp_t gfp)
1434 {
1435 	int blocks_per_page;
1436 	int block;
1437 	int pnum;
1438 	int poff;
1439 	struct page *page;
1440 	int ret;
1441 	struct ext4_group_info *grp;
1442 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1443 	struct inode *inode = sbi->s_buddy_cache;
1444 
1445 	might_sleep();
1446 	mb_debug(sb, "load group %u\n", group);
1447 
1448 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1449 	grp = ext4_get_group_info(sb, group);
1450 
1451 	e4b->bd_blkbits = sb->s_blocksize_bits;
1452 	e4b->bd_info = grp;
1453 	e4b->bd_sb = sb;
1454 	e4b->bd_group = group;
1455 	e4b->bd_buddy_page = NULL;
1456 	e4b->bd_bitmap_page = NULL;
1457 
1458 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1459 		/*
1460 		 * we need full data about the group
1461 		 * to make a good selection
1462 		 */
1463 		ret = ext4_mb_init_group(sb, group, gfp);
1464 		if (ret)
1465 			return ret;
1466 	}
1467 
1468 	/*
1469 	 * the buddy cache inode stores the block bitmap
1470 	 * and buddy information in consecutive blocks.
1471 	 * So for each group we need two blocks.
1472 	 */
1473 	block = group * 2;
1474 	pnum = block / blocks_per_page;
1475 	poff = block % blocks_per_page;
1476 
1477 	/* we could use find_or_create_page(), but it locks page
1478 	 * what we'd like to avoid in fast path ... */
1479 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1480 	if (page == NULL || !PageUptodate(page)) {
1481 		if (page)
1482 			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * has not yet initialized it. So
			 * wait for it to initialize.
1489 			 */
1490 			put_page(page);
1491 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1492 		if (page) {
1493 			BUG_ON(page->mapping != inode->i_mapping);
1494 			if (!PageUptodate(page)) {
1495 				ret = ext4_mb_init_cache(page, NULL, gfp);
1496 				if (ret) {
1497 					unlock_page(page);
1498 					goto err;
1499 				}
1500 				mb_cmp_bitmaps(e4b, page_address(page) +
1501 					       (poff * sb->s_blocksize));
1502 			}
1503 			unlock_page(page);
1504 		}
1505 	}
1506 	if (page == NULL) {
1507 		ret = -ENOMEM;
1508 		goto err;
1509 	}
1510 	if (!PageUptodate(page)) {
1511 		ret = -EIO;
1512 		goto err;
1513 	}
1514 
1515 	/* Pages marked accessed already */
1516 	e4b->bd_bitmap_page = page;
1517 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1518 
1519 	block++;
1520 	pnum = block / blocks_per_page;
1521 	poff = block % blocks_per_page;
1522 
1523 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1524 	if (page == NULL || !PageUptodate(page)) {
1525 		if (page)
1526 			put_page(page);
1527 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1528 		if (page) {
1529 			BUG_ON(page->mapping != inode->i_mapping);
1530 			if (!PageUptodate(page)) {
1531 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1532 							 gfp);
1533 				if (ret) {
1534 					unlock_page(page);
1535 					goto err;
1536 				}
1537 			}
1538 			unlock_page(page);
1539 		}
1540 	}
1541 	if (page == NULL) {
1542 		ret = -ENOMEM;
1543 		goto err;
1544 	}
1545 	if (!PageUptodate(page)) {
1546 		ret = -EIO;
1547 		goto err;
1548 	}
1549 
1550 	/* Pages marked accessed already */
1551 	e4b->bd_buddy_page = page;
1552 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1553 
1554 	return 0;
1555 
1556 err:
1557 	if (page)
1558 		put_page(page);
1559 	if (e4b->bd_bitmap_page)
1560 		put_page(e4b->bd_bitmap_page);
1561 	if (e4b->bd_buddy_page)
1562 		put_page(e4b->bd_buddy_page);
1563 	e4b->bd_buddy = NULL;
1564 	e4b->bd_bitmap = NULL;
1565 	return ret;
1566 }
1567 
1568 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1569 			      struct ext4_buddy *e4b)
1570 {
1571 	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1572 }
1573 
1574 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1575 {
1576 	if (e4b->bd_bitmap_page)
1577 		put_page(e4b->bd_bitmap_page);
1578 	if (e4b->bd_buddy_page)
1579 		put_page(e4b->bd_buddy_page);
1580 }
1581 
1582 
1583 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1584 {
1585 	int order = 1, max;
1586 	void *bb;
1587 
1588 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1589 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1590 
1591 	while (order <= e4b->bd_blkbits + 1) {
1592 		bb = mb_find_buddy(e4b, order, &max);
1593 		if (!mb_test_bit(block >> order, bb)) {
1594 			/* this block is part of buddy of order 'order' */
1595 			return order;
1596 		}
1597 		order++;
1598 	}
1599 	return 0;
1600 }
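
/*
 * For example, if clusters 8-15 form a single free order-3 chunk, only
 * the order-3 buddy bit covering them is clear; for block = 10 the loop
 * finds the order-1 and order-2 bits still set and returns 3, the order
 * of the free buddy that contains the block.
 */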
1601 
1602 static void mb_clear_bits(void *bm, int cur, int len)
1603 {
1604 	__u32 *addr;
1605 
1606 	len = cur + len;
1607 	while (cur < len) {
1608 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1609 			/* fast path: clear whole word at once */
1610 			addr = bm + (cur >> 3);
1611 			*addr = 0;
1612 			cur += 32;
1613 			continue;
1614 		}
1615 		mb_clear_bit(cur, bm);
1616 		cur++;
1617 	}
1618 }
1619 
/* clear bits in the given range;
 * will return the first found zero bit if any, -1 otherwise
1622  */
1623 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1624 {
1625 	__u32 *addr;
1626 	int zero_bit = -1;
1627 
1628 	len = cur + len;
1629 	while (cur < len) {
1630 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1631 			/* fast path: clear whole word at once */
1632 			addr = bm + (cur >> 3);
1633 			if (*addr != (__u32)(-1) && zero_bit == -1)
1634 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1635 			*addr = 0;
1636 			cur += 32;
1637 			continue;
1638 		}
1639 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1640 			zero_bit = cur;
1641 		cur++;
1642 	}
1643 
1644 	return zero_bit;
1645 }
1646 
1647 void mb_set_bits(void *bm, int cur, int len)
1648 {
1649 	__u32 *addr;
1650 
1651 	len = cur + len;
1652 	while (cur < len) {
1653 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1654 			/* fast path: set whole word at once */
1655 			addr = bm + (cur >> 3);
1656 			*addr = 0xffffffff;
1657 			cur += 32;
1658 			continue;
1659 		}
1660 		mb_set_bit(cur, bm);
1661 		cur++;
1662 	}
1663 }
1664 
1665 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1666 {
1667 	if (mb_test_bit(*bit + side, bitmap)) {
1668 		mb_clear_bit(*bit, bitmap);
1669 		(*bit) -= side;
1670 		return 1;
1671 	}
1672 	else {
1673 		(*bit) += side;
1674 		mb_set_bit(*bit, bitmap);
1675 		return -1;
1676 	}
1677 }
1678 
1679 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1680 {
1681 	int max;
1682 	int order = 1;
1683 	void *buddy = mb_find_buddy(e4b, order, &max);
1684 
1685 	while (buddy) {
1686 		void *buddy2;
1687 
1688 		/* Bits in range [first; last] are known to be set since
1689 		 * corresponding blocks were allocated. Bits in range
1690 		 * (first; last) will stay set because they form buddies on
1691 		 * upper layer. We just deal with borders if they don't
1692 		 * align with upper layer and then go up.
1693 		 * Releasing entire group is all about clearing
1694 		 * single bit of highest order buddy.
1695 		 */
1696 
1697 		/* Example:
1698 		 * ---------------------------------
1699 		 * |   1   |   1   |   1   |   1   |
1700 		 * ---------------------------------
1701 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1702 		 * ---------------------------------
1703 		 *   0   1   2   3   4   5   6   7
1704 		 *      \_____________________/
1705 		 *
1706 		 * Neither [1] nor [6] is aligned to above layer.
1707 		 * Left neighbour [0] is free, so mark it busy,
1708 		 * decrease bb_counters and extend range to
1709 		 * [0; 6]
1710 		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1711 		 * mark [6] free, increase bb_counters and shrink range to
1712 		 * [0; 5].
1713 		 * Then shift range to [0; 2], go up and do the same.
1714 		 */
1715 
1716 
1717 		if (first & 1)
1718 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1719 		if (!(last & 1))
1720 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1721 		if (first > last)
1722 			break;
1723 		order++;
1724 
1725 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1726 			mb_clear_bits(buddy, first, last - first + 1);
1727 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1728 			break;
1729 		}
1730 		first >>= 1;
1731 		last >>= 1;
1732 		buddy = buddy2;
1733 	}
1734 }
1735 
1736 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1737 			   int first, int count)
1738 {
1739 	int left_is_free = 0;
1740 	int right_is_free = 0;
1741 	int block;
1742 	int last = first + count - 1;
1743 	struct super_block *sb = e4b->bd_sb;
1744 
1745 	if (WARN_ON(count == 0))
1746 		return;
1747 	BUG_ON(last >= (sb->s_blocksize << 3));
1748 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1749 	/* Don't bother if the block group is corrupt. */
1750 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1751 		return;
1752 
1753 	mb_check_buddy(e4b);
1754 	mb_free_blocks_double(inode, e4b, first, count);
1755 
1756 	this_cpu_inc(discard_pa_seq);
1757 	e4b->bd_info->bb_free += count;
1758 	if (first < e4b->bd_info->bb_first_free)
1759 		e4b->bd_info->bb_first_free = first;
1760 
1761 	/* access memory sequentially: check left neighbour,
1762 	 * clear range and then check right neighbour
1763 	 */
1764 	if (first != 0)
1765 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1766 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1767 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1768 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1769 
1770 	if (unlikely(block != -1)) {
1771 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1772 		ext4_fsblk_t blocknr;
1773 
1774 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1775 		blocknr += EXT4_C2B(sbi, block);
1776 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1777 			ext4_grp_locked_error(sb, e4b->bd_group,
1778 					      inode ? inode->i_ino : 0,
1779 					      blocknr,
1780 					      "freeing already freed block (bit %u); block bitmap corrupt.",
1781 					      block);
1782 			ext4_mark_group_bitmap_corrupted(
1783 				sb, e4b->bd_group,
1784 				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1785 		}
1786 		goto done;
1787 	}
1788 
1789 	/* let's maintain fragments counter */
1790 	if (left_is_free && right_is_free)
1791 		e4b->bd_info->bb_fragments--;
1792 	else if (!left_is_free && !right_is_free)
1793 		e4b->bd_info->bb_fragments++;
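	/* e.g. if both neighbours are free, the left and right fragments
	 * merge with the freed range into one (2 -> 1); if both are in
	 * use, the freed range becomes a brand new fragment (0 -> 1);
	 * with exactly one free neighbour the count is unchanged.
	 */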
1794 
1795 	/* buddy[0] == bd_bitmap is a special case, so handle
1796 	 * it right away and let mb_buddy_mark_free stay free of
1797 	 * zero order checks.
1798 	 * Check if neighbours are to be coalesced,
1799 	 * adjust bitmap bb_counters and borders appropriately.
1800 	 */
1801 	if (first & 1) {
1802 		first += !left_is_free;
1803 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1804 	}
1805 	if (!(last & 1)) {
1806 		last -= !right_is_free;
1807 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1808 	}
1809 
1810 	if (first <= last)
1811 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1812 
1813 done:
1814 	mb_set_largest_free_order(sb, e4b->bd_info);
1815 	mb_update_avg_fragment_size(sb, e4b->bd_info);
1816 	mb_check_buddy(e4b);
1817 }
1818 
1819 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1820 				int needed, struct ext4_free_extent *ex)
1821 {
1822 	int next = block;
1823 	int max, order;
1824 	void *buddy;
1825 
1826 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1827 	BUG_ON(ex == NULL);
1828 
1829 	buddy = mb_find_buddy(e4b, 0, &max);
1830 	BUG_ON(buddy == NULL);
1831 	BUG_ON(block >= max);
1832 	if (mb_test_bit(block, buddy)) {
1833 		ex->fe_len = 0;
1834 		ex->fe_start = 0;
1835 		ex->fe_group = 0;
1836 		return 0;
1837 	}
1838 
1839 	/* find actual order */
1840 	order = mb_find_order_for_block(e4b, block);
1841 	block = block >> order;
1842 
1843 	ex->fe_len = 1 << order;
1844 	ex->fe_start = block << order;
1845 	ex->fe_group = e4b->bd_group;
1846 
1847 	/* calc difference from given start */
1848 	next = next - ex->fe_start;
1849 	ex->fe_len -= next;
1850 	ex->fe_start += next;
1851 
1852 	while (needed > ex->fe_len &&
1853 	       mb_find_buddy(e4b, order, &max)) {
1854 
1855 		if (block + 1 >= max)
1856 			break;
1857 
1858 		next = (block + 1) * (1 << order);
1859 		if (mb_test_bit(next, e4b->bd_bitmap))
1860 			break;
1861 
1862 		order = mb_find_order_for_block(e4b, next);
1863 
1864 		block = next >> order;
1865 		ex->fe_len += 1 << order;
1866 	}
1867 
1868 	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1869 		/* Should never happen! (but apparently sometimes does?!?) */
1870 		WARN_ON(1);
1871 		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1872 			"corruption or bug in mb_find_extent "
1873 			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1874 			block, order, needed, ex->fe_group, ex->fe_start,
1875 			ex->fe_len, ex->fe_logical);
1876 		ex->fe_len = 0;
1877 		ex->fe_start = 0;
1878 		ex->fe_group = 0;
1879 	}
1880 	return ex->fe_len;
1881 }
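/*
 * Worked example for mb_find_extent() above (values assumed for
 * illustration): suppose block 9 lies in a free order-2 buddy covering
 * blocks 8-11, blocks 12-13 form a free order-1 buddy, block 14 is in
 * use and needed == 8.  The first step yields fe_start = 9, fe_len = 3
 * (the tail of the order-2 chunk).  The loop then probes block 12
 * (next == (2 + 1) << 2), finds it free at order 1 and grows fe_len to
 * 5, then stops at the used block 14, returning the extent of blocks
 * 9-13.
 */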
1882 
1883 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1884 {
1885 	int ord;
1886 	int mlen = 0;
1887 	int max = 0;
1888 	int cur;
1889 	int start = ex->fe_start;
1890 	int len = ex->fe_len;
1891 	unsigned ret = 0;
1892 	int len0 = len;
1893 	void *buddy;
1894 	bool split = false;
1895 
1896 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1897 	BUG_ON(e4b->bd_group != ex->fe_group);
1898 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1899 	mb_check_buddy(e4b);
1900 	mb_mark_used_double(e4b, start, len);
1901 
1902 	this_cpu_inc(discard_pa_seq);
1903 	e4b->bd_info->bb_free -= len;
1904 	if (e4b->bd_info->bb_first_free == start)
1905 		e4b->bd_info->bb_first_free += len;
1906 
1907 	/* let's maintain fragments counter */
1908 	if (start != 0)
1909 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1910 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1911 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1912 	if (mlen && max)
1913 		e4b->bd_info->bb_fragments++;
1914 	else if (!mlen && !max)
1915 		e4b->bd_info->bb_fragments--;
1916 
1917 	/* let's maintain buddy itself */
1918 	while (len) {
1919 		if (!split)
1920 			ord = mb_find_order_for_block(e4b, start);
1921 
1922 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1923 			/* the whole chunk may be allocated at once! */
1924 			mlen = 1 << ord;
1925 			if (!split)
1926 				buddy = mb_find_buddy(e4b, ord, &max);
1927 			else
1928 				split = false;
1929 			BUG_ON((start >> ord) >= max);
1930 			mb_set_bit(start >> ord, buddy);
1931 			e4b->bd_info->bb_counters[ord]--;
1932 			start += mlen;
1933 			len -= mlen;
1934 			BUG_ON(len < 0);
1935 			continue;
1936 		}
1937 
1938 		/* store for history */
1939 		if (ret == 0)
1940 			ret = len | (ord << 16);
1941 
1942 		/* we have to split large buddy */
1943 		BUG_ON(ord <= 0);
1944 		buddy = mb_find_buddy(e4b, ord, &max);
1945 		mb_set_bit(start >> ord, buddy);
1946 		e4b->bd_info->bb_counters[ord]--;
1947 
1948 		ord--;
1949 		cur = (start >> ord) & ~1U;
1950 		buddy = mb_find_buddy(e4b, ord, &max);
1951 		mb_clear_bit(cur, buddy);
1952 		mb_clear_bit(cur + 1, buddy);
1953 		e4b->bd_info->bb_counters[ord]++;
1954 		e4b->bd_info->bb_counters[ord]++;
1955 		split = true;
1956 	}
1957 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1958 
1959 	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1960 	mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1961 	mb_check_buddy(e4b);
1962 
1963 	return ret;
1964 }
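/*
 * Example of the history encoding above: if the first buddy split in
 * mb_mark_used() happens with len == 5 blocks still to mark while an
 * order-3 chunk is being split, ret == 5 | (3 << 16).
 * ext4_mb_use_best_found() below unpacks this into ac_tail == 5 and
 * ac_buddy == 3.
 */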
1965 
1966 /*
1967  * Must be called under group lock!
1968  */
1969 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1970 					struct ext4_buddy *e4b)
1971 {
1972 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1973 	int ret;
1974 
1975 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1976 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1977 
1978 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1979 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1980 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1981 
1982 	/* preallocation can change ac_b_ex, thus we store actually
1983 	 * allocated blocks for history */
1984 	ac->ac_f_ex = ac->ac_b_ex;
1985 
1986 	ac->ac_status = AC_STATUS_FOUND;
1987 	ac->ac_tail = ret & 0xffff;
1988 	ac->ac_buddy = ret >> 16;
1989 
1990 	/*
1991 	 * take the page reference. We want the page to be pinned
1992 	 * so that we don't get an ext4_mb_init_cache() call for this
1993 	 * group until we update the bitmap. That would mean we
1994 	 * double allocate blocks. The reference is dropped
1995 	 * in ext4_mb_release_context
1996 	 */
1997 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1998 	get_page(ac->ac_bitmap_page);
1999 	ac->ac_buddy_page = e4b->bd_buddy_page;
2000 	get_page(ac->ac_buddy_page);
2001 	/* store last allocated for subsequent stream allocation */
2002 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2003 		spin_lock(&sbi->s_md_lock);
2004 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2005 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2006 		spin_unlock(&sbi->s_md_lock);
2007 	}
2008 	/*
2009 	 * As we've just preallocated more space than
2010 	 * user requested originally, we store allocated
2011 	 * space in a special descriptor.
2012 	 */
2013 	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2014 		ext4_mb_new_preallocation(ac);
2015 
2016 }
2017 
2018 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2019 					struct ext4_buddy *e4b,
2020 					int finish_group)
2021 {
2022 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2023 	struct ext4_free_extent *bex = &ac->ac_b_ex;
2024 	struct ext4_free_extent *gex = &ac->ac_g_ex;
2025 	struct ext4_free_extent ex;
2026 	int max;
2027 
2028 	if (ac->ac_status == AC_STATUS_FOUND)
2029 		return;
2030 	/*
2031 	 * We don't want to scan for a whole year
2032 	 */
2033 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
2034 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2035 		ac->ac_status = AC_STATUS_BREAK;
2036 		return;
2037 	}
2038 
2039 	/*
2040 	 * Haven't found good chunk so far, let's continue
2041 	 */
2042 	if (bex->fe_len < gex->fe_len)
2043 		return;
2044 
2045 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2046 			&& bex->fe_group == e4b->bd_group) {
2047 		/* recheck chunk's availability - we don't know
2048 		 * when it was found (within this lock-unlock
2049 		 * period or not) */
2050 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2051 		if (max >= gex->fe_len) {
2052 			ext4_mb_use_best_found(ac, e4b);
2053 			return;
2054 		}
2055 	}
2056 }
2057 
2058 /*
2059  * The routine checks whether the found extent is good enough. If it is,
2060  * the extent is marked used and a flag is set in the context to stop
2061  * scanning. Otherwise, the extent is compared with the previously found
2062  * extent and, if the new one is better, it is stored in the context.
2063  * Later, the best found extent will be used if mballoc can't find a
2064  * good enough extent.
2065  *
2066  * FIXME: real allocation policy is to be designed yet!
2067  */
2068 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2069 					struct ext4_free_extent *ex,
2070 					struct ext4_buddy *e4b)
2071 {
2072 	struct ext4_free_extent *bex = &ac->ac_b_ex;
2073 	struct ext4_free_extent *gex = &ac->ac_g_ex;
2074 
2075 	BUG_ON(ex->fe_len <= 0);
2076 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2077 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2078 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2079 
2080 	ac->ac_found++;
2081 
2082 	/*
2083 	 * The special case - take what you catch first
2084 	 */
2085 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2086 		*bex = *ex;
2087 		ext4_mb_use_best_found(ac, e4b);
2088 		return;
2089 	}
2090 
2091 	/*
2092 	 * Let's check whether the chunk is good enough
2093 	 */
2094 	if (ex->fe_len == gex->fe_len) {
2095 		*bex = *ex;
2096 		ext4_mb_use_best_found(ac, e4b);
2097 		return;
2098 	}
2099 
2100 	/*
2101 	 * If this is first found extent, just store it in the context
2102 	 */
2103 	if (bex->fe_len == 0) {
2104 		*bex = *ex;
2105 		return;
2106 	}
2107 
2108 	/*
2109 	 * If new found extent is better, store it in the context
2110 	 */
2111 	if (bex->fe_len < gex->fe_len) {
2112 		/* if the request isn't satisfied, any found extent
2113 		 * larger than previous best one is better */
2114 		if (ex->fe_len > bex->fe_len)
2115 			*bex = *ex;
2116 	} else if (ex->fe_len > gex->fe_len) {
2117 		/* if the request is satisfied, then we try to find
2118 		 * an extent that still satisfies the request, but is
2119 		 * smaller than the previous one */
2120 		if (ex->fe_len < bex->fe_len)
2121 			*bex = *ex;
2122 	}
2123 
2124 	ext4_mb_check_limits(ac, e4b, 0);
2125 }
2126 
2127 static noinline_for_stack
2128 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2129 					struct ext4_buddy *e4b)
2130 {
2131 	struct ext4_free_extent ex = ac->ac_b_ex;
2132 	ext4_group_t group = ex.fe_group;
2133 	int max;
2134 	int err;
2135 
2136 	BUG_ON(ex.fe_len <= 0);
2137 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2138 	if (err)
2139 		return err;
2140 
2141 	ext4_lock_group(ac->ac_sb, group);
2142 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2143 
2144 	if (max > 0) {
2145 		ac->ac_b_ex = ex;
2146 		ext4_mb_use_best_found(ac, e4b);
2147 	}
2148 
2149 	ext4_unlock_group(ac->ac_sb, group);
2150 	ext4_mb_unload_buddy(e4b);
2151 
2152 	return 0;
2153 }
2154 
2155 static noinline_for_stack
2156 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2157 				struct ext4_buddy *e4b)
2158 {
2159 	ext4_group_t group = ac->ac_g_ex.fe_group;
2160 	int max;
2161 	int err;
2162 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2163 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2164 	struct ext4_free_extent ex;
2165 
2166 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
2167 		return 0;
2168 	if (grp->bb_free == 0)
2169 		return 0;
2170 
2171 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2172 	if (err)
2173 		return err;
2174 
2175 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2176 		ext4_mb_unload_buddy(e4b);
2177 		return 0;
2178 	}
2179 
2180 	ext4_lock_group(ac->ac_sb, group);
2181 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2182 			     ac->ac_g_ex.fe_len, &ex);
2183 	ex.fe_logical = 0xDEADFA11; /* debug value */
2184 
2185 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2186 		ext4_fsblk_t start;
2187 
2188 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2189 			ex.fe_start;
2190 		/* use do_div to get remainder (would be 64-bit modulo) */
2191 		if (do_div(start, sbi->s_stripe) == 0) {
2192 			ac->ac_found++;
2193 			ac->ac_b_ex = ex;
2194 			ext4_mb_use_best_found(ac, e4b);
2195 		}
2196 	} else if (max >= ac->ac_g_ex.fe_len) {
2197 		BUG_ON(ex.fe_len <= 0);
2198 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2199 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2200 		ac->ac_found++;
2201 		ac->ac_b_ex = ex;
2202 		ext4_mb_use_best_found(ac, e4b);
2203 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2204 		/* Sometimes the caller may want to merge even a small
2205 		 * number of blocks into an existing extent */
2206 		BUG_ON(ex.fe_len <= 0);
2207 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2208 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2209 		ac->ac_found++;
2210 		ac->ac_b_ex = ex;
2211 		ext4_mb_use_best_found(ac, e4b);
2212 	}
2213 	ext4_unlock_group(ac->ac_sb, group);
2214 	ext4_mb_unload_buddy(e4b);
2215 
2216 	return 0;
2217 }
2218 
2219 /*
2220  * The routine scans buddy structures (not bitmap!) from given order
2221  * to max order and tries to find big enough chunk to satisfy the req
2222  */
2223 static noinline_for_stack
2224 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2225 					struct ext4_buddy *e4b)
2226 {
2227 	struct super_block *sb = ac->ac_sb;
2228 	struct ext4_group_info *grp = e4b->bd_info;
2229 	void *buddy;
2230 	int i;
2231 	int k;
2232 	int max;
2233 
2234 	BUG_ON(ac->ac_2order <= 0);
2235 	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2236 		if (grp->bb_counters[i] == 0)
2237 			continue;
2238 
2239 		buddy = mb_find_buddy(e4b, i, &max);
2240 		BUG_ON(buddy == NULL);
2241 
2242 		k = mb_find_next_zero_bit(buddy, max, 0);
2243 		if (k >= max) {
2244 			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2245 				"%d free clusters of order %d. But found 0",
2246 				grp->bb_counters[i], i);
2247 			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2248 					 e4b->bd_group,
2249 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2250 			break;
2251 		}
2252 		ac->ac_found++;
2253 
2254 		ac->ac_b_ex.fe_len = 1 << i;
2255 		ac->ac_b_ex.fe_start = k << i;
2256 		ac->ac_b_ex.fe_group = e4b->bd_group;
2257 
2258 		ext4_mb_use_best_found(ac, e4b);
2259 
2260 		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2261 
2262 		if (EXT4_SB(sb)->s_mb_stats)
2263 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2264 
2265 		break;
2266 	}
2267 }
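/*
 * Example for ext4_mb_simple_scan_group() above: an 8-cluster request
 * has ac->ac_2order == 3, so the scan starts at the order-3 buddy
 * bitmap.  If bb_counters[3] > 0 and the first zero bit there is at
 * index k, the chosen extent is the order-3 chunk of 1 << 3 == 8
 * clusters starting at block k << 3.
 */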
2268 
2269 /*
2270  * The routine scans the group and measures all found extents.
2271  * In order to optimize scanning, the caller must pass the number of
2272  * free blocks in the group, so the routine knows the upper limit.
2273  */
2274 static noinline_for_stack
2275 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2276 					struct ext4_buddy *e4b)
2277 {
2278 	struct super_block *sb = ac->ac_sb;
2279 	void *bitmap = e4b->bd_bitmap;
2280 	struct ext4_free_extent ex;
2281 	int i;
2282 	int free;
2283 
2284 	free = e4b->bd_info->bb_free;
2285 	if (WARN_ON(free <= 0))
2286 		return;
2287 
2288 	i = e4b->bd_info->bb_first_free;
2289 
2290 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2291 		i = mb_find_next_zero_bit(bitmap,
2292 						EXT4_CLUSTERS_PER_GROUP(sb), i);
2293 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2294 			/*
2295 			 * If we have a corrupt bitmap, we won't find any
2296 			 * free blocks even though the group info says we
2297 			 * have free blocks
2298 			 */
2299 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2300 					"%d free clusters as per "
2301 					"group info. But bitmap says 0",
2302 					free);
2303 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2304 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2305 			break;
2306 		}
2307 
2308 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2309 		if (WARN_ON(ex.fe_len <= 0))
2310 			break;
2311 		if (free < ex.fe_len) {
2312 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2313 					"%d free clusters as per "
2314 					"group info. But got %d blocks",
2315 					free, ex.fe_len);
2316 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2317 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2318 			/*
2319 			 * The number of free blocks differs. This most
2320 			 * likely indicates that the bitmap is corrupt, so
2321 			 * exit without claiming the space.
2322 			 */
2323 			break;
2324 		}
2325 		ex.fe_logical = 0xDEADC0DE; /* debug value */
2326 		ext4_mb_measure_extent(ac, &ex, e4b);
2327 
2328 		i += ex.fe_len;
2329 		free -= ex.fe_len;
2330 	}
2331 
2332 	ext4_mb_check_limits(ac, e4b, 1);
2333 }
2334 
2335 /*
2336  * This is a special case for storage like RAID5:
2337  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2338  */
2339 static noinline_for_stack
2340 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2341 				 struct ext4_buddy *e4b)
2342 {
2343 	struct super_block *sb = ac->ac_sb;
2344 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2345 	void *bitmap = e4b->bd_bitmap;
2346 	struct ext4_free_extent ex;
2347 	ext4_fsblk_t first_group_block;
2348 	ext4_fsblk_t a;
2349 	ext4_grpblk_t i;
2350 	int max;
2351 
2352 	BUG_ON(sbi->s_stripe == 0);
2353 
2354 	/* find first stripe-aligned block in group */
2355 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2356 
2357 	a = first_group_block + sbi->s_stripe - 1;
2358 	do_div(a, sbi->s_stripe);
2359 	i = (a * sbi->s_stripe) - first_group_block;
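	/*
	 * Example (values assumed for illustration): with
	 * first_group_block == 1000 and s_stripe == 16,
	 * a == (1000 + 15) / 16 == 63 and i == 63 * 16 - 1000 == 8, so
	 * the first stripe-aligned block sits at group-relative offset 8.
	 */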
2360 
2361 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2362 		if (!mb_test_bit(i, bitmap)) {
2363 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2364 			if (max >= sbi->s_stripe) {
2365 				ac->ac_found++;
2366 				ex.fe_logical = 0xDEADF00D; /* debug value */
2367 				ac->ac_b_ex = ex;
2368 				ext4_mb_use_best_found(ac, e4b);
2369 				break;
2370 			}
2371 		}
2372 		i += sbi->s_stripe;
2373 	}
2374 }
2375 
2376 /*
2377  * This is also called BEFORE we load the buddy bitmap.
2378  * Returns 1 or 0, indicating whether or not the group is suitable
2379  * for the allocation.
2380  */
2381 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2382 				ext4_group_t group, int cr)
2383 {
2384 	ext4_grpblk_t free, fragments;
2385 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2386 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2387 
2388 	BUG_ON(cr < 0 || cr >= 4);
2389 
2390 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2391 		return false;
2392 
2393 	free = grp->bb_free;
2394 	if (free == 0)
2395 		return false;
2396 
2397 	fragments = grp->bb_fragments;
2398 	if (fragments == 0)
2399 		return false;
2400 
2401 	switch (cr) {
2402 	case 0:
2403 		BUG_ON(ac->ac_2order == 0);
2404 
2405 		/* Avoid using the first bg of a flexgroup for data files */
2406 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2407 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2408 		    ((group % flex_size) == 0))
2409 			return false;
2410 
2411 		if (free < ac->ac_g_ex.fe_len)
2412 			return false;
2413 
2414 		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2415 			return true;
2416 
2417 		if (grp->bb_largest_free_order < ac->ac_2order)
2418 			return false;
2419 
2420 		return true;
2421 	case 1:
2422 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2423 			return true;
2424 		break;
2425 	case 2:
2426 		if (free >= ac->ac_g_ex.fe_len)
2427 			return true;
2428 		break;
2429 	case 3:
2430 		return true;
2431 	default:
2432 		BUG();
2433 	}
2434 
2435 	return false;
2436 }
2437 
2438 /*
2439  * This could return negative error code if something goes wrong
2440  * during ext4_mb_init_group(). This should not be called with
2441  * ext4_lock_group() held.
2442  *
2443  * Note: because we are conditionally operating with the group lock in
2444  * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2445  * function using __acquire and __release.  This means we need to be
2446  * super careful before messing with the error path handling via "goto
2447  * out"!
2448  */
2449 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2450 				     ext4_group_t group, int cr)
2451 {
2452 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2453 	struct super_block *sb = ac->ac_sb;
2454 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2455 	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2456 	ext4_grpblk_t free;
2457 	int ret = 0;
2458 
2459 	if (sbi->s_mb_stats)
2460 		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2461 	if (should_lock) {
2462 		ext4_lock_group(sb, group);
2463 		__release(ext4_group_lock_ptr(sb, group));
2464 	}
2465 	free = grp->bb_free;
2466 	if (free == 0)
2467 		goto out;
2468 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2469 		goto out;
2470 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2471 		goto out;
2472 	if (should_lock) {
2473 		__acquire(ext4_group_lock_ptr(sb, group));
2474 		ext4_unlock_group(sb, group);
2475 	}
2476 
2477 	/* We only do this if the grp has never been initialized */
2478 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2479 		struct ext4_group_desc *gdp =
2480 			ext4_get_group_desc(sb, group, NULL);
2481 		int ret;
2482 
2483 		/* cr=0/1 is a very optimistic search to find large
2484 		 * good chunks almost for free.  If buddy data is not
2485 		 * ready, then this optimization makes no sense.  But
2486 		 * we never skip the first block group in a flex_bg,
2487 		 * since this gets used for metadata block allocation,
2488 		 * and we want to make sure we locate metadata blocks
2489 		 * in the first block group in the flex_bg if possible.
2490 		 */
2491 		if (cr < 2 &&
2492 		    (!sbi->s_log_groups_per_flex ||
2493 		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2494 		    !(ext4_has_group_desc_csum(sb) &&
2495 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2496 			return 0;
2497 		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2498 		if (ret)
2499 			return ret;
2500 	}
2501 
2502 	if (should_lock) {
2503 		ext4_lock_group(sb, group);
2504 		__release(ext4_group_lock_ptr(sb, group));
2505 	}
2506 	ret = ext4_mb_good_group(ac, group, cr);
2507 out:
2508 	if (should_lock) {
2509 		__acquire(ext4_group_lock_ptr(sb, group));
2510 		ext4_unlock_group(sb, group);
2511 	}
2512 	return ret;
2513 }
2514 
2515 /*
2516  * Start prefetching @nr block bitmaps starting at @group.
2517  * Return the next group which needs to be prefetched.
2518  */
2519 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2520 			      unsigned int nr, int *cnt)
2521 {
2522 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2523 	struct buffer_head *bh;
2524 	struct blk_plug plug;
2525 
2526 	blk_start_plug(&plug);
2527 	while (nr-- > 0) {
2528 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2529 								  NULL);
2530 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2531 
2532 		/*
2533 		 * Prefetch block groups with free blocks; but don't
2534 		 * bother if it is marked uninitialized on disk, since
2535 		 * it won't require I/O to read.  Also only try to
2536 		 * prefetch once, so that we avoid a getblk() call, which can
2537 		 * be expensive.
2538 		 */
2539 		if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2540 		    EXT4_MB_GRP_NEED_INIT(grp) &&
2541 		    ext4_free_group_clusters(sb, gdp) > 0 &&
2542 		    !(ext4_has_group_desc_csum(sb) &&
2543 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2544 			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2545 			if (bh && !IS_ERR(bh)) {
2546 				if (!buffer_uptodate(bh) && cnt)
2547 					(*cnt)++;
2548 				brelse(bh);
2549 			}
2550 		}
2551 		if (++group >= ngroups)
2552 			group = 0;
2553 	}
2554 	blk_finish_plug(&plug);
2555 	return group;
2556 }
2557 
2558 /*
2559  * Prefetching reads the block bitmap into the buffer cache; but we
2560  * need to make sure that the buddy bitmap in the page cache has been
2561  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2562  * is not yet completed, or indeed if the I/O was not initiated by
2563  * ext4_mb_prefetch at all.
2564  *
2565  * TODO: We should actually kick off the buddy bitmap setup in a work
2566  * queue when the buffer I/O is completed, so that we don't block
2567  * waiting for the block allocation bitmap read to finish when
2568  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2569  */
2570 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2571 			   unsigned int nr)
2572 {
2573 	while (nr-- > 0) {
2574 		struct ext4_group_desc *gdp;
2575 		struct ext4_group_info *grp;
2576 
2577 		if (!group)
2578 			group = ext4_get_groups_count(sb);
2579 		group--;
2580 		gdp = ext4_get_group_desc(sb, group, NULL);
2581 		grp = ext4_get_group_info(sb, group);
2582 
2583 		if (EXT4_MB_GRP_NEED_INIT(grp) &&
2584 		    ext4_free_group_clusters(sb, gdp) > 0 &&
2585 		    !(ext4_has_group_desc_csum(sb) &&
2586 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2587 			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2588 				break;
2589 		}
2590 	}
2591 }
2592 
2593 static noinline_for_stack int
2594 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2595 {
2596 	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2597 	int cr = -1, new_cr;
2598 	int err = 0, first_err = 0;
2599 	unsigned int nr = 0, prefetch_ios = 0;
2600 	struct ext4_sb_info *sbi;
2601 	struct super_block *sb;
2602 	struct ext4_buddy e4b;
2603 	int lost;
2604 
2605 	sb = ac->ac_sb;
2606 	sbi = EXT4_SB(sb);
2607 	ngroups = ext4_get_groups_count(sb);
2608 	/* non-extent files are limited to low blocks/groups */
2609 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2610 		ngroups = sbi->s_blockfile_groups;
2611 
2612 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2613 
2614 	/* first, try the goal */
2615 	err = ext4_mb_find_by_goal(ac, &e4b);
2616 	if (err || ac->ac_status == AC_STATUS_FOUND)
2617 		goto out;
2618 
2619 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2620 		goto out;
2621 
2622 	/*
2623 	 * ac->ac_2order is set only if the fe_len is a power of 2.
2624 	 * If ac->ac_2order is set, we also set the criteria to 0 so that
2625 	 * we try an exact allocation using the buddy data.
2626 	 */
2627 	i = fls(ac->ac_g_ex.fe_len);
2628 	ac->ac_2order = 0;
2629 	/*
2630 	 * We search using buddy data only if the order of the request
2631 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2632 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2633 	 * We also support searching for power-of-two requests only for
2634 	 * requests up to the maximum buddy size we have constructed.
2635 	 */
2636 	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2637 		/*
2638 		 * This should tell if fe_len is exactly a power of 2
2639 		 */
2640 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2641 			ac->ac_2order = array_index_nospec(i - 1,
2642 							   MB_NUM_ORDERS(sb));
2643 	}
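	/*
	 * Example: fe_len == 16 gives i == fls(16) == 5 and
	 * 16 & ~(1 << 4) == 0, so ac_2order becomes 4.  fe_len == 24 also
	 * gives i == 5, but 24 & ~16 == 8 != 0, so ac_2order stays 0 and
	 * the cr == 0 pass below is skipped.
	 */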
2644 
2645 	/* if stream allocation is enabled, use global goal */
2646 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2647 		/* TBD: this may be a contention hot spot */
2648 		spin_lock(&sbi->s_md_lock);
2649 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2650 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2651 		spin_unlock(&sbi->s_md_lock);
2652 	}
2653 
2654 	/* Let's just scan groups to find more or less suitable blocks */
2655 	cr = ac->ac_2order ? 0 : 1;
2656 	/*
2657 	 * cr == 0 try to get an exact (power-of-two) allocation,
2658 	 * cr == 1/2 progressively relax the checks, cr == 3 try to get anything
2659 	 */
2660 repeat:
2661 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2662 		ac->ac_criteria = cr;
2663 		/*
2664 		 * searching for the right group start
2665 		 * from the goal value specified
2666 		 */
2667 		group = ac->ac_g_ex.fe_group;
2668 		ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2669 		prefetch_grp = group;
2670 
2671 		for (i = 0, new_cr = cr; i < ngroups; i++,
2672 		     ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2673 			int ret = 0;
2674 
2675 			cond_resched();
2676 			if (new_cr != cr) {
2677 				cr = new_cr;
2678 				goto repeat;
2679 			}
2680 
2681 			/*
2682 			 * Batch reads of the block allocation bitmaps
2683 			 * to get multiple READs in flight; limit
2684 			 * prefetching at cr=0/1, otherwise mballoc can
2685 			 * spend a lot of time loading imperfect groups
2686 			 */
2687 			if ((prefetch_grp == group) &&
2688 			    (cr > 1 ||
2689 			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2690 				unsigned int curr_ios = prefetch_ios;
2691 
2692 				nr = sbi->s_mb_prefetch;
2693 				if (ext4_has_feature_flex_bg(sb)) {
2694 					nr = 1 << sbi->s_log_groups_per_flex;
2695 					nr -= group & (nr - 1);
2696 					nr = min(nr, sbi->s_mb_prefetch);
2697 				}
2698 				prefetch_grp = ext4_mb_prefetch(sb, group,
2699 							nr, &prefetch_ios);
2700 				if (prefetch_ios == curr_ios)
2701 					nr = 0;
2702 			}
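			/*
			 * Example of the flex_bg clamp above (values
			 * assumed for illustration): with 16 groups per
			 * flex group and group == 10,
			 * nr == 16 - (10 & 15) == 6, so prefetching
			 * stops at the flex group boundary (unless
			 * s_mb_prefetch clamps it further).
			 */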
2703 
2704 			/* This now checks without needing the buddy page */
2705 			ret = ext4_mb_good_group_nolock(ac, group, cr);
2706 			if (ret <= 0) {
2707 				if (!first_err)
2708 					first_err = ret;
2709 				continue;
2710 			}
2711 
2712 			err = ext4_mb_load_buddy(sb, group, &e4b);
2713 			if (err)
2714 				goto out;
2715 
2716 			ext4_lock_group(sb, group);
2717 
2718 			/*
2719 			 * We need to check again after locking the
2720 			 * block group
2721 			 */
2722 			ret = ext4_mb_good_group(ac, group, cr);
2723 			if (ret == 0) {
2724 				ext4_unlock_group(sb, group);
2725 				ext4_mb_unload_buddy(&e4b);
2726 				continue;
2727 			}
2728 
2729 			ac->ac_groups_scanned++;
2730 			if (cr == 0)
2731 				ext4_mb_simple_scan_group(ac, &e4b);
2732 			else if (cr == 1 && sbi->s_stripe &&
2733 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2734 				ext4_mb_scan_aligned(ac, &e4b);
2735 			else
2736 				ext4_mb_complex_scan_group(ac, &e4b);
2737 
2738 			ext4_unlock_group(sb, group);
2739 			ext4_mb_unload_buddy(&e4b);
2740 
2741 			if (ac->ac_status != AC_STATUS_CONTINUE)
2742 				break;
2743 		}
2744 		/* Processed all groups and haven't found blocks */
2745 		if (sbi->s_mb_stats && i == ngroups)
2746 			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2747 	}
2748 
2749 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2750 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2751 		/*
2752 		 * We've been searching too long. Let's try to allocate
2753 		 * the best chunk we've found so far
2754 		 */
2755 		ext4_mb_try_best_found(ac, &e4b);
2756 		if (ac->ac_status != AC_STATUS_FOUND) {
2757 			/*
2758 			 * Someone more lucky has already allocated it.
2759 			 * The only thing we can do is just take first
2760 			 * found block(s)
2761 			 */
2762 			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2763 			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2764 				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2765 				 ac->ac_b_ex.fe_len, lost);
2766 
2767 			ac->ac_b_ex.fe_group = 0;
2768 			ac->ac_b_ex.fe_start = 0;
2769 			ac->ac_b_ex.fe_len = 0;
2770 			ac->ac_status = AC_STATUS_CONTINUE;
2771 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2772 			cr = 3;
2773 			goto repeat;
2774 		}
2775 	}
2776 
2777 	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2778 		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2779 out:
2780 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2781 		err = first_err;
2782 
2783 	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2784 		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2785 		 ac->ac_flags, cr, err);
2786 
2787 	if (nr)
2788 		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2789 
2790 	return err;
2791 }
2792 
2793 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2794 {
2795 	struct super_block *sb = pde_data(file_inode(seq->file));
2796 	ext4_group_t group;
2797 
2798 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2799 		return NULL;
2800 	group = *pos + 1;
2801 	return (void *) ((unsigned long) group);
2802 }
2803 
2804 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2805 {
2806 	struct super_block *sb = pde_data(file_inode(seq->file));
2807 	ext4_group_t group;
2808 
2809 	++*pos;
2810 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2811 		return NULL;
2812 	group = *pos + 1;
2813 	return (void *) ((unsigned long) group);
2814 }
2815 
2816 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2817 {
2818 	struct super_block *sb = pde_data(file_inode(seq->file));
2819 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2820 	int i;
2821 	int err, buddy_loaded = 0;
2822 	struct ext4_buddy e4b;
2823 	struct ext4_group_info *grinfo;
2824 	unsigned char blocksize_bits = min_t(unsigned char,
2825 					     sb->s_blocksize_bits,
2826 					     EXT4_MAX_BLOCK_LOG_SIZE);
2827 	struct sg {
2828 		struct ext4_group_info info;
2829 		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2830 	} sg;
2831 
2832 	group--;
2833 	if (group == 0)
2834 		seq_puts(seq, "#group: free  frags first ["
2835 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2836 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2837 
2838 	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2839 		sizeof(struct ext4_group_info);
2840 
2841 	grinfo = ext4_get_group_info(sb, group);
2842 	/* Load the group info in memory only if not already loaded. */
2843 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2844 		err = ext4_mb_load_buddy(sb, group, &e4b);
2845 		if (err) {
2846 			seq_printf(seq, "#%-5u: I/O error\n", group);
2847 			return 0;
2848 		}
2849 		buddy_loaded = 1;
2850 	}
2851 
2852 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2853 
2854 	if (buddy_loaded)
2855 		ext4_mb_unload_buddy(&e4b);
2856 
2857 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2858 			sg.info.bb_fragments, sg.info.bb_first_free);
2859 	for (i = 0; i <= 13; i++)
2860 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2861 				sg.info.bb_counters[i] : 0);
2862 	seq_puts(seq, " ]\n");
2863 
2864 	return 0;
2865 }
2866 
2867 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2868 {
2869 }
2870 
2871 const struct seq_operations ext4_mb_seq_groups_ops = {
2872 	.start  = ext4_mb_seq_groups_start,
2873 	.next   = ext4_mb_seq_groups_next,
2874 	.stop   = ext4_mb_seq_groups_stop,
2875 	.show   = ext4_mb_seq_groups_show,
2876 };
2877 
2878 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2879 {
2880 	struct super_block *sb = seq->private;
2881 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2882 
2883 	seq_puts(seq, "mballoc:\n");
2884 	if (!sbi->s_mb_stats) {
2885 		seq_puts(seq, "\tmb stats collection turned off.\n");
2886 		seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2887 		return 0;
2888 	}
2889 	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2890 	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2891 
2892 	seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2893 
2894 	seq_puts(seq, "\tcr0_stats:\n");
2895 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2896 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2897 		   atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2898 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2899 		   atomic64_read(&sbi->s_bal_cX_failed[0]));
2900 	seq_printf(seq, "\t\tbad_suggestions: %u\n",
2901 		   atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2902 
2903 	seq_puts(seq, "\tcr1_stats:\n");
2904 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2905 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2906 		   atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2907 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2908 		   atomic64_read(&sbi->s_bal_cX_failed[1]));
2909 	seq_printf(seq, "\t\tbad_suggestions: %u\n",
2910 		   atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2911 
2912 	seq_puts(seq, "\tcr2_stats:\n");
2913 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2914 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2915 		   atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2916 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2917 		   atomic64_read(&sbi->s_bal_cX_failed[2]));
2918 
2919 	seq_puts(seq, "\tcr3_stats:\n");
2920 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2921 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2922 		   atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2923 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2924 		   atomic64_read(&sbi->s_bal_cX_failed[3]));
2925 	seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2926 	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2927 	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2928 	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2929 	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2930 
2931 	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2932 		   atomic_read(&sbi->s_mb_buddies_generated),
2933 		   ext4_get_groups_count(sb));
2934 	seq_printf(seq, "\tbuddies_time_used: %llu\n",
2935 		   atomic64_read(&sbi->s_mb_generation_time));
2936 	seq_printf(seq, "\tpreallocated: %u\n",
2937 		   atomic_read(&sbi->s_mb_preallocated));
2938 	seq_printf(seq, "\tdiscarded: %u\n",
2939 		   atomic_read(&sbi->s_mb_discarded));
2940 	return 0;
2941 }
2942 
2943 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2944 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
2945 {
2946 	struct super_block *sb = pde_data(file_inode(seq->file));
2947 	unsigned long position;
2948 
2949 	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2950 		return NULL;
2951 	position = *pos + 1;
2952 	return (void *) ((unsigned long) position);
2953 }
2954 
2955 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
2956 {
2957 	struct super_block *sb = pde_data(file_inode(seq->file));
2958 	unsigned long position;
2959 
2960 	++*pos;
2961 	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2962 		return NULL;
2963 	position = *pos + 1;
2964 	return (void *) ((unsigned long) position);
2965 }
2966 
2967 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
2968 {
2969 	struct super_block *sb = pde_data(file_inode(seq->file));
2970 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2971 	unsigned long position = ((unsigned long) v);
2972 	struct ext4_group_info *grp;
2973 	unsigned int count;
2974 
2975 	position--;
2976 	if (position >= MB_NUM_ORDERS(sb)) {
2977 		position -= MB_NUM_ORDERS(sb);
2978 		if (position == 0)
2979 			seq_puts(seq, "avg_fragment_size_lists:\n");
2980 
2981 		count = 0;
2982 		read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
2983 		list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
2984 				    bb_avg_fragment_size_node)
2985 			count++;
2986 		read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
2987 		seq_printf(seq, "\tlist_order_%u_groups: %u\n",
2988 					(unsigned int)position, count);
2989 		return 0;
2990 	}
2991 
2992 	if (position == 0) {
2993 		seq_printf(seq, "optimize_scan: %d\n",
2994 			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
2995 		seq_puts(seq, "max_free_order_lists:\n");
2996 	}
2997 	count = 0;
2998 	read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
2999 	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3000 			    bb_largest_free_order_node)
3001 		count++;
3002 	read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3003 	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3004 		   (unsigned int)position, count);
3005 
3006 	return 0;
3007 }
3008 
3009 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3010 {
3011 }
3012 
3013 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3014 	.start  = ext4_mb_seq_structs_summary_start,
3015 	.next   = ext4_mb_seq_structs_summary_next,
3016 	.stop   = ext4_mb_seq_structs_summary_stop,
3017 	.show   = ext4_mb_seq_structs_summary_show,
3018 };
3019 
3020 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3021 {
3022 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3023 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3024 
3025 	BUG_ON(!cachep);
3026 	return cachep;
3027 }
3028 
3029 /*
3030  * Allocate the top-level s_group_info array for the specified number
3031  * of groups
3032  */
3033 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3034 {
3035 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3036 	unsigned size;
3037 	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3038 
3039 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3040 		EXT4_DESC_PER_BLOCK_BITS(sb);
3041 	if (size <= sbi->s_group_info_size)
3042 		return 0;
3043 
3044 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3045 	new_groupinfo = kvzalloc(size, GFP_KERNEL);
3046 	if (!new_groupinfo) {
3047 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3048 		return -ENOMEM;
3049 	}
3050 	rcu_read_lock();
3051 	old_groupinfo = rcu_dereference(sbi->s_group_info);
3052 	if (old_groupinfo)
3053 		memcpy(new_groupinfo, old_groupinfo,
3054 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3055 	rcu_read_unlock();
3056 	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3057 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3058 	if (old_groupinfo)
3059 		ext4_kvfree_array_rcu(old_groupinfo);
3060 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3061 		   sbi->s_group_info_size);
3062 	return 0;
3063 }
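/*
 * Sizing example for ext4_mb_alloc_groupinfo() above: assuming 4k
 * blocks and 32-byte group descriptors (i.e. no 64bit feature),
 * EXT4_DESC_PER_BLOCK(sb) == 128, so 1000 groups need
 * ceil(1000 / 128) == 8 second-level pointers, and the first-level
 * array is rounded up to a power-of-two byte size before kvzalloc().
 */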
3064 
3065 /* Create and initialize ext4_group_info data for the given group. */
3066 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3067 			  struct ext4_group_desc *desc)
3068 {
3069 	int i;
3070 	int metalen = 0;
3071 	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3072 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3073 	struct ext4_group_info **meta_group_info;
3074 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3075 
3076 	/*
3077 	 * First check if this group is the first of a reserved block.
3078 	 * If so, we have to allocate a new table of pointers
3079 	 * to ext4_group_info structures
3080 	 */
3081 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3082 		metalen = sizeof(*meta_group_info) <<
3083 			EXT4_DESC_PER_BLOCK_BITS(sb);
3084 		meta_group_info = kmalloc(metalen, GFP_NOFS);
3085 		if (meta_group_info == NULL) {
3086 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
3087 				 "for a buddy group");
3088 			goto exit_meta_group_info;
3089 		}
3090 		rcu_read_lock();
3091 		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3092 		rcu_read_unlock();
3093 	}
3094 
3095 	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3096 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3097 
3098 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3099 	if (meta_group_info[i] == NULL) {
3100 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3101 		goto exit_group_info;
3102 	}
3103 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3104 		&(meta_group_info[i]->bb_state));
3105 
3106 	/*
3107 	 * initialize bb_free to be able to skip
3108 	 * empty groups without initialization
3109 	 */
3110 	if (ext4_has_group_desc_csum(sb) &&
3111 	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3112 		meta_group_info[i]->bb_free =
3113 			ext4_free_clusters_after_init(sb, group, desc);
3114 	} else {
3115 		meta_group_info[i]->bb_free =
3116 			ext4_free_group_clusters(sb, desc);
3117 	}
3118 
3119 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3120 	init_rwsem(&meta_group_info[i]->alloc_sem);
3121 	meta_group_info[i]->bb_free_root = RB_ROOT;
3122 	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3123 	INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3124 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3125 	meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3126 	meta_group_info[i]->bb_group = group;
3127 
3128 	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3129 	return 0;
3130 
3131 exit_group_info:
3132 	/* If a meta_group_info table has been allocated, release it now */
3133 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3134 		struct ext4_group_info ***group_info;
3135 
3136 		rcu_read_lock();
3137 		group_info = rcu_dereference(sbi->s_group_info);
3138 		kfree(group_info[idx]);
3139 		group_info[idx] = NULL;
3140 		rcu_read_unlock();
3141 	}
3142 exit_meta_group_info:
3143 	return -ENOMEM;
3144 } /* ext4_mb_add_groupinfo */
3145 
3146 static int ext4_mb_init_backend(struct super_block *sb)
3147 {
3148 	ext4_group_t ngroups = ext4_get_groups_count(sb);
3149 	ext4_group_t i;
3150 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3151 	int err;
3152 	struct ext4_group_desc *desc;
3153 	struct ext4_group_info ***group_info;
3154 	struct kmem_cache *cachep;
3155 
3156 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
3157 	if (err)
3158 		return err;
3159 
3160 	sbi->s_buddy_cache = new_inode(sb);
3161 	if (sbi->s_buddy_cache == NULL) {
3162 		ext4_msg(sb, KERN_ERR, "can't get new inode");
3163 		goto err_freesgi;
3164 	}
3165 	/* To avoid potentially colliding with a valid on-disk inode number,
3166 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3167 	 * not in the inode hash, so it should never be found by iget(), but
3168 	 * this will avoid confusion if it ever shows up during debugging. */
3169 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3170 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3171 	for (i = 0; i < ngroups; i++) {
3172 		cond_resched();
3173 		desc = ext4_get_group_desc(sb, i, NULL);
3174 		if (desc == NULL) {
3175 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3176 			goto err_freebuddy;
3177 		}
3178 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3179 			goto err_freebuddy;
3180 	}
3181 
3182 	if (ext4_has_feature_flex_bg(sb)) {
3183 		/* a single flex group is supposed to be read by a single IO.
3184 		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is an
3185 		 * unsigned integer, so the maximum shift is 32.
3186 		 */
3187 		if (sbi->s_es->s_log_groups_per_flex >= 32) {
3188 			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3189 			goto err_freebuddy;
3190 		}
3191 		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3192 			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3193 		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3194 	} else {
3195 		sbi->s_mb_prefetch = 32;
3196 	}
3197 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3198 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3199 	/* How many real IOs to prefetch within a single allocation at cr=0.
3200 	 * Given that cr=0 is a CPU-related optimization we shouldn't try to
3201 	 * load too many groups; at some point we should start to use what
3202 	 * we've got in memory.
3203 	 * With an average random access time of 5ms, it'd take a second to
3204 	 * get 200 groups (* N with flex_bg), so let's make this limit 4
3205 	 */
3206 	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3207 	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3208 		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3209 
3210 	return 0;
3211 
3212 err_freebuddy:
3213 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3214 	while (i-- > 0)
3215 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3216 	i = sbi->s_group_info_size;
3217 	rcu_read_lock();
3218 	group_info = rcu_dereference(sbi->s_group_info);
3219 	while (i-- > 0)
3220 		kfree(group_info[i]);
3221 	rcu_read_unlock();
3222 	iput(sbi->s_buddy_cache);
3223 err_freesgi:
3224 	rcu_read_lock();
3225 	kvfree(rcu_dereference(sbi->s_group_info));
3226 	rcu_read_unlock();
3227 	return -ENOMEM;
3228 }
3229 
3230 static void ext4_groupinfo_destroy_slabs(void)
3231 {
3232 	int i;
3233 
3234 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3235 		kmem_cache_destroy(ext4_groupinfo_caches[i]);
3236 		ext4_groupinfo_caches[i] = NULL;
3237 	}
3238 }
3239 
3240 static int ext4_groupinfo_create_slab(size_t size)
3241 {
3242 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3243 	int slab_size;
3244 	int blocksize_bits = order_base_2(size);
3245 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3246 	struct kmem_cache *cachep;
3247 
3248 	if (cache_index >= NR_GRPINFO_CACHES)
3249 		return -EINVAL;
3250 
3251 	if (unlikely(cache_index < 0))
3252 		cache_index = 0;
3253 
3254 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
3255 	if (ext4_groupinfo_caches[cache_index]) {
3256 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3257 		return 0;	/* Already created */
3258 	}
3259 
3260 	slab_size = offsetof(struct ext4_group_info,
3261 				bb_counters[blocksize_bits + 2]);
3262 
3263 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3264 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3265 					NULL);
3266 
3267 	ext4_groupinfo_caches[cache_index] = cachep;
3268 
3269 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3270 	if (!cachep) {
3271 		printk(KERN_EMERG
3272 		       "EXT4-fs: no memory for groupinfo slab cache\n");
3273 		return -ENOMEM;
3274 	}
3275 
3276 	return 0;
3277 }
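/*
 * Example: for 4k blocks (blocksize_bits == 12) the slab objects end
 * after bb_counters[14], i.e. one counter per buddy order 0-13, which
 * matches MB_NUM_ORDERS() for that block size.
 */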
3278 
3279 static void ext4_discard_work(struct work_struct *work)
3280 {
3281 	struct ext4_sb_info *sbi = container_of(work,
3282 			struct ext4_sb_info, s_discard_work);
3283 	struct super_block *sb = sbi->s_sb;
3284 	struct ext4_free_data *fd, *nfd;
3285 	struct ext4_buddy e4b;
3286 	struct list_head discard_list;
3287 	ext4_group_t grp, load_grp;
3288 	int err = 0;
3289 
3290 	INIT_LIST_HEAD(&discard_list);
3291 	spin_lock(&sbi->s_md_lock);
3292 	list_splice_init(&sbi->s_discard_list, &discard_list);
3293 	spin_unlock(&sbi->s_md_lock);
3294 
3295 	load_grp = UINT_MAX;
3296 	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3297 		/*
3298 		 * If filesystem is umounting or no memory or suffering
3299 		 * from no space, give up the discard
3300 		 */
3301 		if ((sb->s_flags & SB_ACTIVE) && !err &&
3302 		    !atomic_read(&sbi->s_retry_alloc_pending)) {
3303 			grp = fd->efd_group;
3304 			if (grp != load_grp) {
3305 				if (load_grp != UINT_MAX)
3306 					ext4_mb_unload_buddy(&e4b);
3307 
3308 				err = ext4_mb_load_buddy(sb, grp, &e4b);
3309 				if (err) {
3310 					kmem_cache_free(ext4_free_data_cachep, fd);
3311 					load_grp = UINT_MAX;
3312 					continue;
3313 				} else {
3314 					load_grp = grp;
3315 				}
3316 			}
3317 
3318 			ext4_lock_group(sb, grp);
3319 			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3320 						fd->efd_start_cluster + fd->efd_count - 1, 1);
3321 			ext4_unlock_group(sb, grp);
3322 		}
3323 		kmem_cache_free(ext4_free_data_cachep, fd);
3324 	}
3325 
3326 	if (load_grp != UINT_MAX)
3327 		ext4_mb_unload_buddy(&e4b);
3328 }
3329 
3330 int ext4_mb_init(struct super_block *sb)
3331 {
3332 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3333 	unsigned i, j;
3334 	unsigned offset, offset_incr;
3335 	unsigned max;
3336 	int ret;
3337 
3338 	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3339 
3340 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3341 	if (sbi->s_mb_offsets == NULL) {
3342 		ret = -ENOMEM;
3343 		goto out;
3344 	}
3345 
3346 	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3347 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3348 	if (sbi->s_mb_maxs == NULL) {
3349 		ret = -ENOMEM;
3350 		goto out;
3351 	}
3352 
3353 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3354 	if (ret < 0)
3355 		goto out;
3356 
3357 	/* order 0 is regular bitmap */
3358 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3359 	sbi->s_mb_offsets[0] = 0;
3360 
3361 	i = 1;
3362 	offset = 0;
3363 	offset_incr = 1 << (sb->s_blocksize_bits - 1);
3364 	max = sb->s_blocksize << 2;
3365 	do {
3366 		sbi->s_mb_offsets[i] = offset;
3367 		sbi->s_mb_maxs[i] = max;
3368 		offset += offset_incr;
3369 		offset_incr = offset_incr >> 1;
3370 		max = max >> 1;
3371 		i++;
3372 	} while (i < MB_NUM_ORDERS(sb));
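	/*
	 * Illustrative sketch (assuming 4k blocks, so s_blocksize_bits ==
	 * 12 and MB_NUM_ORDERS == 14): the loop above lays the per-order
	 * buddy bitmaps out back to back inside one block-sized buddy area:
	 *
	 *	s_mb_offsets[] = { 0, 0, 2048, 3072, 3584, ... }   (byte offsets)
	 *	s_mb_maxs[]    = { 32768, 16384, 8192, 4096, ... } (bits per order)
	 *
	 * Order 0 is the on-disk block bitmap itself (32768 bits); each
	 * higher order has half as many bits, so the layout converges
	 * within a single block.
	 */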
3373 
3374 	sbi->s_mb_avg_fragment_size =
3375 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3376 			GFP_KERNEL);
3377 	if (!sbi->s_mb_avg_fragment_size) {
3378 		ret = -ENOMEM;
3379 		goto out;
3380 	}
3381 	sbi->s_mb_avg_fragment_size_locks =
3382 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3383 			GFP_KERNEL);
3384 	if (!sbi->s_mb_avg_fragment_size_locks) {
3385 		ret = -ENOMEM;
3386 		goto out;
3387 	}
3388 	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3389 		INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3390 		rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3391 	}
3392 	sbi->s_mb_largest_free_orders =
3393 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3394 			GFP_KERNEL);
3395 	if (!sbi->s_mb_largest_free_orders) {
3396 		ret = -ENOMEM;
3397 		goto out;
3398 	}
3399 	sbi->s_mb_largest_free_orders_locks =
3400 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3401 			GFP_KERNEL);
3402 	if (!sbi->s_mb_largest_free_orders_locks) {
3403 		ret = -ENOMEM;
3404 		goto out;
3405 	}
3406 	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3407 		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3408 		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3409 	}
3410 
3411 	spin_lock_init(&sbi->s_md_lock);
3412 	sbi->s_mb_free_pending = 0;
3413 	INIT_LIST_HEAD(&sbi->s_freed_data_list);
3414 	INIT_LIST_HEAD(&sbi->s_discard_list);
3415 	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3416 	atomic_set(&sbi->s_retry_alloc_pending, 0);
3417 
3418 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3419 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3420 	sbi->s_mb_stats = MB_DEFAULT_STATS;
3421 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3422 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3423 	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3424 	/*
3425 	 * The default group preallocation is 512, which for 4k block
3426 	 * sizes translates to 2 megabytes.  However, for bigalloc file
3427 	 * systems this is probably too big (i.e., if the cluster size
3428 	 * is 1 megabyte, then the group preallocation size becomes half
3429 	 * a gigabyte!).  As a default, we will keep a two megabyte
3430 	 * group prealloc size for cluster sizes up to 64k, and after
3431 	 * that, we will force a minimum group preallocation size of
3432 	 * 32 clusters.  This translates to 8 megs when the cluster
3433 	 * size is 256k, and 32 megs when the cluster size is 1 meg, which
3434 	 * seems reasonable as a default (see the worked example below).
3435 	 */
3436 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3437 				       sbi->s_cluster_bits, 32);
3438 	/*
3439 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3440 	 * to the lowest multiple of s_stripe which is bigger than
3441 	 * the s_mb_group_prealloc as determined above. We want
3442 	 * the preallocation size to be an exact multiple of the
3443 	 * RAID stripe size so that preallocations don't fragment
3444 	 * the stripes.
3445 	 */
3446 	if (sbi->s_stripe > 1) {
3447 		sbi->s_mb_group_prealloc = roundup(
3448 			sbi->s_mb_group_prealloc, sbi->s_stripe);
3449 	}
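	/*
	 * Worked examples of the sizing above (illustrative only, values
	 * assume MB_DEFAULT_GROUP_PREALLOC == 512):
	 *
	 *	4k blocks, no bigalloc:  s_cluster_bits == 0,
	 *		max(512 >> 0, 32) == 512 clusters (2 MiB)
	 *	bigalloc, 1 MiB cluster: s_cluster_bits == 8,
	 *		max(512 >> 8, 32) == 32 clusters (32 MiB)
	 *	with a hypothetical s_stripe == 24:
	 *		roundup(512, 24) == 528, keeping preallocations
	 *		stripe-aligned
	 */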
3450 
3451 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3452 	if (sbi->s_locality_groups == NULL) {
3453 		ret = -ENOMEM;
3454 		goto out;
3455 	}
3456 	for_each_possible_cpu(i) {
3457 		struct ext4_locality_group *lg;
3458 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3459 		mutex_init(&lg->lg_mutex);
3460 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3461 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3462 		spin_lock_init(&lg->lg_prealloc_lock);
3463 	}
3464 
3465 	if (bdev_nonrot(sb->s_bdev))
3466 		sbi->s_mb_max_linear_groups = 0;
3467 	else
3468 		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3469 	/* init file for buddy data */
3470 	ret = ext4_mb_init_backend(sb);
3471 	if (ret != 0)
3472 		goto out_free_locality_groups;
3473 
3474 	return 0;
3475 
3476 out_free_locality_groups:
3477 	free_percpu(sbi->s_locality_groups);
3478 	sbi->s_locality_groups = NULL;
3479 out:
3480 	kfree(sbi->s_mb_avg_fragment_size);
3481 	kfree(sbi->s_mb_avg_fragment_size_locks);
3482 	kfree(sbi->s_mb_largest_free_orders);
3483 	kfree(sbi->s_mb_largest_free_orders_locks);
3484 	kfree(sbi->s_mb_offsets);
3485 	sbi->s_mb_offsets = NULL;
3486 	kfree(sbi->s_mb_maxs);
3487 	sbi->s_mb_maxs = NULL;
3488 	return ret;
3489 }
3490 
3491 /* needs to be called with the ext4 group lock held */
3492 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3493 {
3494 	struct ext4_prealloc_space *pa;
3495 	struct list_head *cur, *tmp;
3496 	int count = 0;
3497 
3498 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3499 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3500 		list_del(&pa->pa_group_list);
3501 		count++;
3502 		kmem_cache_free(ext4_pspace_cachep, pa);
3503 	}
3504 	return count;
3505 }
3506 
3507 int ext4_mb_release(struct super_block *sb)
3508 {
3509 	ext4_group_t ngroups = ext4_get_groups_count(sb);
3510 	ext4_group_t i;
3511 	int num_meta_group_infos;
3512 	struct ext4_group_info *grinfo, ***group_info;
3513 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3514 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3515 	int count;
3516 
3517 	if (test_opt(sb, DISCARD)) {
3518 		/*
3519 		 * wait for the discard work to drain all of the ext4_free_data entries
3520 		 */
3521 		flush_work(&sbi->s_discard_work);
3522 		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3523 	}
3524 
3525 	if (sbi->s_group_info) {
3526 		for (i = 0; i < ngroups; i++) {
3527 			cond_resched();
3528 			grinfo = ext4_get_group_info(sb, i);
3529 			mb_group_bb_bitmap_free(grinfo);
3530 			ext4_lock_group(sb, i);
3531 			count = ext4_mb_cleanup_pa(grinfo);
3532 			if (count)
3533 				mb_debug(sb, "mballoc: %d PAs left\n",
3534 					 count);
3535 			ext4_unlock_group(sb, i);
3536 			kmem_cache_free(cachep, grinfo);
3537 		}
3538 		num_meta_group_infos = (ngroups +
3539 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3540 			EXT4_DESC_PER_BLOCK_BITS(sb);
3541 		rcu_read_lock();
3542 		group_info = rcu_dereference(sbi->s_group_info);
3543 		for (i = 0; i < num_meta_group_infos; i++)
3544 			kfree(group_info[i]);
3545 		kvfree(group_info);
3546 		rcu_read_unlock();
3547 	}
3548 	kfree(sbi->s_mb_avg_fragment_size);
3549 	kfree(sbi->s_mb_avg_fragment_size_locks);
3550 	kfree(sbi->s_mb_largest_free_orders);
3551 	kfree(sbi->s_mb_largest_free_orders_locks);
3552 	kfree(sbi->s_mb_offsets);
3553 	kfree(sbi->s_mb_maxs);
3554 	iput(sbi->s_buddy_cache);
3555 	if (sbi->s_mb_stats) {
3556 		ext4_msg(sb, KERN_INFO,
3557 		       "mballoc: %u blocks %u reqs (%u success)",
3558 				atomic_read(&sbi->s_bal_allocated),
3559 				atomic_read(&sbi->s_bal_reqs),
3560 				atomic_read(&sbi->s_bal_success));
3561 		ext4_msg(sb, KERN_INFO,
3562 		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3563 				"%u 2^N hits, %u breaks, %u lost",
3564 				atomic_read(&sbi->s_bal_ex_scanned),
3565 				atomic_read(&sbi->s_bal_groups_scanned),
3566 				atomic_read(&sbi->s_bal_goals),
3567 				atomic_read(&sbi->s_bal_2orders),
3568 				atomic_read(&sbi->s_bal_breaks),
3569 				atomic_read(&sbi->s_mb_lost_chunks));
3570 		ext4_msg(sb, KERN_INFO,
3571 		       "mballoc: %u generated and it took %llu",
3572 				atomic_read(&sbi->s_mb_buddies_generated),
3573 				atomic64_read(&sbi->s_mb_generation_time));
3574 		ext4_msg(sb, KERN_INFO,
3575 		       "mballoc: %u preallocated, %u discarded",
3576 				atomic_read(&sbi->s_mb_preallocated),
3577 				atomic_read(&sbi->s_mb_discarded));
3578 	}
3579 
3580 	free_percpu(sbi->s_locality_groups);
3581 
3582 	return 0;
3583 }
3584 
3585 static inline int ext4_issue_discard(struct super_block *sb,
3586 		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3587 		struct bio **biop)
3588 {
3589 	ext4_fsblk_t discard_block;
3590 
3591 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3592 			 ext4_group_first_block_no(sb, block_group));
3593 	count = EXT4_C2B(EXT4_SB(sb), count);
3594 	trace_ext4_discard_blocks(sb,
3595 			(unsigned long long) discard_block, count);
3596 	if (biop) {
3597 		return __blkdev_issue_discard(sb->s_bdev,
3598 			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
3599 			(sector_t)count << (sb->s_blocksize_bits - 9),
3600 			GFP_NOFS, biop);
3601 	} else
3602 		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3603 }
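/*
 * Sector math sketch for the discard above (illustrative, assuming 4k
 * blocks): s_blocksize_bits == 12, so the shift is 12 - 9 == 3 and each
 * block spans 8 512-byte sectors:
 *
 *	discard_block == 1000, count == 16
 *		-> sector 8000, nr_sects 128
 *
 * The EXT4_C2B() conversions above make the same math work for bigalloc
 * filesystems, where the allocator works in clusters.
 */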
3604 
3605 static void ext4_free_data_in_buddy(struct super_block *sb,
3606 				    struct ext4_free_data *entry)
3607 {
3608 	struct ext4_buddy e4b;
3609 	struct ext4_group_info *db;
3610 	int err, count = 0, count2 = 0;
3611 
3612 	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3613 		 entry->efd_count, entry->efd_group, entry);
3614 
3615 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3616 	/* we expect to find existing buddy because it's pinned */
3617 	BUG_ON(err != 0);
3618 
3619 	spin_lock(&EXT4_SB(sb)->s_md_lock);
3620 	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3621 	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3622 
3623 	db = e4b.bd_info;
3624 	/* there are blocks to put in buddy to make them really free */
3625 	count += entry->efd_count;
3626 	count2++;
3627 	ext4_lock_group(sb, entry->efd_group);
3628 	/* Take it out of per group rb tree */
3629 	rb_erase(&entry->efd_node, &(db->bb_free_root));
3630 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3631 
3632 	/*
3633 	 * Clear the trimmed flag for the group so that the next
3634 	 * ext4_trim_fs can trim it.
3635 	 * If the volume is mounted with -o discard, online discard
3636 	 * is supported and the free blocks will be trimmed online.
3637 	 */
3638 	if (!test_opt(sb, DISCARD))
3639 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
3640 
3641 	if (!db->bb_free_root.rb_node) {
3642 		/* No more items in the per group rb tree;
3643 		 * balance the refcounts from ext4_mb_free_metadata().
3644 		 */
3645 		put_page(e4b.bd_buddy_page);
3646 		put_page(e4b.bd_bitmap_page);
3647 	}
3648 	ext4_unlock_group(sb, entry->efd_group);
3649 	ext4_mb_unload_buddy(&e4b);
3650 
3651 	mb_debug(sb, "freed %d blocks in %d structures\n", count,
3652 		 count2);
3653 }
3654 
3655 /*
3656  * This function is called by the jbd2 layer once the commit has finished,
3657  * so we know we can free the blocks that were released with that commit.
3658  */
3659 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3660 {
3661 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3662 	struct ext4_free_data *entry, *tmp;
3663 	struct list_head freed_data_list;
3664 	struct list_head *cut_pos = NULL;
3665 	bool wake;
3666 
3667 	INIT_LIST_HEAD(&freed_data_list);
3668 
3669 	spin_lock(&sbi->s_md_lock);
3670 	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3671 		if (entry->efd_tid != commit_tid)
3672 			break;
3673 		cut_pos = &entry->efd_list;
3674 	}
3675 	if (cut_pos)
3676 		list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3677 				  cut_pos);
3678 	spin_unlock(&sbi->s_md_lock);
3679 
3680 	list_for_each_entry(entry, &freed_data_list, efd_list)
3681 		ext4_free_data_in_buddy(sb, entry);
3682 
3683 	if (test_opt(sb, DISCARD)) {
3684 		spin_lock(&sbi->s_md_lock);
3685 		wake = list_empty(&sbi->s_discard_list);
3686 		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3687 		spin_unlock(&sbi->s_md_lock);
3688 		if (wake)
3689 			queue_work(system_unbound_wq, &sbi->s_discard_work);
3690 	} else {
3691 		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3692 			kmem_cache_free(ext4_free_data_cachep, entry);
3693 	}
3694 }
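/*
 * Illustrative sketch of the list surgery above: s_freed_data_list is
 * ordered by commit tid, so with entries tagged [tid 10, tid 10, tid 11]
 * and commit_tid == 10, cut_pos stops at the second entry and
 * list_cut_position() leaves:
 *
 *	freed_data_list   : [tid 10, tid 10]  (processed above)
 *	s_freed_data_list : [tid 11]          (waits for the next commit)
 *
 * The processed entries are then either queued for online discard or
 * freed directly, depending on the DISCARD mount option.
 */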
3695 
3696 int __init ext4_init_mballoc(void)
3697 {
3698 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3699 					SLAB_RECLAIM_ACCOUNT);
3700 	if (ext4_pspace_cachep == NULL)
3701 		goto out;
3702 
3703 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3704 				    SLAB_RECLAIM_ACCOUNT);
3705 	if (ext4_ac_cachep == NULL)
3706 		goto out_pa_free;
3707 
3708 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3709 					   SLAB_RECLAIM_ACCOUNT);
3710 	if (ext4_free_data_cachep == NULL)
3711 		goto out_ac_free;
3712 
3713 	return 0;
3714 
3715 out_ac_free:
3716 	kmem_cache_destroy(ext4_ac_cachep);
3717 out_pa_free:
3718 	kmem_cache_destroy(ext4_pspace_cachep);
3719 out:
3720 	return -ENOMEM;
3721 }
3722 
3723 void ext4_exit_mballoc(void)
3724 {
3725 	/*
3726 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3727 	 * before destroying the slab cache.
3728 	 */
3729 	rcu_barrier();
3730 	kmem_cache_destroy(ext4_pspace_cachep);
3731 	kmem_cache_destroy(ext4_ac_cachep);
3732 	kmem_cache_destroy(ext4_free_data_cachep);
3733 	ext4_groupinfo_destroy_slabs();
3734 }
3735 
3736 
3737 /*
3738  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3739  * Returns 0 on success or an error code on failure
3740  */
3741 static noinline_for_stack int
3742 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3743 				handle_t *handle, unsigned int reserv_clstrs)
3744 {
3745 	struct buffer_head *bitmap_bh = NULL;
3746 	struct ext4_group_desc *gdp;
3747 	struct buffer_head *gdp_bh;
3748 	struct ext4_sb_info *sbi;
3749 	struct super_block *sb;
3750 	ext4_fsblk_t block;
3751 	int err, len;
3752 
3753 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3754 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3755 
3756 	sb = ac->ac_sb;
3757 	sbi = EXT4_SB(sb);
3758 
3759 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3760 	if (IS_ERR(bitmap_bh)) {
3761 		err = PTR_ERR(bitmap_bh);
3762 		bitmap_bh = NULL;
3763 		goto out_err;
3764 	}
3765 
3766 	BUFFER_TRACE(bitmap_bh, "getting write access");
3767 	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3768 					    EXT4_JTR_NONE);
3769 	if (err)
3770 		goto out_err;
3771 
3772 	err = -EIO;
3773 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3774 	if (!gdp)
3775 		goto out_err;
3776 
3777 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3778 			ext4_free_group_clusters(sb, gdp));
3779 
3780 	BUFFER_TRACE(gdp_bh, "get_write_access");
3781 	err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3782 	if (err)
3783 		goto out_err;
3784 
3785 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3786 
3787 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3788 	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3789 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3790 			   "fs metadata", block, block+len);
3791 		/* The file system is mounted not to panic on error,
3792 		 * so fix the bitmap and return EFSCORRUPTED.
3793 		 * We leak some of the blocks here.
3794 		 */
3795 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3796 		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3797 			      ac->ac_b_ex.fe_len);
3798 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3799 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3800 		if (!err)
3801 			err = -EFSCORRUPTED;
3802 		goto out_err;
3803 	}
3804 
3805 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3806 #ifdef AGGRESSIVE_CHECK
3807 	{
3808 		int i;
3809 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3810 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3811 						bitmap_bh->b_data));
3812 		}
3813 	}
3814 #endif
3815 	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3816 		      ac->ac_b_ex.fe_len);
3817 	if (ext4_has_group_desc_csum(sb) &&
3818 	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3819 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3820 		ext4_free_group_clusters_set(sb, gdp,
3821 					     ext4_free_clusters_after_init(sb,
3822 						ac->ac_b_ex.fe_group, gdp));
3823 	}
3824 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3825 	ext4_free_group_clusters_set(sb, gdp, len);
3826 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3827 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3828 
3829 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3830 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3831 	/*
3832 	 * Now reduce the dirty block count also. Should not go negative
3833 	 */
3834 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3835 		/* release all the reserved blocks if non delalloc */
3836 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3837 				   reserv_clstrs);
3838 
3839 	if (sbi->s_log_groups_per_flex) {
3840 		ext4_group_t flex_group = ext4_flex_group(sbi,
3841 							  ac->ac_b_ex.fe_group);
3842 		atomic64_sub(ac->ac_b_ex.fe_len,
3843 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
3844 						  flex_group)->free_clusters);
3845 	}
3846 
3847 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3848 	if (err)
3849 		goto out_err;
3850 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3851 
3852 out_err:
3853 	brelse(bitmap_bh);
3854 	return err;
3855 }
3856 
3857 /*
3858  * Idempotent helper for Ext4 fast commit replay path to set the state of
3859  * blocks in bitmaps and update counters.
3860  */
3861 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3862 			int len, int state)
3863 {
3864 	struct buffer_head *bitmap_bh = NULL;
3865 	struct ext4_group_desc *gdp;
3866 	struct buffer_head *gdp_bh;
3867 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3868 	ext4_group_t group;
3869 	ext4_grpblk_t blkoff;
3870 	int i, err = 0;	/* err may be checked before the loop sets it */
3871 	int already;
3872 	unsigned int clen, clen_changed, thisgrp_len;
3873 
3874 	while (len > 0) {
3875 		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3876 
3877 		/*
3878 		 * Check to see if we are freeing blocks across a group
3879 		 * boundary.
3880 		 * With flex_bg, (block, len) may span across more than one
3881 		 * group. In that case we need to get the corresponding group
3882 		 * metadata for each group, which is why we loop here one
3883 		 * group at a time (see the worked example below).
3884 		 */
3885 		thisgrp_len = min_t(unsigned int, (unsigned int)len,
3886 			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3887 		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
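		/*
		 * Worked example (illustrative, non-bigalloc, so one
		 * cluster == one block): with EXT4_BLOCKS_PER_GROUP ==
		 * 32768, blkoff == 32000 and len == 2000:
		 *
		 *	thisgrp_len = min(2000, 32768 - 32000) = 768
		 *
		 * 768 blocks are marked in this group, and the loop
		 * continues with the remaining 1232 in the next group.
		 */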
3888 
3889 		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
3890 			ext4_error(sb, "Marking blocks in system zone - "
3891 				   "Block = %llu, len = %u",
3892 				   block, thisgrp_len);
3893 			bitmap_bh = NULL;
3894 			break;
3895 		}
3896 
3897 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3898 		if (IS_ERR(bitmap_bh)) {
3899 			err = PTR_ERR(bitmap_bh);
3900 			bitmap_bh = NULL;
3901 			break;
3902 		}
3903 
3904 		err = -EIO;
3905 		gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3906 		if (!gdp)
3907 			break;
3908 
3909 		ext4_lock_group(sb, group);
3910 		already = 0;
3911 		for (i = 0; i < clen; i++)
3912 			if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3913 					 !state)
3914 				already++;
3915 
3916 		clen_changed = clen - already;
3917 		if (state)
3918 			mb_set_bits(bitmap_bh->b_data, blkoff, clen);
3919 		else
3920 			mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
3921 		if (ext4_has_group_desc_csum(sb) &&
3922 		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3923 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3924 			ext4_free_group_clusters_set(sb, gdp,
3925 			     ext4_free_clusters_after_init(sb, group, gdp));
3926 		}
3927 		if (state)
3928 			clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3929 		else
3930 			clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3931 
3932 		ext4_free_group_clusters_set(sb, gdp, clen);
3933 		ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3934 		ext4_group_desc_csum_set(sb, group, gdp);
3935 
3936 		ext4_unlock_group(sb, group);
3937 
3938 		if (sbi->s_log_groups_per_flex) {
3939 			ext4_group_t flex_group = ext4_flex_group(sbi, group);
3940 			struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3941 						   s_flex_groups, flex_group);
3942 
3943 			if (state)
3944 				atomic64_sub(clen_changed, &fg->free_clusters);
3945 			else
3946 				atomic64_add(clen_changed, &fg->free_clusters);
3947 
3948 		}
3949 
3950 		err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3951 		if (err)
3952 			break;
3953 		sync_dirty_buffer(bitmap_bh);
3954 		err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3955 		sync_dirty_buffer(gdp_bh);
3956 		if (err)
3957 			break;
3958 
3959 		block += thisgrp_len;
3960 		len -= thisgrp_len;
3961 		brelse(bitmap_bh);
3962 		BUG_ON(len < 0);
3963 	}
3964 
3965 	if (err)
3966 		brelse(bitmap_bh);
3967 }
3968 
3969 /*
3970  * here we normalize the request for a locality group
3971  * Group requests are normalized to s_mb_group_prealloc, which is aligned
3972  * to s_stripe if a stripe size was set via mount option.
3973  * s_mb_group_prealloc can be configured via
3974  * /sys/fs/ext4/<partition>/mb_group_prealloc
3975  *
3976  * XXX: should we try to preallocate more than the group has now?
3977  */
3978 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3979 {
3980 	struct super_block *sb = ac->ac_sb;
3981 	struct ext4_locality_group *lg = ac->ac_lg;
3982 
3983 	BUG_ON(lg == NULL);
3984 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3985 	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3986 }
3987 
3988 /*
3989  * Normalization means making request better in terms of
3990  * size and alignment
3991  */
3992 static noinline_for_stack void
3993 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3994 				struct ext4_allocation_request *ar)
3995 {
3996 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3997 	int bsbits, max;
3998 	ext4_lblk_t end;
3999 	loff_t size, start_off;
4000 	loff_t orig_size __maybe_unused;
4001 	ext4_lblk_t start;
4002 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4003 	struct ext4_prealloc_space *pa;
4004 
4005 	/* only data requests are normalized; metadata requests
4006 	   do not need preallocation */
4007 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4008 		return;
4009 
4010 	/* sometimes the caller may want exact blocks */
4011 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4012 		return;
4013 
4014 	/* caller may indicate that preallocation isn't
4015 	 * required (it's a tail, for example) */
4016 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4017 		return;
4018 
4019 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4020 		ext4_mb_normalize_group_request(ac);
4021 		return;
4022 	}
4023 
4024 	bsbits = ac->ac_sb->s_blocksize_bits;
4025 
4026 	/* first, let's learn the actual file size
4027 	 * assuming the current request is allocated */
4028 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4029 	size = size << bsbits;
4030 	if (size < i_size_read(ac->ac_inode))
4031 		size = i_size_read(ac->ac_inode);
4032 	orig_size = size;
4033 
4034 	/* max size of free chunks */
4035 	max = 2 << bsbits;
4036 
4037 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
4038 		(req <= (size) || max <= (chunk_size))
4039 
4040 	/* first, try to predict filesize */
4041 	/* XXX: should this table be tunable? */
4042 	start_off = 0;
4043 	if (size <= 16 * 1024) {
4044 		size = 16 * 1024;
4045 	} else if (size <= 32 * 1024) {
4046 		size = 32 * 1024;
4047 	} else if (size <= 64 * 1024) {
4048 		size = 64 * 1024;
4049 	} else if (size <= 128 * 1024) {
4050 		size = 128 * 1024;
4051 	} else if (size <= 256 * 1024) {
4052 		size = 256 * 1024;
4053 	} else if (size <= 512 * 1024) {
4054 		size = 512 * 1024;
4055 	} else if (size <= 1024 * 1024) {
4056 		size = 1024 * 1024;
4057 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4058 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4059 						(21 - bsbits)) << 21;
4060 		size = 2 * 1024 * 1024;
4061 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4062 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4063 							(22 - bsbits)) << 22;
4064 		size = 4 * 1024 * 1024;
4065 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4066 					(8<<20)>>bsbits, max, 8 * 1024)) {
4067 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4068 							(23 - bsbits)) << 23;
4069 		size = 8 * 1024 * 1024;
4070 	} else {
4071 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4072 		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4073 					      ac->ac_o_ex.fe_len) << bsbits;
4074 	}
4075 	size = size >> bsbits;
4076 	start = start_off >> bsbits;
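	/*
	 * Worked example of the table above (illustrative, 4k blocks, so
	 * bsbits == 12): a predicted size of 100k rounds up to 128 * 1024.
	 * For a ~3 MB file the 2 MB branch fires, so fe_logical == 600
	 * gives:
	 *
	 *	start_off = ((loff_t)600 >> (21 - 12)) << 21 = 2 MiB
	 *	start     = start_off >> 12 = block 512
	 *
	 * i.e. the goal window is aligned down to a 2 MB boundary that
	 * still covers the original logical block.
	 */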
4077 
4078 	/*
4079 	 * For tiny groups (smaller than 8MB) the chosen allocation
4080 	 * alignment may be larger than group size. Make sure the
4081 	 * alignment does not move allocation to a different group which
4082 	 * makes mballoc fail assertions later.
4083 	 */
4084 	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4085 			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4086 
4087 	/* don't cover already allocated blocks in selected range */
4088 	if (ar->pleft && start <= ar->lleft) {
4089 		size -= ar->lleft + 1 - start;
4090 		start = ar->lleft + 1;
4091 	}
4092 	if (ar->pright && start + size - 1 >= ar->lright)
4093 		size -= start + size - ar->lright;
4094 
4095 	/*
4096 	 * Trim allocation request for filesystems with artificially small
4097 	 * groups.
4098 	 */
4099 	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4100 		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4101 
4102 	end = start + size;
4103 
4104 	/* check we don't cross already preallocated blocks */
4105 	rcu_read_lock();
4106 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4107 		ext4_lblk_t pa_end;
4108 
4109 		if (pa->pa_deleted)
4110 			continue;
4111 		spin_lock(&pa->pa_lock);
4112 		if (pa->pa_deleted) {
4113 			spin_unlock(&pa->pa_lock);
4114 			continue;
4115 		}
4116 
4117 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4118 						  pa->pa_len);
4119 
4120 		/* PA must not overlap original request */
4121 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4122 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
4123 
4124 		/* skip PAs this normalized request doesn't overlap with */
4125 		if (pa->pa_lstart >= end || pa_end <= start) {
4126 			spin_unlock(&pa->pa_lock);
4127 			continue;
4128 		}
4129 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4130 
4131 		/* adjust start or end to be adjacent to this pa */
4132 		if (pa_end <= ac->ac_o_ex.fe_logical) {
4133 			BUG_ON(pa_end < start);
4134 			start = pa_end;
4135 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4136 			BUG_ON(pa->pa_lstart > end);
4137 			end = pa->pa_lstart;
4138 		}
4139 		spin_unlock(&pa->pa_lock);
4140 	}
4141 	rcu_read_unlock();
4142 	size = end - start;
4143 
4144 	/* XXX: extra loop to check we really don't overlap preallocations */
4145 	rcu_read_lock();
4146 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4147 		ext4_lblk_t pa_end;
4148 
4149 		spin_lock(&pa->pa_lock);
4150 		if (pa->pa_deleted == 0) {
4151 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4152 							  pa->pa_len);
4153 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4154 		}
4155 		spin_unlock(&pa->pa_lock);
4156 	}
4157 	rcu_read_unlock();
4158 
4159 	/*
4160 	 * In this function "start" and "size" are normalized for better
4161 	 * alignment and length such that we could preallocate more blocks.
4162 	 * This normalization is done such that original request of
4163 	 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4164 	 * "size" boundaries.
4165 	 * (Note fe_len can be relaxed since the FS block allocation API does
4166 	 * not guarantee the number of contiguous blocks allocated, since that
4167 	 * depends upon the free space left, etc.)
4168 	 * In case of inode pa, later we use the allocated blocks
4169 	 * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
4170 	 * range of goal/best blocks [start, size] to put it at the
4171 	 * ac_o_ex.fe_logical extent of this inode.
4172 	 * (See ext4_mb_use_inode_pa() for more details)
4173 	 */
4174 	if (start + size <= ac->ac_o_ex.fe_logical ||
4175 			start > ac->ac_o_ex.fe_logical) {
4176 		ext4_msg(ac->ac_sb, KERN_ERR,
4177 			 "start %lu, size %lu, fe_logical %lu",
4178 			 (unsigned long) start, (unsigned long) size,
4179 			 (unsigned long) ac->ac_o_ex.fe_logical);
4180 		BUG();
4181 	}
4182 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4183 
4184 	/* now prepare goal request */
4185 
4186 	/* XXX: is it better to align blocks WRT logical
4187 	 * placement or to satisfy a big request as is */
4188 	ac->ac_g_ex.fe_logical = start;
4189 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4190 
4191 	/* define goal start in order to merge */
4192 	if (ar->pright && (ar->lright == (start + size))) {
4193 		/* merge to the right */
4194 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4195 						&ac->ac_f_ex.fe_group,
4196 						&ac->ac_f_ex.fe_start);
4197 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4198 	}
4199 	if (ar->pleft && (ar->lleft + 1 == start)) {
4200 		/* merge to the left */
4201 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4202 						&ac->ac_f_ex.fe_group,
4203 						&ac->ac_f_ex.fe_start);
4204 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4205 	}
4206 
4207 	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4208 		 orig_size, start);
4209 }
4210 
4211 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4212 {
4213 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4214 
4215 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4216 		atomic_inc(&sbi->s_bal_reqs);
4217 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4218 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4219 			atomic_inc(&sbi->s_bal_success);
4220 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4221 		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4222 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4223 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4224 			atomic_inc(&sbi->s_bal_goals);
4225 		if (ac->ac_found > sbi->s_mb_max_to_scan)
4226 			atomic_inc(&sbi->s_bal_breaks);
4227 	}
4228 
4229 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4230 		trace_ext4_mballoc_alloc(ac);
4231 	else
4232 		trace_ext4_mballoc_prealloc(ac);
4233 }
4234 
4235 /*
4236  * Called on failure; free up any blocks from the inode PA for this
4237  * context.  We don't need this for MB_GROUP_PA because we only change
4238  * pa_free in ext4_mb_release_context(), but on failure, we've already
4239  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4240  */
4241 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4242 {
4243 	struct ext4_prealloc_space *pa = ac->ac_pa;
4244 	struct ext4_buddy e4b;
4245 	int err;
4246 
4247 	if (pa == NULL) {
4248 		if (ac->ac_f_ex.fe_len == 0)
4249 			return;
4250 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4251 		if (err) {
4252 			/*
4253 			 * This should never happen since we pin the
4254 			 * pages in the ext4_allocation_context so
4255 			 * ext4_mb_load_buddy() should never fail.
4256 			 */
4257 			WARN(1, "mb_load_buddy failed (%d)", err);
4258 			return;
4259 		}
4260 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4261 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4262 			       ac->ac_f_ex.fe_len);
4263 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4264 		ext4_mb_unload_buddy(&e4b);
4265 		return;
4266 	}
4267 	if (pa->pa_type == MB_INODE_PA)
4268 		pa->pa_free += ac->ac_b_ex.fe_len;
4269 }
4270 
4271 /*
4272  * use blocks preallocated to inode
4273  */
4274 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4275 				struct ext4_prealloc_space *pa)
4276 {
4277 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4278 	ext4_fsblk_t start;
4279 	ext4_fsblk_t end;
4280 	int len;
4281 
4282 	/* found preallocated blocks, use them */
4283 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4284 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4285 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4286 	len = EXT4_NUM_B2C(sbi, end - start);
4287 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4288 					&ac->ac_b_ex.fe_start);
4289 	ac->ac_b_ex.fe_len = len;
4290 	ac->ac_status = AC_STATUS_FOUND;
4291 	ac->ac_pa = pa;
4292 
4293 	BUG_ON(start < pa->pa_pstart);
4294 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4295 	BUG_ON(pa->pa_free < len);
4296 	pa->pa_free -= len;
4297 
4298 	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4299 }
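/*
 * Worked example (illustrative, cluster size == block size): for an
 * inode pa with pa_lstart == 100, pa_pstart == 5000, pa_len == 32 and a
 * request at fe_logical == 110 for fe_len == 8 blocks:
 *
 *	start = 5000 + (110 - 100) = 5010
 *	end   = min(5000 + 32, 5010 + 8) = 5018
 *	len   = 5018 - 5010 = 8
 *
 * The request is carved out of the pa at the physical offset mirroring
 * its logical offset, and pa_free drops from 32 to 24.
 */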
4300 
4301 /*
4302  * use blocks preallocated to locality group
4303  */
4304 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4305 				struct ext4_prealloc_space *pa)
4306 {
4307 	unsigned int len = ac->ac_o_ex.fe_len;
4308 
4309 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4310 					&ac->ac_b_ex.fe_group,
4311 					&ac->ac_b_ex.fe_start);
4312 	ac->ac_b_ex.fe_len = len;
4313 	ac->ac_status = AC_STATUS_FOUND;
4314 	ac->ac_pa = pa;
4315 
4316 	/* we don't correct pa_pstart or pa_len here to avoid a
4317 	 * possible race when the group is being loaded concurrently;
4318 	 * instead we correct the pa later, after blocks are marked
4319 	 * in the on-disk bitmap -- see ext4_mb_release_context().
4320 	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
4321 	 */
4322 	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4323 		 pa->pa_lstart-len, len, pa);
4324 }
4325 
4326 /*
4327  * Return the prealloc space that has the minimal distance
4328  * from the goal block. @cpa is the prealloc
4329  * space with the currently known minimal distance
4330  * from the goal block.
4331  */
4332 static struct ext4_prealloc_space *
4333 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4334 			struct ext4_prealloc_space *pa,
4335 			struct ext4_prealloc_space *cpa)
4336 {
4337 	ext4_fsblk_t cur_distance, new_distance;
4338 
4339 	if (cpa == NULL) {
4340 		atomic_inc(&pa->pa_count);
4341 		return pa;
4342 	}
4343 	cur_distance = abs(goal_block - cpa->pa_pstart);
4344 	new_distance = abs(goal_block - pa->pa_pstart);
4345 
4346 	if (cur_distance <= new_distance)
4347 		return cpa;
4348 
4349 	/* drop the previous reference */
4350 	atomic_dec(&cpa->pa_count);
4351 	atomic_inc(&pa->pa_count);
4352 	return pa;
4353 }
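/*
 * Illustrative example: with goal_block == 1000, a current best @cpa at
 * pa_pstart == 900 (distance 100) loses to a candidate @pa at
 * pa_pstart == 1040 (distance 40); the reference on cpa is dropped and
 * the winner is returned holding one pa_count reference for the caller.
 */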
4354 
4355 /*
4356  * search goal blocks in preallocated space
4357  */
4358 static noinline_for_stack bool
4359 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4360 {
4361 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4362 	int order, i;
4363 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4364 	struct ext4_locality_group *lg;
4365 	struct ext4_prealloc_space *pa, *cpa = NULL;
4366 	ext4_fsblk_t goal_block;
4367 
4368 	/* only data can be preallocated */
4369 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4370 		return false;
4371 
4372 	/* first, try per-file preallocation */
4373 	rcu_read_lock();
4374 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4375 
4376 		/* all fields in this condition don't change,
4377 		 * so we can skip locking for them */
4378 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4379 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
4380 					       EXT4_C2B(sbi, pa->pa_len)))
4381 			continue;
4382 
4383 		/* non-extent files can't have physical blocks past 2^32 */
4384 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4385 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4386 		     EXT4_MAX_BLOCK_FILE_PHYS))
4387 			continue;
4388 
4389 		/* found preallocated blocks, use them */
4390 		spin_lock(&pa->pa_lock);
4391 		if (pa->pa_deleted == 0 && pa->pa_free) {
4392 			atomic_inc(&pa->pa_count);
4393 			ext4_mb_use_inode_pa(ac, pa);
4394 			spin_unlock(&pa->pa_lock);
4395 			ac->ac_criteria = 10;
4396 			rcu_read_unlock();
4397 			return true;
4398 		}
4399 		spin_unlock(&pa->pa_lock);
4400 	}
4401 	rcu_read_unlock();
4402 
4403 	/* can we use group allocation? */
4404 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4405 		return false;
4406 
4407 	/* inode may have no locality group for some reason */
4408 	lg = ac->ac_lg;
4409 	if (lg == NULL)
4410 		return false;
4411 	order  = fls(ac->ac_o_ex.fe_len) - 1;
4412 	if (order > PREALLOC_TB_SIZE - 1)
4413 		/* The max size of hash table is PREALLOC_TB_SIZE */
4414 		order = PREALLOC_TB_SIZE - 1;
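	/*
	 * e.g. (illustrative): fe_len == 13 gives fls(13) == 4 and thus
	 * order == 3, so the buckets for order 3 and above are scanned
	 * below; only PAs at least that large can satisfy the request.
	 */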
4415 
4416 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4417 	/*
4418 	 * search for the prealloc space with the
4419 	 * minimal distance from the goal block.
4420 	 */
4421 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
4422 		rcu_read_lock();
4423 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4424 					pa_inode_list) {
4425 			spin_lock(&pa->pa_lock);
4426 			if (pa->pa_deleted == 0 &&
4427 					pa->pa_free >= ac->ac_o_ex.fe_len) {
4428 
4429 				cpa = ext4_mb_check_group_pa(goal_block,
4430 								pa, cpa);
4431 			}
4432 			spin_unlock(&pa->pa_lock);
4433 		}
4434 		rcu_read_unlock();
4435 	}
4436 	if (cpa) {
4437 		ext4_mb_use_group_pa(ac, cpa);
4438 		ac->ac_criteria = 20;
4439 		return true;
4440 	}
4441 	return false;
4442 }
4443 
4444 /*
4445  * This function goes through all blocks freed in the group
4446  * but not yet committed and marks them used in the in-core bitmap.
4447  * The buddy must be generated from this bitmap.
4448  * Needs to be called with the ext4 group lock held.
4449  */
4450 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4451 						ext4_group_t group)
4452 {
4453 	struct rb_node *n;
4454 	struct ext4_group_info *grp;
4455 	struct ext4_free_data *entry;
4456 
4457 	grp = ext4_get_group_info(sb, group);
4458 	n = rb_first(&(grp->bb_free_root));
4459 
4460 	while (n) {
4461 		entry = rb_entry(n, struct ext4_free_data, efd_node);
4462 		mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4463 		n = rb_next(n);
4464 	}
4465 	return;
4466 }
4467 
4468 /*
4469  * This function goes through all preallocations in this group and marks
4470  * them used in the in-core bitmap. The buddy must be generated from this
4471  * bitmap. Needs to be called with the ext4 group lock held.
4472  */
4473 static noinline_for_stack
4474 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4475 					ext4_group_t group)
4476 {
4477 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4478 	struct ext4_prealloc_space *pa;
4479 	struct list_head *cur;
4480 	ext4_group_t groupnr;
4481 	ext4_grpblk_t start;
4482 	int preallocated = 0;
4483 	int len;
4484 
4485 	/* all forms of preallocation discard first load the group,
4486 	 * so the only competing code is preallocation use.
4487 	 * we don't need any locking here.
4488 	 * notice that we do NOT ignore preallocations with pa_deleted set;
4489 	 * otherwise we could leave used blocks available for
4490 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4491 	 * is dropping the preallocation
4492 	 */
4493 	list_for_each(cur, &grp->bb_prealloc_list) {
4494 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4495 		spin_lock(&pa->pa_lock);
4496 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4497 					     &groupnr, &start);
4498 		len = pa->pa_len;
4499 		spin_unlock(&pa->pa_lock);
4500 		if (unlikely(len == 0))
4501 			continue;
4502 		BUG_ON(groupnr != group);
4503 		mb_set_bits(bitmap, start, len);
4504 		preallocated += len;
4505 	}
4506 	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4507 }
4508 
4509 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4510 				    struct ext4_prealloc_space *pa)
4511 {
4512 	struct ext4_inode_info *ei;
4513 
4514 	if (pa->pa_deleted) {
4515 		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4516 			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4517 			     pa->pa_len);
4518 		return;
4519 	}
4520 
4521 	pa->pa_deleted = 1;
4522 
4523 	if (pa->pa_type == MB_INODE_PA) {
4524 		ei = EXT4_I(pa->pa_inode);
4525 		atomic_dec(&ei->i_prealloc_active);
4526 	}
4527 }
4528 
4529 static void ext4_mb_pa_callback(struct rcu_head *head)
4530 {
4531 	struct ext4_prealloc_space *pa;
4532 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4533 
4534 	BUG_ON(atomic_read(&pa->pa_count));
4535 	BUG_ON(pa->pa_deleted == 0);
4536 	kmem_cache_free(ext4_pspace_cachep, pa);
4537 }
4538 
4539 /*
4540  * drops a reference to preallocated space descriptor
4541  * if this was the last reference and the space is consumed
4542  */
4543 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4544 			struct super_block *sb, struct ext4_prealloc_space *pa)
4545 {
4546 	ext4_group_t grp;
4547 	ext4_fsblk_t grp_blk;
4548 
4549 	/* in this short window concurrent discard can set pa_deleted */
4550 	spin_lock(&pa->pa_lock);
4551 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4552 		spin_unlock(&pa->pa_lock);
4553 		return;
4554 	}
4555 
4556 	if (pa->pa_deleted == 1) {
4557 		spin_unlock(&pa->pa_lock);
4558 		return;
4559 	}
4560 
4561 	ext4_mb_mark_pa_deleted(sb, pa);
4562 	spin_unlock(&pa->pa_lock);
4563 
4564 	grp_blk = pa->pa_pstart;
4565 	/*
4566 	 * If doing group-based preallocation, pa_pstart may be in the
4567 	 * next group when pa is used up
4568 	 */
4569 	if (pa->pa_type == MB_GROUP_PA)
4570 		grp_blk--;
4571 
4572 	grp = ext4_get_group_number(sb, grp_blk);
4573 
4574 	/*
4575 	 * possible race:
4576 	 *
4577 	 *  P1 (buddy init)			P2 (regular allocation)
4578 	 *					find block B in PA
4579 	 *  copy on-disk bitmap to buddy
4580 	 *  					mark B in on-disk bitmap
4581 	 *					drop PA from group
4582 	 *  mark all PAs in buddy
4583 	 *
4584 	 * thus, P1 initializes buddy with B available. to prevent this
4585 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4586 	 * against that pair
4587 	 */
4588 	ext4_lock_group(sb, grp);
4589 	list_del(&pa->pa_group_list);
4590 	ext4_unlock_group(sb, grp);
4591 
4592 	spin_lock(pa->pa_obj_lock);
4593 	list_del_rcu(&pa->pa_inode_list);
4594 	spin_unlock(pa->pa_obj_lock);
4595 
4596 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4597 }
4598 
4599 /*
4600  * creates new preallocated space for given inode
4601  */
4602 static noinline_for_stack void
4603 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4604 {
4605 	struct super_block *sb = ac->ac_sb;
4606 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4607 	struct ext4_prealloc_space *pa;
4608 	struct ext4_group_info *grp;
4609 	struct ext4_inode_info *ei;
4610 
4611 	/* preallocate only when found space is larger than requested */
4612 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4613 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4614 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4615 	BUG_ON(ac->ac_pa == NULL);
4616 
4617 	pa = ac->ac_pa;
4618 
4619 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4620 		int winl;
4621 		int wins;
4622 		int win;
4623 		int offs;
4624 
4625 		/* we can't allocate as much as the normalizer wants,
4626 		 * so the found space must get a proper lstart to cover
4627 		 * the original request (see the worked example below) */
4628 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4629 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4630 
4631 		/* we're limited by the original request in that the
4632 		 * logical block must be covered anyway;
4633 		 * winl is the window we can move our chunk within */
4634 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4635 
4636 		/* also, we should cover whole original request */
4637 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4638 
4639 		/* the smallest one defines real window */
4640 		win = min(winl, wins);
4641 
4642 		offs = ac->ac_o_ex.fe_logical %
4643 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4644 		if (offs && offs < win)
4645 			win = offs;
4646 
4647 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4648 			EXT4_NUM_B2C(sbi, win);
4649 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4650 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4651 	}
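	/*
	 * Worked example of the window logic above (illustrative, cluster
	 * == block): original request fe_logical == 10, fe_len == 4;
	 * normalized goal fe_logical == 0, fe_len == 16; only 8 blocks
	 * found:
	 *
	 *	winl = 10 - 0 = 10
	 *	wins = 8 - 4  = 4
	 *	win  = min(10, 4) = 4
	 *	offs = 10 % 8 = 2	(2 < 4, so win = 2)
	 *
	 * hence fe_logical = 10 - 2 = 8, and the pa covers logical blocks
	 * [8, 16), fully containing the original request [10, 14).
	 */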
4652 
4653 	/* preallocation can change ac_b_ex, thus we store actually
4654 	 * allocated blocks for history */
4655 	ac->ac_f_ex = ac->ac_b_ex;
4656 
4657 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
4658 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4659 	pa->pa_len = ac->ac_b_ex.fe_len;
4660 	pa->pa_free = pa->pa_len;
4661 	spin_lock_init(&pa->pa_lock);
4662 	INIT_LIST_HEAD(&pa->pa_inode_list);
4663 	INIT_LIST_HEAD(&pa->pa_group_list);
4664 	pa->pa_deleted = 0;
4665 	pa->pa_type = MB_INODE_PA;
4666 
4667 	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4668 		 pa->pa_len, pa->pa_lstart);
4669 	trace_ext4_mb_new_inode_pa(ac, pa);
4670 
4671 	ext4_mb_use_inode_pa(ac, pa);
4672 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4673 
4674 	ei = EXT4_I(ac->ac_inode);
4675 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4676 
4677 	pa->pa_obj_lock = &ei->i_prealloc_lock;
4678 	pa->pa_inode = ac->ac_inode;
4679 
4680 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4681 
4682 	spin_lock(pa->pa_obj_lock);
4683 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4684 	spin_unlock(pa->pa_obj_lock);
4685 	atomic_inc(&ei->i_prealloc_active);
4686 }
4687 
4688 /*
4689  * creates new preallocated space for the locality group this inode belongs to
4690  */
4691 static noinline_for_stack void
4692 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4693 {
4694 	struct super_block *sb = ac->ac_sb;
4695 	struct ext4_locality_group *lg;
4696 	struct ext4_prealloc_space *pa;
4697 	struct ext4_group_info *grp;
4698 
4699 	/* preallocate only when found space is larger than requested */
4700 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4701 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4702 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4703 	BUG_ON(ac->ac_pa == NULL);
4704 
4705 	pa = ac->ac_pa;
4706 
4707 	/* preallocation can change ac_b_ex, thus we store actually
4708 	 * allocated blocks for history */
4709 	ac->ac_f_ex = ac->ac_b_ex;
4710 
4711 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4712 	pa->pa_lstart = pa->pa_pstart;
4713 	pa->pa_len = ac->ac_b_ex.fe_len;
4714 	pa->pa_free = pa->pa_len;
4715 	spin_lock_init(&pa->pa_lock);
4716 	INIT_LIST_HEAD(&pa->pa_inode_list);
4717 	INIT_LIST_HEAD(&pa->pa_group_list);
4718 	pa->pa_deleted = 0;
4719 	pa->pa_type = MB_GROUP_PA;
4720 
4721 	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4722 		 pa->pa_len, pa->pa_lstart);
4723 	trace_ext4_mb_new_group_pa(ac, pa);
4724 
4725 	ext4_mb_use_group_pa(ac, pa);
4726 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4727 
4728 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4729 	lg = ac->ac_lg;
4730 	BUG_ON(lg == NULL);
4731 
4732 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
4733 	pa->pa_inode = NULL;
4734 
4735 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4736 
4737 	/*
4738 	 * We will later add the new pa to the right bucket
4739 	 * after updating the pa_free in ext4_mb_release_context
4740 	 */
4741 }
4742 
4743 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4744 {
4745 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4746 		ext4_mb_new_group_pa(ac);
4747 	else
4748 		ext4_mb_new_inode_pa(ac);
4749 }
4750 
4751 /*
4752  * finds all unused blocks in on-disk bitmap, frees them in
4753  * in-core bitmap and buddy.
4754  * @pa must be unlinked from inode and group lists, so that
4755  * nobody else can find/use it.
4756  * the caller MUST hold group/inode locks.
4757  * TODO: optimize the case when there are no in-core structures yet
4758  */
4759 static noinline_for_stack int
4760 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4761 			struct ext4_prealloc_space *pa)
4762 {
4763 	struct super_block *sb = e4b->bd_sb;
4764 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4765 	unsigned int end;
4766 	unsigned int next;
4767 	ext4_group_t group;
4768 	ext4_grpblk_t bit;
4769 	unsigned long long grp_blk_start;
4770 	int free = 0;
4771 
4772 	BUG_ON(pa->pa_deleted == 0);
4773 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4774 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4775 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4776 	end = bit + pa->pa_len;
4777 
4778 	while (bit < end) {
4779 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4780 		if (bit >= end)
4781 			break;
4782 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4783 		mb_debug(sb, "free preallocated %u/%u in group %u\n",
4784 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4785 			 (unsigned) next - bit, (unsigned) group);
4786 		free += next - bit;
4787 
4788 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4789 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4790 						    EXT4_C2B(sbi, bit)),
4791 					       next - bit);
4792 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4793 		bit = next + 1;
4794 	}
4795 	if (free != pa->pa_free) {
4796 		ext4_msg(e4b->bd_sb, KERN_CRIT,
4797 			 "pa %p: logic %lu, phys. %lu, len %d",
4798 			 pa, (unsigned long) pa->pa_lstart,
4799 			 (unsigned long) pa->pa_pstart,
4800 			 pa->pa_len);
4801 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4802 					free, pa->pa_free);
4803 		/*
4804 		 * pa is already deleted so we use the value obtained
4805 		 * from the bitmap and continue.
4806 		 */
4807 	}
4808 	atomic_add(free, &sbi->s_mb_discarded);
4809 
4810 	return 0;
4811 }
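/*
 * Worked example (illustrative): suppose the pa covers bits [100, 116)
 * of this group's bitmap and bits 104-107 were actually allocated and
 * used. The scan above frees the two unused runs:
 *
 *	[100, 104) -> mb_free_blocks(..., 100, 4)
 *	[108, 116) -> mb_free_blocks(..., 108, 8)
 *
 * so free == 12, which must match pa_free; a mismatch indicates a
 * corrupted bitmap and is reported via ext4_grp_locked_error().
 */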
4812 
4813 static noinline_for_stack int
4814 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4815 				struct ext4_prealloc_space *pa)
4816 {
4817 	struct super_block *sb = e4b->bd_sb;
4818 	ext4_group_t group;
4819 	ext4_grpblk_t bit;
4820 
4821 	trace_ext4_mb_release_group_pa(sb, pa);
4822 	BUG_ON(pa->pa_deleted == 0);
4823 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4824 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4825 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4826 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4827 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4828 
4829 	return 0;
4830 }
4831 
4832 /*
4833  * releases all preallocations in given group
4834  *
4835  * first, we need to decide discard policy:
4836  * - when do we discard
4837  *   1) ENOSPC
4838  * - how many do we discard
4839  *   1) how many requested
4840  */
4841 static noinline_for_stack int
4842 ext4_mb_discard_group_preallocations(struct super_block *sb,
4843 				     ext4_group_t group, int *busy)
4844 {
4845 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4846 	struct buffer_head *bitmap_bh = NULL;
4847 	struct ext4_prealloc_space *pa, *tmp;
4848 	struct list_head list;
4849 	struct ext4_buddy e4b;
4850 	int err;
4851 	int free = 0;
4852 
4853 	mb_debug(sb, "discard preallocation for group %u\n", group);
4854 	if (list_empty(&grp->bb_prealloc_list))
4855 		goto out_dbg;
4856 
4857 	bitmap_bh = ext4_read_block_bitmap(sb, group);
4858 	if (IS_ERR(bitmap_bh)) {
4859 		err = PTR_ERR(bitmap_bh);
4860 		ext4_error_err(sb, -err,
4861 			       "Error %d reading block bitmap for %u",
4862 			       err, group);
4863 		goto out_dbg;
4864 	}
4865 
4866 	err = ext4_mb_load_buddy(sb, group, &e4b);
4867 	if (err) {
4868 		ext4_warning(sb, "Error %d loading buddy information for %u",
4869 			     err, group);
4870 		put_bh(bitmap_bh);
4871 		goto out_dbg;
4872 	}
4873 
4874 	INIT_LIST_HEAD(&list);
4875 	ext4_lock_group(sb, group);
4876 	list_for_each_entry_safe(pa, tmp,
4877 				&grp->bb_prealloc_list, pa_group_list) {
4878 		spin_lock(&pa->pa_lock);
4879 		if (atomic_read(&pa->pa_count)) {
4880 			spin_unlock(&pa->pa_lock);
4881 			*busy = 1;
4882 			continue;
4883 		}
4884 		if (pa->pa_deleted) {
4885 			spin_unlock(&pa->pa_lock);
4886 			continue;
4887 		}
4888 
4889 		/* seems this one can be freed ... */
4890 		ext4_mb_mark_pa_deleted(sb, pa);
4891 
4892 		if (!free)
4893 			this_cpu_inc(discard_pa_seq);
4894 
4895 		/* we can trust pa_free ... */
4896 		free += pa->pa_free;
4897 
4898 		spin_unlock(&pa->pa_lock);
4899 
4900 		list_del(&pa->pa_group_list);
4901 		list_add(&pa->u.pa_tmp_list, &list);
4902 	}
4903 
4904 	/* now free all selected PAs */
4905 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4906 
4907 		/* remove from object (inode or locality group) */
4908 		spin_lock(pa->pa_obj_lock);
4909 		list_del_rcu(&pa->pa_inode_list);
4910 		spin_unlock(pa->pa_obj_lock);
4911 
4912 		if (pa->pa_type == MB_GROUP_PA)
4913 			ext4_mb_release_group_pa(&e4b, pa);
4914 		else
4915 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4916 
4917 		list_del(&pa->u.pa_tmp_list);
4918 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4919 	}
4920 
4921 	ext4_unlock_group(sb, group);
4922 	ext4_mb_unload_buddy(&e4b);
4923 	put_bh(bitmap_bh);
4924 out_dbg:
4925 	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4926 		 free, group, grp->bb_free);
4927 	return free;
4928 }
4929 
4930 /*
4931  * releases all non-used preallocated blocks for given inode
4932  *
4933  * It's important to discard preallocations under i_data_sem
4934  * We don't want another block to be served from the prealloc
4935  * space when we are discarding the inode prealloc space.
4936  *
4937  * FIXME!! Make sure it is valid at all the call sites
4938  */
4939 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4940 {
4941 	struct ext4_inode_info *ei = EXT4_I(inode);
4942 	struct super_block *sb = inode->i_sb;
4943 	struct buffer_head *bitmap_bh = NULL;
4944 	struct ext4_prealloc_space *pa, *tmp;
4945 	ext4_group_t group = 0;
4946 	struct list_head list;
4947 	struct ext4_buddy e4b;
4948 	int err;
4949 
4950 	if (!S_ISREG(inode->i_mode)) {
4951 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4952 		return;
4953 	}
4954 
4955 	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4956 		return;
4957 
4958 	mb_debug(sb, "discard preallocation for inode %lu\n",
4959 		 inode->i_ino);
4960 	trace_ext4_discard_preallocations(inode,
4961 			atomic_read(&ei->i_prealloc_active), needed);
4962 
4963 	INIT_LIST_HEAD(&list);
4964 
4965 	if (needed == 0)
4966 		needed = UINT_MAX;
4967 
4968 repeat:
4969 	/* first, collect all pa's in the inode */
4970 	spin_lock(&ei->i_prealloc_lock);
4971 	while (!list_empty(&ei->i_prealloc_list) && needed) {
4972 		pa = list_entry(ei->i_prealloc_list.prev,
4973 				struct ext4_prealloc_space, pa_inode_list);
4974 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4975 		spin_lock(&pa->pa_lock);
4976 		if (atomic_read(&pa->pa_count)) {
4977 			/* this shouldn't happen often - nobody should
4978 			 * use preallocation while we're discarding it */
4979 			spin_unlock(&pa->pa_lock);
4980 			spin_unlock(&ei->i_prealloc_lock);
4981 			ext4_msg(sb, KERN_ERR,
4982 				 "uh-oh! used pa while discarding");
4983 			WARN_ON(1);
4984 			schedule_timeout_uninterruptible(HZ);
4985 			goto repeat;
4986 
4987 		}
4988 		if (pa->pa_deleted == 0) {
4989 			ext4_mb_mark_pa_deleted(sb, pa);
4990 			spin_unlock(&pa->pa_lock);
4991 			list_del_rcu(&pa->pa_inode_list);
4992 			list_add(&pa->u.pa_tmp_list, &list);
4993 			needed--;
4994 			continue;
4995 		}
4996 
4997 		/* someone is deleting pa right now */
4998 		spin_unlock(&pa->pa_lock);
4999 		spin_unlock(&ei->i_prealloc_lock);
5000 
5001 		/* we have to wait here because pa_deleted
5002 		 * doesn't mean the pa is already unlinked from
5003 		 * the list. since we might be called from
5004 		 * ->clear_inode(), the inode will get freed, and a
5005 		 * concurrent thread which is unlinking the pa
5006 		 * from the inode's list may access already
5007 		 * freed memory - bad, bad, bad */
5008 
5009 		/* XXX: if this happens too often, we can
5010 		 * add a flag to force wait only in case
5011 		 * of ->clear_inode(), but not in case of
5012 		 * regular truncate */
5013 		schedule_timeout_uninterruptible(HZ);
5014 		goto repeat;
5015 	}
5016 	spin_unlock(&ei->i_prealloc_lock);
5017 
5018 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5019 		BUG_ON(pa->pa_type != MB_INODE_PA);
5020 		group = ext4_get_group_number(sb, pa->pa_pstart);
5021 
5022 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5023 					     GFP_NOFS|__GFP_NOFAIL);
5024 		if (err) {
5025 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5026 				       err, group);
5027 			continue;
5028 		}
5029 
5030 		bitmap_bh = ext4_read_block_bitmap(sb, group);
5031 		if (IS_ERR(bitmap_bh)) {
5032 			err = PTR_ERR(bitmap_bh);
5033 			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5034 				       err, group);
5035 			ext4_mb_unload_buddy(&e4b);
5036 			continue;
5037 		}
5038 
5039 		ext4_lock_group(sb, group);
5040 		list_del(&pa->pa_group_list);
5041 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5042 		ext4_unlock_group(sb, group);
5043 
5044 		ext4_mb_unload_buddy(&e4b);
5045 		put_bh(bitmap_bh);
5046 
5047 		list_del(&pa->u.pa_tmp_list);
5048 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5049 	}
5050 }
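
/*
 * Illustrative sketch (not part of mballoc): the shape of the
 * collect-then-release pattern used by ext4_discard_preallocations()
 * above.  Victims are moved to a private list under the spinlock and
 * released afterwards, when sleeping and I/O are allowed.  The real
 * code cannot splice blindly because it must skip in-use PAs; 'struct
 * victim' and release_victim() are hypothetical names used only here.
 */
struct victim {
	struct list_head list;
};

static void release_victim(struct victim *v)
{
	kfree(v);			/* stand-in for the real teardown */
}

static void release_all_sketch(spinlock_t *lock, struct list_head *src)
{
	struct victim *v, *tmp;
	LIST_HEAD(local);

	spin_lock(lock);
	list_splice_init(src, &local);	/* steal all entries at once */
	spin_unlock(lock);

	list_for_each_entry_safe(v, tmp, &local, list) {
		list_del(&v->list);
		release_victim(v);	/* no lock held here */
	}
}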
5051 
5052 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5053 {
5054 	struct ext4_prealloc_space *pa;
5055 
5056 	BUG_ON(ext4_pspace_cachep == NULL);
5057 	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5058 	if (!pa)
5059 		return -ENOMEM;
5060 	atomic_set(&pa->pa_count, 1);
5061 	ac->ac_pa = pa;
5062 	return 0;
5063 }
5064 
5065 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5066 {
5067 	struct ext4_prealloc_space *pa = ac->ac_pa;
5068 
5069 	BUG_ON(!pa);
5070 	ac->ac_pa = NULL;
5071 	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5072 	kmem_cache_free(ext4_pspace_cachep, pa);
5073 }
5074 
5075 #ifdef CONFIG_EXT4_DEBUG
5076 static inline void ext4_mb_show_pa(struct super_block *sb)
5077 {
5078 	ext4_group_t i, ngroups;
5079 
5080 	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5081 		return;
5082 
5083 	ngroups = ext4_get_groups_count(sb);
5084 	mb_debug(sb, "groups: ");
5085 	for (i = 0; i < ngroups; i++) {
5086 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5087 		struct ext4_prealloc_space *pa;
5088 		ext4_grpblk_t start;
5089 		struct list_head *cur;
5090 		ext4_lock_group(sb, i);
5091 		list_for_each(cur, &grp->bb_prealloc_list) {
5092 			pa = list_entry(cur, struct ext4_prealloc_space,
5093 					pa_group_list);
5094 			spin_lock(&pa->pa_lock);
5095 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5096 						     NULL, &start);
5097 			spin_unlock(&pa->pa_lock);
5098 			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5099 				 pa->pa_len);
5100 		}
5101 		ext4_unlock_group(sb, i);
5102 		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5103 			 grp->bb_fragments);
5104 	}
5105 }
5106 
5107 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5108 {
5109 	struct super_block *sb = ac->ac_sb;
5110 
5111 	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5112 		return;
5113 
5114 	mb_debug(sb, "Can't allocate:"
5115 			" Allocation context details:");
5116 	mb_debug(sb, "status %u flags 0x%x",
5117 			ac->ac_status, ac->ac_flags);
5118 	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5119 			"goal %lu/%lu/%lu@%lu, "
5120 			"best %lu/%lu/%lu@%lu cr %d",
5121 			(unsigned long)ac->ac_o_ex.fe_group,
5122 			(unsigned long)ac->ac_o_ex.fe_start,
5123 			(unsigned long)ac->ac_o_ex.fe_len,
5124 			(unsigned long)ac->ac_o_ex.fe_logical,
5125 			(unsigned long)ac->ac_g_ex.fe_group,
5126 			(unsigned long)ac->ac_g_ex.fe_start,
5127 			(unsigned long)ac->ac_g_ex.fe_len,
5128 			(unsigned long)ac->ac_g_ex.fe_logical,
5129 			(unsigned long)ac->ac_b_ex.fe_group,
5130 			(unsigned long)ac->ac_b_ex.fe_start,
5131 			(unsigned long)ac->ac_b_ex.fe_len,
5132 			(unsigned long)ac->ac_b_ex.fe_logical,
5133 			(int)ac->ac_criteria);
5134 	mb_debug(sb, "%u found", ac->ac_found);
5135 	ext4_mb_show_pa(sb);
5136 }
5137 #else
5138 static inline void ext4_mb_show_pa(struct super_block *sb)
5139 {
5140 	return;
5141 }
5142 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5143 {
5144 	ext4_mb_show_pa(ac->ac_sb);
5145 	return;
5146 }
5147 #endif
5148 
5149 /*
5150  * We use locality group preallocation for small files. The size of the
5151  * file is determined by the current size or the resulting size after
5152  * allocation, whichever is larger.
5153  *
5154  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5155  */
5156 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5157 {
5158 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5159 	int bsbits = ac->ac_sb->s_blocksize_bits;
5160 	loff_t size, isize;
5161 	bool inode_pa_eligible, group_pa_eligible;
5162 
5163 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5164 		return;
5165 
5166 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5167 		return;
5168 
5169 	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5170 	inode_pa_eligible = true;
5171 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
5172 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5173 		>> bsbits;
5174 
5175 	/* No point in using inode preallocation for closed files */
5176 	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5177 	    !inode_is_open_for_write(ac->ac_inode))
5178 		inode_pa_eligible = false;
5179 
5180 	size = max(size, isize);
5181 	/* Don't use group allocation for large files */
5182 	if (size > sbi->s_mb_stream_request)
5183 		group_pa_eligible = false;
5184 
5185 	if (!group_pa_eligible) {
5186 		if (inode_pa_eligible)
5187 			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5188 		else
5189 			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5190 		return;
5191 	}
5192 
5193 	BUG_ON(ac->ac_lg != NULL);
5194 	/*
5195 	 * Locality group prealloc space is per-CPU. The reason for having
5196 	 * a per-CPU locality group is to reduce contention between block
5197 	 * requests from multiple CPUs.
5198 	 */
5199 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5200 
5201 	/* we're going to use group allocation */
5202 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5203 
5204 	/* serialize all allocations in the group */
5205 	mutex_lock(&ac->ac_lg->lg_mutex);
5206 }
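
/*
 * Illustrative sketch (not part of mballoc): the size test made by
 * ext4_mb_group_or_file() above, in isolation.  The file is judged by
 * the larger of its current size and the size it would have after this
 * allocation; only files at or below the mb_stream_req threshold keep
 * using the per-CPU locality group.
 */
static bool use_group_pa_sketch(loff_t cur_blocks, loff_t new_blocks,
				unsigned int stream_request)
{
	loff_t size = max(cur_blocks, new_blocks);

	return size <= stream_request;	/* small file => group PA */
}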
5207 
5208 static noinline_for_stack int
5209 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5210 				struct ext4_allocation_request *ar)
5211 {
5212 	struct super_block *sb = ar->inode->i_sb;
5213 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5214 	struct ext4_super_block *es = sbi->s_es;
5215 	ext4_group_t group;
5216 	unsigned int len;
5217 	ext4_fsblk_t goal;
5218 	ext4_grpblk_t block;
5219 
5220 	/* we can't allocate > group size */
5221 	len = ar->len;
5222 
5223 	/* just a dirty hack to filter too-big requests */
5224 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5225 		len = EXT4_CLUSTERS_PER_GROUP(sb);
5226 
5227 	/* start searching from the goal */
5228 	goal = ar->goal;
5229 	if (goal < le32_to_cpu(es->s_first_data_block) ||
5230 			goal >= ext4_blocks_count(es))
5231 		goal = le32_to_cpu(es->s_first_data_block);
5232 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
5233 
5234 	/* set up allocation goals */
5235 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5236 	ac->ac_status = AC_STATUS_CONTINUE;
5237 	ac->ac_sb = sb;
5238 	ac->ac_inode = ar->inode;
5239 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5240 	ac->ac_o_ex.fe_group = group;
5241 	ac->ac_o_ex.fe_start = block;
5242 	ac->ac_o_ex.fe_len = len;
5243 	ac->ac_g_ex = ac->ac_o_ex;
5244 	ac->ac_flags = ar->flags;
5245 
5246 	/* we have to define context: we'll work with a file or
5247 	 * locality group. This is a policy, actually */
5248 	ext4_mb_group_or_file(ac);
5249 
5250 	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5251 			"left: %u/%u, right %u/%u to %swritable\n",
5252 			(unsigned) ar->len, (unsigned) ar->logical,
5253 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5254 			(unsigned) ar->lleft, (unsigned) ar->pleft,
5255 			(unsigned) ar->lright, (unsigned) ar->pright,
5256 			inode_is_open_for_write(ar->inode) ? "" : "non-");
5257 	return 0;
5259 }
5260 
5261 static noinline_for_stack void
5262 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5263 					struct ext4_locality_group *lg,
5264 					int order, int total_entries)
5265 {
5266 	ext4_group_t group = 0;
5267 	struct ext4_buddy e4b;
5268 	struct list_head discard_list;
5269 	struct ext4_prealloc_space *pa, *tmp;
5270 
5271 	mb_debug(sb, "discard locality group preallocation\n");
5272 
5273 	INIT_LIST_HEAD(&discard_list);
5274 
5275 	spin_lock(&lg->lg_prealloc_lock);
5276 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5277 				pa_inode_list,
5278 				lockdep_is_held(&lg->lg_prealloc_lock)) {
5279 		spin_lock(&pa->pa_lock);
5280 		if (atomic_read(&pa->pa_count)) {
5281 			/*
5282 			 * This is the pa that we just used
5283 			 * for block allocation. So don't
5284 			 * free that
5285 			 */
5286 			spin_unlock(&pa->pa_lock);
5287 			continue;
5288 		}
5289 		if (pa->pa_deleted) {
5290 			spin_unlock(&pa->pa_lock);
5291 			continue;
5292 		}
5293 		/* only lg prealloc space */
5294 		BUG_ON(pa->pa_type != MB_GROUP_PA);
5295 
5296 		/* seems this one can be freed ... */
5297 		ext4_mb_mark_pa_deleted(sb, pa);
5298 		spin_unlock(&pa->pa_lock);
5299 
5300 		list_del_rcu(&pa->pa_inode_list);
5301 		list_add(&pa->u.pa_tmp_list, &discard_list);
5302 
5303 		total_entries--;
5304 		if (total_entries <= 5) {
5305 			/*
5306 			 * we want to keep only 5 entries,
5307 			 * allowing the list to grow to 8.
5308 			 * This makes sure we don't call
5309 			 * discard again soon for this list.
5310 			 */
5311 			break;
5312 		}
5313 	}
5314 	spin_unlock(&lg->lg_prealloc_lock);
5315 
5316 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5317 		int err;
5318 
5319 		group = ext4_get_group_number(sb, pa->pa_pstart);
5320 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5321 					     GFP_NOFS|__GFP_NOFAIL);
5322 		if (err) {
5323 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5324 				       err, group);
5325 			continue;
5326 		}
5327 		ext4_lock_group(sb, group);
5328 		list_del(&pa->pa_group_list);
5329 		ext4_mb_release_group_pa(&e4b, pa);
5330 		ext4_unlock_group(sb, group);
5331 
5332 		ext4_mb_unload_buddy(&e4b);
5333 		list_del(&pa->u.pa_tmp_list);
5334 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5335 	}
5336 }
5337 
5338 /*
5339  * We have incremented pa_count. So it cannot be freed at this
5340  * point. Also we hold lg_mutex. So no parallel allocation is
5341  * possible from this lg. That means pa_free cannot be updated.
5342  *
5343  * A parallel ext4_mb_discard_group_preallocations is possible,
5344  * which can cause the lg_prealloc_list to be updated.
5345  */
5346 
5347 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5348 {
5349 	int order, added = 0, lg_prealloc_count = 1;
5350 	struct super_block *sb = ac->ac_sb;
5351 	struct ext4_locality_group *lg = ac->ac_lg;
5352 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5353 
5354 	order = fls(pa->pa_free) - 1;
5355 	if (order > PREALLOC_TB_SIZE - 1)
5356 		/* The max size of hash table is PREALLOC_TB_SIZE */
5357 		order = PREALLOC_TB_SIZE - 1;
5358 	/* Add the prealloc space to lg */
5359 	spin_lock(&lg->lg_prealloc_lock);
5360 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5361 				pa_inode_list,
5362 				lockdep_is_held(&lg->lg_prealloc_lock)) {
5363 		spin_lock(&tmp_pa->pa_lock);
5364 		if (tmp_pa->pa_deleted) {
5365 			spin_unlock(&tmp_pa->pa_lock);
5366 			continue;
5367 		}
5368 		if (!added && pa->pa_free < tmp_pa->pa_free) {
5369 			/* Add to the tail of the previous entry */
5370 			list_add_tail_rcu(&pa->pa_inode_list,
5371 						&tmp_pa->pa_inode_list);
5372 			added = 1;
5373 			/*
5374 			 * we want to count the total
5375 			 * number of entries in the list
5376 			 */
5377 		}
5378 		spin_unlock(&tmp_pa->pa_lock);
5379 		lg_prealloc_count++;
5380 	}
5381 	if (!added)
5382 		list_add_tail_rcu(&pa->pa_inode_list,
5383 					&lg->lg_prealloc_list[order]);
5384 	spin_unlock(&lg->lg_prealloc_lock);
5385 
5386 	/* Now trim the list to be not more than 8 elements */
5387 	if (lg_prealloc_count > 8)
5388 		ext4_mb_discard_lg_preallocations(sb, lg,
5389 						  order, lg_prealloc_count);
5393 }
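
/*
 * Illustrative sketch (not part of mballoc): how ext4_mb_add_n_trim()
 * above picks a bucket.  A PA with pa_free in [2^k, 2^(k+1)) lands in
 * lg_prealloc_list[k], clamped to the last bucket: fls(7) - 1 == 2,
 * fls(8) - 1 == 3, and anything huge stays at PREALLOC_TB_SIZE - 1.
 */
static int lg_order_sketch(int pa_free)
{
	int order = fls(pa_free) - 1;

	return min(order, PREALLOC_TB_SIZE - 1);
}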
5394 
5395 /*
5396  * if per-inode prealloc list is too long, trim some PA
5397  */
5398 static void ext4_mb_trim_inode_pa(struct inode *inode)
5399 {
5400 	struct ext4_inode_info *ei = EXT4_I(inode);
5401 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5402 	int count, delta;
5403 
5404 	count = atomic_read(&ei->i_prealloc_active);
5405 	delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
5406 	if (count > sbi->s_mb_max_inode_prealloc + delta) {
5407 		count -= sbi->s_mb_max_inode_prealloc;
5408 		ext4_discard_preallocations(inode, count);
5409 	}
5410 }
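
/*
 * Illustrative sketch (not part of mballoc): the hysteresis in
 * ext4_mb_trim_inode_pa() above.  The list may overshoot the limit by
 * roughly a quarter before any trimming happens, so a list hovering
 * near the limit is not trimmed on every single allocation.
 */
static bool should_trim_sketch(int count, int max)
{
	int delta = (max >> 2) + 1;	/* allow ~25% overshoot */

	return count > max + delta;	/* then trim back down to 'max' */
}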
5411 
5412 /*
5413  * release all resource we used in allocation
5414  */
5415 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5416 {
5417 	struct inode *inode = ac->ac_inode;
5418 	struct ext4_inode_info *ei = EXT4_I(inode);
5419 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5420 	struct ext4_prealloc_space *pa = ac->ac_pa;
5421 	if (pa) {
5422 		if (pa->pa_type == MB_GROUP_PA) {
5423 			/* see comment in ext4_mb_use_group_pa() */
5424 			spin_lock(&pa->pa_lock);
5425 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5426 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5427 			pa->pa_free -= ac->ac_b_ex.fe_len;
5428 			pa->pa_len -= ac->ac_b_ex.fe_len;
5429 			spin_unlock(&pa->pa_lock);
5430 
5431 			/*
5432 			 * We want to add the pa to the right bucket.
5433 			 * Remove it from the list and while adding
5434 			 * make sure the list to which we are adding
5435 			 * doesn't grow big.
5436 			 */
5437 			if (likely(pa->pa_free)) {
5438 				spin_lock(pa->pa_obj_lock);
5439 				list_del_rcu(&pa->pa_inode_list);
5440 				spin_unlock(pa->pa_obj_lock);
5441 				ext4_mb_add_n_trim(ac);
5442 			}
5443 		}
5444 
5445 		if (pa->pa_type == MB_INODE_PA) {
5446 			/*
5447 			 * treat the per-inode prealloc list as an LRU list, then try
5448 			 * to trim the least recently used PA.
5449 			 */
5450 			spin_lock(pa->pa_obj_lock);
5451 			list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5452 			spin_unlock(pa->pa_obj_lock);
5453 		}
5454 
5455 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
5456 	}
5457 	if (ac->ac_bitmap_page)
5458 		put_page(ac->ac_bitmap_page);
5459 	if (ac->ac_buddy_page)
5460 		put_page(ac->ac_buddy_page);
5461 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5462 		mutex_unlock(&ac->ac_lg->lg_mutex);
5463 	ext4_mb_collect_stats(ac);
5464 	ext4_mb_trim_inode_pa(inode);
5465 	return 0;
5466 }
5467 
5468 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5469 {
5470 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5471 	int ret;
5472 	int freed = 0, busy = 0;
5473 	int retry = 0;
5474 
5475 	trace_ext4_mb_discard_preallocations(sb, needed);
5476 
5477 	if (needed == 0)
5478 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5479  repeat:
5480 	for (i = 0; i < ngroups && needed > 0; i++) {
5481 		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5482 		freed += ret;
5483 		needed -= ret;
5484 		cond_resched();
5485 	}
5486 
5487 	if (needed > 0 && busy && ++retry < 3) {
5488 		busy = 0;
5489 		goto repeat;
5490 	}
5491 
5492 	return freed;
5493 }
5494 
5495 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5496 			struct ext4_allocation_context *ac, u64 *seq)
5497 {
5498 	int freed;
5499 	u64 seq_retry = 0;
5500 	bool ret = false;
5501 
5502 	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5503 	if (freed) {
5504 		ret = true;
5505 		goto out_dbg;
5506 	}
5507 	seq_retry = ext4_get_discard_pa_seq_sum();
5508 	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5509 		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5510 		*seq = seq_retry;
5511 		ret = true;
5512 	}
5513 
5514 out_dbg:
5515 	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5516 	return ret;
5517 }
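
/*
 * Illustrative sketch (not part of mballoc): the decision made by
 * ext4_mb_discard_preallocations_should_retry() above when nothing was
 * freed.  A changed discard sequence number means someone else
 * discarded PAs since our scan, so one more pass may succeed; the
 * first failure also escalates to strict (EXT4_MB_STRICT_CHECK) mode.
 */
static bool retry_sketch(bool already_strict, u64 cur_seq, u64 *seen_seq)
{
	if (!already_strict || cur_seq != *seen_seq) {
		*seen_seq = cur_seq;
		return true;
	}
	return false;
}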
5518 
5519 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5520 				struct ext4_allocation_request *ar, int *errp);
5521 
5522 /*
5523  * Main entry point into mballoc to allocate blocks.
5524  * It tries to use preallocation first, then falls back
5525  * to regular allocation.
5526  */
5527 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5528 				struct ext4_allocation_request *ar, int *errp)
5529 {
5530 	struct ext4_allocation_context *ac = NULL;
5531 	struct ext4_sb_info *sbi;
5532 	struct super_block *sb;
5533 	ext4_fsblk_t block = 0;
5534 	unsigned int inquota = 0;
5535 	unsigned int reserv_clstrs = 0;
5536 	int retries = 0;
5537 	u64 seq;
5538 
5539 	might_sleep();
5540 	sb = ar->inode->i_sb;
5541 	sbi = EXT4_SB(sb);
5542 
5543 	trace_ext4_request_blocks(ar);
5544 	if (sbi->s_mount_state & EXT4_FC_REPLAY)
5545 		return ext4_mb_new_blocks_simple(handle, ar, errp);
5546 
5547 	/* Allow to use superuser reservation for quota file */
5548 	if (ext4_is_quota_file(ar->inode))
5549 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5550 
5551 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5552 		/* Without delayed allocation we need to verify
5553 		 * there are enough free blocks to do block allocation
5554 		 * and that the allocation doesn't exceed the quota limits.
5555 		 */
5556 		while (ar->len &&
5557 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5558 
5559 			/* let others free the space */
5560 			cond_resched();
5561 			ar->len = ar->len >> 1;
5562 		}
5563 		if (!ar->len) {
5564 			ext4_mb_show_pa(sb);
5565 			*errp = -ENOSPC;
5566 			return 0;
5567 		}
5568 		reserv_clstrs = ar->len;
5569 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5570 			dquot_alloc_block_nofail(ar->inode,
5571 						 EXT4_C2B(sbi, ar->len));
5572 		} else {
5573 			while (ar->len &&
5574 				dquot_alloc_block(ar->inode,
5575 						  EXT4_C2B(sbi, ar->len))) {
5576 
5577 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5578 				ar->len--;
5579 			}
5580 		}
5581 		inquota = ar->len;
5582 		if (ar->len == 0) {
5583 			*errp = -EDQUOT;
5584 			goto out;
5585 		}
5586 	}
5587 
5588 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5589 	if (!ac) {
5590 		ar->len = 0;
5591 		*errp = -ENOMEM;
5592 		goto out;
5593 	}
5594 
5595 	*errp = ext4_mb_initialize_context(ac, ar);
5596 	if (*errp) {
5597 		ar->len = 0;
5598 		goto out;
5599 	}
5600 
5601 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5602 	seq = this_cpu_read(discard_pa_seq);
5603 	if (!ext4_mb_use_preallocated(ac)) {
5604 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5605 		ext4_mb_normalize_request(ac, ar);
5606 
5607 		*errp = ext4_mb_pa_alloc(ac);
5608 		if (*errp)
5609 			goto errout;
5610 repeat:
5611 		/* allocate space in core */
5612 		*errp = ext4_mb_regular_allocator(ac);
5613 		/*
5614 		 * The pa allocated above is added to grp->bb_prealloc_list only
5615 		 * when we were able to allocate some blocks, i.e. when
5616 		 * ac->ac_status == AC_STATUS_FOUND.
5617 		 * An error from above means ac->ac_status != AC_STATUS_FOUND,
5618 		 * so we have to free the pa here.
5619 		 */
5620 		if (*errp) {
5621 			ext4_mb_pa_free(ac);
5622 			ext4_discard_allocated_blocks(ac);
5623 			goto errout;
5624 		}
5625 		if (ac->ac_status == AC_STATUS_FOUND &&
5626 			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5627 			ext4_mb_pa_free(ac);
5628 	}
5629 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5630 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5631 		if (*errp) {
5632 			ext4_discard_allocated_blocks(ac);
5633 			goto errout;
5634 		} else {
5635 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5636 			ar->len = ac->ac_b_ex.fe_len;
5637 		}
5638 	} else {
5639 		if (++retries < 3 &&
5640 		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5641 			goto repeat;
5642 		/*
5643 		 * If block allocation fails then the pa allocated above
5644 		 * must be freed right here.
5645 		 */
5646 		ext4_mb_pa_free(ac);
5647 		*errp = -ENOSPC;
5648 	}
5649 
5650 errout:
5651 	if (*errp) {
5652 		ac->ac_b_ex.fe_len = 0;
5653 		ar->len = 0;
5654 		ext4_mb_show_ac(ac);
5655 	}
5656 	ext4_mb_release_context(ac);
5657 out:
5658 	if (ac)
5659 		kmem_cache_free(ext4_ac_cachep, ac);
5660 	if (inquota && ar->len < inquota)
5661 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5662 	if (!ar->len) {
5663 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5664 			/* release all the reserved blocks if non delalloc */
5665 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5666 						reserv_clstrs);
5667 	}
5668 
5669 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5670 
5671 	return block;
5672 }
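
/*
 * Illustrative sketch (not part of mballoc): a minimal caller of
 * ext4_mb_new_blocks().  Real callers such as the extent-mapping code
 * also fill in the lleft/lright/pleft/pright neighbour hints; this
 * only shows the required plumbing.
 */
static ext4_fsblk_t alloc_one_block_sketch(handle_t *handle,
					   struct inode *inode,
					   ext4_lblk_t lblk,
					   ext4_fsblk_t goal)
{
	struct ext4_allocation_request ar = {
		.inode   = inode,
		.logical = lblk,	/* logical block being mapped */
		.goal    = goal,	/* physical block to search near */
		.len     = 1,		/* ask for a single block */
		.flags   = EXT4_MB_HINT_DATA,
	};
	int err = 0;
	ext4_fsblk_t pblk;

	pblk = ext4_mb_new_blocks(handle, &ar, &err);
	if (err)
		return 0;		/* 0 is never a valid data block */
	return pblk;			/* ar.len holds the granted count */
}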
5673 
5674 /*
5675  * We can merge two free data extents only if the physical blocks
5676  * are contiguous, AND the extents were freed by the same transaction,
5677  * AND the blocks are associated with the same group.
5678  */
5679 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5680 					struct ext4_free_data *entry,
5681 					struct ext4_free_data *new_entry,
5682 					struct rb_root *entry_rb_root)
5683 {
5684 	if ((entry->efd_tid != new_entry->efd_tid) ||
5685 	    (entry->efd_group != new_entry->efd_group))
5686 		return;
5687 	if (entry->efd_start_cluster + entry->efd_count ==
5688 	    new_entry->efd_start_cluster) {
5689 		new_entry->efd_start_cluster = entry->efd_start_cluster;
5690 		new_entry->efd_count += entry->efd_count;
5691 	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5692 		   entry->efd_start_cluster) {
5693 		new_entry->efd_count += entry->efd_count;
5694 	} else
5695 		return;
5696 	spin_lock(&sbi->s_md_lock);
5697 	list_del(&entry->efd_list);
5698 	spin_unlock(&sbi->s_md_lock);
5699 	rb_erase(&entry->efd_node, entry_rb_root);
5700 	kmem_cache_free(ext4_free_data_cachep, entry);
5701 }
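
/*
 * Illustrative sketch (not part of mballoc): the contiguity test used
 * by ext4_try_merge_freed_extent() above, with the tid/group checks
 * stripped.  Entries covering clusters [100,120) and [120,130) merge;
 * a one-cluster gap leaves both entries in the rb-tree.
 */
static bool extents_contiguous_sketch(ext4_grpblk_t a_start,
				      ext4_grpblk_t a_count,
				      ext4_grpblk_t b_start,
				      ext4_grpblk_t b_count)
{
	return a_start + a_count == b_start ||
	       b_start + b_count == a_start;
}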
5702 
5703 static noinline_for_stack int
5704 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5705 		      struct ext4_free_data *new_entry)
5706 {
5707 	ext4_group_t group = e4b->bd_group;
5708 	ext4_grpblk_t cluster;
5709 	ext4_grpblk_t clusters = new_entry->efd_count;
5710 	struct ext4_free_data *entry;
5711 	struct ext4_group_info *db = e4b->bd_info;
5712 	struct super_block *sb = e4b->bd_sb;
5713 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5714 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
5715 	struct rb_node *parent = NULL, *new_node;
5716 
5717 	BUG_ON(!ext4_handle_valid(handle));
5718 	BUG_ON(e4b->bd_bitmap_page == NULL);
5719 	BUG_ON(e4b->bd_buddy_page == NULL);
5720 
5721 	new_node = &new_entry->efd_node;
5722 	cluster = new_entry->efd_start_cluster;
5723 
5724 	if (!*n) {
5725 		/* first free block extent. We need to
5726 		 * protect the buddy cache from being freed,
5727 		 * otherwise we'll refresh it from the
5728 		 * on-disk bitmap and lose not-yet-available
5729 		 * blocks */
5730 		get_page(e4b->bd_buddy_page);
5731 		get_page(e4b->bd_bitmap_page);
5732 	}
5733 	while (*n) {
5734 		parent = *n;
5735 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
5736 		if (cluster < entry->efd_start_cluster)
5737 			n = &(*n)->rb_left;
5738 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5739 			n = &(*n)->rb_right;
5740 		else {
5741 			ext4_grp_locked_error(sb, group, 0,
5742 				ext4_group_first_block_no(sb, group) +
5743 				EXT4_C2B(sbi, cluster),
5744 				"Block already on to-be-freed list");
5745 			kmem_cache_free(ext4_free_data_cachep, new_entry);
5746 			return 0;
5747 		}
5748 	}
5749 
5750 	rb_link_node(new_node, parent, n);
5751 	rb_insert_color(new_node, &db->bb_free_root);
5752 
5753 	/* Now try to see if the extent can be merged to the left and right */
5754 	node = rb_prev(new_node);
5755 	if (node) {
5756 		entry = rb_entry(node, struct ext4_free_data, efd_node);
5757 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5758 					    &(db->bb_free_root));
5759 	}
5760 
5761 	node = rb_next(new_node);
5762 	if (node) {
5763 		entry = rb_entry(node, struct ext4_free_data, efd_node);
5764 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5765 					    &(db->bb_free_root));
5766 	}
5767 
5768 	spin_lock(&sbi->s_md_lock);
5769 	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5770 	sbi->s_mb_free_pending += clusters;
5771 	spin_unlock(&sbi->s_md_lock);
5772 	return 0;
5773 }
5774 
5775 /*
5776  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5777  * linearly starting at the goal block and also excludes the blocks which
5778  * are going to be in use after fast commit replay.
5779  */
5780 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5781 				struct ext4_allocation_request *ar, int *errp)
5782 {
5783 	struct buffer_head *bitmap_bh;
5784 	struct super_block *sb = ar->inode->i_sb;
5785 	ext4_group_t group;
5786 	ext4_grpblk_t blkoff;
5787 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5788 	ext4_grpblk_t i = 0;
5789 	ext4_fsblk_t goal, block;
5790 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5791 
5792 	goal = ar->goal;
5793 	if (goal < le32_to_cpu(es->s_first_data_block) ||
5794 			goal >= ext4_blocks_count(es))
5795 		goal = le32_to_cpu(es->s_first_data_block);
5796 
5797 	ar->len = 0;
5798 	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5799 	for (; group < ext4_get_groups_count(sb); group++) {
5800 		bitmap_bh = ext4_read_block_bitmap(sb, group);
5801 		if (IS_ERR(bitmap_bh)) {
5802 			*errp = PTR_ERR(bitmap_bh);
5803 			pr_warn("Failed to read block bitmap\n");
5804 			return 0;
5805 		}
5806 
5807 		ext4_get_group_no_and_offset(sb,
5808 			max(ext4_group_first_block_no(sb, group), goal),
5809 			NULL, &blkoff);
5810 		while (1) {
5811 			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5812 						blkoff);
5813 			if (i >= max)
5814 				break;
5815 			if (ext4_fc_replay_check_excluded(sb,
5816 				ext4_group_first_block_no(sb, group) + i)) {
5817 				blkoff = i + 1;
5818 			} else
5819 				break;
5820 		}
5821 		brelse(bitmap_bh);
5822 		if (i < max)
5823 			break;
5824 	}
5825 
5826 	if (group >= ext4_get_groups_count(sb) || i >= max) {
5827 		*errp = -ENOSPC;
5828 		return 0;
5829 	}
5830 
5831 	block = ext4_group_first_block_no(sb, group) + i;
5832 	ext4_mb_mark_bb(sb, block, 1, 1);
5833 	ar->len = 1;
5834 
5835 	return block;
5836 }
5837 
5838 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5839 					unsigned long count)
5840 {
5841 	struct buffer_head *bitmap_bh;
5842 	struct super_block *sb = inode->i_sb;
5843 	struct ext4_group_desc *gdp;
5844 	struct buffer_head *gdp_bh;
5845 	ext4_group_t group;
5846 	ext4_grpblk_t blkoff;
5847 	int already_freed = 0, err, i;
5848 
5849 	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5850 	bitmap_bh = ext4_read_block_bitmap(sb, group);
5851 	if (IS_ERR(bitmap_bh)) {
5852 		err = PTR_ERR(bitmap_bh);
5853 		pr_warn("Failed to read block bitmap\n");
5854 		return;
5855 	}
5856 	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5857 	if (!gdp)
5858 		goto out_brelse;
5859 
5860 	for (i = 0; i < count; i++) {
5861 		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5862 			already_freed++;
5863 	}
5864 	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5865 	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5866 	if (err)
5867 		goto out_brelse;
5868 	ext4_free_group_clusters_set(
5869 		sb, gdp, ext4_free_group_clusters(sb, gdp) +
5870 		count - already_freed);
5871 	ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5872 	ext4_group_desc_csum_set(sb, group, gdp);
5873 	ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5874 	sync_dirty_buffer(bitmap_bh);
5875 	sync_dirty_buffer(gdp_bh);
out_brelse:
5876 	brelse(bitmap_bh);
5877 }
5878 
5879 /**
5880  * ext4_mb_clear_bb() -- helper function for freeing blocks.
5881  *			Used by ext4_free_blocks()
5882  * @handle:		handle for this transaction
5883  * @inode:		inode
5884  * @block:		starting physical block to be freed
5885  * @count:		number of blocks to be freed
5886  * @flags:		flags used by ext4_free_blocks
5887  */
5888 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5889 			       ext4_fsblk_t block, unsigned long count,
5890 			       int flags)
5891 {
5892 	struct buffer_head *bitmap_bh = NULL;
5893 	struct super_block *sb = inode->i_sb;
5894 	struct ext4_group_desc *gdp;
5895 	unsigned int overflow;
5896 	ext4_grpblk_t bit;
5897 	struct buffer_head *gd_bh;
5898 	ext4_group_t block_group;
5899 	struct ext4_sb_info *sbi;
5900 	struct ext4_buddy e4b;
5901 	unsigned int count_clusters;
5902 	int err = 0;
5903 	int ret;
5904 
5905 	sbi = EXT4_SB(sb);
5906 
5907 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5908 	    !ext4_inode_block_valid(inode, block, count)) {
5909 		ext4_error(sb, "Freeing blocks in system zone - "
5910 			   "Block = %llu, count = %lu", block, count);
5911 		/* err = 0. ext4_std_error should be a no op */
5912 		goto error_return;
5913 	}
5914 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
5915 
5916 do_more:
5917 	overflow = 0;
5918 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5919 
5920 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5921 			ext4_get_group_info(sb, block_group))))
5922 		return;
5923 
5924 	/*
5925 	 * Check to see if we are freeing blocks across a group
5926 	 * boundary.
5927 	 */
5928 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5929 		overflow = EXT4_C2B(sbi, bit) + count -
5930 			EXT4_BLOCKS_PER_GROUP(sb);
5931 		count -= overflow;
5932 		/* The range changed so it's no longer validated */
5933 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5934 	}
5935 	count_clusters = EXT4_NUM_B2C(sbi, count);
5936 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5937 	if (IS_ERR(bitmap_bh)) {
5938 		err = PTR_ERR(bitmap_bh);
5939 		bitmap_bh = NULL;
5940 		goto error_return;
5941 	}
5942 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5943 	if (!gdp) {
5944 		err = -EIO;
5945 		goto error_return;
5946 	}
5947 
5948 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5949 	    !ext4_inode_block_valid(inode, block, count)) {
5950 		ext4_error(sb, "Freeing blocks in system zone - "
5951 			   "Block = %llu, count = %lu", block, count);
5952 		/* err = 0. ext4_std_error should be a no op */
5953 		goto error_return;
5954 	}
5955 
5956 	BUFFER_TRACE(bitmap_bh, "getting write access");
5957 	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
5958 					    EXT4_JTR_NONE);
5959 	if (err)
5960 		goto error_return;
5961 
5962 	/*
5963 	 * We are about to modify some metadata.  Call the journal APIs
5964 	 * to unshare ->b_data if a currently-committing transaction is
5965 	 * using it
5966 	 */
5967 	BUFFER_TRACE(gd_bh, "get_write_access");
5968 	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
5969 	if (err)
5970 		goto error_return;
5971 #ifdef AGGRESSIVE_CHECK
5972 	{
5973 		int i;
5974 		for (i = 0; i < count_clusters; i++)
5975 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5976 	}
5977 #endif
5978 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5979 
5980 	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5981 	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5982 				     GFP_NOFS|__GFP_NOFAIL);
5983 	if (err)
5984 		goto error_return;
5985 
5986 	/*
5987 	 * We need to make sure we don't reuse the freed block until after the
5988 	 * transaction is committed. We make an exception if the inode is to be
5989 	 * written in writeback mode since writeback mode has weak data
5990 	 * consistency guarantees.
5991 	 */
5992 	if (ext4_handle_valid(handle) &&
5993 	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5994 	     !ext4_should_writeback_data(inode))) {
5995 		struct ext4_free_data *new_entry;
5996 		/*
5997 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5998 		 * to fail.
5999 		 */
6000 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6001 				GFP_NOFS|__GFP_NOFAIL);
6002 		new_entry->efd_start_cluster = bit;
6003 		new_entry->efd_group = block_group;
6004 		new_entry->efd_count = count_clusters;
6005 		new_entry->efd_tid = handle->h_transaction->t_tid;
6006 
6007 		ext4_lock_group(sb, block_group);
6008 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6009 		ext4_mb_free_metadata(handle, &e4b, new_entry);
6010 	} else {
6011 		/* need to update group_info->bb_free and bitmap
6012 		 * with group lock held. generate_buddy looks at
6013 		 * them with the group lock held.
6014 		 */
6015 		if (test_opt(sb, DISCARD)) {
6016 			err = ext4_issue_discard(sb, block_group, bit, count,
6017 						 NULL);
6018 			if (err && err != -EOPNOTSUPP)
6019 				ext4_msg(sb, KERN_WARNING, "discard request in"
6020 					 " group:%u block:%d count:%lu failed"
6021 					 " with %d", block_group, bit, count,
6022 					 err);
6023 		} else
6024 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6025 
6026 		ext4_lock_group(sb, block_group);
6027 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6028 		mb_free_blocks(inode, &e4b, bit, count_clusters);
6029 	}
6030 
6031 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6032 	ext4_free_group_clusters_set(sb, gdp, ret);
6033 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6034 	ext4_group_desc_csum_set(sb, block_group, gdp);
6035 	ext4_unlock_group(sb, block_group);
6036 
6037 	if (sbi->s_log_groups_per_flex) {
6038 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6039 		atomic64_add(count_clusters,
6040 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
6041 						  flex_group)->free_clusters);
6042 	}
6043 
6044 	/*
6045 	 * on a bigalloc file system, defer the s_freeclusters_counter
6046 	 * update to the caller (ext4_remove_space and friends) so they
6047 	 * can determine if a cluster freed here should be rereserved
6048 	 */
6049 	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6050 		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6051 			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6052 		percpu_counter_add(&sbi->s_freeclusters_counter,
6053 				   count_clusters);
6054 	}
6055 
6056 	ext4_mb_unload_buddy(&e4b);
6057 
6058 	/* We dirtied the bitmap block */
6059 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6060 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6061 
6062 	/* And the group descriptor block */
6063 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6064 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6065 	if (!err)
6066 		err = ret;
6067 
6068 	if (overflow && !err) {
6069 		block += count;
6070 		count = overflow;
6071 		put_bh(bitmap_bh);
6072 		/* The range changed so it's no longer validated */
6073 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6074 		goto do_more;
6075 	}
6076 error_return:
6077 	brelse(bitmap_bh);
6078 	ext4_std_error(sb, err);
6079 	return;
6080 }
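
/*
 * Illustrative sketch (not part of mballoc): the do_more splitting in
 * ext4_mb_clear_bb() above, assuming 1 cluster == 1 block.  With 32768
 * blocks per group, freeing 16 blocks at in-group offset 32760 frees 8
 * in this group now and loops back for the remaining 8 in the next.
 */
static unsigned long split_at_group_end_sketch(ext4_grpblk_t bit,
					       unsigned long count,
					       unsigned long per_group)
{
	unsigned long overflow = 0;

	if (bit + count > per_group)
		overflow = bit + count - per_group;
	return overflow;	/* free (count - overflow) now, rest later */
}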
6081 
6082 /**
6083  * ext4_free_blocks() -- Free given blocks and update quota
6084  * @handle:		handle for this transaction
6085  * @inode:		inode
6086  * @bh:			optional buffer of the block to be freed
6087  * @block:		starting physical block to be freed
6088  * @count:		number of blocks to be freed
6089  * @flags:		flags used by ext4_free_blocks
6090  */
6091 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6092 		      struct buffer_head *bh, ext4_fsblk_t block,
6093 		      unsigned long count, int flags)
6094 {
6095 	struct super_block *sb = inode->i_sb;
6096 	unsigned int overflow;
6097 	struct ext4_sb_info *sbi;
6098 
6099 	sbi = EXT4_SB(sb);
6100 
6101 	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6102 		ext4_free_blocks_simple(inode, block, count);
6103 		return;
6104 	}
6105 
6106 	might_sleep();
6107 	if (bh) {
6108 		if (block)
6109 			BUG_ON(block != bh->b_blocknr);
6110 		else
6111 			block = bh->b_blocknr;
6112 	}
6113 
6114 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6115 	    !ext4_inode_block_valid(inode, block, count)) {
6116 		ext4_error(sb, "Freeing blocks not in datazone - "
6117 			   "block = %llu, count = %lu", block, count);
6118 		return;
6119 	}
6120 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6121 
6122 	ext4_debug("freeing block %llu\n", block);
6123 	trace_ext4_free_blocks(inode, block, count, flags);
6124 
6125 	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6126 		BUG_ON(count > 1);
6127 
6128 		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6129 			    inode, bh, block);
6130 	}
6131 
6132 	/*
6133 	 * If the extent to be freed does not begin on a cluster
6134 	 * boundary, we need to deal with partial clusters at the
6135 	 * beginning and end of the extent.  Normally we will free
6136 	 * blocks at the beginning or the end unless we are explicitly
6137 	 * requested to avoid doing so.
6138 	 */
6139 	overflow = EXT4_PBLK_COFF(sbi, block);
6140 	if (overflow) {
6141 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6142 			overflow = sbi->s_cluster_ratio - overflow;
6143 			block += overflow;
6144 			if (count > overflow)
6145 				count -= overflow;
6146 			else
6147 				return;
6148 		} else {
6149 			block -= overflow;
6150 			count += overflow;
6151 		}
6152 		/* The range changed so it's no longer validated */
6153 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6154 	}
6155 	overflow = EXT4_LBLK_COFF(sbi, count);
6156 	if (overflow) {
6157 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6158 			if (count > overflow)
6159 				count -= overflow;
6160 			else
6161 				return;
6162 		} else
6163 			count += sbi->s_cluster_ratio - overflow;
6164 		/* The range changed so it's no longer validated */
6165 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6166 	}
6167 
6168 	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6169 		int i;
6170 		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6171 
6172 		for (i = 0; i < count; i++) {
6173 			cond_resched();
6174 			if (is_metadata)
6175 				bh = sb_find_get_block(inode->i_sb, block + i);
6176 			ext4_forget(handle, is_metadata, inode, bh, block + i);
6177 		}
6178 	}
6179 
6180 	ext4_mb_clear_bb(handle, inode, block, count, flags);
6181 	return;
6182 }
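
/*
 * Illustrative sketch (not part of mballoc): the bigalloc head
 * alignment done by ext4_free_blocks() above.  With a cluster ratio of
 * 16, freeing 20 blocks at block 103 (offset 7 into its cluster) is
 * widened to 27 blocks at block 96 so that whole clusters are freed;
 * EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER instead skips ahead to block
 * 112 and frees only 11.
 */
static void align_head_sketch(ext4_fsblk_t *block, unsigned long *count,
			      unsigned int cluster_ratio)
{
	/* like EXT4_PBLK_COFF(): the ratio is a power of two */
	unsigned int off = *block & (cluster_ratio - 1);

	if (off) {
		*block -= off;	/* widen down to the cluster start */
		*count += off;
	}
}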
6183 
6184 /**
6185  * ext4_group_add_blocks() -- Add given blocks to an existing group
6186  * @handle:			handle to this transaction
6187  * @sb:				super block
6188  * @block:			start physical block to add to the block group
6189  * @count:			number of blocks to add
6190  *
6191  * This marks the blocks as free in the bitmap and buddy.
6192  */
6193 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6194 			 ext4_fsblk_t block, unsigned long count)
6195 {
6196 	struct buffer_head *bitmap_bh = NULL;
6197 	struct buffer_head *gd_bh;
6198 	ext4_group_t block_group;
6199 	ext4_grpblk_t bit;
6200 	unsigned int i;
6201 	struct ext4_group_desc *desc;
6202 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6203 	struct ext4_buddy e4b;
6204 	int err = 0, ret, free_clusters_count;
6205 	ext4_grpblk_t clusters_freed;
6206 	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6207 	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6208 	unsigned long cluster_count = last_cluster - first_cluster + 1;
6209 
6210 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6211 
6212 	if (count == 0)
6213 		return 0;
6214 
6215 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6216 	/*
6217 	 * Check to see if we are adding blocks across a group
6218 	 * boundary.
6219 	 */
6220 	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6221 		ext4_warning(sb, "too many blocks added to group %u",
6222 			     block_group);
6223 		err = -EINVAL;
6224 		goto error_return;
6225 	}
6226 
6227 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6228 	if (IS_ERR(bitmap_bh)) {
6229 		err = PTR_ERR(bitmap_bh);
6230 		bitmap_bh = NULL;
6231 		goto error_return;
6232 	}
6233 
6234 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6235 	if (!desc) {
6236 		err = -EIO;
6237 		goto error_return;
6238 	}
6239 
6240 	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6241 		ext4_error(sb, "Adding blocks in system zones - "
6242 			   "Block = %llu, count = %lu",
6243 			   block, count);
6244 		err = -EINVAL;
6245 		goto error_return;
6246 	}
6247 
6248 	BUFFER_TRACE(bitmap_bh, "getting write access");
6249 	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6250 					    EXT4_JTR_NONE);
6251 	if (err)
6252 		goto error_return;
6253 
6254 	/*
6255 	 * We are about to modify some metadata.  Call the journal APIs
6256 	 * to unshare ->b_data if a currently-committing transaction is
6257 	 * using it
6258 	 */
6259 	BUFFER_TRACE(gd_bh, "get_write_access");
6260 	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6261 	if (err)
6262 		goto error_return;
6263 
6264 	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6265 		BUFFER_TRACE(bitmap_bh, "clear bit");
6266 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6267 			ext4_error(sb, "bit already cleared for block %llu",
6268 				   (ext4_fsblk_t)(block + i));
6269 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
6270 		} else {
6271 			clusters_freed++;
6272 		}
6273 	}
6274 
6275 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
6276 	if (err)
6277 		goto error_return;
6278 
6279 	/*
6280 	 * need to update group_info->bb_free and bitmap
6281 	 * with group lock held. generate_buddy looks at
6282 	 * them with the group lock held.
6283 	 */
6284 	ext4_lock_group(sb, block_group);
6285 	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6286 	mb_free_blocks(NULL, &e4b, bit, cluster_count);
6287 	free_clusters_count = clusters_freed +
6288 		ext4_free_group_clusters(sb, desc);
6289 	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6290 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6291 	ext4_group_desc_csum_set(sb, block_group, desc);
6292 	ext4_unlock_group(sb, block_group);
6293 	percpu_counter_add(&sbi->s_freeclusters_counter,
6294 			   clusters_freed);
6295 
6296 	if (sbi->s_log_groups_per_flex) {
6297 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6298 		atomic64_add(clusters_freed,
6299 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
6300 						  flex_group)->free_clusters);
6301 	}
6302 
6303 	ext4_mb_unload_buddy(&e4b);
6304 
6305 	/* We dirtied the bitmap block */
6306 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6307 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6308 
6309 	/* And the group descriptor block */
6310 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6311 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6312 	if (!err)
6313 		err = ret;
6314 
6315 error_return:
6316 	brelse(bitmap_bh);
6317 	ext4_std_error(sb, err);
6318 	return err;
6319 }
6320 
6321 /**
6322  * ext4_trim_extent -- function to TRIM one single free extent in the group
6323  * @sb:		super block for the file system
6324  * @start:	starting block of the free extent in the alloc. group
6325  * @count:	number of blocks to TRIM
6326  * @e4b:	ext4 buddy for the group
6327  *
6328  * Trim "count" blocks starting at "start" in the "group". To ensure that no
6329  * one will allocate those blocks, mark them as used in the buddy bitmap. This
6330  * must be called under the group lock.
6331  */
6332 static int ext4_trim_extent(struct super_block *sb,
6333 		int start, int count, struct ext4_buddy *e4b)
6334 __releases(bitlock)
6335 __acquires(bitlock)
6336 {
6337 	struct ext4_free_extent ex;
6338 	ext4_group_t group = e4b->bd_group;
6339 	int ret = 0;
6340 
6341 	trace_ext4_trim_extent(sb, group, start, count);
6342 
6343 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
6344 
6345 	ex.fe_start = start;
6346 	ex.fe_group = group;
6347 	ex.fe_len = count;
6348 
6349 	/*
6350 	 * Mark blocks used, so no one can reuse them while
6351 	 * being trimmed.
6352 	 */
6353 	mb_mark_used(e4b, &ex);
6354 	ext4_unlock_group(sb, group);
6355 	ret = ext4_issue_discard(sb, group, start, count, NULL);
6356 	ext4_lock_group(sb, group);
6357 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
6358 	return ret;
6359 }
6360 
6361 static int ext4_try_to_trim_range(struct super_block *sb,
6362 		struct ext4_buddy *e4b, ext4_grpblk_t start,
6363 		ext4_grpblk_t max, ext4_grpblk_t minblocks)
6364 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6365 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6366 {
6367 	ext4_grpblk_t next, count, free_count;
6368 	void *bitmap;
6369 
6370 	bitmap = e4b->bd_bitmap;
6371 	start = (e4b->bd_info->bb_first_free > start) ?
6372 		e4b->bd_info->bb_first_free : start;
6373 	count = 0;
6374 	free_count = 0;
6375 
6376 	while (start <= max) {
6377 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
6378 		if (start > max)
6379 			break;
6380 		next = mb_find_next_bit(bitmap, max + 1, start);
6381 
6382 		if ((next - start) >= minblocks) {
6383 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
6384 
6385 			if (ret && ret != -EOPNOTSUPP)
6386 				break;
6387 			count += next - start;
6388 		}
6389 		free_count += next - start;
6390 		start = next + 1;
6391 
6392 		if (fatal_signal_pending(current)) {
6393 			count = -ERESTARTSYS;
6394 			break;
6395 		}
6396 
6397 		if (need_resched()) {
6398 			ext4_unlock_group(sb, e4b->bd_group);
6399 			cond_resched();
6400 			ext4_lock_group(sb, e4b->bd_group);
6401 		}
6402 
6403 		if ((e4b->bd_info->bb_free - free_count) < minblocks)
6404 			break;
6405 	}
6406 
6407 	return count;
6408 }
6409 
6410 /**
6411  * ext4_trim_all_free -- function to trim all free space in alloc. group
6412  * @sb:			super block for file system
6413  * @group:		group to be trimmed
6414  * @start:		first group block to examine
6415  * @max:		last group block to examine
6416  * @minblocks:		minimum extent block count
6417  * @set_trimmed:	set the trimmed flag if at least one block is trimmed
6418  *
6419  * ext4_trim_all_free walks through the group's block bitmap searching for
6420  * free extents. When a free extent is found, it is marked as used in the
6421  * group buddy bitmap, a TRIM command is issued on the extent, and the extent
6422  * is then freed in the group buddy bitmap.
6423  */
6424 static ext4_grpblk_t
6425 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6426 		   ext4_grpblk_t start, ext4_grpblk_t max,
6427 		   ext4_grpblk_t minblocks, bool set_trimmed)
6428 {
6429 	struct ext4_buddy e4b;
6430 	int ret;
6431 
6432 	trace_ext4_trim_all_free(sb, group, start, max);
6433 
6434 	ret = ext4_mb_load_buddy(sb, group, &e4b);
6435 	if (ret) {
6436 		ext4_warning(sb, "Error %d loading buddy information for %u",
6437 			     ret, group);
6438 		return ret;
6439 	}
6440 
6441 	ext4_lock_group(sb, group);
6442 
6443 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6444 	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
6445 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6446 		if (ret >= 0 && set_trimmed)
6447 			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
6448 	} else {
6449 		ret = 0;
6450 	}
6451 
6452 	ext4_unlock_group(sb, group);
6453 	ext4_mb_unload_buddy(&e4b);
6454 
6455 	ext4_debug("trimmed %d blocks in the group %d\n",
6456 		ret, group);
6457 
6458 	return ret;
6459 }
6460 
6461 /**
6462  * ext4_trim_fs() -- trim ioctl handle function
6463  * @sb:			superblock for filesystem
6464  * @range:		fstrim_range structure
6465  *
6466  * start:	first byte to trim
6467  * len:		number of bytes to trim from start
6468  * minlen:	minimum extent length in bytes
6469  * ext4_trim_fs goes through all allocation groups containing bytes from
6470  * start to start+len. For each such group the ext4_trim_all_free function
6471  * is invoked to trim all free space.
6472  */
6473 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6474 {
6475 	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6476 	struct ext4_group_info *grp;
6477 	ext4_group_t group, first_group, last_group;
6478 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6479 	uint64_t start, end, minlen, trimmed = 0;
6480 	ext4_fsblk_t first_data_blk =
6481 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6482 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6483 	bool whole_group, eof = false;
6484 	int ret = 0;
6485 
6486 	start = range->start >> sb->s_blocksize_bits;
6487 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6488 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6489 			      range->minlen >> sb->s_blocksize_bits);
6490 
6491 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6492 	    start >= max_blks ||
6493 	    range->len < sb->s_blocksize)
6494 		return -EINVAL;
6495 	/* No point to try to trim less than discard granularity */
6496 	if (range->minlen < discard_granularity) {
6497 		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6498 				discard_granularity >> sb->s_blocksize_bits);
6499 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6500 			goto out;
6501 	}
6502 	if (end >= max_blks - 1) {
6503 		end = max_blks - 1;
6504 		eof = true;
6505 	}
6506 	if (end <= first_data_blk)
6507 		goto out;
6508 	if (start < first_data_blk)
6509 		start = first_data_blk;
6510 
6511 	/* Determine first and last group to examine based on start and end */
6512 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6513 				     &first_group, &first_cluster);
6514 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6515 				     &last_group, &last_cluster);
6516 
6517 	/* end now represents the last cluster to discard in this group */
6518 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6519 	whole_group = true;
6520 
6521 	for (group = first_group; group <= last_group; group++) {
6522 		grp = ext4_get_group_info(sb, group);
6523 		/* We only do this if the grp has never been initialized */
6524 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6525 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6526 			if (ret)
6527 				break;
6528 		}
6529 
6530 		/*
6531 		 * For all the groups except the last one, last cluster will
6532 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6533 		 * change it for the last group; note that last_cluster is
6534 		 * already computed earlier by ext4_get_group_no_and_offset()
6535 		 */
6536 		if (group == last_group) {
6537 			end = last_cluster;
6538 			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6539 		}
6540 		if (grp->bb_free >= minlen) {
6541 			cnt = ext4_trim_all_free(sb, group, first_cluster,
6542 						 end, minlen, whole_group);
6543 			if (cnt < 0) {
6544 				ret = cnt;
6545 				break;
6546 			}
6547 			trimmed += cnt;
6548 		}
6549 
6550 		/*
6551 		 * For every group except the first one, we are sure
6552 		 * that the first cluster to discard will be cluster #0.
6553 		 */
6554 		first_cluster = 0;
6555 	}
6556 
6557 	if (!ret)
6558 		EXT4_SB(sb)->s_last_trim_minblks = minlen;
6559 
6560 out:
6561 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6562 	return ret;
6563 }
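
/*
 * Illustrative sketch (not part of mballoc): the unit conversion at
 * the top of ext4_trim_fs() above.  The FITRIM ioctl speaks bytes, the
 * allocator speaks blocks and clusters: with 4KiB blocks and
 * !bigalloc, { .start = 1 MiB, .len = 1 GiB, .minlen = 64 KiB } maps
 * to start block 256, 262144 blocks, minimum extent 16 clusters.
 */
static void fstrim_units_sketch(const struct fstrim_range *range,
				unsigned int blocksize_bits,
				uint64_t *start_blk, uint64_t *len_blk,
				uint64_t *min_clusters)
{
	*start_blk = range->start >> blocksize_bits;
	*len_blk = range->len >> blocksize_bits;
	/* EXT4_NUM_B2C() is the identity when cluster size == block size */
	*min_clusters = range->minlen >> blocksize_bits;
}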
6564 
6565 /* Iterate all the free extents in the group. */
6566 int
6567 ext4_mballoc_query_range(
6568 	struct super_block		*sb,
6569 	ext4_group_t			group,
6570 	ext4_grpblk_t			start,
6571 	ext4_grpblk_t			end,
6572 	ext4_mballoc_query_range_fn	formatter,
6573 	void				*priv)
6574 {
6575 	void				*bitmap;
6576 	ext4_grpblk_t			next;
6577 	struct ext4_buddy		e4b;
6578 	int				error;
6579 
6580 	error = ext4_mb_load_buddy(sb, group, &e4b);
6581 	if (error)
6582 		return error;
6583 	bitmap = e4b.bd_bitmap;
6584 
6585 	ext4_lock_group(sb, group);
6586 
6587 	start = (e4b.bd_info->bb_first_free > start) ?
6588 		e4b.bd_info->bb_first_free : start;
6589 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6590 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6591 
6592 	while (start <= end) {
6593 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
6594 		if (start > end)
6595 			break;
6596 		next = mb_find_next_bit(bitmap, end + 1, start);
6597 
6598 		ext4_unlock_group(sb, group);
6599 		error = formatter(sb, group, start, next - start, priv);
6600 		if (error)
6601 			goto out_unload;
6602 		ext4_lock_group(sb, group);
6603 
6604 		start = next + 1;
6605 	}
6606 
6607 	ext4_unlock_group(sb, group);
6608 out_unload:
6609 	ext4_mb_unload_buddy(&e4b);
6610 
6611 	return error;
6612 }
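
/*
 * Illustrative sketch (not part of mballoc): a caller of
 * ext4_mballoc_query_range() with a trivial formatter that just sums
 * the free clusters it is shown.  The in-tree user is the GETFSMAP
 * code; this stripped-down pair only demonstrates the callback
 * contract (a nonzero return aborts the walk).
 */
static int sum_free_sketch(struct super_block *sb, ext4_group_t group,
			   ext4_grpblk_t start, ext4_grpblk_t len,
			   void *priv)
{
	*(ext4_grpblk_t *)priv += len;	/* called once per free extent */
	return 0;			/* keep iterating */
}

static int group_free_clusters_sketch(struct super_block *sb,
				      ext4_group_t group,
				      ext4_grpblk_t *total)
{
	*total = 0;
	return ext4_mballoc_query_range(sb, group, 0,
					EXT4_CLUSTERS_PER_GROUP(sb) - 1,
					sum_free_sketch, total);
}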
6613