xref: /linux/fs/ext4/mballoc.c (revision f4b7c07dc19f70ba8fb3f290f76f6199e8090795)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  */
6 
7 
8 /*
9  * mballoc.c contains the multiblocks allocation routines
10  */
11 
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <trace/events/ext4.h>
21 #include <kunit/static_stub.h>
22 
23 /*
24  * MUSTDO:
25  *   - test ext4_ext_search_left() and ext4_ext_search_right()
26  *   - search for metadata in few groups
27  *
28  * TODO v4:
29  *   - normalization should take into account whether file is still open
30  *   - discard preallocations if no free space left (policy?)
31  *   - don't normalize tails
32  *   - quota
33  *   - reservation for superuser
34  *
35  * TODO v3:
36  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
37  *   - track min/max extents in each group for better group selection
38  *   - mb_mark_used() may allocate chunk right after splitting buddy
39  *   - tree of groups sorted by number of free blocks
40  *   - error handling
41  */
42 
43 /*
44  * The allocation request involves a request for multiple blocks near
45  * to the specified goal (block) value.
46  *
47  * During the initialization phase of the allocator we decide to use
48  * group preallocation or inode preallocation depending on the size of
49  * the file. The size of the file could be the resulting file size we
50  * would have after allocation, or the current file size, whichever
51  * is larger. If the size is less than sbi->s_mb_stream_request we
52  * select group preallocation. The default value of
53  * s_mb_stream_request is 16 blocks. This can also be tuned via
54  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
55  * terms of number of blocks.
56  *
57  * The main motivation for having small files use group preallocation is to
58  * ensure that we keep small files close together on the disk.
59  *
60  * In the first stage, the allocator looks at the inode prealloc list,
61  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
62  * spaces for this particular inode. The inode prealloc space is
63  * represented as:
64  *
65  * pa_lstart -> the logical start block for this prealloc space
66  * pa_pstart -> the physical start block for this prealloc space
67  * pa_len    -> length for this prealloc space (in clusters)
68  * pa_free   ->  free space available in this prealloc space (in clusters)
69  *
70  * The inode preallocation space is used by looking at the _logical_ start
71  * block. Only if the logical file block falls within the range of the prealloc
72  * space do we consume that particular prealloc space. This makes sure that
73  * we have contiguous physical blocks representing the file blocks.
74  *
75  * The important thing to be noted in the case of inode prealloc space is that
76  * we don't modify the values associated with it except
77  * pa_free.
78  *
79  * If we are not able to find blocks in the inode prealloc space and if we
80  * have the group allocation flag set then we look at the locality group
81  * prealloc space. This is a per-CPU prealloc list, represented as
82  *
83  * ext4_sb_info.s_locality_groups[smp_processor_id()]
84  *
85  * The reason for having a per cpu locality group is to reduce the contention
86  * between CPUs. It is possible to get scheduled at this point.
87  *
88  * The locality group prealloc space is used by checking whether we have
89  * enough free space (pa_free) within the prealloc space.
90  *
91  * If we can't allocate blocks via inode prealloc and/or locality group
92  * prealloc then we look at the buddy cache. The buddy cache is represented
93  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
94  * mapped to the buddy and bitmap information regarding different
95  * groups. The buddy information is attached to the buddy cache inode so that
96  * we can access it through the page cache. The information regarding
97  * each group is loaded via ext4_mb_load_buddy.  It consists of the
98  * block bitmap and buddy information, which are stored in the
99  * inode as:
100  *
101  *  {                        page                        }
102  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
103  *
104  *
105  * one block each for bitmap and buddy information.  So for each group we
106  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
107  * blocksize) blocks, so it can hold groups_per_page groups, which is
108  * blocks_per_page/2 (a small sketch of this mapping follows this comment).
109  *
110  * The buddy cache inode is not stored on disk. The inode is thrown
111  * away when the filesystem is unmounted.
112  *
113  * We look for "count" number of blocks in the buddy cache. If we are able
114  * to locate that many free blocks we return with additional information
115  * regarding the rest of the contiguous physical blocks available.
116  *
117  * Before allocating blocks via buddy cache we normalize the request
118  * blocks. This ensures we ask for more blocks than we need. The extra
119  * blocks that we get after allocation are added to the respective prealloc
120  * list. In case of inode preallocation we follow a list of heuristics
121  * based on file size. This can be found in ext4_mb_normalize_request. If
122  * we are doing a group prealloc we try to normalize the request to
123  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
124  * dependent on the cluster size; for non-bigalloc file systems, it is
125  * 512 blocks. This can be tuned via
126  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
127  * terms of number of blocks. If we have mounted the file system with the
128  * -o stripe=<value> option, the group prealloc request is normalized to the
129  * smallest multiple of the stripe value (sbi->s_stripe) which is
130  * greater than the default mb_group_prealloc.
131  *
132  * If "mb_optimize_scan" mount option is set, we maintain in memory group info
133  * structures in two data structures:
134  *
135  * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
136  *
137  *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
138  *
139  *    This is an array of lists where the index in the array represents the
140  *    largest free order in the buddy bitmap of the participating group infos of
141  *    that list. So, there are exactly MB_NUM_ORDERS(sb) lists (one for each
142  *    possible buddy bitmap order). Group-infos are
143  *    placed in the appropriate lists.
144  *
145  * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
146  *
147  *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
148  *
149  *    This is an array of lists where in the i-th list there are groups with
150  *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
151  *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
152  *    Note that we don't bother with a special list for completely empty groups
153  *    so we only have MB_NUM_ORDERS(sb) lists.
154  *
155  * When "mb_optimize_scan" mount option is set, mballoc consults the above data
156  * structures to decide the order in which groups are to be traversed for
157  * fulfilling an allocation request.
158  *
159  * At CR_POWER2_ALIGNED, we look for groups which have a largest_free_order
160  * >= the order of the request. We directly look at the largest free order list
161  * in the data structure (1) above where largest_free_order = order of the
162  * request. If that list is empty, we look at the remaining lists in increasing
163  * order of largest_free_order. This allows us to perform a CR_POWER2_ALIGNED
164  * lookup in O(1) time.
165  *
166  * At CR_GOAL_LEN_FAST, we only consider groups where
167  * average fragment size > request size. So, we look up a group which has an average
168  * fragment size just above or equal to the request size using our average fragment
169  * size group lists (data structure 2) in O(1) time.
170  *
171  * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
172  * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
173  * CR_GOAL_LEN_FAST suggests that there is no BG that has avg
174  * fragment size > goal length. So before falling back to the slower
175  * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal length and
176  * then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a big
177  * enough average fragment size. This increases the chances of finding a
178  * suitable block group in O(1) time and results in faster allocation at the
179  * cost of reduced size of allocation.
180  *
181  * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
182  * in linear order, which requires O(N) search time for each of the
183  * CR_POWER2_ALIGNED and CR_GOAL_LEN_FAST phases.
184  *
185  * The regular allocator (using the buddy cache) supports a few tunables.
186  *
187  * /sys/fs/ext4/<partition>/mb_min_to_scan
188  * /sys/fs/ext4/<partition>/mb_max_to_scan
189  * /sys/fs/ext4/<partition>/mb_order2_req
190  * /sys/fs/ext4/<partition>/mb_linear_limit
191  *
192  * The regular allocator uses buddy scan only if the request len is a power
193  * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
194  * value of s_mb_order2_reqs can be tuned via
195  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
196  * the stripe size (sbi->s_stripe), we try to search for contiguous blocks of
197  * stripe size. This should result in better allocation on RAID setups. If
198  * not, we search in the specific group using the bitmap for best extents. The
199  * tunables min_to_scan and max_to_scan control the behaviour here.
200  * min_to_scan indicates how long mballoc __must__ look for a best
201  * extent and max_to_scan indicates how long mballoc __can__ look for a
202  * best extent among the found extents. Searching for the blocks starts with
203  * the group specified as the goal value in allocation context via
204  * ac_g_ex. Each group is first checked against the criteria to see whether it
205  * can be used for allocation. ext4_mb_good_group explains how the groups are
206  * checked.
207  *
208  * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
209  * get traversed linearly. That may result in subsequent allocations not being
210  * close to each other. And so, the underlying device may get filled up in a
211  * non-linear fashion. While that may not matter on non-rotational devices, for
212  * rotational devices that may result in higher seek times. "mb_linear_limit"
213  * tells mballoc how many groups it should search linearly before
214  * consulting the above data structures for more efficient lookups. For
215  * non-rotational devices, this value defaults to 0 and for rotational devices
216  * this is set to MB_DEFAULT_LINEAR_LIMIT.
217  *
218  * Both of the prealloc spaces are populated as described above. So for the
219  * first request we will hit the buddy cache, which will result in this prealloc
220  * space getting filled. The prealloc space is then later used for
221  * subsequent requests.
222  */
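/*
 * Illustrative sketch (not part of the allocator): the arithmetic behind
 * the buddy cache layout described above.  Each group consumes two logical
 * blocks in the buddy cache inode -- block 2 * g holds the bitmap and block
 * 2 * g + 1 holds the buddy -- so the page and offset for a group follow
 * from simple division.  This mirrors the calculations done later in
 * ext4_mb_get_buddy_page_lock() and ext4_mb_load_buddy_gfp():
 *
 *	int blocks_per_page = PAGE_SIZE / blocksize;
 *	int groups_per_page = blocks_per_page >= 2 ? blocks_per_page / 2 : 1;
 *	int block = group * 2;			// bitmap block for this group
 *	int pnum  = block / blocks_per_page;	// page index in the buddy cache
 *	int poff  = block % blocks_per_page;	// offset (in blocks) within page
 *
 * For example, with 1k blocks on a 4k page, blocks_per_page = 4 and
 * groups_per_page = 2, so page 0 holds the bitmap and buddy of groups 0
 * and 1, page 1 those of groups 2 and 3, and so on.
 */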
223 
224 /*
225  * mballoc operates on the following data:
226  *  - on-disk bitmap
227  *  - in-core buddy (actually includes buddy and bitmap)
228  *  - preallocation descriptors (PAs)
229  *
230  * there are two types of preallocations:
231  *  - inode
232  *    assigned to a specific inode and can be used for this inode only.
233  *    it describes part of the inode's space preallocated to specific
234  *    physical blocks. any block from that preallocation can be used
235  *    independently. the descriptor just tracks the number of blocks left
236  *    unused. so, before taking some block from the descriptor, one must
237  *    make sure the corresponding logical block isn't allocated yet. this
238  *    also means that freeing any block within the descriptor's range
239  *    must discard all preallocated blocks.
240  *  - locality group
241  *    assigned to a specific locality group which does not translate to a
242  *    permanent set of inodes: an inode can join and leave a group. space
243  *    from this type of preallocation can be used for any inode. thus
244  *    it's consumed from the beginning to the end.
245  *
246  * relation between them can be expressed as:
247  *    in-core buddy = on-disk bitmap + preallocation descriptors
248  *
249  * this means the blocks mballoc considers used are:
250  *  - allocated blocks (persistent)
251  *  - preallocated blocks (non-persistent)
252  *
253  * consistency in mballoc world means that at any time a block is either
254  * free or used in ALL structures. notice: "any time" should not be read
255  * literally -- time is discrete and delimited by locks.
256  *
257  *  to keep it simple, we don't use block numbers, instead we count number of
258  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
259  *
260  * all operations can be expressed as:
261  *  - init buddy:			buddy = on-disk + PAs
262  *  - new PA:				buddy += N; PA = N
263  *  - use inode PA:			on-disk += N; PA -= N
264  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
265  *  - use locality group PA		on-disk += N; PA -= N
266  *  - discard locality group PA		buddy -= PA; PA = 0
267  *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
268  *        is used in the real operation because we can't know the actual used
269  *        bits from the PA, only from the on-disk bitmap (see the sketch below)
270  *
271  * if we follow this strict logic, then all operations above should be atomic.
272  * given some of them can block, we'd have to use something like semaphores
273  * killing performance on high-end SMP hardware. let's try to relax it using
274  * the following knowledge:
275  *  1) if buddy is referenced, it's already initialized
276  *  2) while block is used in buddy and the buddy is referenced,
277  *     nobody can re-allocate that block
278  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
279  *     a bit set and a PA claims the same block, it's OK. IOW, one can set a bit
280  *     in the on-disk bitmap if the buddy has the same bit set and/or a PA covers
281  *     the corresponding block
282  *
283  * so, now we're building a concurrency table:
284  *  - init buddy vs.
285  *    - new PA
286  *      blocks for PA are allocated in the buddy, buddy must be referenced
287  *      until PA is linked to allocation group to avoid concurrent buddy init
288  *    - use inode PA
289  *      we need to make sure that either on-disk bitmap or PA has uptodate data
290  *      given (3) we care that PA-=N operation doesn't interfere with init
291  *    - discard inode PA
292  *      the simplest way would be to have buddy initialized by the discard
293  *    - use locality group PA
294  *      again PA-=N must be serialized with init
295  *    - discard locality group PA
296  *      the simplest way would be to have buddy initialized by the discard
297  *  - new PA vs.
298  *    - use inode PA
299  *      i_data_sem serializes them
300  *    - discard inode PA
301  *      discard process must wait until PA isn't used by another process
302  *    - use locality group PA
303  *      some mutex should serialize them
304  *    - discard locality group PA
305  *      discard process must wait until PA isn't used by another process
306  *  - use inode PA
307  *    - use inode PA
308  *      i_data_sem or another mutex should serialize them
309  *    - discard inode PA
310  *      discard process must wait until PA isn't used by another process
311  *    - use locality group PA
312  *      nothing wrong here -- they're different PAs covering different blocks
313  *    - discard locality group PA
314  *      discard process must wait until PA isn't used by another process
315  *
316  * now we're ready to draw a few conclusions:
317  *  - while a PA is referenced, no discard of it is possible
318  *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
319  *  - PA changes only after on-disk bitmap
320  *  - discard must not compete with init. either init is done before
321  *    any discard or they're serialized somehow
322  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
323  *
324  * a special case is when we've used a PA to emptiness. there is no need to
325  * modify the buddy in this case, but we still need to care about concurrent init
326  *
327  */
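/*
 * A minimal, purely illustrative model of the counting rules above
 * (hypothetical userspace code, not mballoc itself): the blocks used in the
 * in-core buddy are always the sum of blocks used on disk and blocks held
 * by preallocations, and each operation preserves that invariant:
 *
 *	#include <assert.h>
 *
 *	static unsigned int ondisk_used, buddy_used, pa_reserved;
 *
 *	static void check(void)      { assert(buddy_used == ondisk_used + pa_reserved); }
 *	static void init_buddy(void) { buddy_used = ondisk_used + pa_reserved; check(); }
 *	static void new_pa(unsigned int n)  { buddy_used += n; pa_reserved += n; check(); }
 *	static void use_pa(unsigned int n)  { ondisk_used += n; pa_reserved -= n; check(); }
 *	static void discard_pa(void) { buddy_used -= pa_reserved; pa_reserved = 0; check(); }
 *
 *	int main(void)
 *	{
 *		ondisk_used = 100;	// blocks already allocated on disk
 *		init_buddy();
 *		new_pa(16);		// reserve 16 blocks in a PA
 *		use_pa(10);		// 10 of them become real allocations
 *		discard_pa();		// the unused 6 go back to free space
 *		return 0;
 *	}
 *
 * The discard step here follows the locality-group rule (buddy -= PA);
 * discarding an inode PA additionally needs the on-disk bitmap to know
 * which bits to clear, as noted above.
 */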
328 
329 /*
330  * Logic in few words:
331  *
332  *  - allocation:
333  *    load group
334  *    find blocks
335  *    mark bits in on-disk bitmap
336  *    release group
337  *
338  *  - use preallocation:
339  *    find proper PA (per-inode or group)
340  *    load group
341  *    mark bits in on-disk bitmap
342  *    release group
343  *    release PA
344  *
345  *  - free:
346  *    load group
347  *    mark bits in on-disk bitmap
348  *    release group
349  *
350  *  - discard preallocations in group:
351  *    mark PAs deleted
352  *    move them onto local list
353  *    load on-disk bitmap
354  *    load group
355  *    remove PA from object (inode or locality group)
356  *    mark free blocks in-core
357  *
358  *  - discard inode's preallocations:
359  */
360 
361 /*
362  * Locking rules
363  *
364  * Locks:
365  *  - bitlock on a group	(group)
366  *  - object (inode/locality)	(object)
367  *  - per-pa lock		(pa)
368  *  - cr_power2_aligned lists lock	(cr_power2_aligned)
369  *  - cr_goal_len_fast lists lock	(cr_goal_len_fast)
370  *
371  * Paths:
372  *  - new pa
373  *    object
374  *    group
375  *
376  *  - find and use pa:
377  *    pa
378  *
379  *  - release consumed pa:
380  *    pa
381  *    group
382  *    object
383  *
384  *  - generate in-core bitmap:
385  *    group
386  *        pa
387  *
388  *  - discard all for given object (inode, locality group):
389  *    object
390  *        pa
391  *    group
392  *
393  *  - discard all for given group:
394  *    group
395  *        pa
396  *    group
397  *        object
398  *
399  *  - allocation path (ext4_mb_regular_allocator)
400  *    group
401  *    cr_power2_aligned/cr_goal_len_fast
402  */
403 static struct kmem_cache *ext4_pspace_cachep;
404 static struct kmem_cache *ext4_ac_cachep;
405 static struct kmem_cache *ext4_free_data_cachep;
406 
407 /* We create slab caches for groupinfo data structures based on the
408  * superblock block size.  There will be one per mounted filesystem for
409  * each unique s_blocksize_bits */
410 #define NR_GRPINFO_CACHES 8
411 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
412 
413 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
414 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
415 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
416 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
417 };
418 
419 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
420 					ext4_group_t group);
421 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
422 
423 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
424 			       ext4_group_t group, enum criteria cr);
425 
426 static int ext4_try_to_trim_range(struct super_block *sb,
427 		struct ext4_buddy *e4b, ext4_grpblk_t start,
428 		ext4_grpblk_t max, ext4_grpblk_t minblocks);
429 
430 /*
431  * The algorithm using this percpu seq counter goes below:
432  * 1. We sample the percpu discard_pa_seq counter before trying for block
433  *    allocation in ext4_mb_new_blocks().
434  * 2. We increment this percpu discard_pa_seq counter when we either allocate
435  *    or free these blocks i.e. while marking those blocks as used/free in
436  *    mb_mark_used()/mb_free_blocks().
437  * 3. We also increment this percpu seq counter when we successfully identify
438  *    that the bb_prealloc_list is not empty and hence proceed for discarding
439  *    of those PAs inside ext4_mb_discard_group_preallocations().
440  *
441  * Now to make sure that the regular fast path of block allocation is not
442  * affected, as a small optimization we only sample the percpu seq counter
443  * on that cpu. Only when block allocation fails and the freed blocks found
444  * were 0 do we sample the percpu seq counter for all cpus, using the
445  * function ext4_get_discard_pa_seq_sum() below. This happens after making
446  * sure that all the PAs on grp->bb_prealloc_list got freed or the list is empty.
447  */
448 static DEFINE_PER_CPU(u64, discard_pa_seq);
449 static inline u64 ext4_get_discard_pa_seq_sum(void)
450 {
451 	int __cpu;
452 	u64 __seq = 0;
453 
454 	for_each_possible_cpu(__cpu)
455 		__seq += per_cpu(discard_pa_seq, __cpu);
456 	return __seq;
457 }
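/*
 * A hedged sketch of how the counter above is meant to be consumed, per the
 * description in the comment preceding discard_pa_seq (the real retry logic
 * lives in ext4_mb_new_blocks() and its discard helpers; the names
 * try_allocation() and retry_allocation() below are hypothetical):
 *
 *	u64 seq = this_cpu_read(discard_pa_seq);   // cheap, local sample
 *	if (try_allocation() == 0)
 *		return;                            // fast path: no extra cost
 *	// Allocation failed and discarding freed nothing: only now pay for
 *	// the all-CPU sum, and retry if another CPU allocated, freed or
 *	// discarded PAs while we were scanning.
 *	if (seq != ext4_get_discard_pa_seq_sum())
 *		retry_allocation();
 */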
458 
459 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
460 {
461 #if BITS_PER_LONG == 64
462 	*bit += ((unsigned long) addr & 7UL) << 3;
463 	addr = (void *) ((unsigned long) addr & ~7UL);
464 #elif BITS_PER_LONG == 32
465 	*bit += ((unsigned long) addr & 3UL) << 3;
466 	addr = (void *) ((unsigned long) addr & ~3UL);
467 #else
468 #error "how many bits you are?!"
469 #endif
470 	return addr;
471 }
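/*
 * Worked example (64-bit): for addr = 0x1003 and *bit = 5, the three low
 * address bits (3) are folded into the bit index, so *bit becomes
 * 5 + 3 * 8 = 29 and addr is rounded down to the 8-byte-aligned 0x1000.
 * The (bit, addr) pair still names the same bit of memory, but addr now
 * satisfies the alignment that ext4_test_bit and friends require on
 * architectures such as powerpc.
 */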
472 
473 static inline int mb_test_bit(int bit, void *addr)
474 {
475 	/*
476 	 * ext4_test_bit on architecture like powerpc
477 	 * needs unsigned long aligned address
478 	 */
479 	addr = mb_correct_addr_and_bit(&bit, addr);
480 	return ext4_test_bit(bit, addr);
481 }
482 
483 static inline void mb_set_bit(int bit, void *addr)
484 {
485 	addr = mb_correct_addr_and_bit(&bit, addr);
486 	ext4_set_bit(bit, addr);
487 }
488 
489 static inline void mb_clear_bit(int bit, void *addr)
490 {
491 	addr = mb_correct_addr_and_bit(&bit, addr);
492 	ext4_clear_bit(bit, addr);
493 }
494 
495 static inline int mb_test_and_clear_bit(int bit, void *addr)
496 {
497 	addr = mb_correct_addr_and_bit(&bit, addr);
498 	return ext4_test_and_clear_bit(bit, addr);
499 }
500 
501 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
502 {
503 	int fix = 0, ret, tmpmax;
504 	addr = mb_correct_addr_and_bit(&fix, addr);
505 	tmpmax = max + fix;
506 	start += fix;
507 
508 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
509 	if (ret > max)
510 		return max;
511 	return ret;
512 }
513 
514 static inline int mb_find_next_bit(void *addr, int max, int start)
515 {
516 	int fix = 0, ret, tmpmax;
517 	addr = mb_correct_addr_and_bit(&fix, addr);
518 	tmpmax = max + fix;
519 	start += fix;
520 
521 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
522 	if (ret > max)
523 		return max;
524 	return ret;
525 }
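/*
 * Worked example for the two search helpers above: if addr is 3 bytes past
 * an 8-byte boundary (on 64-bit), fix becomes 3 * 8 = 24, so the search is
 * performed over [start + 24, max + 24) in the aligned buffer and 24 is
 * subtracted from the result; a result beyond the caller's range is clamped
 * to max, meaning "nothing found".
 */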
526 
527 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
528 {
529 	char *bb;
530 
531 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
532 	BUG_ON(max == NULL);
533 
534 	if (order > e4b->bd_blkbits + 1) {
535 		*max = 0;
536 		return NULL;
537 	}
538 
539 	/* at order 0 we see each particular block */
540 	if (order == 0) {
541 		*max = 1 << (e4b->bd_blkbits + 3);
542 		return e4b->bd_bitmap;
543 	}
544 
545 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
546 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
547 
548 	return bb;
549 }
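/*
 * Worked example, assuming a 4k block size (bd_blkbits = 12): at order 0
 * the "buddy" is the cluster bitmap itself, so *max = 1 << (12 + 3) = 32768
 * bits.  For order >= 1 the per-order buddy bitmaps live back to back in
 * bd_buddy at the precomputed s_mb_offsets[order], with s_mb_maxs[order]
 * bits each -- half as many bits per successive order (16384 at order 1,
 * 8192 at order 2, and so on).
 */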
550 
551 #ifdef DOUBLE_CHECK
552 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
553 			   int first, int count)
554 {
555 	int i;
556 	struct super_block *sb = e4b->bd_sb;
557 
558 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
559 		return;
560 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
561 	for (i = 0; i < count; i++) {
562 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
563 			ext4_fsblk_t blocknr;
564 
565 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
566 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
567 			ext4_grp_locked_error(sb, e4b->bd_group,
568 					      inode ? inode->i_ino : 0,
569 					      blocknr,
570 					      "freeing block already freed "
571 					      "(bit %u)",
572 					      first + i);
573 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
574 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
575 		}
576 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
577 	}
578 }
579 
580 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
581 {
582 	int i;
583 
584 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
585 		return;
586 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
587 	for (i = 0; i < count; i++) {
588 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
589 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
590 	}
591 }
592 
593 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
594 {
595 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
596 		return;
597 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
598 		unsigned char *b1, *b2;
599 		int i;
600 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
601 		b2 = (unsigned char *) bitmap;
602 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
603 			if (b1[i] != b2[i]) {
604 				ext4_msg(e4b->bd_sb, KERN_ERR,
605 					 "corruption in group %u "
606 					 "at byte %u(%u): %x in copy != %x "
607 					 "on disk/prealloc",
608 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
609 				BUG();
610 			}
611 		}
612 	}
613 }
614 
615 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
616 			struct ext4_group_info *grp, ext4_group_t group)
617 {
618 	struct buffer_head *bh;
619 
620 	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
621 	if (!grp->bb_bitmap)
622 		return;
623 
624 	bh = ext4_read_block_bitmap(sb, group);
625 	if (IS_ERR_OR_NULL(bh)) {
626 		kfree(grp->bb_bitmap);
627 		grp->bb_bitmap = NULL;
628 		return;
629 	}
630 
631 	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
632 	put_bh(bh);
633 }
634 
635 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
636 {
637 	kfree(grp->bb_bitmap);
638 }
639 
640 #else
641 static inline void mb_free_blocks_double(struct inode *inode,
642 				struct ext4_buddy *e4b, int first, int count)
643 {
644 	return;
645 }
646 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
647 						int first, int count)
648 {
649 	return;
650 }
651 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
652 {
653 	return;
654 }
655 
656 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
657 			struct ext4_group_info *grp, ext4_group_t group)
658 {
659 	return;
660 }
661 
662 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
663 {
664 	return;
665 }
666 #endif
667 
668 #ifdef AGGRESSIVE_CHECK
669 
670 #define MB_CHECK_ASSERT(assert)						\
671 do {									\
672 	if (!(assert)) {						\
673 		printk(KERN_EMERG					\
674 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
675 			function, file, line, # assert);		\
676 		BUG();							\
677 	}								\
678 } while (0)
679 
680 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
681 				const char *function, int line)
682 {
683 	struct super_block *sb = e4b->bd_sb;
684 	int order = e4b->bd_blkbits + 1;
685 	int max;
686 	int max2;
687 	int i;
688 	int j;
689 	int k;
690 	int count;
691 	struct ext4_group_info *grp;
692 	int fragments = 0;
693 	int fstart;
694 	struct list_head *cur;
695 	void *buddy;
696 	void *buddy2;
697 
698 	if (e4b->bd_info->bb_check_counter++ % 10)
699 		return 0;
700 
701 	while (order > 1) {
702 		buddy = mb_find_buddy(e4b, order, &max);
703 		MB_CHECK_ASSERT(buddy);
704 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
705 		MB_CHECK_ASSERT(buddy2);
706 		MB_CHECK_ASSERT(buddy != buddy2);
707 		MB_CHECK_ASSERT(max * 2 == max2);
708 
709 		count = 0;
710 		for (i = 0; i < max; i++) {
711 
712 			if (mb_test_bit(i, buddy)) {
713 				/* only single bit in buddy2 may be 0 */
714 				if (!mb_test_bit(i << 1, buddy2)) {
715 					MB_CHECK_ASSERT(
716 						mb_test_bit((i<<1)+1, buddy2));
717 				}
718 				continue;
719 			}
720 
721 			/* both bits in buddy2 must be 1 */
722 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
723 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
724 
725 			for (j = 0; j < (1 << order); j++) {
726 				k = (i * (1 << order)) + j;
727 				MB_CHECK_ASSERT(
728 					!mb_test_bit(k, e4b->bd_bitmap));
729 			}
730 			count++;
731 		}
732 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
733 		order--;
734 	}
735 
736 	fstart = -1;
737 	buddy = mb_find_buddy(e4b, 0, &max);
738 	for (i = 0; i < max; i++) {
739 		if (!mb_test_bit(i, buddy)) {
740 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
741 			if (fstart == -1) {
742 				fragments++;
743 				fstart = i;
744 			}
745 			continue;
746 		}
747 		fstart = -1;
748 		/* check used bits only */
749 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
750 			buddy2 = mb_find_buddy(e4b, j, &max2);
751 			k = i >> j;
752 			MB_CHECK_ASSERT(k < max2);
753 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
754 		}
755 	}
756 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
757 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
758 
759 	grp = ext4_get_group_info(sb, e4b->bd_group);
760 	if (!grp)
761 		return 0;
762 	list_for_each(cur, &grp->bb_prealloc_list) {
763 		ext4_group_t groupnr;
764 		struct ext4_prealloc_space *pa;
765 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
766 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
767 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
768 		for (i = 0; i < pa->pa_len; i++)
769 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
770 	}
771 	return 0;
772 }
773 #undef MB_CHECK_ASSERT
774 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
775 					__FILE__, __func__, __LINE__)
776 #else
777 #define mb_check_buddy(e4b)
778 #endif
779 
780 /*
781  * Divide the blocks starting at @first with length @len into
782  * smaller chunks of power-of-2 blocks.
783  * Clear the bits in the buddy bitmap which the blocks of the chunk(s) cover,
784  * then increase bb_counters[] for the corresponding chunk size.
785  */
786 static void ext4_mb_mark_free_simple(struct super_block *sb,
787 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
788 					struct ext4_group_info *grp)
789 {
790 	struct ext4_sb_info *sbi = EXT4_SB(sb);
791 	ext4_grpblk_t min;
792 	ext4_grpblk_t max;
793 	ext4_grpblk_t chunk;
794 	unsigned int border;
795 
796 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
797 
798 	border = 2 << sb->s_blocksize_bits;
799 
800 	while (len > 0) {
801 		/* find how many blocks can be covered since this position */
802 		max = ffs(first | border) - 1;
803 
804 		/* find how many blocks of power 2 we need to mark */
805 		min = fls(len) - 1;
806 
807 		if (max < min)
808 			min = max;
809 		chunk = 1 << min;
810 
811 		/* mark multiblock chunks only */
812 		grp->bb_counters[min]++;
813 		if (min > 0)
814 			mb_clear_bit(first >> min,
815 				     buddy + sbi->s_mb_offsets[min]);
816 
817 		len -= chunk;
818 		first += chunk;
819 	}
820 }
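/*
 * Worked example: for a free range first = 5, len = 11 (clusters 5..15)
 * the loop above splits the range into power-of-2 chunks aligned to their
 * own size (chunk = 1 << min(max, min)):
 *
 *	first = 5,  len = 11: max = 0, min = 3 -> chunk = 1  (bb_counters[0]++)
 *	first = 6,  len = 10: max = 1, min = 3 -> chunk = 2  (bb_counters[1]++,
 *					clear bit 3 in the order-1 buddy)
 *	first = 8,  len = 8:  max = 3, min = 3 -> chunk = 8  (bb_counters[3]++,
 *					clear bit 1 in the order-3 buddy)
 *
 * i.e. [5], [6,7] and [8..15]; only multi-block chunks clear a bit in the
 * buddy bitmap of the corresponding order.
 */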
821 
822 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
823 {
824 	int order;
825 
826 	/*
827 	 * We don't bother with a special list for groups whose free extents
828 	 * average only 1 block, nor for completely empty groups.
829 	 */
830 	order = fls(len) - 2;
831 	if (order < 0)
832 		return 0;
833 	if (order == MB_NUM_ORDERS(sb))
834 		order--;
835 	return order;
836 }
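/*
 * Worked example: the list index is simply fls(len) - 2, clamped to the
 * valid range.  So an average fragment size of 1-3 clusters maps to list 0,
 * 4-7 to list 1, 8-15 to list 2, and so on; the topmost order is folded
 * down by one so that no more than MB_NUM_ORDERS(sb) lists are ever needed.
 */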
837 
838 /* Move group to appropriate avg_fragment_size list */
839 static void
840 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
841 {
842 	struct ext4_sb_info *sbi = EXT4_SB(sb);
843 	int new_order;
844 
845 	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
846 		return;
847 
848 	new_order = mb_avg_fragment_size_order(sb,
849 					grp->bb_free / grp->bb_fragments);
850 	if (new_order == grp->bb_avg_fragment_size_order)
851 		return;
852 
853 	if (grp->bb_avg_fragment_size_order != -1) {
854 		write_lock(&sbi->s_mb_avg_fragment_size_locks[
855 					grp->bb_avg_fragment_size_order]);
856 		list_del(&grp->bb_avg_fragment_size_node);
857 		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
858 					grp->bb_avg_fragment_size_order]);
859 	}
860 	grp->bb_avg_fragment_size_order = new_order;
861 	write_lock(&sbi->s_mb_avg_fragment_size_locks[
862 					grp->bb_avg_fragment_size_order]);
863 	list_add_tail(&grp->bb_avg_fragment_size_node,
864 		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
865 	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
866 					grp->bb_avg_fragment_size_order]);
867 }
868 
869 /*
870  * Choose next group by traversing largest_free_order lists. Updates *new_cr if
871  * cr level needs an update.
872  */
873 static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
874 			enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
875 {
876 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
877 	struct ext4_group_info *iter;
878 	int i;
879 
880 	if (ac->ac_status == AC_STATUS_FOUND)
881 		return;
882 
883 	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
884 		atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
885 
886 	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
887 		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
888 			continue;
889 		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
890 		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
891 			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
892 			continue;
893 		}
894 		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
895 				    bb_largest_free_order_node) {
896 			if (sbi->s_mb_stats)
897 				atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
898 			if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
899 				*group = iter->bb_group;
900 				ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
901 				read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
902 				return;
903 			}
904 		}
905 		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
906 	}
907 
908 	/* Increment cr and search again if no group is found */
909 	*new_cr = CR_GOAL_LEN_FAST;
910 }
911 
912 /*
913  * Find a suitable group of given order from the average fragments list.
914  */
915 static struct ext4_group_info *
916 ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
917 {
918 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
919 	struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
920 	rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
921 	struct ext4_group_info *grp = NULL, *iter;
922 	enum criteria cr = ac->ac_criteria;
923 
924 	if (list_empty(frag_list))
925 		return NULL;
926 	read_lock(frag_list_lock);
927 	if (list_empty(frag_list)) {
928 		read_unlock(frag_list_lock);
929 		return NULL;
930 	}
931 	list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
932 		if (sbi->s_mb_stats)
933 			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
934 		if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
935 			grp = iter;
936 			break;
937 		}
938 	}
939 	read_unlock(frag_list_lock);
940 	return grp;
941 }
942 
943 /*
944  * Choose next group by traversing average fragment size list of suitable
945  * order. Updates *new_cr if cr level needs an update.
946  */
947 static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
948 		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
949 {
950 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
951 	struct ext4_group_info *grp = NULL;
952 	int i;
953 
954 	if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
955 		if (sbi->s_mb_stats)
956 			atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
957 	}
958 
959 	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
960 	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
961 		grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
962 		if (grp) {
963 			*group = grp->bb_group;
964 			ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
965 			return;
966 		}
967 	}
968 
969 	/*
970 	 * CR_BEST_AVAIL_LEN works based on the concept that we have
971 	 * a larger normalized goal len request which can be trimmed to
972 	 * a smaller goal len such that it can still satisfy original
973 	 * request len. However, allocation request for non-regular
974 	 * files never gets normalized.
975 	 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
976 	 */
977 	if (ac->ac_flags & EXT4_MB_HINT_DATA)
978 		*new_cr = CR_BEST_AVAIL_LEN;
979 	else
980 		*new_cr = CR_GOAL_LEN_SLOW;
981 }
982 
983 /*
984  * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
985  * order we have and proactively trim the goal request length to that order to
986  * find a suitable group faster.
987  *
988  * This optimizes allocation speed at the cost of slightly reduced
989  * preallocations. However, we make sure that we don't trim the request too
990  * much and fall to CR_GOAL_LEN_SLOW in that case.
991  */
992 static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
993 		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
994 {
995 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
996 	struct ext4_group_info *grp = NULL;
997 	int i, order, min_order;
998 	unsigned long num_stripe_clusters = 0;
999 
1000 	if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
1001 		if (sbi->s_mb_stats)
1002 			atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
1003 	}
1004 
1005 	/*
1006 	 * mb_avg_fragment_size_order() returns order in a way that makes
1007 	 * retrieving back the length using (1 << order) inaccurate. Hence, use
1008 	 * fls() instead since we need to know the actual length while modifying
1009 	 * goal length.
1010 	 */
1011 	order = fls(ac->ac_g_ex.fe_len) - 1;
1012 	min_order = order - sbi->s_mb_best_avail_max_trim_order;
1013 	if (min_order < 0)
1014 		min_order = 0;
1015 
1016 	if (sbi->s_stripe > 0) {
1017 		/*
1018 		 * We are assuming that stripe size is always a multiple of the
1019 		 * cluster ratio, otherwise __ext4_fill_super exits early.
1020 		 */
1021 		num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
1022 		if (1 << min_order < num_stripe_clusters)
1023 			/*
1024 			 * We consider 1 order less because later we round
1025 			 * up the goal len to num_stripe_clusters
1026 			 */
1027 			min_order = fls(num_stripe_clusters) - 1;
1028 	}
1029 
1030 	if (1 << min_order < ac->ac_o_ex.fe_len)
1031 		min_order = fls(ac->ac_o_ex.fe_len);
1032 
1033 	for (i = order; i >= min_order; i--) {
1034 		int frag_order;
1035 		/*
1036 		 * Scale down goal len to make sure we find something
1037 		 * in the free fragments list. Basically, reduce
1038 		 * preallocations.
1039 		 */
1040 		ac->ac_g_ex.fe_len = 1 << i;
1041 
1042 		if (num_stripe_clusters > 0) {
1043 			/*
1044 			 * Try to round up the adjusted goal length to
1045 			 * stripe size (in cluster units) multiple for
1046 			 * efficiency.
1047 			 */
1048 			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
1049 						     num_stripe_clusters);
1050 		}
1051 
1052 		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
1053 							ac->ac_g_ex.fe_len);
1054 
1055 		grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
1056 		if (grp) {
1057 			*group = grp->bb_group;
1058 			ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
1059 			return;
1060 		}
1061 	}
1062 
1063 	/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
1064 	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
1065 	*new_cr = CR_GOAL_LEN_SLOW;
1066 }
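/*
 * Worked example (no striping, and assuming the default
 * s_mb_best_avail_max_trim_order of 3): for a normalized goal of 1000
 * clusters and an original request of 100 clusters, order = fls(1000) - 1
 * = 9 and min_order starts out as 9 - 3 = 6; since 1 << 6 = 64 is smaller
 * than the original 100-cluster request, min_order is raised to
 * fls(100) = 7.  The loop then tries trimmed goal lengths of 512, 256 and
 * 128 clusters against the average-fragment-size lists before giving up
 * and falling back to CR_GOAL_LEN_SLOW.
 */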
1067 
1068 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
1069 {
1070 	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
1071 		return 0;
1072 	if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
1073 		return 0;
1074 	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
1075 		return 0;
1076 	return 1;
1077 }
1078 
1079 /*
1080  * Return next linear group for allocation. If linear traversal should not be
1081  * performed, this function just returns the same group
1082  */
1083 static ext4_group_t
1084 next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
1085 		  ext4_group_t ngroups)
1086 {
1087 	if (!should_optimize_scan(ac))
1088 		goto inc_and_return;
1089 
1090 	if (ac->ac_groups_linear_remaining) {
1091 		ac->ac_groups_linear_remaining--;
1092 		goto inc_and_return;
1093 	}
1094 
1095 	return group;
1096 inc_and_return:
1097 	/*
1098 	 * Artificially restricted ngroups for non-extent
1099 	 * files makes group > ngroups possible on first loop.
1100 	 */
1101 	return group + 1 >= ngroups ? 0 : group + 1;
1102 }
1103 
1104 /*
1105  * ext4_mb_choose_next_group: choose next group for allocation.
1106  *
1107  * @ac        Allocation Context
1108  * @new_cr    This is an output parameter. If there is no good group
1109  *            available at current CR level, this field is updated to indicate
1110  *            the new cr level that should be used.
1111  * @group     This is an input / output parameter. As an input it indicates the
1112  *            next group that the allocator intends to use for allocation. As
1113  *            output, this field indicates the next group that should be used as
1114  *            determined by the optimization functions.
1115  * @ngroups   Total number of groups
1116  */
1117 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1118 		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1119 {
1120 	*new_cr = ac->ac_criteria;
1121 
1122 	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1123 		*group = next_linear_group(ac, *group, ngroups);
1124 		return;
1125 	}
1126 
1127 	if (*new_cr == CR_POWER2_ALIGNED) {
1128 		ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups);
1129 	} else if (*new_cr == CR_GOAL_LEN_FAST) {
1130 		ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups);
1131 	} else if (*new_cr == CR_BEST_AVAIL_LEN) {
1132 		ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups);
1133 	} else {
1134 		/*
1135 		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1136 		 * bb_free. But until that happens, we should never come here.
1137 		 */
1138 		WARN_ON(1);
1139 	}
1140 }
1141 
1142 /*
1143  * Cache the order of the largest free extent we have available in this block
1144  * group.
1145  */
1146 static void
1147 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1148 {
1149 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1150 	int i;
1151 
1152 	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1153 		if (grp->bb_counters[i] > 0)
1154 			break;
1155 	/* No need to move between order lists? */
1156 	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1157 	    i == grp->bb_largest_free_order) {
1158 		grp->bb_largest_free_order = i;
1159 		return;
1160 	}
1161 
1162 	if (grp->bb_largest_free_order >= 0) {
1163 		write_lock(&sbi->s_mb_largest_free_orders_locks[
1164 					      grp->bb_largest_free_order]);
1165 		list_del_init(&grp->bb_largest_free_order_node);
1166 		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1167 					      grp->bb_largest_free_order]);
1168 	}
1169 	grp->bb_largest_free_order = i;
1170 	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1171 		write_lock(&sbi->s_mb_largest_free_orders_locks[
1172 					      grp->bb_largest_free_order]);
1173 		list_add_tail(&grp->bb_largest_free_order_node,
1174 		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1175 		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1176 					      grp->bb_largest_free_order]);
1177 	}
1178 }
1179 
1180 static noinline_for_stack
1181 void ext4_mb_generate_buddy(struct super_block *sb,
1182 			    void *buddy, void *bitmap, ext4_group_t group,
1183 			    struct ext4_group_info *grp)
1184 {
1185 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1186 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1187 	ext4_grpblk_t i = 0;
1188 	ext4_grpblk_t first;
1189 	ext4_grpblk_t len;
1190 	unsigned free = 0;
1191 	unsigned fragments = 0;
1192 	unsigned long long period = get_cycles();
1193 
1194 	/* initialize buddy from bitmap which is aggregation
1195 	 * of on-disk bitmap and preallocations */
1196 	i = mb_find_next_zero_bit(bitmap, max, 0);
1197 	grp->bb_first_free = i;
1198 	while (i < max) {
1199 		fragments++;
1200 		first = i;
1201 		i = mb_find_next_bit(bitmap, max, i);
1202 		len = i - first;
1203 		free += len;
1204 		if (len > 1)
1205 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1206 		else
1207 			grp->bb_counters[0]++;
1208 		if (i < max)
1209 			i = mb_find_next_zero_bit(bitmap, max, i);
1210 	}
1211 	grp->bb_fragments = fragments;
1212 
1213 	if (free != grp->bb_free) {
1214 		ext4_grp_locked_error(sb, group, 0, 0,
1215 				      "block bitmap and bg descriptor "
1216 				      "inconsistent: %u vs %u free clusters",
1217 				      free, grp->bb_free);
1218 		/*
1219 		 * If we intend to continue, we consider group descriptor
1220 		 * corrupt and update bb_free using bitmap value
1221 		 */
1222 		grp->bb_free = free;
1223 		ext4_mark_group_bitmap_corrupted(sb, group,
1224 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1225 	}
1226 	mb_set_largest_free_order(sb, grp);
1227 	mb_update_avg_fragment_size(sb, grp);
1228 
1229 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1230 
1231 	period = get_cycles() - period;
1232 	atomic_inc(&sbi->s_mb_buddies_generated);
1233 	atomic64_add(period, &sbi->s_mb_generation_time);
1234 }
1235 
1236 /* The buddy information is attached to the buddy cache inode
1237  * for convenience. The information regarding each group
1238  * is loaded via ext4_mb_load_buddy. It consists of the
1239  * block bitmap and the buddy information, which are
1240  * stored in the inode as
1241  *
1242  * {                        page                        }
1243  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1244  *
1245  *
1246  * one block each for bitmap and buddy information.
1247  * So for each group we take up 2 blocks. A page can
1248  * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1249  * So it can hold information for groups_per_page groups,
1250  * which is blocks_per_page/2
1251  *
1252  * Locking note:  This routine takes the block group lock of all groups
1253  * for this page; do not hold this lock when calling this routine!
1254  */
1255 
1256 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1257 {
1258 	ext4_group_t ngroups;
1259 	unsigned int blocksize;
1260 	int blocks_per_page;
1261 	int groups_per_page;
1262 	int err = 0;
1263 	int i;
1264 	ext4_group_t first_group, group;
1265 	int first_block;
1266 	struct super_block *sb;
1267 	struct buffer_head *bhs;
1268 	struct buffer_head **bh = NULL;
1269 	struct inode *inode;
1270 	char *data;
1271 	char *bitmap;
1272 	struct ext4_group_info *grinfo;
1273 
1274 	inode = page->mapping->host;
1275 	sb = inode->i_sb;
1276 	ngroups = ext4_get_groups_count(sb);
1277 	blocksize = i_blocksize(inode);
1278 	blocks_per_page = PAGE_SIZE / blocksize;
1279 
1280 	mb_debug(sb, "init page %lu\n", page->index);
1281 
1282 	groups_per_page = blocks_per_page >> 1;
1283 	if (groups_per_page == 0)
1284 		groups_per_page = 1;
1285 
1286 	/* allocate buffer_heads to read bitmaps */
1287 	if (groups_per_page > 1) {
1288 		i = sizeof(struct buffer_head *) * groups_per_page;
1289 		bh = kzalloc(i, gfp);
1290 		if (bh == NULL)
1291 			return -ENOMEM;
1292 	} else
1293 		bh = &bhs;
1294 
1295 	first_group = page->index * blocks_per_page / 2;
1296 
1297 	/* read all groups the page covers into the cache */
1298 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1299 		if (group >= ngroups)
1300 			break;
1301 
1302 		grinfo = ext4_get_group_info(sb, group);
1303 		if (!grinfo)
1304 			continue;
1305 		/*
1306 		 * If page is uptodate then we came here after online resize
1307 		 * which added some new uninitialized group info structs, so
1308 		 * we must skip all initialized uptodate buddies on the page,
1309 		 * which may be currently in use by an allocating task.
1310 		 */
1311 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1312 			bh[i] = NULL;
1313 			continue;
1314 		}
1315 		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1316 		if (IS_ERR(bh[i])) {
1317 			err = PTR_ERR(bh[i]);
1318 			bh[i] = NULL;
1319 			goto out;
1320 		}
1321 		mb_debug(sb, "read bitmap for group %u\n", group);
1322 	}
1323 
1324 	/* wait for I/O completion */
1325 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1326 		int err2;
1327 
1328 		if (!bh[i])
1329 			continue;
1330 		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1331 		if (!err)
1332 			err = err2;
1333 	}
1334 
1335 	first_block = page->index * blocks_per_page;
1336 	for (i = 0; i < blocks_per_page; i++) {
1337 		group = (first_block + i) >> 1;
1338 		if (group >= ngroups)
1339 			break;
1340 
1341 		if (!bh[group - first_group])
1342 			/* skip initialized uptodate buddy */
1343 			continue;
1344 
1345 		if (!buffer_verified(bh[group - first_group]))
1346 			/* Skip faulty bitmaps */
1347 			continue;
1348 		err = 0;
1349 
1350 		/*
1351 		 * data carries information regarding this
1352 		 * particular group in the format specified
1353 		 * above
1354 		 *
1355 		 */
1356 		data = page_address(page) + (i * blocksize);
1357 		bitmap = bh[group - first_group]->b_data;
1358 
1359 		/*
1360 		 * We place the buddy block and bitmap block
1361 		 * close together
1362 		 */
1363 		grinfo = ext4_get_group_info(sb, group);
1364 		if (!grinfo) {
1365 			err = -EFSCORRUPTED;
1366 			goto out;
1367 		}
1368 		if ((first_block + i) & 1) {
1369 			/* this is block of buddy */
1370 			BUG_ON(incore == NULL);
1371 			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1372 				group, page->index, i * blocksize);
1373 			trace_ext4_mb_buddy_bitmap_load(sb, group);
1374 			grinfo->bb_fragments = 0;
1375 			memset(grinfo->bb_counters, 0,
1376 			       sizeof(*grinfo->bb_counters) *
1377 			       (MB_NUM_ORDERS(sb)));
1378 			/*
1379 			 * incore got set to the group block bitmap below
1380 			 */
1381 			ext4_lock_group(sb, group);
1382 			/* init the buddy */
1383 			memset(data, 0xff, blocksize);
1384 			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1385 			ext4_unlock_group(sb, group);
1386 			incore = NULL;
1387 		} else {
1388 			/* this is block of bitmap */
1389 			BUG_ON(incore != NULL);
1390 			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1391 				group, page->index, i * blocksize);
1392 			trace_ext4_mb_bitmap_load(sb, group);
1393 
1394 			/* see comments in ext4_mb_put_pa() */
1395 			ext4_lock_group(sb, group);
1396 			memcpy(data, bitmap, blocksize);
1397 
1398 			/* mark all preallocated blks used in in-core bitmap */
1399 			ext4_mb_generate_from_pa(sb, data, group);
1400 			WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
1401 			ext4_unlock_group(sb, group);
1402 
1403 			/* set incore so that the buddy information can be
1404 			 * generated using this
1405 			 */
1406 			incore = data;
1407 		}
1408 	}
1409 	SetPageUptodate(page);
1410 
1411 out:
1412 	if (bh) {
1413 		for (i = 0; i < groups_per_page; i++)
1414 			brelse(bh[i]);
1415 		if (bh != &bhs)
1416 			kfree(bh);
1417 	}
1418 	return err;
1419 }
1420 
1421 /*
1422  * Lock the buddy and bitmap pages. This makes sure that no other parallel
1423  * init_group on the same buddy page happens while holding the buddy page lock.
1424  * Return locked buddy and bitmap pages in the e4b struct. If buddy and bitmap
1425  * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
1426  */
1427 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1428 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1429 {
1430 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1431 	int block, pnum, poff;
1432 	int blocks_per_page;
1433 	struct page *page;
1434 
1435 	e4b->bd_buddy_page = NULL;
1436 	e4b->bd_bitmap_page = NULL;
1437 
1438 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1439 	/*
1440 	 * the buddy cache inode stores the block bitmap
1441 	 * and buddy information in consecutive blocks.
1442 	 * So for each group we need two blocks.
1443 	 */
1444 	block = group * 2;
1445 	pnum = block / blocks_per_page;
1446 	poff = block % blocks_per_page;
1447 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1448 	if (!page)
1449 		return -ENOMEM;
1450 	BUG_ON(page->mapping != inode->i_mapping);
1451 	e4b->bd_bitmap_page = page;
1452 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1453 
1454 	if (blocks_per_page >= 2) {
1455 		/* buddy and bitmap are on the same page */
1456 		return 0;
1457 	}
1458 
1459 	block++;
1460 	pnum = block / blocks_per_page;
1461 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1462 	if (!page)
1463 		return -ENOMEM;
1464 	BUG_ON(page->mapping != inode->i_mapping);
1465 	e4b->bd_buddy_page = page;
1466 	return 0;
1467 }
1468 
1469 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1470 {
1471 	if (e4b->bd_bitmap_page) {
1472 		unlock_page(e4b->bd_bitmap_page);
1473 		put_page(e4b->bd_bitmap_page);
1474 	}
1475 	if (e4b->bd_buddy_page) {
1476 		unlock_page(e4b->bd_buddy_page);
1477 		put_page(e4b->bd_buddy_page);
1478 	}
1479 }
1480 
1481 /*
1482  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1483  * block group lock of all groups for this page; do not hold the BG lock when
1484  * calling this routine!
1485  */
1486 static noinline_for_stack
1487 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1488 {
1489 
1490 	struct ext4_group_info *this_grp;
1491 	struct ext4_buddy e4b;
1492 	struct page *page;
1493 	int ret = 0;
1494 
1495 	might_sleep();
1496 	mb_debug(sb, "init group %u\n", group);
1497 	this_grp = ext4_get_group_info(sb, group);
1498 	if (!this_grp)
1499 		return -EFSCORRUPTED;
1500 
1501 	/*
1502 	 * This ensures that we don't reinit the buddy cache
1503 	 * page which map to the group from which we are already
1504 	 * allocating. If we are looking at the buddy cache we would
1505 	 * have taken a reference using ext4_mb_load_buddy and that
1506 	 * would have pinned buddy page to page cache.
1507 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1508 	 * page accessed.
1509 	 */
1510 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1511 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1512 		/*
1513 		 * somebody initialized the group
1514 		 * return without doing anything
1515 		 */
1516 		goto err;
1517 	}
1518 
1519 	page = e4b.bd_bitmap_page;
1520 	ret = ext4_mb_init_cache(page, NULL, gfp);
1521 	if (ret)
1522 		goto err;
1523 	if (!PageUptodate(page)) {
1524 		ret = -EIO;
1525 		goto err;
1526 	}
1527 
1528 	if (e4b.bd_buddy_page == NULL) {
1529 		/*
1530 		 * If both the bitmap and buddy are in
1531 		 * the same page we don't need to force
1532 		 * init the buddy
1533 		 */
1534 		ret = 0;
1535 		goto err;
1536 	}
1537 	/* init buddy cache */
1538 	page = e4b.bd_buddy_page;
1539 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1540 	if (ret)
1541 		goto err;
1542 	if (!PageUptodate(page)) {
1543 		ret = -EIO;
1544 		goto err;
1545 	}
1546 err:
1547 	ext4_mb_put_buddy_page_lock(&e4b);
1548 	return ret;
1549 }
1550 
1551 /*
1552  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1553  * block group lock of all groups for this page; do not hold the BG lock when
1554  * calling this routine!
1555  */
1556 static noinline_for_stack int
1557 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1558 		       struct ext4_buddy *e4b, gfp_t gfp)
1559 {
1560 	int blocks_per_page;
1561 	int block;
1562 	int pnum;
1563 	int poff;
1564 	struct page *page;
1565 	int ret;
1566 	struct ext4_group_info *grp;
1567 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1568 	struct inode *inode = sbi->s_buddy_cache;
1569 
1570 	might_sleep();
1571 	mb_debug(sb, "load group %u\n", group);
1572 
1573 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1574 	grp = ext4_get_group_info(sb, group);
1575 	if (!grp)
1576 		return -EFSCORRUPTED;
1577 
1578 	e4b->bd_blkbits = sb->s_blocksize_bits;
1579 	e4b->bd_info = grp;
1580 	e4b->bd_sb = sb;
1581 	e4b->bd_group = group;
1582 	e4b->bd_buddy_page = NULL;
1583 	e4b->bd_bitmap_page = NULL;
1584 
1585 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1586 		/*
1587 		 * we need full data about the group
1588 		 * to make a good selection
1589 		 */
1590 		ret = ext4_mb_init_group(sb, group, gfp);
1591 		if (ret)
1592 			return ret;
1593 	}
1594 
1595 	/*
1596 	 * the buddy cache inode stores the block bitmap
1597 	 * and buddy information in consecutive blocks.
1598 	 * So for each group we need two blocks.
1599 	 */
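	/*
	 * For example, with blocksize == PAGE_SIZE (blocks_per_page == 1),
	 * group G's bitmap lives in page 2*G and its buddy in page 2*G+1;
	 * with 1k blocks and 4k pages (blocks_per_page == 4), groups 0 and 1
	 * share page 0: bitmap0 at offset 0, buddy0 at 1k, bitmap1 at 2k, etc.
	 */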
1600 	block = group * 2;
1601 	pnum = block / blocks_per_page;
1602 	poff = block % blocks_per_page;
1603 
1604 	/* we could use find_or_create_page(), but it locks the page,
1605 	 * which we'd like to avoid in the fast path ... */
1606 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1607 	if (page == NULL || !PageUptodate(page)) {
1608 		if (page)
1609 			/*
1610 			 * drop the page reference and try
1611 			 * to get the page with the lock held. If the
1612 			 * page is not uptodate, that implies
1613 			 * somebody just created the page but has
1614 			 * not yet initialized it. So wait for the
1615 			 * initialization to complete.
1616 			 */
1617 			put_page(page);
1618 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1619 		if (page) {
1620 			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1621 	"ext4: bitmap's page->mapping != inode->i_mapping\n")) {
1622 				/* should never happen */
1623 				unlock_page(page);
1624 				ret = -EINVAL;
1625 				goto err;
1626 			}
1627 			if (!PageUptodate(page)) {
1628 				ret = ext4_mb_init_cache(page, NULL, gfp);
1629 				if (ret) {
1630 					unlock_page(page);
1631 					goto err;
1632 				}
1633 				mb_cmp_bitmaps(e4b, page_address(page) +
1634 					       (poff * sb->s_blocksize));
1635 			}
1636 			unlock_page(page);
1637 		}
1638 	}
1639 	if (page == NULL) {
1640 		ret = -ENOMEM;
1641 		goto err;
1642 	}
1643 	if (!PageUptodate(page)) {
1644 		ret = -EIO;
1645 		goto err;
1646 	}
1647 
1648 	/* Pages marked accessed already */
1649 	e4b->bd_bitmap_page = page;
1650 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1651 
1652 	block++;
1653 	pnum = block / blocks_per_page;
1654 	poff = block % blocks_per_page;
1655 
1656 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1657 	if (page == NULL || !PageUptodate(page)) {
1658 		if (page)
1659 			put_page(page);
1660 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1661 		if (page) {
1662 			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1663 	"ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
1664 				/* should never happen */
1665 				unlock_page(page);
1666 				ret = -EINVAL;
1667 				goto err;
1668 			}
1669 			if (!PageUptodate(page)) {
1670 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1671 							 gfp);
1672 				if (ret) {
1673 					unlock_page(page);
1674 					goto err;
1675 				}
1676 			}
1677 			unlock_page(page);
1678 		}
1679 	}
1680 	if (page == NULL) {
1681 		ret = -ENOMEM;
1682 		goto err;
1683 	}
1684 	if (!PageUptodate(page)) {
1685 		ret = -EIO;
1686 		goto err;
1687 	}
1688 
1689 	/* Pages marked accessed already */
1690 	e4b->bd_buddy_page = page;
1691 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1692 
1693 	return 0;
1694 
1695 err:
1696 	if (page)
1697 		put_page(page);
1698 	if (e4b->bd_bitmap_page)
1699 		put_page(e4b->bd_bitmap_page);
1700 
1701 	e4b->bd_buddy = NULL;
1702 	e4b->bd_bitmap = NULL;
1703 	return ret;
1704 }
1705 
1706 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1707 			      struct ext4_buddy *e4b)
1708 {
1709 	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1710 }
1711 
1712 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1713 {
1714 	if (e4b->bd_bitmap_page)
1715 		put_page(e4b->bd_bitmap_page);
1716 	if (e4b->bd_buddy_page)
1717 		put_page(e4b->bd_buddy_page);
1718 }
1719 
1720 
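/*
 * Return the order of the buddy bitmap in which @block's bit is clear,
 * i.e. the order of the free buddy chunk the block currently belongs to
 * (starting the search at order 1), or 0 if no such order is found.
 */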
1721 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1722 {
1723 	int order = 1, max;
1724 	void *bb;
1725 
1726 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1727 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1728 
1729 	while (order <= e4b->bd_blkbits + 1) {
1730 		bb = mb_find_buddy(e4b, order, &max);
1731 		if (!mb_test_bit(block >> order, bb)) {
1732 			/* this block is part of buddy of order 'order' */
1733 			return order;
1734 		}
1735 		order++;
1736 	}
1737 	return 0;
1738 }
1739 
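/* clear @len bits starting at @cur, clearing whole 32-bit words at once
 * on the aligned fast path
 */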
1740 static void mb_clear_bits(void *bm, int cur, int len)
1741 {
1742 	__u32 *addr;
1743 
1744 	len = cur + len;
1745 	while (cur < len) {
1746 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1747 			/* fast path: clear whole word at once */
1748 			addr = bm + (cur >> 3);
1749 			*addr = 0;
1750 			cur += 32;
1751 			continue;
1752 		}
1753 		mb_clear_bit(cur, bm);
1754 		cur++;
1755 	}
1756 }
1757 
1758 /* clear bits in the given range;
1759  * returns the first bit found already zero (if any), -1 otherwise
1760  */
1761 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1762 {
1763 	__u32 *addr;
1764 	int zero_bit = -1;
1765 
1766 	len = cur + len;
1767 	while (cur < len) {
1768 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1769 			/* fast path: clear whole word at once */
1770 			addr = bm + (cur >> 3);
1771 			if (*addr != (__u32)(-1) && zero_bit == -1)
1772 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1773 			*addr = 0;
1774 			cur += 32;
1775 			continue;
1776 		}
1777 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1778 			zero_bit = cur;
1779 		cur++;
1780 	}
1781 
1782 	return zero_bit;
1783 }
1784 
1785 void mb_set_bits(void *bm, int cur, int len)
1786 {
1787 	__u32 *addr;
1788 
1789 	len = cur + len;
1790 	while (cur < len) {
1791 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1792 			/* fast path: set whole word at once */
1793 			addr = bm + (cur >> 3);
1794 			*addr = 0xffffffff;
1795 			cur += 32;
1796 			continue;
1797 		}
1798 		mb_set_bit(cur, bm);
1799 		cur++;
1800 	}
1801 }
1802 
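/*
 * Helper for mb_buddy_mark_free(): fix up one unaligned border bit.
 * If the neighbour on @side is busy, the pair can't be merged, so free
 * the border bit itself and shrink the range; if the neighbour is free,
 * absorb it by marking it busy and extend the range. Returns the +1/-1
 * adjustment to apply to bb_counters[] for this order.
 */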
1803 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1804 {
1805 	if (mb_test_bit(*bit + side, bitmap)) {
1806 		mb_clear_bit(*bit, bitmap);
1807 		(*bit) -= side;
1808 		return 1;
1809 	}
1810 	else {
1811 		(*bit) += side;
1812 		mb_set_bit(*bit, bitmap);
1813 		return -1;
1814 	}
1815 }
1816 
1817 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1818 {
1819 	int max;
1820 	int order = 1;
1821 	void *buddy = mb_find_buddy(e4b, order, &max);
1822 
1823 	while (buddy) {
1824 		void *buddy2;
1825 
1826 		/* Bits in range [first; last] are known to be set since
1827 		 * the corresponding blocks were allocated. Bits in range
1828 		 * (first; last) will stay set because they form buddies on
1829 		 * the upper layer. We just deal with the borders if they
1830 		 * don't align with the upper layer, and then go up.
1831 		 * Releasing an entire group comes down to clearing a
1832 		 * single bit of the highest-order buddy.
1833 		 */
1834 
1835 		/* Example:
1836 		 * ---------------------------------
1837 		 * |   1   |   1   |   1   |   1   |
1838 		 * ---------------------------------
1839 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1840 		 * ---------------------------------
1841 		 *   0   1   2   3   4   5   6   7
1842 		 *      \_____________________/
1843 		 *
1844 		 * Neither [1] nor [6] is aligned to the layer above.
1845 		 * Left neighbour [0] is free, so mark it busy,
1846 		 * decrease bb_counters and extend the range to
1847 		 * [0; 6].
1848 		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1849 		 * mark [6] free, increase bb_counters and shrink the range to
1850 		 * [0; 5].
1851 		 * Then shift the range to [0; 2], go up and do the same.
1852 		 */
1853 
1854 
1855 		if (first & 1)
1856 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1857 		if (!(last & 1))
1858 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1859 		if (first > last)
1860 			break;
1861 		order++;
1862 
1863 		buddy2 = mb_find_buddy(e4b, order, &max);
1864 		if (!buddy2) {
1865 			mb_clear_bits(buddy, first, last - first + 1);
1866 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1867 			break;
1868 		}
1869 		first >>= 1;
1870 		last >>= 1;
1871 		buddy = buddy2;
1872 	}
1873 }
1874 
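/*
 * Mark @count clusters starting at @first as free in both the block
 * bitmap and the buddy bitmaps, merging buddies upwards as far as
 * possible. Must be called with the group lock held.
 */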
1875 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1876 			   int first, int count)
1877 {
1878 	int left_is_free = 0;
1879 	int right_is_free = 0;
1880 	int block;
1881 	int last = first + count - 1;
1882 	struct super_block *sb = e4b->bd_sb;
1883 
1884 	if (WARN_ON(count == 0))
1885 		return;
1886 	BUG_ON(last >= (sb->s_blocksize << 3));
1887 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1888 	/* Don't bother if the block group is corrupt. */
1889 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1890 		return;
1891 
1892 	mb_check_buddy(e4b);
1893 	mb_free_blocks_double(inode, e4b, first, count);
1894 
1895 	this_cpu_inc(discard_pa_seq);
1896 	e4b->bd_info->bb_free += count;
1897 	if (first < e4b->bd_info->bb_first_free)
1898 		e4b->bd_info->bb_first_free = first;
1899 
1900 	/* access memory sequentially: check left neighbour,
1901 	 * clear range and then check right neighbour
1902 	 */
1903 	if (first != 0)
1904 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1905 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1906 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1907 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1908 
1909 	if (unlikely(block != -1)) {
1910 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1911 		ext4_fsblk_t blocknr;
1912 
1913 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1914 		blocknr += EXT4_C2B(sbi, block);
1915 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1916 			ext4_grp_locked_error(sb, e4b->bd_group,
1917 					      inode ? inode->i_ino : 0,
1918 					      blocknr,
1919 					      "freeing already freed block (bit %u); block bitmap corrupt.",
1920 					      block);
1921 			ext4_mark_group_bitmap_corrupted(
1922 				sb, e4b->bd_group,
1923 				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1924 		}
1925 		goto done;
1926 	}
1927 
1928 	/* let's maintain fragments counter */
1929 	if (left_is_free && right_is_free)
1930 		e4b->bd_info->bb_fragments--;
1931 	else if (!left_is_free && !right_is_free)
1932 		e4b->bd_info->bb_fragments++;
1933 
1934 	/* buddy[0] == bd_bitmap is a special case, so handle
1935 	 * it right away and let mb_buddy_mark_free stay free of
1936 	 * zero-order checks.
1937 	 * Check whether the neighbours are to be coalesced, and
1938 	 * adjust the bitmap, bb_counters and borders appropriately.
1939 	 */
1940 	if (first & 1) {
1941 		first += !left_is_free;
1942 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1943 	}
1944 	if (!(last & 1)) {
1945 		last -= !right_is_free;
1946 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1947 	}
1948 
1949 	if (first <= last)
1950 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1951 
1952 done:
1953 	mb_set_largest_free_order(sb, e4b->bd_info);
1954 	mb_update_avg_fragment_size(sb, e4b->bd_info);
1955 	mb_check_buddy(e4b);
1956 }
1957 
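/*
 * Build the free extent starting at @block: walk the buddy bitmaps,
 * extending the extent forward until either @needed clusters have been
 * covered or the free region ends. The result is returned in @ex and
 * the function returns its length (0 if @block is in use).
 */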
1958 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1959 				int needed, struct ext4_free_extent *ex)
1960 {
1961 	int next = block;
1962 	int max, order;
1963 	void *buddy;
1964 
1965 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1966 	BUG_ON(ex == NULL);
1967 
1968 	buddy = mb_find_buddy(e4b, 0, &max);
1969 	BUG_ON(buddy == NULL);
1970 	BUG_ON(block >= max);
1971 	if (mb_test_bit(block, buddy)) {
1972 		ex->fe_len = 0;
1973 		ex->fe_start = 0;
1974 		ex->fe_group = 0;
1975 		return 0;
1976 	}
1977 
1978 	/* find actual order */
1979 	order = mb_find_order_for_block(e4b, block);
1980 	block = block >> order;
1981 
1982 	ex->fe_len = 1 << order;
1983 	ex->fe_start = block << order;
1984 	ex->fe_group = e4b->bd_group;
1985 
1986 	/* calc difference from given start */
1987 	next = next - ex->fe_start;
1988 	ex->fe_len -= next;
1989 	ex->fe_start += next;
1990 
1991 	while (needed > ex->fe_len &&
1992 	       mb_find_buddy(e4b, order, &max)) {
1993 
1994 		if (block + 1 >= max)
1995 			break;
1996 
1997 		next = (block + 1) * (1 << order);
1998 		if (mb_test_bit(next, e4b->bd_bitmap))
1999 			break;
2000 
2001 		order = mb_find_order_for_block(e4b, next);
2002 
2003 		block = next >> order;
2004 		ex->fe_len += 1 << order;
2005 	}
2006 
2007 	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2008 		/* Should never happen! (but apparently sometimes does?!?) */
2009 		WARN_ON(1);
2010 		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
2011 			"corruption or bug in mb_find_extent "
2012 			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
2013 			block, order, needed, ex->fe_group, ex->fe_start,
2014 			ex->fe_len, ex->fe_logical);
2015 		ex->fe_len = 0;
2016 		ex->fe_start = 0;
2017 		ex->fe_group = 0;
2018 	}
2019 	return ex->fe_len;
2020 }
2021 
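/*
 * Mark the extent @ex as allocated: update the free/fragment counters,
 * set the covered bits in the block bitmap and walk the buddy bitmaps,
 * splitting higher-order buddies where the extent is not buddy-aligned.
 * The return value encodes the remaining length and the buddy order at
 * the first split (len | (order << 16)), recorded in the allocation
 * context for history/tracing, or 0 if no split was needed.
 */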
2022 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
2023 {
2024 	int ord;
2025 	int mlen = 0;
2026 	int max = 0;
2027 	int cur;
2028 	int start = ex->fe_start;
2029 	int len = ex->fe_len;
2030 	unsigned ret = 0;
2031 	int len0 = len;
2032 	void *buddy;
2033 	bool split = false;
2034 
2035 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
2036 	BUG_ON(e4b->bd_group != ex->fe_group);
2037 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
2038 	mb_check_buddy(e4b);
2039 	mb_mark_used_double(e4b, start, len);
2040 
2041 	this_cpu_inc(discard_pa_seq);
2042 	e4b->bd_info->bb_free -= len;
2043 	if (e4b->bd_info->bb_first_free == start)
2044 		e4b->bd_info->bb_first_free += len;
2045 
2046 	/* let's maintain fragments counter */
2047 	if (start != 0)
2048 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
2049 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
2050 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
2051 	if (mlen && max)
2052 		e4b->bd_info->bb_fragments++;
2053 	else if (!mlen && !max)
2054 		e4b->bd_info->bb_fragments--;
2055 
2056 	/* let's maintain buddy itself */
2057 	while (len) {
2058 		if (!split)
2059 			ord = mb_find_order_for_block(e4b, start);
2060 
2061 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2062 			/* the whole chunk may be allocated at once! */
2063 			mlen = 1 << ord;
2064 			if (!split)
2065 				buddy = mb_find_buddy(e4b, ord, &max);
2066 			else
2067 				split = false;
2068 			BUG_ON((start >> ord) >= max);
2069 			mb_set_bit(start >> ord, buddy);
2070 			e4b->bd_info->bb_counters[ord]--;
2071 			start += mlen;
2072 			len -= mlen;
2073 			BUG_ON(len < 0);
2074 			continue;
2075 		}
2076 
2077 		/* store for history */
2078 		if (ret == 0)
2079 			ret = len | (ord << 16);
2080 
2081 		/* we have to split large buddy */
2082 		BUG_ON(ord <= 0);
2083 		buddy = mb_find_buddy(e4b, ord, &max);
2084 		mb_set_bit(start >> ord, buddy);
2085 		e4b->bd_info->bb_counters[ord]--;
2086 
2087 		ord--;
2088 		cur = (start >> ord) & ~1U;
2089 		buddy = mb_find_buddy(e4b, ord, &max);
2090 		mb_clear_bit(cur, buddy);
2091 		mb_clear_bit(cur + 1, buddy);
2092 		e4b->bd_info->bb_counters[ord]++;
2093 		e4b->bd_info->bb_counters[ord]++;
2094 		split = true;
2095 	}
2096 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2097 
2098 	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2099 	mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2100 	mb_check_buddy(e4b);
2101 
2102 	return ret;
2103 }
2104 
2105 /*
2106  * Must be called under group lock!
2107  */
2108 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2109 					struct ext4_buddy *e4b)
2110 {
2111 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2112 	int ret;
2113 
2114 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2115 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2116 
2117 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2118 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2119 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
2120 
2121 	/* preallocation can change ac_b_ex, thus we store the actually
2122 	 * allocated blocks for history */
2123 	ac->ac_f_ex = ac->ac_b_ex;
2124 
2125 	ac->ac_status = AC_STATUS_FOUND;
2126 	ac->ac_tail = ret & 0xffff;
2127 	ac->ac_buddy = ret >> 16;
2128 
2129 	/*
2130 	 * take the page reference. We want the page to be pinned
2131 	 * so that we don't get an ext4_mb_init_cache() call for this
2132 	 * group until we update the bitmap; otherwise we could
2133 	 * double-allocate blocks. The reference is dropped
2134 	 * in ext4_mb_release_context().
2135 	 */
2136 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
2137 	get_page(ac->ac_bitmap_page);
2138 	ac->ac_buddy_page = e4b->bd_buddy_page;
2139 	get_page(ac->ac_buddy_page);
2140 	/* store last allocated for subsequent stream allocation */
2141 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2142 		spin_lock(&sbi->s_md_lock);
2143 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2144 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2145 		spin_unlock(&sbi->s_md_lock);
2146 	}
2147 	/*
2148 	 * As we've just preallocated more space than the user
2149 	 * originally requested, we store the allocated
2150 	 * space in a special descriptor.
2151 	 */
2152 	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2153 		ext4_mb_new_preallocation(ac);
2154 
2155 }
2156 
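/*
 * Decide whether scanning should stop: give up once s_mb_max_to_scan
 * extents have been examined (AC_STATUS_BREAK), and once the best found
 * extent satisfies the goal length, use it as soon as the group scan is
 * finishing or s_mb_min_to_scan extents have been examined.
 */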
2157 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2158 					struct ext4_buddy *e4b,
2159 					int finish_group)
2160 {
2161 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2162 	struct ext4_free_extent *bex = &ac->ac_b_ex;
2163 	struct ext4_free_extent *gex = &ac->ac_g_ex;
2164 
2165 	if (ac->ac_status == AC_STATUS_FOUND)
2166 		return;
2167 	/*
2168 	 * We don't want to scan for a whole year
2169 	 */
2170 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
2171 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2172 		ac->ac_status = AC_STATUS_BREAK;
2173 		return;
2174 	}
2175 
2176 	/*
2177 	 * Haven't found a good chunk so far; let's continue
2178 	 */
2179 	if (bex->fe_len < gex->fe_len)
2180 		return;
2181 
2182 	if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2183 		ext4_mb_use_best_found(ac, e4b);
2184 }
2185 
2186 /*
2187  * The routine checks whether the found extent is good enough. If it is,
2188  * then the extent gets marked used and a flag is set in the context
2189  * to stop scanning. Otherwise, the extent is compared with the
2190  * previously found extent and, if the new one is better, it is stored
2191  * in the context. Later, the best found extent will be used if
2192  * mballoc can't find a good enough extent.
2193  *
2194  * The algorithm used is roughly as follows:
2195  *
2196  * * If the free extent found is exactly as big as the goal, then
2197  *   stop the scan and use it immediately
2198  *
2199  * * If the free extent found is smaller than the goal, then keep retrying
2200  *   up to a max of sbi->s_mb_max_to_scan times (default 200). After
2201  *   that, stop scanning and use whatever we have.
2202  *
2203  * * If the free extent found is bigger than the goal, then keep retrying
2204  *   up to a max of sbi->s_mb_min_to_scan times (default 10) before
2205  *   stopping the scan and using the extent.
2206  *
2207  *
2208  * FIXME: the real allocation policy is yet to be designed!
2209  */
2210 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2211 					struct ext4_free_extent *ex,
2212 					struct ext4_buddy *e4b)
2213 {
2214 	struct ext4_free_extent *bex = &ac->ac_b_ex;
2215 	struct ext4_free_extent *gex = &ac->ac_g_ex;
2216 
2217 	BUG_ON(ex->fe_len <= 0);
2218 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2219 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2220 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2221 
2222 	ac->ac_found++;
2223 	ac->ac_cX_found[ac->ac_criteria]++;
2224 
2225 	/*
2226 	 * The special case - take what you catch first
2227 	 */
2228 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2229 		*bex = *ex;
2230 		ext4_mb_use_best_found(ac, e4b);
2231 		return;
2232 	}
2233 
2234 	/*
2235 	 * Let's check whether the chunk is good enough
2236 	 */
2237 	if (ex->fe_len == gex->fe_len) {
2238 		*bex = *ex;
2239 		ext4_mb_use_best_found(ac, e4b);
2240 		return;
2241 	}
2242 
2243 	/*
2244 	 * If this is the first extent found, just store it in the context
2245 	 */
2246 	if (bex->fe_len == 0) {
2247 		*bex = *ex;
2248 		return;
2249 	}
2250 
2251 	/*
2252 	 * If the newly found extent is better, store it in the context
2253 	 */
2254 	if (bex->fe_len < gex->fe_len) {
2255 		/* if the request isn't satisfied, any found extent
2256 		 * larger than the previous best one is better */
2257 		if (ex->fe_len > bex->fe_len)
2258 			*bex = *ex;
2259 	} else if (ex->fe_len > gex->fe_len) {
2260 		/* if the request is satisfied, then we try to find
2261 		 * an extent that still satisfies the request but is
2262 		 * smaller than the previous one */
2263 		if (ex->fe_len < bex->fe_len)
2264 			*bex = *ex;
2265 	}
2266 
2267 	ext4_mb_check_limits(ac, e4b, 0);
2268 }
2269 
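/*
 * Re-load the group of the best extent found so far and, under the group
 * lock, re-check that the extent is still free before using it; another
 * CPU may have allocated from it since we last looked.
 */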
2270 static noinline_for_stack
2271 void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2272 					struct ext4_buddy *e4b)
2273 {
2274 	struct ext4_free_extent ex = ac->ac_b_ex;
2275 	ext4_group_t group = ex.fe_group;
2276 	int max;
2277 	int err;
2278 
2279 	BUG_ON(ex.fe_len <= 0);
2280 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2281 	if (err)
2282 		return;
2283 
2284 	ext4_lock_group(ac->ac_sb, group);
2285 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2286 
2287 	if (max > 0) {
2288 		ac->ac_b_ex = ex;
2289 		ext4_mb_use_best_found(ac, e4b);
2290 	}
2291 
2292 	ext4_unlock_group(ac->ac_sb, group);
2293 	ext4_mb_unload_buddy(e4b);
2294 }
2295 
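/*
 * Try to allocate at the exact goal block: load the goal group's buddy,
 * look for a free extent at the goal start and use it if it is long
 * enough (or, with EXT4_MB_HINT_MERGE, even if it is shorter). Only done
 * when EXT4_MB_HINT_TRY_GOAL or EXT4_MB_HINT_GOAL_ONLY is set.
 */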
2296 static noinline_for_stack
2297 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2298 				struct ext4_buddy *e4b)
2299 {
2300 	ext4_group_t group = ac->ac_g_ex.fe_group;
2301 	int max;
2302 	int err;
2303 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2304 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2305 	struct ext4_free_extent ex;
2306 
2307 	if (!grp)
2308 		return -EFSCORRUPTED;
2309 	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2310 		return 0;
2311 	if (grp->bb_free == 0)
2312 		return 0;
2313 
2314 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2315 	if (err)
2316 		return err;
2317 
2318 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2319 		ext4_mb_unload_buddy(e4b);
2320 		return 0;
2321 	}
2322 
2323 	ext4_lock_group(ac->ac_sb, group);
2324 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2325 			     ac->ac_g_ex.fe_len, &ex);
2326 	ex.fe_logical = 0xDEADFA11; /* debug value */
2327 
2328 	if (max >= ac->ac_g_ex.fe_len &&
2329 	    ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
2330 		ext4_fsblk_t start;
2331 
2332 		start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2333 		/* use do_div to get remainder (would be 64-bit modulo) */
2334 		if (do_div(start, sbi->s_stripe) == 0) {
2335 			ac->ac_found++;
2336 			ac->ac_b_ex = ex;
2337 			ext4_mb_use_best_found(ac, e4b);
2338 		}
2339 	} else if (max >= ac->ac_g_ex.fe_len) {
2340 		BUG_ON(ex.fe_len <= 0);
2341 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2342 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2343 		ac->ac_found++;
2344 		ac->ac_b_ex = ex;
2345 		ext4_mb_use_best_found(ac, e4b);
2346 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2347 		/* Sometimes, the caller may want to merge even a small
2348 		 * number of blocks into an existing extent */
2349 		BUG_ON(ex.fe_len <= 0);
2350 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2351 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2352 		ac->ac_found++;
2353 		ac->ac_b_ex = ex;
2354 		ext4_mb_use_best_found(ac, e4b);
2355 	}
2356 	ext4_unlock_group(ac->ac_sb, group);
2357 	ext4_mb_unload_buddy(e4b);
2358 
2359 	return 0;
2360 }
2361 
2362 /*
2363  * The routine scans the buddy structures (not the bitmap!) from the given
2364  * order up to the max order and tries to find a chunk big enough to satisfy the request
2365  */
2366 static noinline_for_stack
2367 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2368 					struct ext4_buddy *e4b)
2369 {
2370 	struct super_block *sb = ac->ac_sb;
2371 	struct ext4_group_info *grp = e4b->bd_info;
2372 	void *buddy;
2373 	int i;
2374 	int k;
2375 	int max;
2376 
2377 	BUG_ON(ac->ac_2order <= 0);
2378 	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2379 		if (grp->bb_counters[i] == 0)
2380 			continue;
2381 
2382 		buddy = mb_find_buddy(e4b, i, &max);
2383 		if (WARN_RATELIMIT(buddy == NULL,
2384 			 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
2385 			continue;
2386 
2387 		k = mb_find_next_zero_bit(buddy, max, 0);
2388 		if (k >= max) {
2389 			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2390 				"%d free clusters of order %d. But found 0",
2391 				grp->bb_counters[i], i);
2392 			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2393 					 e4b->bd_group,
2394 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2395 			break;
2396 		}
2397 		ac->ac_found++;
2398 		ac->ac_cX_found[ac->ac_criteria]++;
2399 
2400 		ac->ac_b_ex.fe_len = 1 << i;
2401 		ac->ac_b_ex.fe_start = k << i;
2402 		ac->ac_b_ex.fe_group = e4b->bd_group;
2403 
2404 		ext4_mb_use_best_found(ac, e4b);
2405 
2406 		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2407 
2408 		if (EXT4_SB(sb)->s_mb_stats)
2409 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2410 
2411 		break;
2412 	}
2413 }
2414 
2415 /*
2416  * The routine scans the group and measures all found extents.
2417  * In order to optimize scanning, the caller must pass the number of
2418  * free blocks in the group, so the routine knows the upper limit.
2419  */
2420 static noinline_for_stack
2421 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2422 					struct ext4_buddy *e4b)
2423 {
2424 	struct super_block *sb = ac->ac_sb;
2425 	void *bitmap = e4b->bd_bitmap;
2426 	struct ext4_free_extent ex;
2427 	int i, j, freelen;
2428 	int free;
2429 
2430 	free = e4b->bd_info->bb_free;
2431 	if (WARN_ON(free <= 0))
2432 		return;
2433 
2434 	i = e4b->bd_info->bb_first_free;
2435 
2436 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2437 		i = mb_find_next_zero_bit(bitmap,
2438 						EXT4_CLUSTERS_PER_GROUP(sb), i);
2439 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2440 			/*
2441 			 * If we have a corrupt bitmap, we won't find any
2442 			 * free blocks even though the group info says we
2443 			 * have free blocks
2444 			 */
2445 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2446 					"%d free clusters as per "
2447 					"group info. But bitmap says 0",
2448 					free);
2449 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2450 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2451 			break;
2452 		}
2453 
2454 		if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
2455 			/*
2456 			 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
2457 			 * sure that this group will have a large enough
2458 			 * contiguous free extent, so skip over the smaller free
2459 			 * extents
2460 			 */
2461 			j = mb_find_next_bit(bitmap,
2462 						EXT4_CLUSTERS_PER_GROUP(sb), i);
2463 			freelen = j - i;
2464 
2465 			if (freelen < ac->ac_g_ex.fe_len) {
2466 				i = j;
2467 				free -= freelen;
2468 				continue;
2469 			}
2470 		}
2471 
2472 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2473 		if (WARN_ON(ex.fe_len <= 0))
2474 			break;
2475 		if (free < ex.fe_len) {
2476 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2477 					"%d free clusters as per "
2478 					"group info. But got %d blocks",
2479 					free, ex.fe_len);
2480 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2481 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2482 			/*
2483 			 * The number of free blocks differs. This mostly
2484 			 * indicates that the bitmap is corrupt. So exit
2485 			 * without claiming the space.
2486 			 */
2487 			break;
2488 		}
2489 		ex.fe_logical = 0xDEADC0DE; /* debug value */
2490 		ext4_mb_measure_extent(ac, &ex, e4b);
2491 
2492 		i += ex.fe_len;
2493 		free -= ex.fe_len;
2494 	}
2495 
2496 	ext4_mb_check_limits(ac, e4b, 1);
2497 }
2498 
2499 /*
2500  * This is a special case for storage like RAID5:
2501  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2502  */
2503 static noinline_for_stack
2504 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2505 				 struct ext4_buddy *e4b)
2506 {
2507 	struct super_block *sb = ac->ac_sb;
2508 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2509 	void *bitmap = e4b->bd_bitmap;
2510 	struct ext4_free_extent ex;
2511 	ext4_fsblk_t first_group_block;
2512 	ext4_fsblk_t a;
2513 	ext4_grpblk_t i, stripe;
2514 	int max;
2515 
2516 	BUG_ON(sbi->s_stripe == 0);
2517 
2518 	/* find first stripe-aligned block in group */
2519 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2520 
2521 	a = first_group_block + sbi->s_stripe - 1;
2522 	do_div(a, sbi->s_stripe);
2523 	i = (a * sbi->s_stripe) - first_group_block;
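	/*
	 * Example (values only for illustration): with s_stripe == 16 and
	 * first_group_block == 32770, a rounds up to 2049 stripes, so
	 * i == 2049 * 16 - 32770 == 14, the offset of the first stripe
	 * boundary inside this group.
	 */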
2524 
2525 	stripe = EXT4_B2C(sbi, sbi->s_stripe);
2526 	i = EXT4_B2C(sbi, i);
2527 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2528 		if (!mb_test_bit(i, bitmap)) {
2529 			max = mb_find_extent(e4b, i, stripe, &ex);
2530 			if (max >= stripe) {
2531 				ac->ac_found++;
2532 				ac->ac_cX_found[ac->ac_criteria]++;
2533 				ex.fe_logical = 0xDEADF00D; /* debug value */
2534 				ac->ac_b_ex = ex;
2535 				ext4_mb_use_best_found(ac, e4b);
2536 				break;
2537 			}
2538 		}
2539 		i += stripe;
2540 	}
2541 }
2542 
2543 /*
2544  * This is also called BEFORE we load the buddy bitmap.
2545  * Returns true or false indicating whether the group is suitable
2546  * for the allocation or not.
2547  */
2548 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2549 				ext4_group_t group, enum criteria cr)
2550 {
2551 	ext4_grpblk_t free, fragments;
2552 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2553 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2554 
2555 	BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
2556 
2557 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2558 		return false;
2559 
2560 	free = grp->bb_free;
2561 	if (free == 0)
2562 		return false;
2563 
2564 	fragments = grp->bb_fragments;
2565 	if (fragments == 0)
2566 		return false;
2567 
2568 	switch (cr) {
2569 	case CR_POWER2_ALIGNED:
2570 		BUG_ON(ac->ac_2order == 0);
2571 
2572 		/* Avoid using the first bg of a flexgroup for data files */
2573 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2574 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2575 		    ((group % flex_size) == 0))
2576 			return false;
2577 
2578 		if (free < ac->ac_g_ex.fe_len)
2579 			return false;
2580 
2581 		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2582 			return true;
2583 
2584 		if (grp->bb_largest_free_order < ac->ac_2order)
2585 			return false;
2586 
2587 		return true;
2588 	case CR_GOAL_LEN_FAST:
2589 	case CR_BEST_AVAIL_LEN:
2590 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2591 			return true;
2592 		break;
2593 	case CR_GOAL_LEN_SLOW:
2594 		if (free >= ac->ac_g_ex.fe_len)
2595 			return true;
2596 		break;
2597 	case CR_ANY_FREE:
2598 		return true;
2599 	default:
2600 		BUG();
2601 	}
2602 
2603 	return false;
2604 }
2605 
2606 /*
2607  * This could return negative error code if something goes wrong
2608  * during ext4_mb_init_group(). This should not be called with
2609  * ext4_lock_group() held.
2610  *
2611  * Note: because we are conditionally operating with the group lock in
2612  * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2613  * function using __acquire and __release.  This means we need to be
2614  * super careful before messing with the error path handling via "goto
2615  * out"!
2616  */
2617 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2618 				     ext4_group_t group, enum criteria cr)
2619 {
2620 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2621 	struct super_block *sb = ac->ac_sb;
2622 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2623 	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2624 	ext4_grpblk_t free;
2625 	int ret = 0;
2626 
2627 	if (!grp)
2628 		return -EFSCORRUPTED;
2629 	if (sbi->s_mb_stats)
2630 		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2631 	if (should_lock) {
2632 		ext4_lock_group(sb, group);
2633 		__release(ext4_group_lock_ptr(sb, group));
2634 	}
2635 	free = grp->bb_free;
2636 	if (free == 0)
2637 		goto out;
2638 	/*
2639 	 * In all criteria except CR_ANY_FREE we try to avoid groups that
2640 	 * can't possibly satisfy the full goal request due to insufficient
2641 	 * free blocks.
2642 	 */
2643 	if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
2644 		goto out;
2645 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2646 		goto out;
2647 	if (should_lock) {
2648 		__acquire(ext4_group_lock_ptr(sb, group));
2649 		ext4_unlock_group(sb, group);
2650 	}
2651 
2652 	/* We only do this if the grp has never been initialized */
2653 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2654 		struct ext4_group_desc *gdp =
2655 			ext4_get_group_desc(sb, group, NULL);
2656 		int ret;
2657 
2658 		/*
2659 		 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
2660 		 * search to find large good chunks almost for free. If buddy
2661 		 * data is not ready, then this optimization makes no sense. But
2662 		 * we never skip the first block group in a flex_bg, since this
2663 		 * gets used for metadata block allocation, and we want to make
2664 		 * sure we locate metadata blocks in the first block group in
2665 		 * the flex_bg if possible.
2666 		 */
2667 		if (!ext4_mb_cr_expensive(cr) &&
2668 		    (!sbi->s_log_groups_per_flex ||
2669 		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2670 		    !(ext4_has_group_desc_csum(sb) &&
2671 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2672 			return 0;
2673 		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2674 		if (ret)
2675 			return ret;
2676 	}
2677 
2678 	if (should_lock) {
2679 		ext4_lock_group(sb, group);
2680 		__release(ext4_group_lock_ptr(sb, group));
2681 	}
2682 	ret = ext4_mb_good_group(ac, group, cr);
2683 out:
2684 	if (should_lock) {
2685 		__acquire(ext4_group_lock_ptr(sb, group));
2686 		ext4_unlock_group(sb, group);
2687 	}
2688 	return ret;
2689 }
2690 
2691 /*
2692  * Start prefetching @nr block bitmaps starting at @group.
2693  * Return the next group which needs to be prefetched.
2694  */
2695 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2696 			      unsigned int nr, int *cnt)
2697 {
2698 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2699 	struct buffer_head *bh;
2700 	struct blk_plug plug;
2701 
2702 	blk_start_plug(&plug);
2703 	while (nr-- > 0) {
2704 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2705 								  NULL);
2706 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2707 
2708 		/*
2709 		 * Prefetch block groups with free blocks; but don't
2710 		 * bother if the group is marked uninitialized on disk,
2711 		 * since it won't require I/O to read.  Also only try to
2712 		 * prefetch once, so we avoid the getblk() call, which can
2713 		 * be expensive.
2714 		 */
2715 		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2716 		    EXT4_MB_GRP_NEED_INIT(grp) &&
2717 		    ext4_free_group_clusters(sb, gdp) > 0 ) {
2718 			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2719 			if (bh && !IS_ERR(bh)) {
2720 				if (!buffer_uptodate(bh) && cnt)
2721 					(*cnt)++;
2722 				brelse(bh);
2723 			}
2724 		}
2725 		if (++group >= ngroups)
2726 			group = 0;
2727 	}
2728 	blk_finish_plug(&plug);
2729 	return group;
2730 }
2731 
2732 /*
2733  * Prefetching reads the block bitmap into the buffer cache; but we
2734  * need to make sure that the buddy bitmap in the page cache has been
2735  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2736  * is not yet completed, or indeed if the I/O was not initiated by
2737  * ext4_mb_prefetch() in the first place.
2738  *
2739  * TODO: We should actually kick off the buddy bitmap setup in a work
2740  * queue when the buffer I/O is completed, so that we don't block
2741  * waiting for the block allocation bitmap read to finish when
2742  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2743  */
2744 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2745 			   unsigned int nr)
2746 {
2747 	struct ext4_group_desc *gdp;
2748 	struct ext4_group_info *grp;
2749 
2750 	while (nr-- > 0) {
2751 		if (!group)
2752 			group = ext4_get_groups_count(sb);
2753 		group--;
2754 		gdp = ext4_get_group_desc(sb, group, NULL);
2755 		grp = ext4_get_group_info(sb, group);
2756 
2757 		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2758 		    ext4_free_group_clusters(sb, gdp) > 0) {
2759 			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2760 				break;
2761 		}
2762 	}
2763 }
2764 
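/*
 * Main scanning loop of the allocator: try the goal extent first, then
 * walk the allocation criteria from cheapest to most expensive
 * (CR_POWER2_ALIGNED when the request is a power of two, then
 * CR_GOAL_LEN_FAST and friends, ending with CR_ANY_FREE), scanning block
 * groups under each criterion until an allocation is made or every group
 * has been tried.
 */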
2765 static noinline_for_stack int
2766 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2767 {
2768 	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2769 	enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2770 	int err = 0, first_err = 0;
2771 	unsigned int nr = 0, prefetch_ios = 0;
2772 	struct ext4_sb_info *sbi;
2773 	struct super_block *sb;
2774 	struct ext4_buddy e4b;
2775 	int lost;
2776 
2777 	sb = ac->ac_sb;
2778 	sbi = EXT4_SB(sb);
2779 	ngroups = ext4_get_groups_count(sb);
2780 	/* non-extent files are limited to low blocks/groups */
2781 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2782 		ngroups = sbi->s_blockfile_groups;
2783 
2784 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2785 
2786 	/* first, try the goal */
2787 	err = ext4_mb_find_by_goal(ac, &e4b);
2788 	if (err || ac->ac_status == AC_STATUS_FOUND)
2789 		goto out;
2790 
2791 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2792 		goto out;
2793 
2794 	/*
2795 	 * ac->ac_2order is set only if the fe_len is a power of 2
2796 	 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
2797 	 * so that we try exact allocation using buddy.
2798 	 */
2799 	i = fls(ac->ac_g_ex.fe_len);
2800 	ac->ac_2order = 0;
2801 	/*
2802 	 * We search using buddy data only if the order of the request
2803 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2804 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2805 	 * We also support searching for power-of-two requests only for
2806 	 * requests up to the maximum buddy size we have constructed.
2807 	 */
2808 	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2809 		if (is_power_of_2(ac->ac_g_ex.fe_len))
2810 			ac->ac_2order = array_index_nospec(i - 1,
2811 							   MB_NUM_ORDERS(sb));
2812 	}
2813 
2814 	/* if stream allocation is enabled, use global goal */
2815 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2816 		/* TBD: may be hot point */
2817 		spin_lock(&sbi->s_md_lock);
2818 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2819 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2820 		spin_unlock(&sbi->s_md_lock);
2821 	}
2822 
2823 	/*
2824 	 * Let's just scan groups to find more or less suitable blocks. We
2825 	 * start with CR_GOAL_LEN_FAST, unless the request is power-of-2
2826 	 * aligned, in which case let's do that faster approach first.
2827 	 */
2828 	if (ac->ac_2order)
2829 		cr = CR_POWER2_ALIGNED;
2830 repeat:
2831 	for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2832 		ac->ac_criteria = cr;
2833 		/*
2834 		 * searching for the right group start
2835 		 * from the goal value specified
2836 		 */
2837 		group = ac->ac_g_ex.fe_group;
2838 		ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2839 		prefetch_grp = group;
2840 
2841 		for (i = 0, new_cr = cr; i < ngroups; i++,
2842 		     ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2843 			int ret = 0;
2844 
2845 			cond_resched();
2846 			if (new_cr != cr) {
2847 				cr = new_cr;
2848 				goto repeat;
2849 			}
2850 
2851 			/*
2852 			 * Batch reads of the block allocation bitmaps
2853 			 * to get multiple READs in flight; limit
2854 			 * prefetching at inexpensive CR, otherwise mballoc
2855 			 * can spend a lot of time loading imperfect groups
2856 			 */
2857 			if ((prefetch_grp == group) &&
2858 			    (ext4_mb_cr_expensive(cr) ||
2859 			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2860 				nr = sbi->s_mb_prefetch;
2861 				if (ext4_has_feature_flex_bg(sb)) {
2862 					nr = 1 << sbi->s_log_groups_per_flex;
2863 					nr -= group & (nr - 1);
2864 					nr = min(nr, sbi->s_mb_prefetch);
2865 				}
2866 				prefetch_grp = ext4_mb_prefetch(sb, group,
2867 							nr, &prefetch_ios);
2868 			}
2869 
2870 			/* This now checks without needing the buddy page */
2871 			ret = ext4_mb_good_group_nolock(ac, group, cr);
2872 			if (ret <= 0) {
2873 				if (!first_err)
2874 					first_err = ret;
2875 				continue;
2876 			}
2877 
2878 			err = ext4_mb_load_buddy(sb, group, &e4b);
2879 			if (err)
2880 				goto out;
2881 
2882 			ext4_lock_group(sb, group);
2883 
2884 			/*
2885 			 * We need to check again after locking the
2886 			 * block group
2887 			 */
2888 			ret = ext4_mb_good_group(ac, group, cr);
2889 			if (ret == 0) {
2890 				ext4_unlock_group(sb, group);
2891 				ext4_mb_unload_buddy(&e4b);
2892 				continue;
2893 			}
2894 
2895 			ac->ac_groups_scanned++;
2896 			if (cr == CR_POWER2_ALIGNED)
2897 				ext4_mb_simple_scan_group(ac, &e4b);
2898 			else if ((cr == CR_GOAL_LEN_FAST ||
2899 				 cr == CR_BEST_AVAIL_LEN) &&
2900 				 sbi->s_stripe &&
2901 				 !(ac->ac_g_ex.fe_len %
2902 				 EXT4_B2C(sbi, sbi->s_stripe)))
2903 				ext4_mb_scan_aligned(ac, &e4b);
2904 			else
2905 				ext4_mb_complex_scan_group(ac, &e4b);
2906 
2907 			ext4_unlock_group(sb, group);
2908 			ext4_mb_unload_buddy(&e4b);
2909 
2910 			if (ac->ac_status != AC_STATUS_CONTINUE)
2911 				break;
2912 		}
2913 		/* Processed all groups and haven't found blocks */
2914 		if (sbi->s_mb_stats && i == ngroups)
2915 			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2916 
2917 		if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
2918 			/* Reset goal length to original goal length before
2919 			 * falling into CR_GOAL_LEN_SLOW */
2920 			ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
2921 	}
2922 
2923 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2924 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2925 		/*
2926 		 * We've been searching too long. Let's try to allocate
2927 		 * the best chunk we've found so far
2928 		 */
2929 		ext4_mb_try_best_found(ac, &e4b);
2930 		if (ac->ac_status != AC_STATUS_FOUND) {
2931 			/*
2932 			 * Someone luckier has already allocated it.
2933 			 * The only thing we can do is just take the first
2934 			 * found block(s)
2935 			 */
2936 			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2937 			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2938 				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2939 				 ac->ac_b_ex.fe_len, lost);
2940 
2941 			ac->ac_b_ex.fe_group = 0;
2942 			ac->ac_b_ex.fe_start = 0;
2943 			ac->ac_b_ex.fe_len = 0;
2944 			ac->ac_status = AC_STATUS_CONTINUE;
2945 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2946 			cr = CR_ANY_FREE;
2947 			goto repeat;
2948 		}
2949 	}
2950 
2951 	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2952 		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2953 out:
2954 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2955 		err = first_err;
2956 
2957 	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2958 		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2959 		 ac->ac_flags, cr, err);
2960 
2961 	if (nr)
2962 		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2963 
2964 	return err;
2965 }
2966 
2967 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2968 {
2969 	struct super_block *sb = pde_data(file_inode(seq->file));
2970 	ext4_group_t group;
2971 
2972 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2973 		return NULL;
2974 	group = *pos + 1;
2975 	return (void *) ((unsigned long) group);
2976 }
2977 
2978 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2979 {
2980 	struct super_block *sb = pde_data(file_inode(seq->file));
2981 	ext4_group_t group;
2982 
2983 	++*pos;
2984 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2985 		return NULL;
2986 	group = *pos + 1;
2987 	return (void *) ((unsigned long) group);
2988 }
2989 
2990 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2991 {
2992 	struct super_block *sb = pde_data(file_inode(seq->file));
2993 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2994 	int i;
2995 	int err, buddy_loaded = 0;
2996 	struct ext4_buddy e4b;
2997 	struct ext4_group_info *grinfo;
2998 	unsigned char blocksize_bits = min_t(unsigned char,
2999 					     sb->s_blocksize_bits,
3000 					     EXT4_MAX_BLOCK_LOG_SIZE);
3001 	struct sg {
3002 		struct ext4_group_info info;
3003 		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
3004 	} sg;
3005 
3006 	group--;
3007 	if (group == 0)
3008 		seq_puts(seq, "#group: free  frags first ["
3009 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
3010 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
3011 
3012 	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
3013 		sizeof(struct ext4_group_info);
3014 
3015 	grinfo = ext4_get_group_info(sb, group);
3016 	if (!grinfo)
3017 		return 0;
3018 	/* Load the group info in memory only if not already loaded. */
3019 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
3020 		err = ext4_mb_load_buddy(sb, group, &e4b);
3021 		if (err) {
3022 			seq_printf(seq, "#%-5u: I/O error\n", group);
3023 			return 0;
3024 		}
3025 		buddy_loaded = 1;
3026 	}
3027 
3028 	memcpy(&sg, grinfo, i);
3029 
3030 	if (buddy_loaded)
3031 		ext4_mb_unload_buddy(&e4b);
3032 
3033 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
3034 			sg.info.bb_fragments, sg.info.bb_first_free);
3035 	for (i = 0; i <= 13; i++)
3036 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
3037 				sg.info.bb_counters[i] : 0);
3038 	seq_puts(seq, " ]\n");
3039 
3040 	return 0;
3041 }
3042 
3043 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
3044 {
3045 }
3046 
3047 const struct seq_operations ext4_mb_seq_groups_ops = {
3048 	.start  = ext4_mb_seq_groups_start,
3049 	.next   = ext4_mb_seq_groups_next,
3050 	.stop   = ext4_mb_seq_groups_stop,
3051 	.show   = ext4_mb_seq_groups_show,
3052 };
3053 
3054 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3055 {
3056 	struct super_block *sb = seq->private;
3057 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3058 
3059 	seq_puts(seq, "mballoc:\n");
3060 	if (!sbi->s_mb_stats) {
3061 		seq_puts(seq, "\tmb stats collection turned off.\n");
3062 		seq_puts(
3063 			seq,
3064 			"\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
3065 		return 0;
3066 	}
3067 	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
3068 	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
3069 
3070 	seq_printf(seq, "\tgroups_scanned: %u\n",
3071 		   atomic_read(&sbi->s_bal_groups_scanned));
3072 
3073 	/* CR_POWER2_ALIGNED stats */
3074 	seq_puts(seq, "\tcr_p2_aligned_stats:\n");
3075 	seq_printf(seq, "\t\thits: %llu\n",
3076 		   atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
3077 	seq_printf(
3078 		seq, "\t\tgroups_considered: %llu\n",
3079 		atomic64_read(
3080 			&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
3081 	seq_printf(seq, "\t\textents_scanned: %u\n",
3082 		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
3083 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3084 		   atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
3085 	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3086 		   atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
3087 
3088 	/* CR_GOAL_LEN_FAST stats */
3089 	seq_puts(seq, "\tcr_goal_fast_stats:\n");
3090 	seq_printf(seq, "\t\thits: %llu\n",
3091 		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
3092 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
3093 		   atomic64_read(
3094 			   &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
3095 	seq_printf(seq, "\t\textents_scanned: %u\n",
3096 		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
3097 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3098 		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
3099 	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3100 		   atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
3101 
3102 	/* CR_BEST_AVAIL_LEN stats */
3103 	seq_puts(seq, "\tcr_best_avail_stats:\n");
3104 	seq_printf(seq, "\t\thits: %llu\n",
3105 		   atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
3106 	seq_printf(
3107 		seq, "\t\tgroups_considered: %llu\n",
3108 		atomic64_read(
3109 			&sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
3110 	seq_printf(seq, "\t\textents_scanned: %u\n",
3111 		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
3112 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3113 		   atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
3114 	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3115 		   atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
3116 
3117 	/* CR_GOAL_LEN_SLOW stats */
3118 	seq_puts(seq, "\tcr_goal_slow_stats:\n");
3119 	seq_printf(seq, "\t\thits: %llu\n",
3120 		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
3121 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
3122 		   atomic64_read(
3123 			   &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
3124 	seq_printf(seq, "\t\textents_scanned: %u\n",
3125 		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
3126 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3127 		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
3128 
3129 	/* CR_ANY_FREE stats */
3130 	seq_puts(seq, "\tcr_any_free_stats:\n");
3131 	seq_printf(seq, "\t\thits: %llu\n",
3132 		   atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
3133 	seq_printf(
3134 		seq, "\t\tgroups_considered: %llu\n",
3135 		atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
3136 	seq_printf(seq, "\t\textents_scanned: %u\n",
3137 		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
3138 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3139 		   atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
3140 
3141 	/* Aggregates */
3142 	seq_printf(seq, "\textents_scanned: %u\n",
3143 		   atomic_read(&sbi->s_bal_ex_scanned));
3144 	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3145 	seq_printf(seq, "\t\tlen_goal_hits: %u\n",
3146 		   atomic_read(&sbi->s_bal_len_goals));
3147 	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3148 	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3149 	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3150 	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3151 		   atomic_read(&sbi->s_mb_buddies_generated),
3152 		   ext4_get_groups_count(sb));
3153 	seq_printf(seq, "\tbuddies_time_used: %llu\n",
3154 		   atomic64_read(&sbi->s_mb_generation_time));
3155 	seq_printf(seq, "\tpreallocated: %u\n",
3156 		   atomic_read(&sbi->s_mb_preallocated));
3157 	seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
3158 	return 0;
3159 }
3160 
3161 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
3162 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
3163 {
3164 	struct super_block *sb = pde_data(file_inode(seq->file));
3165 	unsigned long position;
3166 
3167 	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3168 		return NULL;
3169 	position = *pos + 1;
3170 	return (void *) ((unsigned long) position);
3171 }
3172 
3173 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3174 {
3175 	struct super_block *sb = pde_data(file_inode(seq->file));
3176 	unsigned long position;
3177 
3178 	++*pos;
3179 	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3180 		return NULL;
3181 	position = *pos + 1;
3182 	return (void *) ((unsigned long) position);
3183 }
3184 
3185 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3186 {
3187 	struct super_block *sb = pde_data(file_inode(seq->file));
3188 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3189 	unsigned long position = ((unsigned long) v);
3190 	struct ext4_group_info *grp;
3191 	unsigned int count;
3192 
3193 	position--;
3194 	if (position >= MB_NUM_ORDERS(sb)) {
3195 		position -= MB_NUM_ORDERS(sb);
3196 		if (position == 0)
3197 			seq_puts(seq, "avg_fragment_size_lists:\n");
3198 
3199 		count = 0;
3200 		read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3201 		list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3202 				    bb_avg_fragment_size_node)
3203 			count++;
3204 		read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3205 		seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3206 					(unsigned int)position, count);
3207 		return 0;
3208 	}
3209 
3210 	if (position == 0) {
3211 		seq_printf(seq, "optimize_scan: %d\n",
3212 			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3213 		seq_puts(seq, "max_free_order_lists:\n");
3214 	}
3215 	count = 0;
3216 	read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3217 	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3218 			    bb_largest_free_order_node)
3219 		count++;
3220 	read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3221 	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3222 		   (unsigned int)position, count);
3223 
3224 	return 0;
3225 }
3226 
3227 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3228 {
3229 }
3230 
3231 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3232 	.start  = ext4_mb_seq_structs_summary_start,
3233 	.next   = ext4_mb_seq_structs_summary_next,
3234 	.stop   = ext4_mb_seq_structs_summary_stop,
3235 	.show   = ext4_mb_seq_structs_summary_show,
3236 };
3237 
3238 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3239 {
3240 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3241 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3242 
3243 	BUG_ON(!cachep);
3244 	return cachep;
3245 }
3246 
3247 /*
3248  * Allocate the top-level s_group_info array for the specified number
3249  * of groups
3250  */
3251 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3252 {
3253 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3254 	unsigned size;
3255 	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3256 
3257 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3258 		EXT4_DESC_PER_BLOCK_BITS(sb);
3259 	if (size <= sbi->s_group_info_size)
3260 		return 0;
3261 
3262 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3263 	new_groupinfo = kvzalloc(size, GFP_KERNEL);
3264 	if (!new_groupinfo) {
3265 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3266 		return -ENOMEM;
3267 	}
3268 	rcu_read_lock();
3269 	old_groupinfo = rcu_dereference(sbi->s_group_info);
3270 	if (old_groupinfo)
3271 		memcpy(new_groupinfo, old_groupinfo,
3272 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3273 	rcu_read_unlock();
3274 	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3275 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3276 	if (old_groupinfo)
3277 		ext4_kvfree_array_rcu(old_groupinfo);
3278 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3279 		   sbi->s_group_info_size);
3280 	return 0;
3281 }
3282 
3283 /* Create and initialize ext4_group_info data for the given group. */
3284 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3285 			  struct ext4_group_desc *desc)
3286 {
3287 	int i;
3288 	int metalen = 0;
3289 	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3290 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3291 	struct ext4_group_info **meta_group_info;
3292 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3293 
3294 	/*
3295 	 * First check if this group is the first of a reserved block.
3296 	 * If so, we have to allocate a new table of pointers
3297 	 * to ext4_group_info structures
3298 	 */
3299 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3300 		metalen = sizeof(*meta_group_info) <<
3301 			EXT4_DESC_PER_BLOCK_BITS(sb);
3302 		meta_group_info = kmalloc(metalen, GFP_NOFS);
3303 		if (meta_group_info == NULL) {
3304 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
3305 				 "for a buddy group");
3306 			return -ENOMEM;
3307 		}
3308 		rcu_read_lock();
3309 		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3310 		rcu_read_unlock();
3311 	}
3312 
3313 	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3314 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3315 
3316 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3317 	if (meta_group_info[i] == NULL) {
3318 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3319 		goto exit_group_info;
3320 	}
3321 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3322 		&(meta_group_info[i]->bb_state));
3323 
3324 	/*
3325 	 * initialize bb_free to be able to skip
3326 	 * empty groups without initialization
3327 	 */
3328 	if (ext4_has_group_desc_csum(sb) &&
3329 	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3330 		meta_group_info[i]->bb_free =
3331 			ext4_free_clusters_after_init(sb, group, desc);
3332 	} else {
3333 		meta_group_info[i]->bb_free =
3334 			ext4_free_group_clusters(sb, desc);
3335 	}
3336 
3337 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3338 	init_rwsem(&meta_group_info[i]->alloc_sem);
3339 	meta_group_info[i]->bb_free_root = RB_ROOT;
3340 	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3341 	INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3342 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3343 	meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3344 	meta_group_info[i]->bb_group = group;
3345 
3346 	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3347 	return 0;
3348 
3349 exit_group_info:
3350 	/* If a meta_group_info table has been allocated, release it now */
3351 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3352 		struct ext4_group_info ***group_info;
3353 
3354 		rcu_read_lock();
3355 		group_info = rcu_dereference(sbi->s_group_info);
3356 		kfree(group_info[idx]);
3357 		group_info[idx] = NULL;
3358 		rcu_read_unlock();
3359 	}
3360 	return -ENOMEM;
3361 } /* ext4_mb_add_groupinfo */
3362 
3363 static int ext4_mb_init_backend(struct super_block *sb)
3364 {
3365 	ext4_group_t ngroups = ext4_get_groups_count(sb);
3366 	ext4_group_t i;
3367 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3368 	int err;
3369 	struct ext4_group_desc *desc;
3370 	struct ext4_group_info ***group_info;
3371 	struct kmem_cache *cachep;
3372 
3373 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
3374 	if (err)
3375 		return err;
3376 
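	/*
	 * The buddy cache is an in-memory inode whose page cache holds the
	 * per-group block bitmaps and the buddy bitmaps generated from them.
	 */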
3377 	sbi->s_buddy_cache = new_inode(sb);
3378 	if (sbi->s_buddy_cache == NULL) {
3379 		ext4_msg(sb, KERN_ERR, "can't get new inode");
3380 		goto err_freesgi;
3381 	}
3382 	/* To avoid potentially colliding with a valid on-disk inode number,
3383 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3384 	 * not in the inode hash, so it should never be found by iget(), but
3385 	 * this will avoid confusion if it ever shows up during debugging. */
3386 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3387 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3388 	for (i = 0; i < ngroups; i++) {
3389 		cond_resched();
3390 		desc = ext4_get_group_desc(sb, i, NULL);
3391 		if (desc == NULL) {
3392 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3393 			goto err_freebuddy;
3394 		}
3395 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3396 			goto err_freebuddy;
3397 	}
3398 
3399 	if (ext4_has_feature_flex_bg(sb)) {
3400 		/* a single flex group is supposed to be read by a single IO.
3401 		 * 2 ^ s_log_groups_per_flex must not exceed UINT_MAX as
3402 		 * s_mb_prefetch is an unsigned integer, so the maximum shift is 32.
3403 		 */
3404 		if (sbi->s_es->s_log_groups_per_flex >= 32) {
3405 			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3406 			goto err_freebuddy;
3407 		}
3408 		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3409 			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3410 		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3411 	} else {
3412 		sbi->s_mb_prefetch = 32;
3413 	}
3414 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3415 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3416 	/* how many real IOs to prefetch within a single allocation at cr=0.
3417 	 * Given cr=0 is a CPU-related optimization we shouldn't try to
3418 	 * load too many groups; at some point we should start to use what
3419 	 * we've got in memory.
3420 	 * With an average random access time of 5ms, it'd take a second to
3421 	 * get 200 groups (* N with flex_bg), so let's make this limit 4
3422 	 */
3423 	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3424 	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3425 		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3426 
3427 	return 0;
3428 
3429 err_freebuddy:
3430 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3431 	while (i-- > 0) {
3432 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3433 
3434 		if (grp)
3435 			kmem_cache_free(cachep, grp);
3436 	}
3437 	i = sbi->s_group_info_size;
3438 	rcu_read_lock();
3439 	group_info = rcu_dereference(sbi->s_group_info);
3440 	while (i-- > 0)
3441 		kfree(group_info[i]);
3442 	rcu_read_unlock();
3443 	iput(sbi->s_buddy_cache);
3444 err_freesgi:
3445 	rcu_read_lock();
3446 	kvfree(rcu_dereference(sbi->s_group_info));
3447 	rcu_read_unlock();
3448 	return -ENOMEM;
3449 }
3450 
3451 static void ext4_groupinfo_destroy_slabs(void)
3452 {
3453 	int i;
3454 
3455 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3456 		kmem_cache_destroy(ext4_groupinfo_caches[i]);
3457 		ext4_groupinfo_caches[i] = NULL;
3458 	}
3459 }
3460 
3461 static int ext4_groupinfo_create_slab(size_t size)
3462 {
3463 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3464 	int slab_size;
3465 	int blocksize_bits = order_base_2(size);
3466 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3467 	struct kmem_cache *cachep;
3468 
3469 	if (cache_index >= NR_GRPINFO_CACHES)
3470 		return -EINVAL;
3471 
3472 	if (unlikely(cache_index < 0))
3473 		cache_index = 0;
3474 
3475 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
3476 	if (ext4_groupinfo_caches[cache_index]) {
3477 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3478 		return 0;	/* Already created */
3479 	}
3480 
3481 	slab_size = offsetof(struct ext4_group_info,
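	/*
	 * The object size depends on the block size: one bb_counters[] slot
	 * per buddy order, i.e. blocksize_bits + 2 entries.
	 */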
3482 				bb_counters[blocksize_bits + 2]);
3483 
3484 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3485 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3486 					NULL);
3487 
3488 	ext4_groupinfo_caches[cache_index] = cachep;
3489 
3490 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3491 	if (!cachep) {
3492 		printk(KERN_EMERG
3493 		       "EXT4-fs: no memory for groupinfo slab cache\n");
3494 		return -ENOMEM;
3495 	}
3496 
3497 	return 0;
3498 }
3499 
3500 static void ext4_discard_work(struct work_struct *work)
3501 {
3502 	struct ext4_sb_info *sbi = container_of(work,
3503 			struct ext4_sb_info, s_discard_work);
3504 	struct super_block *sb = sbi->s_sb;
3505 	struct ext4_free_data *fd, *nfd;
3506 	struct ext4_buddy e4b;
3507 	LIST_HEAD(discard_list);
3508 	ext4_group_t grp, load_grp;
3509 	int err = 0;
3510 
3511 	spin_lock(&sbi->s_md_lock);
3512 	list_splice_init(&sbi->s_discard_list, &discard_list);
3513 	spin_unlock(&sbi->s_md_lock);
3514 
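	/*
	 * Walk the detached list in order, keeping the buddy of the current
	 * group loaded across consecutive entries for the same group so it
	 * does not have to be reloaded for every extent.
	 */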
3515 	load_grp = UINT_MAX;
3516 	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3517 		/*
3518 		 * If filesystem is umounting or no memory or suffering
3519 		 * from no space, give up the discard
3520 		 */
3521 		if ((sb->s_flags & SB_ACTIVE) && !err &&
3522 		    !atomic_read(&sbi->s_retry_alloc_pending)) {
3523 			grp = fd->efd_group;
3524 			if (grp != load_grp) {
3525 				if (load_grp != UINT_MAX)
3526 					ext4_mb_unload_buddy(&e4b);
3527 
3528 				err = ext4_mb_load_buddy(sb, grp, &e4b);
3529 				if (err) {
3530 					kmem_cache_free(ext4_free_data_cachep, fd);
3531 					load_grp = UINT_MAX;
3532 					continue;
3533 				} else {
3534 					load_grp = grp;
3535 				}
3536 			}
3537 
3538 			ext4_lock_group(sb, grp);
3539 			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3540 						fd->efd_start_cluster + fd->efd_count - 1, 1);
3541 			ext4_unlock_group(sb, grp);
3542 		}
3543 		kmem_cache_free(ext4_free_data_cachep, fd);
3544 	}
3545 
3546 	if (load_grp != UINT_MAX)
3547 		ext4_mb_unload_buddy(&e4b);
3548 }
3549 
3550 int ext4_mb_init(struct super_block *sb)
3551 {
3552 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3553 	unsigned i, j;
3554 	unsigned offset, offset_incr;
3555 	unsigned max;
3556 	int ret;
3557 
3558 	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3559 
3560 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3561 	if (sbi->s_mb_offsets == NULL) {
3562 		ret = -ENOMEM;
3563 		goto out;
3564 	}
3565 
3566 	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3567 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3568 	if (sbi->s_mb_maxs == NULL) {
3569 		ret = -ENOMEM;
3570 		goto out;
3571 	}
3572 
3573 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3574 	if (ret < 0)
3575 		goto out;
3576 
3577 	/* order 0 is regular bitmap */
3578 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3579 	sbi->s_mb_offsets[0] = 0;
3580 
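	/*
	 * For each higher order record the offset of its bitmap within the
	 * buddy block (s_mb_offsets) and the number of bits it contains
	 * (s_mb_maxs); each successive order has half as many bits.
	 */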
3581 	i = 1;
3582 	offset = 0;
3583 	offset_incr = 1 << (sb->s_blocksize_bits - 1);
3584 	max = sb->s_blocksize << 2;
3585 	do {
3586 		sbi->s_mb_offsets[i] = offset;
3587 		sbi->s_mb_maxs[i] = max;
3588 		offset += offset_incr;
3589 		offset_incr = offset_incr >> 1;
3590 		max = max >> 1;
3591 		i++;
3592 	} while (i < MB_NUM_ORDERS(sb));
3593 
3594 	sbi->s_mb_avg_fragment_size =
3595 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3596 			GFP_KERNEL);
3597 	if (!sbi->s_mb_avg_fragment_size) {
3598 		ret = -ENOMEM;
3599 		goto out;
3600 	}
3601 	sbi->s_mb_avg_fragment_size_locks =
3602 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3603 			GFP_KERNEL);
3604 	if (!sbi->s_mb_avg_fragment_size_locks) {
3605 		ret = -ENOMEM;
3606 		goto out;
3607 	}
3608 	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3609 		INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3610 		rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3611 	}
3612 	sbi->s_mb_largest_free_orders =
3613 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3614 			GFP_KERNEL);
3615 	if (!sbi->s_mb_largest_free_orders) {
3616 		ret = -ENOMEM;
3617 		goto out;
3618 	}
3619 	sbi->s_mb_largest_free_orders_locks =
3620 		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3621 			GFP_KERNEL);
3622 	if (!sbi->s_mb_largest_free_orders_locks) {
3623 		ret = -ENOMEM;
3624 		goto out;
3625 	}
3626 	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3627 		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3628 		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3629 	}
3630 
3631 	spin_lock_init(&sbi->s_md_lock);
3632 	sbi->s_mb_free_pending = 0;
3633 	INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3634 	INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3635 	INIT_LIST_HEAD(&sbi->s_discard_list);
3636 	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3637 	atomic_set(&sbi->s_retry_alloc_pending, 0);
3638 
3639 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3640 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3641 	sbi->s_mb_stats = MB_DEFAULT_STATS;
3642 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3643 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3644 	sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3645 
3646 	/*
3647 	 * The default group preallocation is 512, which for 4k block
3648 	 * sizes translates to 2 megabytes.  However for bigalloc file
3649 	 * systems, this is probably too big (i.e, if the cluster size
3650 	 * is 1 megabyte, then group preallocation size becomes half a
3651 	 * gigabyte!).  As a default, we will keep a two megabyte
3652 	 * group pralloc size for cluster sizes up to 64k, and after
3653 	 * group prealloc size for cluster sizes up to 64k, and after
3654 	 * 32 clusters.  This translates to 8 megs when the cluster
3655 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
3656 	 * which seems reasonable as a default.
3657 	 */
3658 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3659 				       sbi->s_cluster_bits, 32);
3660 	/*
3661 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3662 	 * to the lowest multiple of s_stripe which is bigger than
3663 	 * the s_mb_group_prealloc as determined above. We want
3664 	 * the preallocation size to be an exact multiple of the
3665 	 * RAID stripe size so that preallocations don't fragment
3666 	 * the stripes.
3667 	 */
3668 	if (sbi->s_stripe > 1) {
3669 		sbi->s_mb_group_prealloc = roundup(
3670 			sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
3671 	}
3672 
3673 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3674 	if (sbi->s_locality_groups == NULL) {
3675 		ret = -ENOMEM;
3676 		goto out;
3677 	}
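	/*
	 * Each possible CPU gets its own locality group with PREALLOC_TB_SIZE
	 * preallocation lists indexed by size order (see
	 * ext4_mb_use_preallocated()).
	 */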
3678 	for_each_possible_cpu(i) {
3679 		struct ext4_locality_group *lg;
3680 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3681 		mutex_init(&lg->lg_mutex);
3682 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3683 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3684 		spin_lock_init(&lg->lg_prealloc_lock);
3685 	}
3686 
3687 	if (bdev_nonrot(sb->s_bdev))
3688 		sbi->s_mb_max_linear_groups = 0;
3689 	else
3690 		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3691 	/* init file for buddy data */
3692 	ret = ext4_mb_init_backend(sb);
3693 	if (ret != 0)
3694 		goto out_free_locality_groups;
3695 
3696 	return 0;
3697 
3698 out_free_locality_groups:
3699 	free_percpu(sbi->s_locality_groups);
3700 	sbi->s_locality_groups = NULL;
3701 out:
3702 	kfree(sbi->s_mb_avg_fragment_size);
3703 	kfree(sbi->s_mb_avg_fragment_size_locks);
3704 	kfree(sbi->s_mb_largest_free_orders);
3705 	kfree(sbi->s_mb_largest_free_orders_locks);
3706 	kfree(sbi->s_mb_offsets);
3707 	sbi->s_mb_offsets = NULL;
3708 	kfree(sbi->s_mb_maxs);
3709 	sbi->s_mb_maxs = NULL;
3710 	return ret;
3711 }
3712 
3713 /* needs to be called with the ext4 group lock held */
3714 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3715 {
3716 	struct ext4_prealloc_space *pa;
3717 	struct list_head *cur, *tmp;
3718 	int count = 0;
3719 
3720 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3721 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3722 		list_del(&pa->pa_group_list);
3723 		count++;
3724 		kmem_cache_free(ext4_pspace_cachep, pa);
3725 	}
3726 	return count;
3727 }
3728 
3729 int ext4_mb_release(struct super_block *sb)
3730 {
3731 	ext4_group_t ngroups = ext4_get_groups_count(sb);
3732 	ext4_group_t i;
3733 	int num_meta_group_infos;
3734 	struct ext4_group_info *grinfo, ***group_info;
3735 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3736 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3737 	int count;
3738 
3739 	if (test_opt(sb, DISCARD)) {
3740 		/*
3741 		 * wait for the discard work to drain all of the ext4_free_data entries
3742 		 */
3743 		flush_work(&sbi->s_discard_work);
3744 		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3745 	}
3746 
3747 	if (sbi->s_group_info) {
3748 		for (i = 0; i < ngroups; i++) {
3749 			cond_resched();
3750 			grinfo = ext4_get_group_info(sb, i);
3751 			if (!grinfo)
3752 				continue;
3753 			mb_group_bb_bitmap_free(grinfo);
3754 			ext4_lock_group(sb, i);
3755 			count = ext4_mb_cleanup_pa(grinfo);
3756 			if (count)
3757 				mb_debug(sb, "mballoc: %d PAs left\n",
3758 					 count);
3759 			ext4_unlock_group(sb, i);
3760 			kmem_cache_free(cachep, grinfo);
3761 		}
3762 		num_meta_group_infos = (ngroups +
3763 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3764 			EXT4_DESC_PER_BLOCK_BITS(sb);
3765 		rcu_read_lock();
3766 		group_info = rcu_dereference(sbi->s_group_info);
3767 		for (i = 0; i < num_meta_group_infos; i++)
3768 			kfree(group_info[i]);
3769 		kvfree(group_info);
3770 		rcu_read_unlock();
3771 	}
3772 	kfree(sbi->s_mb_avg_fragment_size);
3773 	kfree(sbi->s_mb_avg_fragment_size_locks);
3774 	kfree(sbi->s_mb_largest_free_orders);
3775 	kfree(sbi->s_mb_largest_free_orders_locks);
3776 	kfree(sbi->s_mb_offsets);
3777 	kfree(sbi->s_mb_maxs);
3778 	iput(sbi->s_buddy_cache);
3779 	if (sbi->s_mb_stats) {
3780 		ext4_msg(sb, KERN_INFO,
3781 		       "mballoc: %u blocks %u reqs (%u success)",
3782 				atomic_read(&sbi->s_bal_allocated),
3783 				atomic_read(&sbi->s_bal_reqs),
3784 				atomic_read(&sbi->s_bal_success));
3785 		ext4_msg(sb, KERN_INFO,
3786 		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3787 				"%u 2^N hits, %u breaks, %u lost",
3788 				atomic_read(&sbi->s_bal_ex_scanned),
3789 				atomic_read(&sbi->s_bal_groups_scanned),
3790 				atomic_read(&sbi->s_bal_goals),
3791 				atomic_read(&sbi->s_bal_2orders),
3792 				atomic_read(&sbi->s_bal_breaks),
3793 				atomic_read(&sbi->s_mb_lost_chunks));
3794 		ext4_msg(sb, KERN_INFO,
3795 		       "mballoc: %u generated and it took %llu",
3796 				atomic_read(&sbi->s_mb_buddies_generated),
3797 				atomic64_read(&sbi->s_mb_generation_time));
3798 		ext4_msg(sb, KERN_INFO,
3799 		       "mballoc: %u preallocated, %u discarded",
3800 				atomic_read(&sbi->s_mb_preallocated),
3801 				atomic_read(&sbi->s_mb_discarded));
3802 	}
3803 
3804 	free_percpu(sbi->s_locality_groups);
3805 
3806 	return 0;
3807 }
3808 
3809 static inline int ext4_issue_discard(struct super_block *sb,
3810 		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3811 		struct bio **biop)
3812 {
3813 	ext4_fsblk_t discard_block;
3814 
3815 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3816 			 ext4_group_first_block_no(sb, block_group));
3817 	count = EXT4_C2B(EXT4_SB(sb), count);
3818 	trace_ext4_discard_blocks(sb,
3819 			(unsigned long long) discard_block, count);
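	/*
	 * __blkdev_issue_discard() takes 512-byte sectors, so shift the
	 * filesystem block number and count by (s_blocksize_bits - 9).
	 */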
3820 	if (biop) {
3821 		return __blkdev_issue_discard(sb->s_bdev,
3822 			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
3823 			(sector_t)count << (sb->s_blocksize_bits - 9),
3824 			GFP_NOFS, biop);
3825 	} else
3826 		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3827 }
3828 
3829 static void ext4_free_data_in_buddy(struct super_block *sb,
3830 				    struct ext4_free_data *entry)
3831 {
3832 	struct ext4_buddy e4b;
3833 	struct ext4_group_info *db;
3834 	int err, count = 0;
3835 
3836 	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3837 		 entry->efd_count, entry->efd_group, entry);
3838 
3839 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3840 	/* we expect to find existing buddy because it's pinned */
3841 	BUG_ON(err != 0);
3842 
3843 	spin_lock(&EXT4_SB(sb)->s_md_lock);
3844 	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3845 	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3846 
3847 	db = e4b.bd_info;
3848 	/* there are blocks to put in buddy to make them really free */
3849 	count += entry->efd_count;
3850 	ext4_lock_group(sb, entry->efd_group);
3851 	/* Take it out of per group rb tree */
3852 	rb_erase(&entry->efd_node, &(db->bb_free_root));
3853 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3854 
3855 	/*
3856 	 * Clear the trimmed flag for the group so that the next
3857 	 * ext4_trim_fs can trim it.
3858 	 * If the volume is mounted with -o discard, online discard
3859 	 * is supported and the free blocks will be trimmed online.
3860 	 */
3861 	if (!test_opt(sb, DISCARD))
3862 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
3863 
3864 	if (!db->bb_free_root.rb_node) {
3865 		/* No more items in the per group rb tree;
3866 		 * balance refcounts from ext4_mb_free_metadata()
3867 		 */
3868 		put_page(e4b.bd_buddy_page);
3869 		put_page(e4b.bd_bitmap_page);
3870 	}
3871 	ext4_unlock_group(sb, entry->efd_group);
3872 	ext4_mb_unload_buddy(&e4b);
3873 
3874 	mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3875 }
3876 
3877 /*
3878  * This function is called by the jbd2 layer once the commit has finished,
3879  * so we know we can free the blocks that were released with that commit.
3880  */
3881 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3882 {
3883 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3884 	struct ext4_free_data *entry, *tmp;
3885 	LIST_HEAD(freed_data_list);
3886 	struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
3887 	bool wake;
3888 
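	/*
	 * Freed extents are staged on one of two lists chosen by the parity
	 * of the committing transaction's tid; detach the list belonging to
	 * this commit before walking it.
	 */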
3889 	list_replace_init(s_freed_head, &freed_data_list);
3890 
3891 	list_for_each_entry(entry, &freed_data_list, efd_list)
3892 		ext4_free_data_in_buddy(sb, entry);
3893 
3894 	if (test_opt(sb, DISCARD)) {
3895 		spin_lock(&sbi->s_md_lock);
3896 		wake = list_empty(&sbi->s_discard_list);
3897 		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3898 		spin_unlock(&sbi->s_md_lock);
3899 		if (wake)
3900 			queue_work(system_unbound_wq, &sbi->s_discard_work);
3901 	} else {
3902 		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3903 			kmem_cache_free(ext4_free_data_cachep, entry);
3904 	}
3905 }
3906 
3907 int __init ext4_init_mballoc(void)
3908 {
3909 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3910 					SLAB_RECLAIM_ACCOUNT);
3911 	if (ext4_pspace_cachep == NULL)
3912 		goto out;
3913 
3914 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3915 				    SLAB_RECLAIM_ACCOUNT);
3916 	if (ext4_ac_cachep == NULL)
3917 		goto out_pa_free;
3918 
3919 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3920 					   SLAB_RECLAIM_ACCOUNT);
3921 	if (ext4_free_data_cachep == NULL)
3922 		goto out_ac_free;
3923 
3924 	return 0;
3925 
3926 out_ac_free:
3927 	kmem_cache_destroy(ext4_ac_cachep);
3928 out_pa_free:
3929 	kmem_cache_destroy(ext4_pspace_cachep);
3930 out:
3931 	return -ENOMEM;
3932 }
3933 
3934 void ext4_exit_mballoc(void)
3935 {
3936 	/*
3937 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3938 	 * before destroying the slab cache.
3939 	 */
3940 	rcu_barrier();
3941 	kmem_cache_destroy(ext4_pspace_cachep);
3942 	kmem_cache_destroy(ext4_ac_cachep);
3943 	kmem_cache_destroy(ext4_free_data_cachep);
3944 	ext4_groupinfo_destroy_slabs();
3945 }
3946 
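/*
 * Flags for ext4_mb_mark_context():
 * EXT4_MB_BITMAP_MARKED_CHECK - count how many bits actually change so the
 * free cluster accounting stays correct even if some bits already had the
 * requested state.
 * EXT4_MB_SYNC_UPDATE - write the bitmap and group descriptor buffers out
 * synchronously (used by ext4_mb_mark_bb() for fast commit replay).
 */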
3947 #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
3948 #define EXT4_MB_SYNC_UPDATE 0x0002
3949 static int
3950 ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
3951 		     ext4_group_t group, ext4_grpblk_t blkoff,
3952 		     ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
3953 {
3954 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3955 	struct buffer_head *bitmap_bh = NULL;
3956 	struct ext4_group_desc *gdp;
3957 	struct buffer_head *gdp_bh;
3958 	int err;
3959 	unsigned int i, already, changed = len;
3960 
3961 	KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context,
3962 				   handle, sb, state, group, blkoff, len,
3963 				   flags, ret_changed);
3964 
3965 	if (ret_changed)
3966 		*ret_changed = 0;
3967 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3968 	if (IS_ERR(bitmap_bh))
3969 		return PTR_ERR(bitmap_bh);
3970 
3971 	if (handle) {
3972 		BUFFER_TRACE(bitmap_bh, "getting write access");
3973 		err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3974 						    EXT4_JTR_NONE);
3975 		if (err)
3976 			goto out_err;
3977 	}
3978 
3979 	err = -EIO;
3980 	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3981 	if (!gdp)
3982 		goto out_err;
3983 
3984 	if (handle) {
3985 		BUFFER_TRACE(gdp_bh, "get_write_access");
3986 		err = ext4_journal_get_write_access(handle, sb, gdp_bh,
3987 						    EXT4_JTR_NONE);
3988 		if (err)
3989 			goto out_err;
3990 	}
3991 
3992 	ext4_lock_group(sb, group);
3993 	if (ext4_has_group_desc_csum(sb) &&
3994 	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3995 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3996 		ext4_free_group_clusters_set(sb, gdp,
3997 			ext4_free_clusters_after_init(sb, group, gdp));
3998 	}
3999 
4000 	if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
4001 		already = 0;
4002 		for (i = 0; i < len; i++)
4003 			if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4004 					state)
4005 				already++;
4006 		changed = len - already;
4007 	}
4008 
4009 	if (state) {
4010 		mb_set_bits(bitmap_bh->b_data, blkoff, len);
4011 		ext4_free_group_clusters_set(sb, gdp,
4012 			ext4_free_group_clusters(sb, gdp) - changed);
4013 	} else {
4014 		mb_clear_bits(bitmap_bh->b_data, blkoff, len);
4015 		ext4_free_group_clusters_set(sb, gdp,
4016 			ext4_free_group_clusters(sb, gdp) + changed);
4017 	}
4018 
4019 	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4020 	ext4_group_desc_csum_set(sb, group, gdp);
4021 	ext4_unlock_group(sb, group);
4022 	if (ret_changed)
4023 		*ret_changed = changed;
4024 
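	/* Keep the per-flex-group free cluster counter in sync as well. */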
4025 	if (sbi->s_log_groups_per_flex) {
4026 		ext4_group_t flex_group = ext4_flex_group(sbi, group);
4027 		struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4028 					   s_flex_groups, flex_group);
4029 
4030 		if (state)
4031 			atomic64_sub(changed, &fg->free_clusters);
4032 		else
4033 			atomic64_add(changed, &fg->free_clusters);
4034 	}
4035 
4036 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4037 	if (err)
4038 		goto out_err;
4039 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
4040 	if (err)
4041 		goto out_err;
4042 
4043 	if (flags & EXT4_MB_SYNC_UPDATE) {
4044 		sync_dirty_buffer(bitmap_bh);
4045 		sync_dirty_buffer(gdp_bh);
4046 	}
4047 
4048 out_err:
4049 	brelse(bitmap_bh);
4050 	return err;
4051 }
4052 
4053 /*
4054  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps.
4055  * Returns 0 on success or an error code.
4056  */
4057 static noinline_for_stack int
4058 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
4059 				handle_t *handle, unsigned int reserv_clstrs)
4060 {
4061 	struct ext4_group_desc *gdp;
4062 	struct ext4_sb_info *sbi;
4063 	struct super_block *sb;
4064 	ext4_fsblk_t block;
4065 	int err, len;
4066 	int flags = 0;
4067 	ext4_grpblk_t changed;
4068 
4069 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4070 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
4071 
4072 	sb = ac->ac_sb;
4073 	sbi = EXT4_SB(sb);
4074 
4075 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
4076 	if (!gdp)
4077 		return -EIO;
4078 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4079 			ext4_free_group_clusters(sb, gdp));
4080 
4081 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4082 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4083 	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4084 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4085 			   "fs metadata", block, block+len);
4086 		/* File system mounted not to panic on error
4087 		/* The file system is mounted not to panic on error.
4088 		 * Fix the bitmap and return EFSCORRUPTED.
4089 		 * We leak some of the blocks here.
4090 		err = ext4_mb_mark_context(handle, sb, true,
4091 					   ac->ac_b_ex.fe_group,
4092 					   ac->ac_b_ex.fe_start,
4093 					   ac->ac_b_ex.fe_len,
4094 					   0, NULL);
4095 		if (!err)
4096 			err = -EFSCORRUPTED;
4097 		return err;
4098 	}
4099 
4100 #ifdef AGGRESSIVE_CHECK
4101 	flags |= EXT4_MB_BITMAP_MARKED_CHECK;
4102 #endif
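	/*
	 * With AGGRESSIVE_CHECK the marking path reports how many bits it
	 * actually flipped, so we can assert below that every block of the
	 * chosen extent was previously free.
	 */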
4103 	err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
4104 				   ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
4105 				   flags, &changed);
4106 
4107 	if (err && changed == 0)
4108 		return err;
4109 
4110 #ifdef AGGRESSIVE_CHECK
4111 	BUG_ON(changed != ac->ac_b_ex.fe_len);
4112 #endif
4113 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4114 	/*
4115 	 * Now reduce the dirty block count also. Should not go negative
4116 	 */
4117 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4118 		/* release all the reserved blocks if non delalloc */
4119 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4120 				   reserv_clstrs);
4121 
4122 	return err;
4123 }
4124 
4125 /*
4126  * Idempotent helper for Ext4 fast commit replay path to set the state of
4127  * blocks in bitmaps and update counters.
4128  */
4129 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4130 		     int len, bool state)
4131 {
4132 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4133 	ext4_group_t group;
4134 	ext4_grpblk_t blkoff;
4135 	int err = 0;
4136 	unsigned int clen, thisgrp_len;
4137 
4138 	while (len > 0) {
4139 		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4140 
4141 		/*
4142 		 * Check to see if we are freeing blocks across a group
4143 		 * boundary.
4144 		 * In case of flex_bg, it can happen that (block, len)
4145 		 * spans across more than one group. In that case we need to
4146 		 * get the corresponding group metadata to work with.
4147 		 * For this we loop over the affected groups.
4148 		 */
4149 		thisgrp_len = min_t(unsigned int, (unsigned int)len,
4150 			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4151 		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4152 
4153 		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4154 			ext4_error(sb, "Marking blocks in system zone - "
4155 				   "Block = %llu, len = %u",
4156 				   block, thisgrp_len);
4157 			break;
4158 		}
4159 
4160 		err = ext4_mb_mark_context(NULL, sb, state,
4161 					   group, blkoff, clen,
4162 					   EXT4_MB_BITMAP_MARKED_CHECK |
4163 					   EXT4_MB_SYNC_UPDATE,
4164 					   NULL);
4165 		if (err)
4166 			break;
4167 
4168 		block += thisgrp_len;
4169 		len -= thisgrp_len;
4170 		BUG_ON(len < 0);
4171 	}
4172 }
4173 
4174 /*
4175  * Here we normalize the request for a locality group.
4176  * Group requests are normalized to s_mb_group_prealloc, which is rounded
4177  * up to a multiple of s_stripe if a stripe size was set via mount option.
4178  * s_mb_group_prealloc can be configured via
4179  * /sys/fs/ext4/<partition>/mb_group_prealloc
4180  *
4181  * XXX: should we try to preallocate more than the group has now?
4182  */
4183 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4184 {
4185 	struct super_block *sb = ac->ac_sb;
4186 	struct ext4_locality_group *lg = ac->ac_lg;
4187 
4188 	BUG_ON(lg == NULL);
4189 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4190 	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4191 }
4192 
4193 /*
4194  * This function returns the next element to look at during inode
4195  * PA rbtree walk. We assume that the inode PA rbtree lock
4196  * (ei->i_prealloc_lock) is held.
4197  *
4198  * new_start	The start of the range we want to compare
4199  * cur_start	The existing start that we are comparing against
4200  * node	The node of the rb_tree
4201  */
4202 static inline struct rb_node*
4203 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4204 {
4205 	if (new_start < cur_start)
4206 		return node->rb_left;
4207 	else
4208 		return node->rb_right;
4209 }
4210 
4211 static inline void
4212 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4213 			  ext4_lblk_t start, loff_t end)
4214 {
4215 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4216 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4217 	struct ext4_prealloc_space *tmp_pa;
4218 	ext4_lblk_t tmp_pa_start;
4219 	loff_t tmp_pa_end;
4220 	struct rb_node *iter;
4221 
4222 	read_lock(&ei->i_prealloc_lock);
4223 	for (iter = ei->i_prealloc_node.rb_node; iter;
4224 	     iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4225 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4226 				  pa_node.inode_node);
4227 		tmp_pa_start = tmp_pa->pa_lstart;
4228 		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4229 
4230 		spin_lock(&tmp_pa->pa_lock);
4231 		if (tmp_pa->pa_deleted == 0)
4232 			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4233 		spin_unlock(&tmp_pa->pa_lock);
4234 	}
4235 	read_unlock(&ei->i_prealloc_lock);
4236 }
4237 
4238 /*
4239  * Given an allocation context "ac" and a range "start", "end", check
4240  * and adjust boundaries if the range overlaps with any of the existing
4241  * preallocations stored in the corresponding inode of the allocation context.
4242  *
4243  * Parameters:
4244  *	ac			allocation context
4245  *	start			start of the new range
4246  *	end			end of the new range
4247  */
4248 static inline void
4249 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4250 			  ext4_lblk_t *start, loff_t *end)
4251 {
4252 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4253 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4254 	struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4255 	struct rb_node *iter;
4256 	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4257 	loff_t new_end, tmp_pa_end, left_pa_end = -1;
4258 
4259 	new_start = *start;
4260 	new_end = *end;
4261 
4262 	/*
4263 	 * Adjust the normalized range so that it doesn't overlap with any
4264 	 * existing preallocated blocks (PAs). Make sure to hold the rbtree lock
4265 	 * so it doesn't change underneath us.
4266 	 */
4267 	read_lock(&ei->i_prealloc_lock);
4268 
4269 	/* Step 1: find any one immediate neighboring PA of the normalized range */
4270 	for (iter = ei->i_prealloc_node.rb_node; iter;
4271 	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4272 					    tmp_pa_start, iter)) {
4273 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4274 				  pa_node.inode_node);
4275 		tmp_pa_start = tmp_pa->pa_lstart;
4276 		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4277 
4278 		/* PA must not overlap original request */
4279 		spin_lock(&tmp_pa->pa_lock);
4280 		if (tmp_pa->pa_deleted == 0)
4281 			BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4282 				 ac->ac_o_ex.fe_logical < tmp_pa_start));
4283 		spin_unlock(&tmp_pa->pa_lock);
4284 	}
4285 
4286 	/*
4287 	 * Step 2: check if the found PA is left or right neighbor and
4288 	 * get the other neighbor
4289 	 */
4290 	if (tmp_pa) {
4291 		if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4292 			struct rb_node *tmp;
4293 
4294 			left_pa = tmp_pa;
4295 			tmp = rb_next(&left_pa->pa_node.inode_node);
4296 			if (tmp) {
4297 				right_pa = rb_entry(tmp,
4298 						    struct ext4_prealloc_space,
4299 						    pa_node.inode_node);
4300 			}
4301 		} else {
4302 			struct rb_node *tmp;
4303 
4304 			right_pa = tmp_pa;
4305 			tmp = rb_prev(&right_pa->pa_node.inode_node);
4306 			if (tmp) {
4307 				left_pa = rb_entry(tmp,
4308 						   struct ext4_prealloc_space,
4309 						   pa_node.inode_node);
4310 			}
4311 		}
4312 	}
4313 
4314 	/* Step 3: get the non deleted neighbors */
4315 	if (left_pa) {
4316 		for (iter = &left_pa->pa_node.inode_node;;
4317 		     iter = rb_prev(iter)) {
4318 			if (!iter) {
4319 				left_pa = NULL;
4320 				break;
4321 			}
4322 
4323 			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4324 					  pa_node.inode_node);
4325 			left_pa = tmp_pa;
4326 			spin_lock(&tmp_pa->pa_lock);
4327 			if (tmp_pa->pa_deleted == 0) {
4328 				spin_unlock(&tmp_pa->pa_lock);
4329 				break;
4330 			}
4331 			spin_unlock(&tmp_pa->pa_lock);
4332 		}
4333 	}
4334 
4335 	if (right_pa) {
4336 		for (iter = &right_pa->pa_node.inode_node;;
4337 		     iter = rb_next(iter)) {
4338 			if (!iter) {
4339 				right_pa = NULL;
4340 				break;
4341 			}
4342 
4343 			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4344 					  pa_node.inode_node);
4345 			right_pa = tmp_pa;
4346 			spin_lock(&tmp_pa->pa_lock);
4347 			if (tmp_pa->pa_deleted == 0) {
4348 				spin_unlock(&tmp_pa->pa_lock);
4349 				break;
4350 			}
4351 			spin_unlock(&tmp_pa->pa_lock);
4352 		}
4353 	}
4354 
4355 	if (left_pa) {
4356 		left_pa_end = pa_logical_end(sbi, left_pa);
4357 		BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4358 	}
4359 
4360 	if (right_pa) {
4361 		right_pa_start = right_pa->pa_lstart;
4362 		BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4363 	}
4364 
4365 	/* Step 4: trim our normalized range to not overlap with the neighbors */
4366 	if (left_pa) {
4367 		if (left_pa_end > new_start)
4368 			new_start = left_pa_end;
4369 	}
4370 
4371 	if (right_pa) {
4372 		if (right_pa_start < new_end)
4373 			new_end = right_pa_start;
4374 	}
4375 	read_unlock(&ei->i_prealloc_lock);
4376 
4377 	/* XXX: extra loop to check we really don't overlap preallocations */
4378 	ext4_mb_pa_assert_overlap(ac, new_start, new_end);
4379 
4380 	*start = new_start;
4381 	*end = new_end;
4382 }
4383 
4384 /*
4385  * Normalization means making request better in terms of
4386  * size and alignment
4387  */
4388 static noinline_for_stack void
4389 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4390 				struct ext4_allocation_request *ar)
4391 {
4392 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4393 	struct ext4_super_block *es = sbi->s_es;
4394 	int bsbits, max;
4395 	loff_t size, start_off, end;
4396 	loff_t orig_size __maybe_unused;
4397 	ext4_lblk_t start;
4398 
4399 	/* only normalize data requests; metadata requests
4400 	   do not need preallocation */
4401 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4402 		return;
4403 
4404 	/* sometimes the caller may want exact blocks */
4405 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4406 		return;
4407 
4408 	/* caller may indicate that preallocation isn't
4409 	 * required (it's a tail, for example) */
4410 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4411 		return;
4412 
4413 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4414 		ext4_mb_normalize_group_request(ac);
4415 		return;
4416 	}
4417 
4418 	bsbits = ac->ac_sb->s_blocksize_bits;
4419 
4420 	/* first, let's learn actual file size
4421 	 * given current request is allocated */
4422 	size = extent_logical_end(sbi, &ac->ac_o_ex);
4423 	size = size << bsbits;
4424 	if (size < i_size_read(ac->ac_inode))
4425 		size = i_size_read(ac->ac_inode);
4426 	orig_size = size;
4427 
4428 	/* max size of free chunks */
4429 	max = 2 << bsbits;
4430 
4431 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
4432 		(req <= (size) || max <= (chunk_size))
4433 
4434 	/* first, try to predict filesize */
4435 	/* XXX: should this table be tunable? */
4436 	start_off = 0;
4437 	if (size <= 16 * 1024) {
4438 		size = 16 * 1024;
4439 	} else if (size <= 32 * 1024) {
4440 		size = 32 * 1024;
4441 	} else if (size <= 64 * 1024) {
4442 		size = 64 * 1024;
4443 	} else if (size <= 128 * 1024) {
4444 		size = 128 * 1024;
4445 	} else if (size <= 256 * 1024) {
4446 		size = 256 * 1024;
4447 	} else if (size <= 512 * 1024) {
4448 		size = 512 * 1024;
4449 	} else if (size <= 1024 * 1024) {
4450 		size = 1024 * 1024;
4451 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4452 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4453 						(21 - bsbits)) << 21;
4454 		size = 2 * 1024 * 1024;
4455 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4456 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4457 							(22 - bsbits)) << 22;
4458 		size = 4 * 1024 * 1024;
4459 	} else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4460 					(8<<20)>>bsbits, max, 8 * 1024)) {
4461 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4462 							(23 - bsbits)) << 23;
4463 		size = 8 * 1024 * 1024;
4464 	} else {
4465 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4466 		size	  = (loff_t) EXT4_C2B(sbi,
4467 					      ac->ac_o_ex.fe_len) << bsbits;
4468 	}
4469 	size = size >> bsbits;
4470 	start = start_off >> bsbits;
4471 
4472 	/*
4473 	 * For tiny groups (smaller than 8MB) the chosen allocation
4474 	 * alignment may be larger than group size. Make sure the
4475 	 * alignment does not move allocation to a different group which
4476 	 * makes mballoc fail assertions later.
4477 	 */
4478 	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4479 			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4480 
4481 	/* don't cover already allocated blocks in selected range */
4482 	if (ar->pleft && start <= ar->lleft) {
4483 		size -= ar->lleft + 1 - start;
4484 		start = ar->lleft + 1;
4485 	}
4486 	if (ar->pright && start + size - 1 >= ar->lright)
4487 		size -= start + size - ar->lright;
4488 
4489 	/*
4490 	 * Trim allocation request for filesystems with artificially small
4491 	 * groups.
4492 	 */
4493 	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4494 		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4495 
4496 	end = start + size;
4497 
4498 	ext4_mb_pa_adjust_overlap(ac, &start, &end);
4499 
4500 	size = end - start;
4501 
4502 	/*
4503 	 * In this function "start" and "size" are normalized for better
4504 	 * alignment and length such that we could preallocate more blocks.
4505 	 * This normalization is done such that original request of
4506 	 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4507 	 * "size" boundaries.
4508 	 * (Note fe_len can be relaxed since the FS block allocation API does not
4509 	 * guarantee the number of contiguous blocks allocated, since that
4510 	 * depends upon free space left, etc).
4511 	 * In case of inode pa, later we use the allocated blocks
4512 	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4513 	 * range of goal/best blocks [start, size] to put it at the
4514 	 * ac_o_ex.fe_logical extent of this inode.
4515 	 * (See ext4_mb_use_inode_pa() for more details)
4516 	 */
4517 	if (start + size <= ac->ac_o_ex.fe_logical ||
4518 			start > ac->ac_o_ex.fe_logical) {
4519 		ext4_msg(ac->ac_sb, KERN_ERR,
4520 			 "start %lu, size %lu, fe_logical %lu",
4521 			 (unsigned long) start, (unsigned long) size,
4522 			 (unsigned long) ac->ac_o_ex.fe_logical);
4523 		BUG();
4524 	}
4525 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4526 
4527 	/* now prepare goal request */
4528 
4529 	/* XXX: is it better to align blocks WRT logical
4530 	 * placement or satisfy big request as is */
4531 	ac->ac_g_ex.fe_logical = start;
4532 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4533 	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4534 
4535 	/* define goal start in order to merge */
4536 	if (ar->pright && (ar->lright == (start + size)) &&
4537 	    ar->pright >= size &&
4538 	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4539 		/* merge to the right */
4540 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4541 						&ac->ac_g_ex.fe_group,
4542 						&ac->ac_g_ex.fe_start);
4543 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4544 	}
4545 	if (ar->pleft && (ar->lleft + 1 == start) &&
4546 	    ar->pleft + 1 < ext4_blocks_count(es)) {
4547 		/* merge to the left */
4548 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4549 						&ac->ac_g_ex.fe_group,
4550 						&ac->ac_g_ex.fe_start);
4551 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4552 	}
4553 
4554 	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4555 		 orig_size, start);
4556 }
4557 
4558 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4559 {
4560 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4561 
4562 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4563 		atomic_inc(&sbi->s_bal_reqs);
4564 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4565 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4566 			atomic_inc(&sbi->s_bal_success);
4567 
4568 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4569 		for (int i=0; i<EXT4_MB_NUM_CRS; i++) {
4570 			atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4571 		}
4572 
4573 		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4574 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4575 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4576 			atomic_inc(&sbi->s_bal_goals);
4577 		/* did we allocate as much as normalizer originally wanted? */
4578 		if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4579 			atomic_inc(&sbi->s_bal_len_goals);
4580 
4581 		if (ac->ac_found > sbi->s_mb_max_to_scan)
4582 			atomic_inc(&sbi->s_bal_breaks);
4583 	}
4584 
4585 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4586 		trace_ext4_mballoc_alloc(ac);
4587 	else
4588 		trace_ext4_mballoc_prealloc(ac);
4589 }
4590 
4591 /*
4592  * Called on failure; free up any blocks from the inode PA for this
4593  * context.  We don't need this for MB_GROUP_PA because we only change
4594  * pa_free in ext4_mb_release_context(), but on failure, we've already
4595  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4596  */
4597 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4598 {
4599 	struct ext4_prealloc_space *pa = ac->ac_pa;
4600 	struct ext4_buddy e4b;
4601 	int err;
4602 
4603 	if (pa == NULL) {
4604 		if (ac->ac_f_ex.fe_len == 0)
4605 			return;
4606 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4607 		if (WARN_RATELIMIT(err,
4608 				   "ext4: mb_load_buddy failed (%d)", err))
4609 			/*
4610 			 * This should never happen since we pin the
4611 			 * pages in the ext4_allocation_context so
4612 			 * ext4_mb_load_buddy() should never fail.
4613 			 */
4614 			return;
4615 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4616 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4617 			       ac->ac_f_ex.fe_len);
4618 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4619 		ext4_mb_unload_buddy(&e4b);
4620 		return;
4621 	}
4622 	if (pa->pa_type == MB_INODE_PA) {
4623 		spin_lock(&pa->pa_lock);
4624 		pa->pa_free += ac->ac_b_ex.fe_len;
4625 		spin_unlock(&pa->pa_lock);
4626 	}
4627 }
4628 
4629 /*
4630  * use blocks preallocated to inode
4631  */
4632 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4633 				struct ext4_prealloc_space *pa)
4634 {
4635 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4636 	ext4_fsblk_t start;
4637 	ext4_fsblk_t end;
4638 	int len;
4639 
4640 	/* found preallocated blocks, use them */
4641 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4642 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4643 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4644 	len = EXT4_NUM_B2C(sbi, end - start);
4645 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4646 					&ac->ac_b_ex.fe_start);
4647 	ac->ac_b_ex.fe_len = len;
4648 	ac->ac_status = AC_STATUS_FOUND;
4649 	ac->ac_pa = pa;
4650 
4651 	BUG_ON(start < pa->pa_pstart);
4652 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4653 	BUG_ON(pa->pa_free < len);
4654 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
4655 	pa->pa_free -= len;
4656 
4657 	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4658 }
4659 
4660 /*
4661  * use blocks preallocated to locality group
4662  */
4663 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4664 				struct ext4_prealloc_space *pa)
4665 {
4666 	unsigned int len = ac->ac_o_ex.fe_len;
4667 
4668 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4669 					&ac->ac_b_ex.fe_group,
4670 					&ac->ac_b_ex.fe_start);
4671 	ac->ac_b_ex.fe_len = len;
4672 	ac->ac_status = AC_STATUS_FOUND;
4673 	ac->ac_pa = pa;
4674 
4675 	/* we don't correct pa_pstart or pa_len here to avoid a
4676 	 * possible race when the group is being loaded concurrently;
4677 	 * instead we correct the pa later, after blocks are marked
4678 	 * in the on-disk bitmap -- see ext4_mb_release_context().
4679 	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
4680 	 */
4681 	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4682 		 pa->pa_lstart, len, pa);
4683 }
4684 
4685 /*
4686  * Return the prealloc space that has minimal distance
4687  * from the goal block. @cpa is the prealloc
4688  * space with the currently known minimal distance
4689  * from the goal block.
4690  */
4691 static struct ext4_prealloc_space *
4692 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4693 			struct ext4_prealloc_space *pa,
4694 			struct ext4_prealloc_space *cpa)
4695 {
4696 	ext4_fsblk_t cur_distance, new_distance;
4697 
4698 	if (cpa == NULL) {
4699 		atomic_inc(&pa->pa_count);
4700 		return pa;
4701 	}
4702 	cur_distance = abs(goal_block - cpa->pa_pstart);
4703 	new_distance = abs(goal_block - pa->pa_pstart);
4704 
4705 	if (cur_distance <= new_distance)
4706 		return cpa;
4707 
4708 	/* drop the previous reference */
4709 	atomic_dec(&cpa->pa_count);
4710 	atomic_inc(&pa->pa_count);
4711 	return pa;
4712 }
4713 
4714 /*
4715  * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4716  */
4717 static bool
4718 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4719 		      struct ext4_prealloc_space *pa)
4720 {
4721 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4722 	ext4_fsblk_t start;
4723 
4724 	if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4725 		return true;
4726 
4727 	/*
4728 	 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4729 	 * in ext4_mb_normalize_request and will remain the same as ac_o_ex
4730 	 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4731 	 * consistent with ext4_mb_find_by_goal.
4732 	 */
4733 	start = pa->pa_pstart +
4734 		(ac->ac_g_ex.fe_logical - pa->pa_lstart);
4735 	if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4736 		return false;
4737 
4738 	if (ac->ac_g_ex.fe_len > pa->pa_len -
4739 	    EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4740 		return false;
4741 
4742 	return true;
4743 }
4744 
4745 /*
4746  * search goal blocks in preallocated space
4747  */
4748 static noinline_for_stack bool
4749 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4750 {
4751 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4752 	int order, i;
4753 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4754 	struct ext4_locality_group *lg;
4755 	struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4756 	struct rb_node *iter;
4757 	ext4_fsblk_t goal_block;
4758 
4759 	/* only data can be preallocated */
4760 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4761 		return false;
4762 
4763 	/*
4764 	 * first, try per-file preallocation by searching the inode pa rbtree.
4765 	 *
4766 	 * Here, we can't do a direct traversal of the tree because
4767 	 * ext4_mb_discard_group_preallocations() can concurrently mark the pa
4768 	 * deleted and that can cause direct traversal to skip some entries.
4769 	 */
4770 	read_lock(&ei->i_prealloc_lock);
4771 
4772 	if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4773 		goto try_group_pa;
4774 	}
4775 
4776 	/*
4777 	 * Step 1: Find a pa with logical start immediately adjacent to the
4778 	 * original logical start. This could be on the left or right.
4779 	 *
4780 	 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4781 	 */
4782 	for (iter = ei->i_prealloc_node.rb_node; iter;
4783 	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4784 					    tmp_pa->pa_lstart, iter)) {
4785 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4786 				  pa_node.inode_node);
4787 	}
4788 
4789 	/*
4790 	 * Step 2: The adjacent pa might be to the right of logical start, find
4791 	 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4792 	 * logical start is towards the left of original request's logical start
4793 	 */
4794 	if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4795 		struct rb_node *tmp;
4796 		tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4797 
4798 		if (tmp) {
4799 			tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4800 					    pa_node.inode_node);
4801 		} else {
4802 			/*
4803 			 * If there is no adjacent pa to the left then finding
4804 			 * an overlapping pa is not possible, hence stop searching
4805 			 * the inode pa tree
4806 			 */
4807 			goto try_group_pa;
4808 		}
4809 	}
4810 
4811 	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4812 
4813 	/*
4814 	 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4815 	 * the first non deleted adjacent pa. After this step we should have a
4816 	 * valid tmp_pa which is guaranteed to be non deleted.
4817 	 */
4818 	for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4819 		if (!iter) {
4820 			/*
4821 			 * no non deleted left adjacent pa, so stop searching
4822 			 * inode pa tree
4823 			 */
4824 			goto try_group_pa;
4825 		}
4826 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4827 				  pa_node.inode_node);
4828 		spin_lock(&tmp_pa->pa_lock);
4829 		if (tmp_pa->pa_deleted == 0) {
4830 			/*
4831 			 * We will keep holding the pa_lock from
4832 			 * this point on because we don't want group discard
4833 			 * to delete this pa underneath us. Since group
4834 			 * discard is anyway an ENOSPC operation, it
4835 			 * should be okay for it to wait a few more cycles.
4836 			 */
4837 			break;
4838 		} else {
4839 			spin_unlock(&tmp_pa->pa_lock);
4840 		}
4841 	}
4842 
4843 	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4844 	BUG_ON(tmp_pa->pa_deleted == 1);
4845 
4846 	/*
4847 	 * Step 4: We now have the non deleted left adjacent pa. Only this
4848 	 * pa can possibly satisfy the request, hence check if it overlaps the
4849 	 * original logical start and stop searching if it doesn't.
4850 	 */
4851 	if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4852 		spin_unlock(&tmp_pa->pa_lock);
4853 		goto try_group_pa;
4854 	}
4855 
4856 	/* non-extent files can't have physical blocks past 2^32 */
4857 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4858 	    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4859 	     EXT4_MAX_BLOCK_FILE_PHYS)) {
4860 		/*
4861 		 * Since PAs don't overlap, we won't find any other PA to
4862 		 * satisfy this.
4863 		 */
4864 		spin_unlock(&tmp_pa->pa_lock);
4865 		goto try_group_pa;
4866 	}
4867 
4868 	if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4869 		atomic_inc(&tmp_pa->pa_count);
4870 		ext4_mb_use_inode_pa(ac, tmp_pa);
4871 		spin_unlock(&tmp_pa->pa_lock);
4872 		read_unlock(&ei->i_prealloc_lock);
4873 		return true;
4874 	} else {
4875 		/*
4876 		 * We found a valid overlapping pa but couldn't use it because
4877 		 * it had no free blocks. This should ideally never happen
4878 		 * because:
4879 		 *
4880 		 * 1. When a new inode pa is added to rbtree it must have
4881 		 *    pa_free > 0 since otherwise we won't actually need
4882 		 *    preallocation.
4883 		 *
4884 		 * 2. An inode pa that is in the rbtree can only have its
4885 		 *    pa_free become zero when another thread calls:
4886 		 *      ext4_mb_new_blocks
4887 		 *       ext4_mb_use_preallocated
4888 		 *        ext4_mb_use_inode_pa
4889 		 *
4890 		 * 3. Further, after the above calls make pa_free == 0, we will
4891 		 *    immediately remove it from the rbtree in:
4892 		 *      ext4_mb_new_blocks
4893 		 *       ext4_mb_release_context
4894 		 *        ext4_mb_put_pa
4895 		 *
4896 		 * 4. Since pa_free becoming 0 and the pa getting removed
4897 		 * from the tree both happen in ext4_mb_new_blocks, which is always
4898 		 * called with i_data_sem held for data allocations, we can be
4899 		 * sure that another process will never see a pa in rbtree with
4900 		 * pa_free == 0.
4901 		 */
4902 		WARN_ON_ONCE(tmp_pa->pa_free == 0);
4903 	}
4904 	spin_unlock(&tmp_pa->pa_lock);
4905 try_group_pa:
4906 	read_unlock(&ei->i_prealloc_lock);
4907 
4908 	/* can we use group allocation? */
4909 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4910 		return false;
4911 
4912 	/* inode may have no locality group for some reason */
4913 	lg = ac->ac_lg;
4914 	if (lg == NULL)
4915 		return false;
4916 	order  = fls(ac->ac_o_ex.fe_len) - 1;
4917 	if (order > PREALLOC_TB_SIZE - 1)
4918 		/* The max size of hash table is PREALLOC_TB_SIZE */
4919 		order = PREALLOC_TB_SIZE - 1;
4920 
4921 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4922 	/*
4923 	 * search for the prealloc space with the
4924 	 * minimal distance from the goal block.
4925 	 */
4926 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
4927 		rcu_read_lock();
4928 		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4929 					pa_node.lg_list) {
4930 			spin_lock(&tmp_pa->pa_lock);
4931 			if (tmp_pa->pa_deleted == 0 &&
4932 					tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4933 
4934 				cpa = ext4_mb_check_group_pa(goal_block,
4935 								tmp_pa, cpa);
4936 			}
4937 			spin_unlock(&tmp_pa->pa_lock);
4938 		}
4939 		rcu_read_unlock();
4940 	}
4941 	if (cpa) {
4942 		ext4_mb_use_group_pa(ac, cpa);
4943 		return true;
4944 	}
4945 	return false;
4946 }
4947 
4948 /*
4949  * the function goes through all preallocations in this group and marks them
4950  * used in the in-core bitmap. The buddy must be generated from this bitmap.
4951  * Needs to be called with the ext4 group lock held.
4952  */
4953 static noinline_for_stack
4954 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4955 					ext4_group_t group)
4956 {
4957 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4958 	struct ext4_prealloc_space *pa;
4959 	struct list_head *cur;
4960 	ext4_group_t groupnr;
4961 	ext4_grpblk_t start;
4962 	int preallocated = 0;
4963 	int len;
4964 
4965 	if (!grp)
4966 		return;
4967 
4968 	/* all forms of preallocation discard first load the group,
4969 	 * so the only competing code is preallocation use.
4970 	 * we don't need any locking here.
4971 	 * notice we do NOT ignore preallocations with pa_deleted,
4972 	 * otherwise we could leave used blocks available for
4973 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4974 	 * is dropping the preallocation
4975 	 */
4976 	list_for_each(cur, &grp->bb_prealloc_list) {
4977 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4978 		spin_lock(&pa->pa_lock);
4979 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4980 					     &groupnr, &start);
4981 		len = pa->pa_len;
4982 		spin_unlock(&pa->pa_lock);
4983 		if (unlikely(len == 0))
4984 			continue;
4985 		BUG_ON(groupnr != group);
4986 		mb_set_bits(bitmap, start, len);
4987 		preallocated += len;
4988 	}
4989 	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4990 }
4991 
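/*
 * Mark a preallocation space as deleted so that new allocations skip it;
 * for an inode pa also drop the inode's active preallocation count.
 */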
4992 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4993 				    struct ext4_prealloc_space *pa)
4994 {
4995 	struct ext4_inode_info *ei;
4996 
4997 	if (pa->pa_deleted) {
4998 		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4999 			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5000 			     pa->pa_len);
5001 		return;
5002 	}
5003 
5004 	pa->pa_deleted = 1;
5005 
5006 	if (pa->pa_type == MB_INODE_PA) {
5007 		ei = EXT4_I(pa->pa_inode);
5008 		atomic_dec(&ei->i_prealloc_active);
5009 	}
5010 }
5011 
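/*
 * Return a pa descriptor to the slab cache. The pa must already be marked
 * deleted and must have no remaining references.
 */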
5012 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5013 {
5014 	BUG_ON(!pa);
5015 	BUG_ON(atomic_read(&pa->pa_count));
5016 	BUG_ON(pa->pa_deleted == 0);
5017 	kmem_cache_free(ext4_pspace_cachep, pa);
5018 }
5019 
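/* RCU callback: free the pa once all lockless list walkers are done with it */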
5020 static void ext4_mb_pa_callback(struct rcu_head *head)
5021 {
5022 	struct ext4_prealloc_space *pa;
5023 
5024 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5025 	ext4_mb_pa_free(pa);
5026 }
5027 
5028 /*
5029  * drops a reference to preallocated space descriptor
5030  * if this was the last reference and the space is consumed
5031  */
5032 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5033 			struct super_block *sb, struct ext4_prealloc_space *pa)
5034 {
5035 	ext4_group_t grp;
5036 	ext4_fsblk_t grp_blk;
5037 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5038 
5039 	/* in this short window concurrent discard can set pa_deleted */
5040 	spin_lock(&pa->pa_lock);
5041 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5042 		spin_unlock(&pa->pa_lock);
5043 		return;
5044 	}
5045 
5046 	if (pa->pa_deleted == 1) {
5047 		spin_unlock(&pa->pa_lock);
5048 		return;
5049 	}
5050 
5051 	ext4_mb_mark_pa_deleted(sb, pa);
5052 	spin_unlock(&pa->pa_lock);
5053 
5054 	grp_blk = pa->pa_pstart;
5055 	/*
5056 	 * If doing group-based preallocation, pa_pstart may be in the
5057 	 * next group when pa is used up
5058 	 */
5059 	if (pa->pa_type == MB_GROUP_PA)
5060 		grp_blk--;
5061 
5062 	grp = ext4_get_group_number(sb, grp_blk);
5063 
5064 	/*
5065 	 * possible race:
5066 	 *
5067 	 *  P1 (buddy init)			P2 (regular allocation)
5068 	 *					find block B in PA
5069 	 *  copy on-disk bitmap to buddy
5070 	 *  					mark B in on-disk bitmap
5071 	 *					drop PA from group
5072 	 *  mark all PAs in buddy
5073 	 *
5074 	 * thus, P1 initializes buddy with B available. to prevent this
5075 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5076 	 * against that pair
5077 	 */
5078 	ext4_lock_group(sb, grp);
5079 	list_del(&pa->pa_group_list);
5080 	ext4_unlock_group(sb, grp);
5081 
5082 	if (pa->pa_type == MB_INODE_PA) {
5083 		write_lock(pa->pa_node_lock.inode_lock);
5084 		rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5085 		write_unlock(pa->pa_node_lock.inode_lock);
5086 		ext4_mb_pa_free(pa);
5087 	} else {
5088 		spin_lock(pa->pa_node_lock.lg_lock);
5089 		list_del_rcu(&pa->pa_node.lg_list);
5090 		spin_unlock(pa->pa_node_lock.lg_lock);
5091 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5092 	}
5093 }
5094 
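/*
 * Insert an inode pa into the per-inode rbtree, ordered by logical start
 * block (pa_lstart); entries with equal pa_lstart go to the right subtree.
 */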
5095 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5096 {
5097 	struct rb_node **iter = &root->rb_node, *parent = NULL;
5098 	struct ext4_prealloc_space *iter_pa, *new_pa;
5099 	ext4_lblk_t iter_start, new_start;
5100 
5101 	while (*iter) {
5102 		iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5103 				   pa_node.inode_node);
5104 		new_pa = rb_entry(new, struct ext4_prealloc_space,
5105 				   pa_node.inode_node);
5106 		iter_start = iter_pa->pa_lstart;
5107 		new_start = new_pa->pa_lstart;
5108 
5109 		parent = *iter;
5110 		if (new_start < iter_start)
5111 			iter = &((*iter)->rb_left);
5112 		else
5113 			iter = &((*iter)->rb_right);
5114 	}
5115 
5116 	rb_link_node(new, parent, iter);
5117 	rb_insert_color(new, root);
5118 }
5119 
5120 /*
5121  * creates new preallocated space for given inode
5122  */
5123 static noinline_for_stack void
5124 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5125 {
5126 	struct super_block *sb = ac->ac_sb;
5127 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5128 	struct ext4_prealloc_space *pa;
5129 	struct ext4_group_info *grp;
5130 	struct ext4_inode_info *ei;
5131 
5132 	/* preallocate only when found space is larger than requested */
5133 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5134 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5135 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5136 	BUG_ON(ac->ac_pa == NULL);
5137 
5138 	pa = ac->ac_pa;
5139 
5140 	if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5141 		struct ext4_free_extent ex = {
5142 			.fe_logical = ac->ac_g_ex.fe_logical,
5143 			.fe_len = ac->ac_orig_goal_len,
5144 		};
5145 		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5146 
5147 		/* We can't allocate as much as the normalizer wants, so the
5148 		 * found space must get a proper lstart to cover the
5149 		 * original request */
5150 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5151 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5152 
5153 		/*
5154 		 * Use the below logic for adjusting best extent as it keeps
5155 		 * fragmentation in check while ensuring logical range of best
5156 		 * extent doesn't overflow out of goal extent:
5157 		 *
5158 		 * 1. Check if best ex can be kept at end of goal (before
5159 		 *    cr_best_avail trimmed it) and still cover original start
5160 		 * 2. Else, check if best ex can be kept at start of goal and
5161 		 *    still cover original start
5162 		 * 3. Else, keep the best ex at start of original request.
5163 		 */
5164 		ex.fe_len = ac->ac_b_ex.fe_len;
5165 
5166 		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5167 		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5168 			goto adjust_bex;
5169 
5170 		ex.fe_logical = ac->ac_g_ex.fe_logical;
5171 		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
5172 			goto adjust_bex;
5173 
5174 		ex.fe_logical = ac->ac_o_ex.fe_logical;
5175 adjust_bex:
5176 		ac->ac_b_ex.fe_logical = ex.fe_logical;
5177 
5178 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5179 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
5180 		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5181 	}
5182 
5183 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
5184 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5185 	pa->pa_len = ac->ac_b_ex.fe_len;
5186 	pa->pa_free = pa->pa_len;
5187 	spin_lock_init(&pa->pa_lock);
5188 	INIT_LIST_HEAD(&pa->pa_group_list);
5189 	pa->pa_deleted = 0;
5190 	pa->pa_type = MB_INODE_PA;
5191 
5192 	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5193 		 pa->pa_len, pa->pa_lstart);
5194 	trace_ext4_mb_new_inode_pa(ac, pa);
5195 
5196 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5197 	ext4_mb_use_inode_pa(ac, pa);
5198 
5199 	ei = EXT4_I(ac->ac_inode);
5200 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5201 	if (!grp)
5202 		return;
5203 
5204 	pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5205 	pa->pa_inode = ac->ac_inode;
5206 
5207 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5208 
5209 	write_lock(pa->pa_node_lock.inode_lock);
5210 	ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5211 	write_unlock(pa->pa_node_lock.inode_lock);
5212 	atomic_inc(&ei->i_prealloc_active);
5213 }
5214 
5215 /*
5216  * creates new preallocated space for the locality group the inode belongs to
5217  */
5218 static noinline_for_stack void
5219 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5220 {
5221 	struct super_block *sb = ac->ac_sb;
5222 	struct ext4_locality_group *lg;
5223 	struct ext4_prealloc_space *pa;
5224 	struct ext4_group_info *grp;
5225 
5226 	/* preallocate only when found space is larger than requested */
5227 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5228 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5229 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5230 	BUG_ON(ac->ac_pa == NULL);
5231 
5232 	pa = ac->ac_pa;
5233 
5234 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5235 	pa->pa_lstart = pa->pa_pstart;
5236 	pa->pa_len = ac->ac_b_ex.fe_len;
5237 	pa->pa_free = pa->pa_len;
5238 	spin_lock_init(&pa->pa_lock);
5239 	INIT_LIST_HEAD(&pa->pa_node.lg_list);
5240 	INIT_LIST_HEAD(&pa->pa_group_list);
5241 	pa->pa_deleted = 0;
5242 	pa->pa_type = MB_GROUP_PA;
5243 
5244 	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5245 		 pa->pa_len, pa->pa_lstart);
5246 	trace_ext4_mb_new_group_pa(ac, pa);
5247 
5248 	ext4_mb_use_group_pa(ac, pa);
5249 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5250 
5251 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5252 	if (!grp)
5253 		return;
5254 	lg = ac->ac_lg;
5255 	BUG_ON(lg == NULL);
5256 
5257 	pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5258 	pa->pa_inode = NULL;
5259 
5260 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5261 
5262 	/*
5263 	 * We will later add the new pa to the right bucket
5264 	 * after updating the pa_free in ext4_mb_release_context
5265 	 */
5266 }
5267 
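/* Create a group pa or an inode pa depending on the allocation hint flags */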
5268 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5269 {
5270 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5271 		ext4_mb_new_group_pa(ac);
5272 	else
5273 		ext4_mb_new_inode_pa(ac);
5274 }
5275 
5276 /*
5277  * finds all unused blocks in on-disk bitmap, frees them in
5278  * in-core bitmap and buddy.
5279  * @pa must be unlinked from inode and group lists, so that
5280  * nobody else can find/use it.
5281  * the caller MUST hold group/inode locks.
5282  * TODO: optimize the case when there are no in-core structures yet
5283  */
5284 static noinline_for_stack int
5285 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
5286 			struct ext4_prealloc_space *pa)
5287 {
5288 	struct super_block *sb = e4b->bd_sb;
5289 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5290 	unsigned int end;
5291 	unsigned int next;
5292 	ext4_group_t group;
5293 	ext4_grpblk_t bit;
5294 	unsigned long long grp_blk_start;
5295 	int free = 0;
5296 
5297 	BUG_ON(pa->pa_deleted == 0);
5298 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5299 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5300 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5301 	end = bit + pa->pa_len;
5302 
5303 	while (bit < end) {
5304 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5305 		if (bit >= end)
5306 			break;
5307 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5308 		mb_debug(sb, "free preallocated %u/%u in group %u\n",
5309 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
5310 			 (unsigned) next - bit, (unsigned) group);
5311 		free += next - bit;
5312 
5313 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5314 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5315 						    EXT4_C2B(sbi, bit)),
5316 					       next - bit);
5317 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5318 		bit = next + 1;
5319 	}
5320 	if (free != pa->pa_free) {
5321 		ext4_msg(e4b->bd_sb, KERN_CRIT,
5322 			 "pa %p: logic %lu, phys. %lu, len %d",
5323 			 pa, (unsigned long) pa->pa_lstart,
5324 			 (unsigned long) pa->pa_pstart,
5325 			 pa->pa_len);
5326 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5327 					free, pa->pa_free);
5328 		/*
5329 		 * pa is already deleted so we use the value obtained
5330 		 * from the bitmap and continue.
5331 		 */
5332 	}
5333 	atomic_add(free, &sbi->s_mb_discarded);
5334 
5335 	return 0;
5336 }
5337 
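/*
 * Releases a locality group pa: frees its whole remaining range (pa_len) in
 * the in-core bitmap and buddy. Must be called with the group lock held.
 */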
5338 static noinline_for_stack int
5339 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
5340 				struct ext4_prealloc_space *pa)
5341 {
5342 	struct super_block *sb = e4b->bd_sb;
5343 	ext4_group_t group;
5344 	ext4_grpblk_t bit;
5345 
5346 	trace_ext4_mb_release_group_pa(sb, pa);
5347 	BUG_ON(pa->pa_deleted == 0);
5348 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5349 	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5350 		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5351 			     e4b->bd_group, group, pa->pa_pstart);
5352 		return 0;
5353 	}
5354 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5355 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5356 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
5357 
5358 	return 0;
5359 }
5360 
5361 /*
5362  * releases all preallocations in given group
5363  *
5364  * first, we need to decide discard policy:
5365  * - when do we discard
5366  *   1) ENOSPC
5367  * - how many do we discard
5368  *   1) how many requested
5369  */
5370 static noinline_for_stack int
5371 ext4_mb_discard_group_preallocations(struct super_block *sb,
5372 				     ext4_group_t group, int *busy)
5373 {
5374 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5375 	struct buffer_head *bitmap_bh = NULL;
5376 	struct ext4_prealloc_space *pa, *tmp;
5377 	LIST_HEAD(list);
5378 	struct ext4_buddy e4b;
5379 	struct ext4_inode_info *ei;
5380 	int err;
5381 	int free = 0;
5382 
5383 	if (!grp)
5384 		return 0;
5385 	mb_debug(sb, "discard preallocation for group %u\n", group);
5386 	if (list_empty(&grp->bb_prealloc_list))
5387 		goto out_dbg;
5388 
5389 	bitmap_bh = ext4_read_block_bitmap(sb, group);
5390 	if (IS_ERR(bitmap_bh)) {
5391 		err = PTR_ERR(bitmap_bh);
5392 		ext4_error_err(sb, -err,
5393 			       "Error %d reading block bitmap for %u",
5394 			       err, group);
5395 		goto out_dbg;
5396 	}
5397 
5398 	err = ext4_mb_load_buddy(sb, group, &e4b);
5399 	if (err) {
5400 		ext4_warning(sb, "Error %d loading buddy information for %u",
5401 			     err, group);
5402 		put_bh(bitmap_bh);
5403 		goto out_dbg;
5404 	}
5405 
5406 	ext4_lock_group(sb, group);
5407 	list_for_each_entry_safe(pa, tmp,
5408 				&grp->bb_prealloc_list, pa_group_list) {
5409 		spin_lock(&pa->pa_lock);
5410 		if (atomic_read(&pa->pa_count)) {
5411 			spin_unlock(&pa->pa_lock);
5412 			*busy = 1;
5413 			continue;
5414 		}
5415 		if (pa->pa_deleted) {
5416 			spin_unlock(&pa->pa_lock);
5417 			continue;
5418 		}
5419 
5420 		/* seems this one can be freed ... */
5421 		ext4_mb_mark_pa_deleted(sb, pa);
5422 
5423 		if (!free)
5424 			this_cpu_inc(discard_pa_seq);
5425 
5426 		/* we can trust pa_free ... */
5427 		free += pa->pa_free;
5428 
5429 		spin_unlock(&pa->pa_lock);
5430 
5431 		list_del(&pa->pa_group_list);
5432 		list_add(&pa->u.pa_tmp_list, &list);
5433 	}
5434 
5435 	/* now free all selected PAs */
5436 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5437 
5438 		/* remove from object (inode or locality group) */
5439 		if (pa->pa_type == MB_GROUP_PA) {
5440 			spin_lock(pa->pa_node_lock.lg_lock);
5441 			list_del_rcu(&pa->pa_node.lg_list);
5442 			spin_unlock(pa->pa_node_lock.lg_lock);
5443 		} else {
5444 			write_lock(pa->pa_node_lock.inode_lock);
5445 			ei = EXT4_I(pa->pa_inode);
5446 			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5447 			write_unlock(pa->pa_node_lock.inode_lock);
5448 		}
5449 
5450 		list_del(&pa->u.pa_tmp_list);
5451 
5452 		if (pa->pa_type == MB_GROUP_PA) {
5453 			ext4_mb_release_group_pa(&e4b, pa);
5454 			call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5455 		} else {
5456 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5457 			ext4_mb_pa_free(pa);
5458 		}
5459 	}
5460 
5461 	ext4_unlock_group(sb, group);
5462 	ext4_mb_unload_buddy(&e4b);
5463 	put_bh(bitmap_bh);
5464 out_dbg:
5465 	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5466 		 free, group, grp->bb_free);
5467 	return free;
5468 }
5469 
5470 /*
5471  * releases all unused preallocated blocks for the given inode
5472  *
5473  * It's important to discard preallocations under i_data_sem.
5474  * We don't want another block to be served from the prealloc
5475  * space when we are discarding the inode prealloc space.
5476  *
5477  * FIXME!! Make sure it is valid at all the call sites
5478  */
5479 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
5480 {
5481 	struct ext4_inode_info *ei = EXT4_I(inode);
5482 	struct super_block *sb = inode->i_sb;
5483 	struct buffer_head *bitmap_bh = NULL;
5484 	struct ext4_prealloc_space *pa, *tmp;
5485 	ext4_group_t group = 0;
5486 	LIST_HEAD(list);
5487 	struct ext4_buddy e4b;
5488 	struct rb_node *iter;
5489 	int err;
5490 
5491 	if (!S_ISREG(inode->i_mode)) {
5492 		return;
5493 	}
5494 
5495 	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5496 		return;
5497 
5498 	mb_debug(sb, "discard preallocation for inode %lu\n",
5499 		 inode->i_ino);
5500 	trace_ext4_discard_preallocations(inode,
5501 			atomic_read(&ei->i_prealloc_active), needed);
5502 
5503 	if (needed == 0)
5504 		needed = UINT_MAX;
5505 
5506 repeat:
5507 	/* first, collect all pa's in the inode */
5508 	write_lock(&ei->i_prealloc_lock);
5509 	for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
5510 	     iter = rb_next(iter)) {
5511 		pa = rb_entry(iter, struct ext4_prealloc_space,
5512 			      pa_node.inode_node);
5513 		BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5514 
5515 		spin_lock(&pa->pa_lock);
5516 		if (atomic_read(&pa->pa_count)) {
5517 			/* this shouldn't happen often - nobody should
5518 			 * use preallocation while we're discarding it */
5519 			spin_unlock(&pa->pa_lock);
5520 			write_unlock(&ei->i_prealloc_lock);
5521 			ext4_msg(sb, KERN_ERR,
5522 				 "uh-oh! used pa while discarding");
5523 			WARN_ON(1);
5524 			schedule_timeout_uninterruptible(HZ);
5525 			goto repeat;
5526 
5527 		}
5528 		if (pa->pa_deleted == 0) {
5529 			ext4_mb_mark_pa_deleted(sb, pa);
5530 			spin_unlock(&pa->pa_lock);
5531 			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5532 			list_add(&pa->u.pa_tmp_list, &list);
5533 			needed--;
5534 			continue;
5535 		}
5536 
5537 		/* someone is deleting pa right now */
5538 		spin_unlock(&pa->pa_lock);
5539 		write_unlock(&ei->i_prealloc_lock);
5540 
5541 		/* We have to wait here because pa_deleted
5542 		 * doesn't mean the pa is already unlinked from
5543 		 * the list. As we might be called from
5544 		 * ->clear_inode(), the inode will get freed
5545 		 * and a concurrent thread which is unlinking the
5546 		 * pa from the inode's list may access already
5547 		 * freed memory, bad-bad-bad */
5548 
5549 		/* XXX: if this happens too often, we can
5550 		 * add a flag to force wait only in case
5551 		 * of ->clear_inode(), but not in case of
5552 		 * regular truncate */
5553 		schedule_timeout_uninterruptible(HZ);
5554 		goto repeat;
5555 	}
5556 	write_unlock(&ei->i_prealloc_lock);
5557 
5558 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5559 		BUG_ON(pa->pa_type != MB_INODE_PA);
5560 		group = ext4_get_group_number(sb, pa->pa_pstart);
5561 
5562 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5563 					     GFP_NOFS|__GFP_NOFAIL);
5564 		if (err) {
5565 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5566 				       err, group);
5567 			continue;
5568 		}
5569 
5570 		bitmap_bh = ext4_read_block_bitmap(sb, group);
5571 		if (IS_ERR(bitmap_bh)) {
5572 			err = PTR_ERR(bitmap_bh);
5573 			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5574 				       err, group);
5575 			ext4_mb_unload_buddy(&e4b);
5576 			continue;
5577 		}
5578 
5579 		ext4_lock_group(sb, group);
5580 		list_del(&pa->pa_group_list);
5581 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5582 		ext4_unlock_group(sb, group);
5583 
5584 		ext4_mb_unload_buddy(&e4b);
5585 		put_bh(bitmap_bh);
5586 
5587 		list_del(&pa->u.pa_tmp_list);
5588 		ext4_mb_pa_free(pa);
5589 	}
5590 }
5591 
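/* Allocate a zeroed pa descriptor for this context, holding one reference */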
5592 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5593 {
5594 	struct ext4_prealloc_space *pa;
5595 
5596 	BUG_ON(ext4_pspace_cachep == NULL);
5597 	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5598 	if (!pa)
5599 		return -ENOMEM;
5600 	atomic_set(&pa->pa_count, 1);
5601 	ac->ac_pa = pa;
5602 	return 0;
5603 }
5604 
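/*
 * Drop the context's reference on a pa that was never exposed to other
 * threads and free it immediately.
 */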
5605 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5606 {
5607 	struct ext4_prealloc_space *pa = ac->ac_pa;
5608 
5609 	BUG_ON(!pa);
5610 	ac->ac_pa = NULL;
5611 	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5612 	/*
5613 	 * The current function is only called due to an error or because the
5614 	 * len of found blocks < len of requested blocks, hence the PA has not
5615 	 * been added to grp->bb_prealloc_list. So we don't need to lock it.
5616 	 */
5617 	pa->pa_deleted = 1;
5618 	ext4_mb_pa_free(pa);
5619 }
5620 
5621 #ifdef CONFIG_EXT4_DEBUG
5622 static inline void ext4_mb_show_pa(struct super_block *sb)
5623 {
5624 	ext4_group_t i, ngroups;
5625 
5626 	if (ext4_forced_shutdown(sb))
5627 		return;
5628 
5629 	ngroups = ext4_get_groups_count(sb);
5630 	mb_debug(sb, "groups: ");
5631 	for (i = 0; i < ngroups; i++) {
5632 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5633 		struct ext4_prealloc_space *pa;
5634 		ext4_grpblk_t start;
5635 		struct list_head *cur;
5636 
5637 		if (!grp)
5638 			continue;
5639 		ext4_lock_group(sb, i);
5640 		list_for_each(cur, &grp->bb_prealloc_list) {
5641 			pa = list_entry(cur, struct ext4_prealloc_space,
5642 					pa_group_list);
5643 			spin_lock(&pa->pa_lock);
5644 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5645 						     NULL, &start);
5646 			spin_unlock(&pa->pa_lock);
5647 			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5648 				 pa->pa_len);
5649 		}
5650 		ext4_unlock_group(sb, i);
5651 		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5652 			 grp->bb_fragments);
5653 	}
5654 }
5655 
5656 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5657 {
5658 	struct super_block *sb = ac->ac_sb;
5659 
5660 	if (ext4_forced_shutdown(sb))
5661 		return;
5662 
5663 	mb_debug(sb, "Can't allocate:"
5664 			" Allocation context details:");
5665 	mb_debug(sb, "status %u flags 0x%x",
5666 			ac->ac_status, ac->ac_flags);
5667 	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5668 			"goal %lu/%lu/%lu@%lu, "
5669 			"best %lu/%lu/%lu@%lu cr %d",
5670 			(unsigned long)ac->ac_o_ex.fe_group,
5671 			(unsigned long)ac->ac_o_ex.fe_start,
5672 			(unsigned long)ac->ac_o_ex.fe_len,
5673 			(unsigned long)ac->ac_o_ex.fe_logical,
5674 			(unsigned long)ac->ac_g_ex.fe_group,
5675 			(unsigned long)ac->ac_g_ex.fe_start,
5676 			(unsigned long)ac->ac_g_ex.fe_len,
5677 			(unsigned long)ac->ac_g_ex.fe_logical,
5678 			(unsigned long)ac->ac_b_ex.fe_group,
5679 			(unsigned long)ac->ac_b_ex.fe_start,
5680 			(unsigned long)ac->ac_b_ex.fe_len,
5681 			(unsigned long)ac->ac_b_ex.fe_logical,
5682 			(int)ac->ac_criteria);
5683 	mb_debug(sb, "%u found", ac->ac_found);
5684 	mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5685 	if (ac->ac_pa)
5686 		mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5687 			 "group pa" : "inode pa");
5688 	ext4_mb_show_pa(sb);
5689 }
5690 #else
5691 static inline void ext4_mb_show_pa(struct super_block *sb)
5692 {
5693 }
5694 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5695 {
5696 	ext4_mb_show_pa(ac->ac_sb);
5697 }
5698 #endif
5699 
5700 /*
5701  * We use locality group preallocation for small files. The size of the
5702  * file is determined by the current size or the resulting size after
5703  * allocation, whichever is larger.
5704  *
5705  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5706  */
5707 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5708 {
5709 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5710 	int bsbits = ac->ac_sb->s_blocksize_bits;
5711 	loff_t size, isize;
5712 	bool inode_pa_eligible, group_pa_eligible;
5713 
5714 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5715 		return;
5716 
5717 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5718 		return;
5719 
5720 	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5721 	inode_pa_eligible = true;
5722 	size = extent_logical_end(sbi, &ac->ac_o_ex);
5723 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5724 		>> bsbits;
5725 
5726 	/* No point in using inode preallocation for closed files */
5727 	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5728 	    !inode_is_open_for_write(ac->ac_inode))
5729 		inode_pa_eligible = false;
5730 
5731 	size = max(size, isize);
5732 	/* Don't use group allocation for large files */
5733 	if (size > sbi->s_mb_stream_request)
5734 		group_pa_eligible = false;
5735 
5736 	if (!group_pa_eligible) {
5737 		if (inode_pa_eligible)
5738 			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5739 		else
5740 			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5741 		return;
5742 	}
5743 
5744 	BUG_ON(ac->ac_lg != NULL);
5745 	/*
5746 	 * Locality group prealloc spaces are per cpu. The reason for having
5747 	 * a per-cpu locality group is to reduce the contention between block
5748 	 * requests from multiple CPUs.
5749 	 */
5750 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5751 
5752 	/* we're going to use group allocation */
5753 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5754 
5755 	/* serialize all allocations in the group */
5756 	mutex_lock(&ac->ac_lg->lg_mutex);
5757 }
5758 
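/*
 * Initialize the allocation context from the allocation request: clamp the
 * requested length to the group size, validate the goal block, set up the
 * original/goal extents and choose inode vs. locality group preallocation.
 */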
5759 static noinline_for_stack void
5760 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5761 				struct ext4_allocation_request *ar)
5762 {
5763 	struct super_block *sb = ar->inode->i_sb;
5764 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5765 	struct ext4_super_block *es = sbi->s_es;
5766 	ext4_group_t group;
5767 	unsigned int len;
5768 	ext4_fsblk_t goal;
5769 	ext4_grpblk_t block;
5770 
5771 	/* we can't allocate > group size */
5772 	len = ar->len;
5773 
5774 	/* just a dirty hack to filter too big requests  */
5775 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5776 		len = EXT4_CLUSTERS_PER_GROUP(sb);
5777 
5778 	/* start searching from the goal */
5779 	goal = ar->goal;
5780 	if (goal < le32_to_cpu(es->s_first_data_block) ||
5781 			goal >= ext4_blocks_count(es))
5782 		goal = le32_to_cpu(es->s_first_data_block);
5783 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
5784 
5785 	/* set up allocation goals */
5786 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5787 	ac->ac_status = AC_STATUS_CONTINUE;
5788 	ac->ac_sb = sb;
5789 	ac->ac_inode = ar->inode;
5790 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5791 	ac->ac_o_ex.fe_group = group;
5792 	ac->ac_o_ex.fe_start = block;
5793 	ac->ac_o_ex.fe_len = len;
5794 	ac->ac_g_ex = ac->ac_o_ex;
5795 	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5796 	ac->ac_flags = ar->flags;
5797 
5798 	/* we have to define context: we'll work with a file or
5799 	 * locality group. this is a policy, actually */
5800 	ext4_mb_group_or_file(ac);
5801 
5802 	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5803 			"left: %u/%u, right %u/%u to %swritable\n",
5804 			(unsigned) ar->len, (unsigned) ar->logical,
5805 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5806 			(unsigned) ar->lleft, (unsigned) ar->pleft,
5807 			(unsigned) ar->lright, (unsigned) ar->pright,
5808 			inode_is_open_for_write(ar->inode) ? "" : "non-");
5809 }
5810 
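/*
 * Shrink a locality group's preallocation list of the given order: mark the
 * unused pa's deleted and release their blocks back to the buddy, keeping
 * only a few entries on the list.
 */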
5811 static noinline_for_stack void
5812 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5813 					struct ext4_locality_group *lg,
5814 					int order, int total_entries)
5815 {
5816 	ext4_group_t group = 0;
5817 	struct ext4_buddy e4b;
5818 	LIST_HEAD(discard_list);
5819 	struct ext4_prealloc_space *pa, *tmp;
5820 
5821 	mb_debug(sb, "discard locality group preallocation\n");
5822 
5823 	spin_lock(&lg->lg_prealloc_lock);
5824 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5825 				pa_node.lg_list,
5826 				lockdep_is_held(&lg->lg_prealloc_lock)) {
5827 		spin_lock(&pa->pa_lock);
5828 		if (atomic_read(&pa->pa_count)) {
5829 			/*
5830 			 * This is the pa that we just used
5831 			 * for block allocation. So don't
5832 			 * free that
5833 			 */
5834 			spin_unlock(&pa->pa_lock);
5835 			continue;
5836 		}
5837 		if (pa->pa_deleted) {
5838 			spin_unlock(&pa->pa_lock);
5839 			continue;
5840 		}
5841 		/* only lg prealloc space */
5842 		BUG_ON(pa->pa_type != MB_GROUP_PA);
5843 
5844 		/* seems this one can be freed ... */
5845 		ext4_mb_mark_pa_deleted(sb, pa);
5846 		spin_unlock(&pa->pa_lock);
5847 
5848 		list_del_rcu(&pa->pa_node.lg_list);
5849 		list_add(&pa->u.pa_tmp_list, &discard_list);
5850 
5851 		total_entries--;
5852 		if (total_entries <= 5) {
5853 			/*
5854 			 * we want to keep only 5 entries
5855 			 * allowing it to grow to 8. This
5856 			 * makes sure we don't call discard
5857 			 * soon for this list.
5858 			 */
5859 			break;
5860 		}
5861 	}
5862 	spin_unlock(&lg->lg_prealloc_lock);
5863 
5864 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5865 		int err;
5866 
5867 		group = ext4_get_group_number(sb, pa->pa_pstart);
5868 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5869 					     GFP_NOFS|__GFP_NOFAIL);
5870 		if (err) {
5871 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5872 				       err, group);
5873 			continue;
5874 		}
5875 		ext4_lock_group(sb, group);
5876 		list_del(&pa->pa_group_list);
5877 		ext4_mb_release_group_pa(&e4b, pa);
5878 		ext4_unlock_group(sb, group);
5879 
5880 		ext4_mb_unload_buddy(&e4b);
5881 		list_del(&pa->u.pa_tmp_list);
5882 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5883 	}
5884 }
5885 
5886 /*
5887  * We have incremented pa_count. So it cannot be freed at this
5888  * point. Also we hold lg_mutex. So no parallel allocation is
5889  * possible from this lg. That means pa_free cannot be updated.
5890  *
5891  * A parallel ext4_mb_discard_group_preallocations is possible, however,
5892  * which can cause the lg_prealloc_list to be updated.
5893  */
5894 
5895 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5896 {
5897 	int order, added = 0, lg_prealloc_count = 1;
5898 	struct super_block *sb = ac->ac_sb;
5899 	struct ext4_locality_group *lg = ac->ac_lg;
5900 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5901 
5902 	order = fls(pa->pa_free) - 1;
5903 	if (order > PREALLOC_TB_SIZE - 1)
5904 		/* The max size of hash table is PREALLOC_TB_SIZE */
5905 		order = PREALLOC_TB_SIZE - 1;
5906 	/* Add the prealloc space to lg */
5907 	spin_lock(&lg->lg_prealloc_lock);
5908 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5909 				pa_node.lg_list,
5910 				lockdep_is_held(&lg->lg_prealloc_lock)) {
5911 		spin_lock(&tmp_pa->pa_lock);
5912 		if (tmp_pa->pa_deleted) {
5913 			spin_unlock(&tmp_pa->pa_lock);
5914 			continue;
5915 		}
5916 		if (!added && pa->pa_free < tmp_pa->pa_free) {
5917 			/* Add to the tail of the previous entry */
5918 			list_add_tail_rcu(&pa->pa_node.lg_list,
5919 						&tmp_pa->pa_node.lg_list);
5920 			added = 1;
5921 			/*
5922 			 * we want to count the total
5923 			 * number of entries in the list
5924 			 */
5925 		}
5926 		spin_unlock(&tmp_pa->pa_lock);
5927 		lg_prealloc_count++;
5928 	}
5929 	if (!added)
5930 		list_add_tail_rcu(&pa->pa_node.lg_list,
5931 					&lg->lg_prealloc_list[order]);
5932 	spin_unlock(&lg->lg_prealloc_lock);
5933 
5934 	/* Now trim the list to be not more than 8 elements */
5935 	if (lg_prealloc_count > 8)
5936 		ext4_mb_discard_lg_preallocations(sb, lg,
5937 						  order, lg_prealloc_count);
5938 }
5939 
5940 /*
5941  * release all resource we used in allocation
5942  */
5943 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5944 {
5945 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5946 	struct ext4_prealloc_space *pa = ac->ac_pa;
5947 	if (pa) {
5948 		if (pa->pa_type == MB_GROUP_PA) {
5949 			/* see comment in ext4_mb_use_group_pa() */
5950 			spin_lock(&pa->pa_lock);
5951 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5952 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5953 			pa->pa_free -= ac->ac_b_ex.fe_len;
5954 			pa->pa_len -= ac->ac_b_ex.fe_len;
5955 			spin_unlock(&pa->pa_lock);
5956 
5957 			/*
5958 			 * We want to add the pa to the right bucket.
5959 			 * Remove it from the list and while adding
5960 			 * make sure the list to which we are adding
5961 			 * doesn't grow big.
5962 			 */
5963 			if (likely(pa->pa_free)) {
5964 				spin_lock(pa->pa_node_lock.lg_lock);
5965 				list_del_rcu(&pa->pa_node.lg_list);
5966 				spin_unlock(pa->pa_node_lock.lg_lock);
5967 				ext4_mb_add_n_trim(ac);
5968 			}
5969 		}
5970 
5971 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
5972 	}
5973 	if (ac->ac_bitmap_page)
5974 		put_page(ac->ac_bitmap_page);
5975 	if (ac->ac_buddy_page)
5976 		put_page(ac->ac_buddy_page);
5977 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5978 		mutex_unlock(&ac->ac_lg->lg_mutex);
5979 	ext4_mb_collect_stats(ac);
5980 	return 0;
5981 }
5982 
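/*
 * Try to free at least "needed" clusters by discarding unused group
 * preallocations, scanning all groups and retrying a few times if busy
 * pa's had to be skipped.
 */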
5983 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5984 {
5985 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5986 	int ret;
5987 	int freed = 0, busy = 0;
5988 	int retry = 0;
5989 
5990 	trace_ext4_mb_discard_preallocations(sb, needed);
5991 
5992 	if (needed == 0)
5993 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5994  repeat:
5995 	for (i = 0; i < ngroups && needed > 0; i++) {
5996 		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5997 		freed += ret;
5998 		needed -= ret;
5999 		cond_resched();
6000 	}
6001 
6002 	if (needed > 0 && busy && ++retry < 3) {
6003 		busy = 0;
6004 		goto repeat;
6005 	}
6006 
6007 	return freed;
6008 }
6009 
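/*
 * Decide whether a failed allocation should be retried: retry if we just
 * discarded some preallocations, or if the discard sequence changed (or we
 * have not yet retried with EXT4_MB_STRICT_CHECK set).
 */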
6010 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
6011 			struct ext4_allocation_context *ac, u64 *seq)
6012 {
6013 	int freed;
6014 	u64 seq_retry = 0;
6015 	bool ret = false;
6016 
6017 	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6018 	if (freed) {
6019 		ret = true;
6020 		goto out_dbg;
6021 	}
6022 	seq_retry = ext4_get_discard_pa_seq_sum();
6023 	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6024 		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
6025 		*seq = seq_retry;
6026 		ret = true;
6027 	}
6028 
6029 out_dbg:
6030 	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
6031 	return ret;
6032 }
6033 
6034 /*
6035  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6036  * linearly starting at the goal block and also excludes the blocks which
6037  * are going to be in use after fast commit replay.
6038  */
6039 static ext4_fsblk_t
6040 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6041 {
6042 	struct buffer_head *bitmap_bh;
6043 	struct super_block *sb = ar->inode->i_sb;
6044 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6045 	ext4_group_t group, nr;
6046 	ext4_grpblk_t blkoff;
6047 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6048 	ext4_grpblk_t i = 0;
6049 	ext4_fsblk_t goal, block;
6050 	struct ext4_super_block *es = sbi->s_es;
6051 
6052 	goal = ar->goal;
6053 	if (goal < le32_to_cpu(es->s_first_data_block) ||
6054 			goal >= ext4_blocks_count(es))
6055 		goal = le32_to_cpu(es->s_first_data_block);
6056 
6057 	ar->len = 0;
6058 	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6059 	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6060 		bitmap_bh = ext4_read_block_bitmap(sb, group);
6061 		if (IS_ERR(bitmap_bh)) {
6062 			*errp = PTR_ERR(bitmap_bh);
6063 			pr_warn("Failed to read block bitmap\n");
6064 			return 0;
6065 		}
6066 
6067 		while (1) {
6068 			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6069 						blkoff);
6070 			if (i >= max)
6071 				break;
6072 			if (ext4_fc_replay_check_excluded(sb,
6073 				ext4_group_first_block_no(sb, group) +
6074 				EXT4_C2B(sbi, i))) {
6075 				blkoff = i + 1;
6076 			} else
6077 				break;
6078 		}
6079 		brelse(bitmap_bh);
6080 		if (i < max)
6081 			break;
6082 
6083 		if (++group >= ext4_get_groups_count(sb))
6084 			group = 0;
6085 
6086 		blkoff = 0;
6087 	}
6088 
6089 	if (i >= max) {
6090 		*errp = -ENOSPC;
6091 		return 0;
6092 	}
6093 
6094 	block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6095 	ext4_mb_mark_bb(sb, block, 1, true);
6096 	ar->len = 1;
6097 
6098 	return block;
6099 }
6100 
6101 /*
6102  * Main entry point into mballoc to allocate blocks
6103  * Main entry point into mballoc to allocate blocks;
6104  * to usual allocation
6105  */
6106 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6107 				struct ext4_allocation_request *ar, int *errp)
6108 {
6109 	struct ext4_allocation_context *ac = NULL;
6110 	struct ext4_sb_info *sbi;
6111 	struct super_block *sb;
6112 	ext4_fsblk_t block = 0;
6113 	unsigned int inquota = 0;
6114 	unsigned int reserv_clstrs = 0;
6115 	int retries = 0;
6116 	u64 seq;
6117 
6118 	might_sleep();
6119 	sb = ar->inode->i_sb;
6120 	sbi = EXT4_SB(sb);
6121 
6122 	trace_ext4_request_blocks(ar);
6123 	if (sbi->s_mount_state & EXT4_FC_REPLAY)
6124 		return ext4_mb_new_blocks_simple(ar, errp);
6125 
6126 	/* Allow to use superuser reservation for quota file */
6127 	if (ext4_is_quota_file(ar->inode))
6128 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6129 
6130 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6131 		/* Without delayed allocation we need to verify
6132 		 * there are enough free blocks to do block allocation
6133 		 * and verify allocation doesn't exceed the quota limits.
6134 		 */
6135 		while (ar->len &&
6136 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6137 
6138 			/* let others free the space */
6139 			cond_resched();
6140 			ar->len = ar->len >> 1;
6141 		}
6142 		if (!ar->len) {
6143 			ext4_mb_show_pa(sb);
6144 			*errp = -ENOSPC;
6145 			return 0;
6146 		}
6147 		reserv_clstrs = ar->len;
6148 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6149 			dquot_alloc_block_nofail(ar->inode,
6150 						 EXT4_C2B(sbi, ar->len));
6151 		} else {
6152 			while (ar->len &&
6153 				dquot_alloc_block(ar->inode,
6154 						  EXT4_C2B(sbi, ar->len))) {
6155 
6156 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6157 				ar->len--;
6158 			}
6159 		}
6160 		inquota = ar->len;
6161 		if (ar->len == 0) {
6162 			*errp = -EDQUOT;
6163 			goto out;
6164 		}
6165 	}
6166 
6167 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6168 	if (!ac) {
6169 		ar->len = 0;
6170 		*errp = -ENOMEM;
6171 		goto out;
6172 	}
6173 
6174 	ext4_mb_initialize_context(ac, ar);
6175 
6176 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6177 	seq = this_cpu_read(discard_pa_seq);
6178 	if (!ext4_mb_use_preallocated(ac)) {
6179 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6180 		ext4_mb_normalize_request(ac, ar);
6181 
6182 		*errp = ext4_mb_pa_alloc(ac);
6183 		if (*errp)
6184 			goto errout;
6185 repeat:
6186 		/* allocate space in core */
6187 		*errp = ext4_mb_regular_allocator(ac);
6188 		/*
6189 		 * The pa allocated above is added to grp->bb_prealloc_list only
6190 		 * when we were able to allocate some blocks, i.e. when
6191 		 * ac->ac_status == AC_STATUS_FOUND.
6192 		 * An error from above means ac->ac_status != AC_STATUS_FOUND,
6193 		 * so we have to free this pa here itself.
6194 		 */
6195 		if (*errp) {
6196 			ext4_mb_pa_put_free(ac);
6197 			ext4_discard_allocated_blocks(ac);
6198 			goto errout;
6199 		}
6200 		if (ac->ac_status == AC_STATUS_FOUND &&
6201 			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6202 			ext4_mb_pa_put_free(ac);
6203 	}
6204 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6205 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6206 		if (*errp) {
6207 			ext4_discard_allocated_blocks(ac);
6208 			goto errout;
6209 		} else {
6210 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6211 			ar->len = ac->ac_b_ex.fe_len;
6212 		}
6213 	} else {
6214 		if (++retries < 3 &&
6215 		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6216 			goto repeat;
6217 		/*
6218 		 * If block allocation fails then the pa allocated above
6219 		 * needs to be freed here itself.
6220 		 */
6221 		ext4_mb_pa_put_free(ac);
6222 		*errp = -ENOSPC;
6223 	}
6224 
6225 	if (*errp) {
6226 errout:
6227 		ac->ac_b_ex.fe_len = 0;
6228 		ar->len = 0;
6229 		ext4_mb_show_ac(ac);
6230 	}
6231 	ext4_mb_release_context(ac);
6232 	kmem_cache_free(ext4_ac_cachep, ac);
6233 out:
6234 	if (inquota && ar->len < inquota)
6235 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6236 	if (!ar->len) {
6237 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6238 			/* release all the reserved blocks if non delalloc */
6239 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6240 						reserv_clstrs);
6241 	}
6242 
6243 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6244 
6245 	return block;
6246 }
6247 
6248 /*
6249  * We can merge two free data extents only if the physical blocks
6250  * are contiguous, AND the extents were freed by the same transaction,
6251  * AND the blocks are associated with the same group.
6252  */
6253 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6254 					struct ext4_free_data *entry,
6255 					struct ext4_free_data *new_entry,
6256 					struct rb_root *entry_rb_root)
6257 {
6258 	if ((entry->efd_tid != new_entry->efd_tid) ||
6259 	    (entry->efd_group != new_entry->efd_group))
6260 		return;
6261 	if (entry->efd_start_cluster + entry->efd_count ==
6262 	    new_entry->efd_start_cluster) {
6263 		new_entry->efd_start_cluster = entry->efd_start_cluster;
6264 		new_entry->efd_count += entry->efd_count;
6265 	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6266 		   entry->efd_start_cluster) {
6267 		new_entry->efd_count += entry->efd_count;
6268 	} else
6269 		return;
6270 	spin_lock(&sbi->s_md_lock);
6271 	list_del(&entry->efd_list);
6272 	spin_unlock(&sbi->s_md_lock);
6273 	rb_erase(&entry->efd_node, entry_rb_root);
6274 	kmem_cache_free(ext4_free_data_cachep, entry);
6275 }
6276 
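/*
 * Record a to-be-freed extent in the group's bb_free_root rbtree and on the
 * per-sb freed-data list, merging with adjacent entries from the same
 * transaction, so the blocks become reusable only after the transaction
 * commits. Called with the group lock held.
 */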
6277 static noinline_for_stack void
6278 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6279 		      struct ext4_free_data *new_entry)
6280 {
6281 	ext4_group_t group = e4b->bd_group;
6282 	ext4_grpblk_t cluster;
6283 	ext4_grpblk_t clusters = new_entry->efd_count;
6284 	struct ext4_free_data *entry;
6285 	struct ext4_group_info *db = e4b->bd_info;
6286 	struct super_block *sb = e4b->bd_sb;
6287 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6288 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
6289 	struct rb_node *parent = NULL, *new_node;
6290 
6291 	BUG_ON(!ext4_handle_valid(handle));
6292 	BUG_ON(e4b->bd_bitmap_page == NULL);
6293 	BUG_ON(e4b->bd_buddy_page == NULL);
6294 
6295 	new_node = &new_entry->efd_node;
6296 	cluster = new_entry->efd_start_cluster;
6297 
6298 	if (!*n) {
6299 		/* first free block extent. We need to
6300 		 * protect the buddy cache from being freed,
6301 		 * otherwise we'll refresh it from the
6302 		 * on-disk bitmap and lose not-yet-available
6303 		 * blocks */
6304 		get_page(e4b->bd_buddy_page);
6305 		get_page(e4b->bd_bitmap_page);
6306 	}
6307 	while (*n) {
6308 		parent = *n;
6309 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
6310 		if (cluster < entry->efd_start_cluster)
6311 			n = &(*n)->rb_left;
6312 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6313 			n = &(*n)->rb_right;
6314 		else {
6315 			ext4_grp_locked_error(sb, group, 0,
6316 				ext4_group_first_block_no(sb, group) +
6317 				EXT4_C2B(sbi, cluster),
6318 				"Block already on to-be-freed list");
6319 			kmem_cache_free(ext4_free_data_cachep, new_entry);
6320 			return;
6321 		}
6322 	}
6323 
6324 	rb_link_node(new_node, parent, n);
6325 	rb_insert_color(new_node, &db->bb_free_root);
6326 
6327 	/* Now try to see if the extent can be merged to the left and right */
6328 	node = rb_prev(new_node);
6329 	if (node) {
6330 		entry = rb_entry(node, struct ext4_free_data, efd_node);
6331 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
6332 					    &(db->bb_free_root));
6333 	}
6334 
6335 	node = rb_next(new_node);
6336 	if (node) {
6337 		entry = rb_entry(node, struct ext4_free_data, efd_node);
6338 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
6339 					    &(db->bb_free_root));
6340 	}
6341 
6342 	spin_lock(&sbi->s_md_lock);
6343 	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
6344 	sbi->s_mb_free_pending += clusters;
6345 	spin_unlock(&sbi->s_md_lock);
6346 }
6347 
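/*
 * Simple free path used during fast commit replay: clear the blocks in the
 * block bitmap and write it out synchronously.
 */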
6348 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6349 					unsigned long count)
6350 {
6351 	struct super_block *sb = inode->i_sb;
6352 	ext4_group_t group;
6353 	ext4_grpblk_t blkoff;
6354 
6355 	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6356 	ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
6357 			     EXT4_MB_BITMAP_MARKED_CHECK |
6358 			     EXT4_MB_SYNC_UPDATE,
6359 			     NULL);
6360 }
6361 
6362 /**
6363  * ext4_mb_clear_bb() -- helper function for freeing blocks.
6364  *			Used by ext4_free_blocks()
6365  * @handle:		handle for this transaction
6366  * @inode:		inode
6367  * @block:		starting physical block to be freed
6368  * @count:		number of blocks to be freed
6369  * @flags:		flags used by ext4_free_blocks
6370  */
6371 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6372 			       ext4_fsblk_t block, unsigned long count,
6373 			       int flags)
6374 {
6375 	struct super_block *sb = inode->i_sb;
6376 	struct ext4_group_info *grp;
6377 	unsigned int overflow;
6378 	ext4_grpblk_t bit;
6379 	ext4_group_t block_group;
6380 	struct ext4_sb_info *sbi;
6381 	struct ext4_buddy e4b;
6382 	unsigned int count_clusters;
6383 	int err = 0;
6384 	int mark_flags = 0;
6385 	ext4_grpblk_t changed;
6386 
6387 	sbi = EXT4_SB(sb);
6388 
6389 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6390 	    !ext4_inode_block_valid(inode, block, count)) {
6391 		ext4_error(sb, "Freeing blocks in system zone - "
6392 			   "Block = %llu, count = %lu", block, count);
6393 		/* err = 0. ext4_std_error should be a no op */
6394 		goto error_out;
6395 	}
6396 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6397 
6398 do_more:
6399 	overflow = 0;
6400 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6401 
6402 	grp = ext4_get_group_info(sb, block_group);
6403 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6404 		return;
6405 
6406 	/*
6407 	 * Check to see if we are freeing blocks across a group
6408 	 * boundary.
6409 	 */
6410 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6411 		overflow = EXT4_C2B(sbi, bit) + count -
6412 			EXT4_BLOCKS_PER_GROUP(sb);
6413 		count -= overflow;
6414 		/* The range changed so it's no longer validated */
6415 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6416 	}
6417 	count_clusters = EXT4_NUM_B2C(sbi, count);
6418 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6419 
6420 	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6421 	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6422 				     GFP_NOFS|__GFP_NOFAIL);
6423 	if (err)
6424 		goto error_out;
6425 
6426 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6427 	    !ext4_inode_block_valid(inode, block, count)) {
6428 		ext4_error(sb, "Freeing blocks in system zone - "
6429 			   "Block = %llu, count = %lu", block, count);
6430 		/* err = 0. ext4_std_error should be a no op */
6431 		goto error_clean;
6432 	}
6433 
6434 #ifdef AGGRESSIVE_CHECK
6435 	mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
6436 #endif
6437 	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6438 				   count_clusters, mark_flags, &changed);
6439 
6440 
6441 	if (err && changed == 0)
6442 		goto error_clean;
6443 
6444 #ifdef AGGRESSIVE_CHECK
6445 	BUG_ON(changed != count_clusters);
6446 #endif
6447 
6448 	/*
6449 	 * We need to make sure we don't reuse the freed block until after the
6450 	 * transaction is committed. We make an exception if the inode is to be
6451 	 * written in writeback mode since writeback mode has weak data
6452 	 * consistency guarantees.
6453 	 */
6454 	if (ext4_handle_valid(handle) &&
6455 	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6456 	     !ext4_should_writeback_data(inode))) {
6457 		struct ext4_free_data *new_entry;
6458 		/*
6459 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6460 		 * to fail.
6461 		 */
6462 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6463 				GFP_NOFS|__GFP_NOFAIL);
6464 		new_entry->efd_start_cluster = bit;
6465 		new_entry->efd_group = block_group;
6466 		new_entry->efd_count = count_clusters;
6467 		new_entry->efd_tid = handle->h_transaction->t_tid;
6468 
6469 		ext4_lock_group(sb, block_group);
6470 		ext4_mb_free_metadata(handle, &e4b, new_entry);
6471 	} else {
6472 		if (test_opt(sb, DISCARD)) {
6473 			err = ext4_issue_discard(sb, block_group, bit,
6474 						 count_clusters, NULL);
6475 			if (err && err != -EOPNOTSUPP)
6476 				ext4_msg(sb, KERN_WARNING, "discard request in"
6477 					 " group:%u block:%d count:%lu failed"
6478 					 " with %d", block_group, bit, count,
6479 					 err);
6480 		} else
6481 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6482 
6483 		ext4_lock_group(sb, block_group);
6484 		mb_free_blocks(inode, &e4b, bit, count_clusters);
6485 	}
6486 
6487 	ext4_unlock_group(sb, block_group);
6488 
6489 	/*
6490 	 * on a bigalloc file system, defer the s_freeclusters_counter
6491 	 * update to the caller (ext4_remove_space and friends) so they
6492 	 * can determine if a cluster freed here should be rereserved
6493 	 */
6494 	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6495 		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6496 			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6497 		percpu_counter_add(&sbi->s_freeclusters_counter,
6498 				   count_clusters);
6499 	}
6500 
6501 	if (overflow && !err) {
6502 		block += count;
6503 		count = overflow;
6504 		ext4_mb_unload_buddy(&e4b);
6505 		/* The range changed so it's no longer validated */
6506 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6507 		goto do_more;
6508 	}
6509 
6510 error_clean:
6511 	ext4_mb_unload_buddy(&e4b);
6512 error_out:
6513 	ext4_std_error(sb, err);
6514 }
6515 
6516 /**
6517  * ext4_free_blocks() -- Free given blocks and update quota
6518  * @handle:		handle for this transaction
6519  * @inode:		inode
6520  * @bh:			optional buffer of the block to be freed
6521  * @block:		starting physical block to be freed
6522  * @count:		number of blocks to be freed
6523  * @flags:		flags used by ext4_free_blocks
6524  */
6525 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6526 		      struct buffer_head *bh, ext4_fsblk_t block,
6527 		      unsigned long count, int flags)
6528 {
6529 	struct super_block *sb = inode->i_sb;
6530 	unsigned int overflow;
6531 	struct ext4_sb_info *sbi;
6532 
6533 	sbi = EXT4_SB(sb);
6534 
6535 	if (bh) {
6536 		if (block)
6537 			BUG_ON(block != bh->b_blocknr);
6538 		else
6539 			block = bh->b_blocknr;
6540 	}
6541 
6542 	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6543 		ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6544 		return;
6545 	}
6546 
6547 	might_sleep();
6548 
6549 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6550 	    !ext4_inode_block_valid(inode, block, count)) {
6551 		ext4_error(sb, "Freeing blocks not in datazone - "
6552 			   "block = %llu, count = %lu", block, count);
6553 		return;
6554 	}
6555 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6556 
6557 	ext4_debug("freeing block %llu\n", block);
6558 	trace_ext4_free_blocks(inode, block, count, flags);
6559 
6560 	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6561 		BUG_ON(count > 1);
6562 
6563 		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6564 			    inode, bh, block);
6565 	}
6566 
6567 	/*
6568 	 * If the extent to be freed does not begin on a cluster
6569 	 * boundary, we need to deal with partial clusters at the
6570 	 * beginning and end of the extent.  Normally we will free
6571 	 * blocks at the beginning or the end unless we are explicitly
6572 	 * requested to avoid doing so.
6573 	 */
6574 	overflow = EXT4_PBLK_COFF(sbi, block);
6575 	if (overflow) {
6576 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6577 			overflow = sbi->s_cluster_ratio - overflow;
6578 			block += overflow;
6579 			if (count > overflow)
6580 				count -= overflow;
6581 			else
6582 				return;
6583 		} else {
6584 			block -= overflow;
6585 			count += overflow;
6586 		}
6587 		/* The range changed so it's no longer validated */
6588 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6589 	}
6590 	overflow = EXT4_LBLK_COFF(sbi, count);
6591 	if (overflow) {
6592 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6593 			if (count > overflow)
6594 				count -= overflow;
6595 			else
6596 				return;
6597 		} else
6598 			count += sbi->s_cluster_ratio - overflow;
6599 		/* The range changed so it's no longer validated */
6600 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6601 	}
6602 
6603 	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6604 		int i;
6605 		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6606 
6607 		for (i = 0; i < count; i++) {
6608 			cond_resched();
6609 			if (is_metadata)
6610 				bh = sb_find_get_block(inode->i_sb, block + i);
6611 			ext4_forget(handle, is_metadata, inode, bh, block + i);
6612 		}
6613 	}
6614 
6615 	ext4_mb_clear_bb(handle, inode, block, count, flags);
6616 }
6617 
6618 /**
6619  * ext4_group_add_blocks() -- Add given blocks to an existing group
6620  * @handle:			handle to this transaction
6621  * @sb:				super block
6622  * @block:			start physical block to add to the block group
6623  * @count:			number of blocks to free
6624  *
6625  * This marks the blocks as free in the bitmap and buddy.
6626  */
6627 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6628 			 ext4_fsblk_t block, unsigned long count)
6629 {
6630 	ext4_group_t block_group;
6631 	ext4_grpblk_t bit;
6632 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6633 	struct ext4_buddy e4b;
6634 	int err = 0;
6635 	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6636 	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6637 	unsigned long cluster_count = last_cluster - first_cluster + 1;
6638 	ext4_grpblk_t changed;
6639 
6640 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6641 
6642 	if (cluster_count == 0)
6643 		return 0;
6644 
6645 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6646 	/*
6647 	 * Check to see if we are freeing blocks across a group
6648 	 * boundary.
6649 	 */
6650 	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6651 		ext4_warning(sb, "too many blocks added to group %u",
6652 			     block_group);
6653 		err = -EINVAL;
6654 		goto error_out;
6655 	}
6656 
6657 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
6658 	if (err)
6659 		goto error_out;
6660 
6661 	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6662 		ext4_error(sb, "Adding blocks in system zones - "
6663 			   "Block = %llu, count = %lu",
6664 			   block, count);
6665 		err = -EINVAL;
6666 		goto error_clean;
6667 	}
6668 
6669 	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6670 				   cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
6671 				   &changed);
6672 	if (err && changed == 0)
6673 		goto error_clean;
6674 
6675 	if (changed != cluster_count)
6676 		ext4_error(sb, "bit already cleared in group %u", block_group);
6677 
6678 	ext4_lock_group(sb, block_group);
6679 	mb_free_blocks(NULL, &e4b, bit, cluster_count);
6680 	ext4_unlock_group(sb, block_group);
6681 	percpu_counter_add(&sbi->s_freeclusters_counter,
6682 			   changed);
6683 
6684 error_clean:
6685 	ext4_mb_unload_buddy(&e4b);
6686 error_out:
6687 	ext4_std_error(sb, err);
6688 	return err;
6689 }
6690 
6691 /**
6692  * ext4_trim_extent -- function to TRIM one single free extent in the group
6693  * @sb:		super block for the file system
6694  * @start:	starting block of the free extent in the alloc. group
6695  * @count:	number of blocks to TRIM
6696  * @e4b:	ext4 buddy for the group
6697  *
6698  * Trim "count" blocks starting at "start" in the "group". To assure that no
6699  * one will allocate those blocks, mark them as used in the buddy bitmap. This
6700  * must be called under the group lock.
6701  */
6702 static int ext4_trim_extent(struct super_block *sb,
6703 		int start, int count, struct ext4_buddy *e4b)
6704 __releases(bitlock)
6705 __acquires(bitlock)
6706 {
6707 	struct ext4_free_extent ex;
6708 	ext4_group_t group = e4b->bd_group;
6709 	int ret = 0;
6710 
6711 	trace_ext4_trim_extent(sb, group, start, count);
6712 
6713 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
6714 
6715 	ex.fe_start = start;
6716 	ex.fe_group = group;
6717 	ex.fe_len = count;
6718 
6719 	/*
6720 	 * Mark blocks used, so no one can reuse them while
6721 	 * being trimmed.
6722 	 */
6723 	mb_mark_used(e4b, &ex);
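	/*
	 * The group lock is dropped while the discard is issued to the
	 * device; the extent stays marked used in the buddy, so it cannot
	 * be handed out in the meantime, and is released again below.
	 */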
6724 	ext4_unlock_group(sb, group);
6725 	ret = ext4_issue_discard(sb, group, start, count, NULL);
6726 	ext4_lock_group(sb, group);
6727 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
6728 	return ret;
6729 }
6730 
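/*
 * Return the index of the last cluster that can belong to @grp.  Used by
 * ext4_try_to_trim_range() below to decide whether a trim request covers
 * the whole group, in which case the group can be marked as trimmed.
 */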
6731 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6732 					   ext4_group_t grp)
6733 {
6734 	if (grp < ext4_get_groups_count(sb))
6735 		return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6736 	return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6737 		ext4_group_first_block_no(sb, grp) - 1) >>
6738 					EXT4_CLUSTER_BITS(sb);
6739 }
6740 
6741 static bool ext4_trim_interrupted(void)
6742 {
6743 	return fatal_signal_pending(current) || freezing(current);
6744 }
6745 
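/*
 * Scan the block bitmap of the group backing @e4b for runs of free
 * clusters within [start, max] that are at least @minblocks long and hand
 * each such run to ext4_trim_extent().  Called with the group lock held;
 * the lock may be dropped temporarily to issue the discard or to
 * reschedule.  Returns the number of clusters trimmed.
 */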
6746 static int ext4_try_to_trim_range(struct super_block *sb,
6747 		struct ext4_buddy *e4b, ext4_grpblk_t start,
6748 		ext4_grpblk_t max, ext4_grpblk_t minblocks)
6749 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6750 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6751 {
6752 	ext4_grpblk_t next, count, free_count;
6753 	bool set_trimmed = false;
6754 	void *bitmap;
6755 
6756 	bitmap = e4b->bd_bitmap;
6757 	if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
6758 		set_trimmed = true;
6759 	start = max(e4b->bd_info->bb_first_free, start);
6760 	count = 0;
6761 	free_count = 0;
6762 
6763 	while (start <= max) {
6764 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
6765 		if (start > max)
6766 			break;
6767 		next = mb_find_next_bit(bitmap, max + 1, start);
6768 
6769 		if ((next - start) >= minblocks) {
6770 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
6771 
6772 			if (ret && ret != -EOPNOTSUPP)
6773 				return count;
6774 			count += next - start;
6775 		}
6776 		free_count += next - start;
6777 		start = next + 1;
6778 
6779 		if (ext4_trim_interrupted())
6780 			return count;
6781 
6782 		if (need_resched()) {
6783 			ext4_unlock_group(sb, e4b->bd_group);
6784 			cond_resched();
6785 			ext4_lock_group(sb, e4b->bd_group);
6786 		}
6787 
6788 		if ((e4b->bd_info->bb_free - free_count) < minblocks)
6789 			break;
6790 	}
6791 
6792 	if (set_trimmed)
6793 		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6794 
6795 	return count;
6796 }
6797 
6798 /**
6799  * ext4_trim_all_free -- function to trim all free space in alloc. group
6800  * @sb:			super block for file system
6801  * @group:		group to be trimmed
6802  * @start:		first group block to examine
6803  * @max:		last group block to examine
6804  * @minblocks:		minimum extent block count
6805  *
6806  * ext4_trim_all_free walks through the group's block bitmap searching for
6807  * free extents. When a free extent is found, it is marked as used in the
6808  * group buddy bitmap. Then a TRIM command is issued on this extent and the
6809  * extent is freed again in the group buddy bitmap.
6810  */
6811 static ext4_grpblk_t
6812 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6813 		   ext4_grpblk_t start, ext4_grpblk_t max,
6814 		   ext4_grpblk_t minblocks)
6815 {
6816 	struct ext4_buddy e4b;
6817 	int ret;
6818 
6819 	trace_ext4_trim_all_free(sb, group, start, max);
6820 
6821 	ret = ext4_mb_load_buddy(sb, group, &e4b);
6822 	if (ret) {
6823 		ext4_warning(sb, "Error %d loading buddy information for %u",
6824 			     ret, group);
6825 		return ret;
6826 	}
6827 
6828 	ext4_lock_group(sb, group);
6829 
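	/*
	 * Skip the scan if the group is already marked as trimmed, unless
	 * this request uses a smaller minimum extent length than the run
	 * that set the flag.
	 */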
6830 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6831 	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6832 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6833 	else
6834 		ret = 0;
6835 
6836 	ext4_unlock_group(sb, group);
6837 	ext4_mb_unload_buddy(&e4b);
6838 
6839 	ext4_debug("trimmed %d blocks in the group %d\n",
6840 		ret, group);
6841 
6842 	return ret;
6843 }
6844 
6845 /**
6846  * ext4_trim_fs() -- trim ioctl handle function
6847  * @sb:			superblock for filesystem
6848  * @range:		fstrim_range structure
6849  *
6850  * start:	first byte to trim
6851  * len:		number of bytes to trim from start
6852  * minlen:	minimum extent length in bytes
6853  * ext4_trim_fs goes through all allocation groups containing bytes from
6854  * start to start+len. For each such group, the ext4_trim_all_free function
6855  * is invoked to trim all free space.
6856  */
6857 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6858 {
6859 	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6860 	struct ext4_group_info *grp;
6861 	ext4_group_t group, first_group, last_group;
6862 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6863 	uint64_t start, end, minlen, trimmed = 0;
6864 	ext4_fsblk_t first_data_blk =
6865 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6866 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6867 	int ret = 0;
6868 
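	/*
	 * The fstrim_range fields are expressed in bytes; convert start and
	 * len to filesystem blocks and minlen to clusters before validating
	 * the request.
	 */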
6869 	start = range->start >> sb->s_blocksize_bits;
6870 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6871 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6872 			      range->minlen >> sb->s_blocksize_bits);
6873 
6874 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6875 	    start >= max_blks ||
6876 	    range->len < sb->s_blocksize)
6877 		return -EINVAL;
6878 	/* No point in trying to trim less than the discard granularity */
6879 	if (range->minlen < discard_granularity) {
6880 		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6881 				discard_granularity >> sb->s_blocksize_bits);
6882 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6883 			goto out;
6884 	}
6885 	if (end >= max_blks - 1)
6886 		end = max_blks - 1;
6887 	if (end <= first_data_blk)
6888 		goto out;
6889 	if (start < first_data_blk)
6890 		start = first_data_blk;
6891 
6892 	/* Determine first and last group to examine based on start and end */
6893 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6894 				     &first_group, &first_cluster);
6895 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6896 				     &last_group, &last_cluster);
6897 
6898 	/* end now represents the last cluster to discard in this group */
6899 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6900 
6901 	for (group = first_group; group <= last_group; group++) {
6902 		if (ext4_trim_interrupted())
6903 			break;
6904 		grp = ext4_get_group_info(sb, group);
6905 		if (!grp)
6906 			continue;
6907 		/* We only do this if the grp has never been initialized */
6908 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6909 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6910 			if (ret)
6911 				break;
6912 		}
6913 
6914 		/*
6915 	 * For all groups except the last one, the last cluster will
6916 	 * always be EXT4_CLUSTERS_PER_GROUP(sb) - 1, so we only need to
6917 	 * change it for the last group; note that last_cluster was
6918 	 * already computed earlier by ext4_get_group_no_and_offset().
6919 		 */
6920 		if (group == last_group)
6921 			end = last_cluster;
6922 		if (grp->bb_free >= minlen) {
6923 			cnt = ext4_trim_all_free(sb, group, first_cluster,
6924 						 end, minlen);
6925 			if (cnt < 0) {
6926 				ret = cnt;
6927 				break;
6928 			}
6929 			trimmed += cnt;
6930 		}
6931 
6932 		/*
6933 		 * For every group except the first one, we are sure
6934 		 * that the first cluster to discard will be cluster #0.
6935 		 */
6936 		first_cluster = 0;
6937 	}
6938 
6939 	if (!ret)
6940 		EXT4_SB(sb)->s_last_trim_minblks = minlen;
6941 
6942 out:
6943 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6944 	return ret;
6945 }
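
/*
 * Illustrative sketch, not part of the original file: how the byte-based
 * struct fstrim_range consumed by ext4_trim_fs() is typically filled in by
 * a userspace FITRIM caller.  The mount point and values are hypothetical;
 * FITRIM and struct fstrim_range come from <linux/fs.h>.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int trim_whole_fs(const char *mountpoint)
{
	struct fstrim_range range = {
		.start = 0,		/* first byte to trim */
		.len = ULLONG_MAX,	/* trim up to the end of the filesystem */
		.minlen = 0,		/* kernel raises this to the discard granularity */
	};
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FITRIM, &range) < 0) {
		close(fd);
		return -1;
	}
	/* On return, range.len holds the number of bytes actually trimmed. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
#endif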
6946 
6947 /* Iterate over all the free extents in the group, reporting each one via the formatter callback. */
6948 int
6949 ext4_mballoc_query_range(
6950 	struct super_block		*sb,
6951 	ext4_group_t			group,
6952 	ext4_grpblk_t			start,
6953 	ext4_grpblk_t			end,
6954 	ext4_mballoc_query_range_fn	formatter,
6955 	void				*priv)
6956 {
6957 	void				*bitmap;
6958 	ext4_grpblk_t			next;
6959 	struct ext4_buddy		e4b;
6960 	int				error;
6961 
6962 	error = ext4_mb_load_buddy(sb, group, &e4b);
6963 	if (error)
6964 		return error;
6965 	bitmap = e4b.bd_bitmap;
6966 
6967 	ext4_lock_group(sb, group);
6968 
6969 	start = max(e4b.bd_info->bb_first_free, start);
6970 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6971 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6972 
6973 	while (start <= end) {
6974 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
6975 		if (start > end)
6976 			break;
6977 		next = mb_find_next_bit(bitmap, end + 1, start);
6978 
6979 		ext4_unlock_group(sb, group);
6980 		error = formatter(sb, group, start, next - start, priv);
6981 		if (error)
6982 			goto out_unload;
6983 		ext4_lock_group(sb, group);
6984 
6985 		start = next + 1;
6986 	}
6987 
6988 	ext4_unlock_group(sb, group);
6989 out_unload:
6990 	ext4_mb_unload_buddy(&e4b);
6991 
6992 	return error;
6993 }
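
/*
 * Illustrative sketch, not part of the original file: a callback of the
 * shape ext4_mballoc_query_range() expects.  It is invoked once per free
 * extent with the group lock dropped, and a non-zero return value stops
 * the walk.  The callback name and the logged message are hypothetical.
 */
static int example_print_free_extent(struct super_block *sb,
				     ext4_group_t group, ext4_grpblk_t start,
				     ext4_grpblk_t len, void *priv)
{
	ext4_msg(sb, KERN_INFO, "group %u: %d free cluster(s) at %d",
		 group, len, start);
	return 0;
}

/*
 * A walk over one whole group could then look like (hypothetical usage):
 *
 *	ext4_mballoc_query_range(sb, group, 0,
 *				 EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *				 example_print_free_extent, NULL);
 */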
6994 
6995 #ifdef CONFIG_EXT4_KUNIT_TESTS
6996 #include "mballoc-test.c"
6997 #endif
6998