// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether the file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * either group preallocation or inode preallocation depending on the
 * size of the file. The size of the file could be the resulting file
 * size we would have after allocation, or the current file size,
 * whichever is larger. If the size is less than sbi->s_mb_stream_request
 * we select group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to ensure that small files are placed close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode.
 * The inode prealloc space is represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used by looking at the _logical_
 * start block. Only if the logical file block falls within the range of
 * a prealloc space do we consume that particular prealloc space. This
 * makes sure that we have contiguous physical blocks representing the
 * file blocks.
 *
 * The important thing to note about inode prealloc space is that we
 * don't modify any of its values except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if
 * we have the group allocation flag set then we look at the locality
 * group prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this
 * point.
 *
 * The locality group prealloc space is used by checking whether there is
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is
 * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
 * offsets get mapped to the buddy and bitmap information of the
 * different groups. The buddy information is attached to the buddy
 * cache inode so that we can access it through the page cache. The
 * information for each group is loaded via ext4_mb_load_buddy and
 * consists of the block bitmap and the buddy information, stored in the
 * inode as:
 *
 * {  page  }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding
 * groups_per_page, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
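 *
 * For example (illustrative numbers): with PAGE_SIZE = 4096 and a 1KiB
 * blocksize, blocks_per_page = 4 and groups_per_page = 2, so page 0
 * holds [group 0 bitmap][group 0 buddy][group 1 bitmap][group 1 buddy].
 * With a 4KiB blocksize, blocks_per_page = 1 and a group's bitmap and
 * buddy block land in two consecutive pages of the buddy cache inode.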
 *
 * We look for count number of blocks in the buddy cache. If we were
 * able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we actually need.
 * The extra blocks that we get after allocation are added to the
 * respective prealloc list. In case of inode preallocation we follow a
 * list of heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value
 * of s_mb_group_prealloc depends on the cluster size; for non-bigalloc
 * file systems, it is 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented
 * in terms of number of blocks. If we have mounted the file system with
 * the -O stripe=<value> option the group prealloc request is normalized
 * to the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (i.e. the total
 *    number of buddy bitmap orders possible) lists. Group infos are placed
 *    in the appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty
 *    groups, so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
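 *
 * For example (illustrative): a group with bb_free = 512 clusters spread
 * over bb_fragments = 4 free extents has an average fragment size of 128
 * clusters, so it sits on the avg_fragment_size list covering the range
 * [128, 256); if its largest free buddy chunk is 2^8 clusters, it also
 * sits on s_mb_largest_free_orders[8].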
 *
 * At CR = 0, we look for groups which have largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform the CR = 0 lookup in O(1) time.
 *
 * At CR = 1, we only consider groups where the average fragment size > request
 * size. So, we look up a group which has an average fragment size just above
 * or equal to the request size using our average fragment size group lists
 * (data structure 2) in O(1) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR 0 and CR 1
 * phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks of
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria of whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion.
 * While that may not matter on non-rotational devices, on
 * rotational devices it may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups it should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices this value defaults to 0, and for rotational devices
 * it is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
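/*
 * Example (illustrative; the device name is hypothetical): the tunables
 * above are plain sysfs files, so e.g.
 *
 *	echo 64 > /sys/fs/ext4/sda1/mb_stream_req
 *
 * makes files smaller than 64 blocks use the per-CPU locality-group
 * preallocation instead of per-inode preallocation.
 */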
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks
 *    left unused. so, before taking some block from a descriptor, one
 *    must make sure the corresponding logical block isn't allocated
 *    yet. this also means that freeing any block within a descriptor's
 *    range must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group.
 *    space from this type of preallocation can be used for any inode.
 *    thus it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *	in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is
 * either free or used in ALL structures. notice: "any time" should not
 * be read literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the
 * number of blocks: how many blocks are marked used/free in the on-disk
 * bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *       is used in the real operation because we can't know the actual
 *       used bits from the PA, only from the on-disk bitmap
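 *
 * worked example (illustrative): a new inode PA of N = 16 clusters marks
 * 16 clusters used in the buddy (buddy += 16; PA = 16). writing 10 file
 * blocks through it gives on-disk += 10; PA = 6. discarding the PA then
 * returns the 6 still-unused clusters to the buddy; per the note above,
 * the real operation finds those 6 by re-reading the on-disk bitmap for
 * the PA's range rather than trusting the PA's counter.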
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given that some of them can block, we'd have to use something like
 * semaphores, killing performance on high-end SMP hardware. let's try to
 * relax it using the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set and/or a
 *     PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should take care about concurrent init
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */
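/*
 * A minimal sketch (not the actual control flow; locking simplified and
 * error handling omitted) of how the "allocation" steps above map onto
 * the helpers defined later in this file -- the full logic lives in
 * ext4_mb_regular_allocator() and ext4_mb_mark_diskspace_used():
 *
 *	struct ext4_buddy e4b;
 *
 *	ext4_mb_load_buddy(sb, group, &e4b);		// "load group"
 *	ext4_lock_group(sb, group);
 *	mb_mark_used(&e4b, &ac->ac_b_ex);		// claim the found extent
 *	ext4_unlock_group(sb, group);
 *	ext4_mb_unload_buddy(&e4b);			// "release group"
 *	// marking bits in the on-disk bitmap happens under the journal,
 *	// see ext4_mb_mark_diskspace_used()
 */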
/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *  - cr0 lists lock			(cr0)
 *  - cr1 lists lock			(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);
/*
 * The algorithm using this percpu seq counter goes as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus, using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
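/*
 * Worked example (illustrative, assuming a 64-bit build): for a bitmap
 * starting 6 bytes past an 8-byte boundary,
 *
 *	int bit = 5;
 *	void *p = base + 6;
 *	p = mb_correct_addr_and_bit(&bit, p);
 *	// now p == base and bit == 5 + 6 * 8 == 53
 *
 * i.e. the address is rounded down to an unsigned long boundary and the
 * bit number is rebased accordingly, as ext4_test_bit() and friends
 * require on architectures like powerpc.
 */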
static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
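/*
 * Worked example (illustrative): with a 4KiB blocksize (bd_blkbits == 12)
 * order 0 maps to the bd_bitmap block itself, with *max = 1 << 15 = 32768
 * bits, one per cluster in the group. Orders >= 1 live packed inside the
 * single bd_buddy block: the order-1 bitmap occupies the first half, the
 * order-2 bitmap the next quarter, and so on (which is what
 * s_mb_offsets[] records), with *max halving at each step (16384 bits at
 * order 1, 8192 at order 2, ...).
 */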
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}
static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}
#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif
#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only a single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	if (!grp)
		return 0;
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif
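/*
 * Worked illustration (hypothetical group) of the invariants
 * __mb_check_buddy() verifies: if clusters 4..7 form one free order-2
 * chunk, then bit 1 of the order-2 buddy is 0, the two covered bits of
 * the order-1 buddy (bits 2 and 3) are both 1 since the freedom is
 * recorded only at the highest possible order, bits 4..7 of bd_bitmap
 * are all 0 (free), and the chunk is counted in bb_counters[2]. Had only
 * clusters 4..5 been free, order-1 bit 2 would be 0 and order-2 bit 1
 * would be 1; two free sibling chunks can never sit side by side at the
 * same order, because they would have been merged.
 */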
/*
 * Divide blocks starting at @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the buddy bitmap which the blocks of the chunk(s)
 * cover, then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
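/*
 * Worked example (illustrative): for first = 5, len = 10 (clusters
 * 5..14), the loop above emits chunks 1@5, 2@6, 4@8, 2@12 and 1@14:
 * each chunk is the largest power of 2 allowed by both the alignment of
 * first (the ffs() term) and the remaining length (the fls() term).
 * bb_counters[0] grows by 2, bb_counters[1] by 2 and bb_counters[2] by 1.
 */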
static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups whose free extents
	 * are all single blocks, or for completely empty groups.
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new_order;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
		return;

	new_order = mb_avg_fragment_size_order(sb,
					grp->bb_free / grp->bb_fragments);
	if (new_order == grp->bb_avg_fragment_size_order)
		return;

	if (grp->bb_avg_fragment_size_order != -1) {
		write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
		list_del(&grp->bb_avg_fragment_size_node);
		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	}
	grp->bb_avg_fragment_size_order = new_order;
	write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	list_add_tail(&grp->bb_avg_fragment_size_node,
		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
}
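/*
 * Worked example (illustrative): an average fragment size of 300
 * clusters gives fls(300) - 2 == 7, so the group goes on list 7; every
 * average size in 256..511 maps to that same list. A group with
 * bb_free == 512 and bb_fragments == 4 (average 128) would instead go
 * on list fls(128) - 2 == 6.
 */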
/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter, *grp;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

	grp = NULL;
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		grp = NULL;
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (grp)
			break;
	}

	if (!grp) {
		/* Increment cr and search again */
		*new_cr = 1;
	} else {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
	}
}
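/*
 * For example (illustrative): an 8-cluster request has ac_2order == 3,
 * so the loop above starts at s_mb_largest_free_orders[3]; any group on
 * that list has a free order-3 buddy chunk and can satisfy the request
 * immediately. Only if lists 3..MB_NUM_ORDERS(sb)-1 are all empty (or
 * contain no good group) is the CR level bumped to 1.
 */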
/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL, *iter;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
	}

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
			continue;
		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
			continue;
		}
		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
				    bb_avg_fragment_size_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (grp)
			break;
	}

	if (grp) {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
	} else {
		*new_cr = 2;
	}
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= 2)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}
/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
	if (!should_optimize_scan(ac))
		goto inc_and_return;

	if (ac->ac_groups_linear_remaining) {
		ac->ac_groups_linear_remaining--;
		goto inc_and_return;
	}

	return group;
inc_and_return:
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used
 *            as determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
		*group = next_linear_group(ac, *group, ngroups);
		return;
	}

	if (*new_cr == 0) {
		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
	} else if (*new_cr == 1) {
		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
	} else {
		/*
		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
		 * bb_free. But until that happens, we should never come here.
		 */
		WARN_ON(1);
	}
}
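/*
 * A minimal sketch (simplified; not the exact control flow) of how the
 * group scan in ext4_mb_regular_allocator() drives the chooser above:
 * each iteration asks for the next candidate group, and a changed CR
 * value signals that the current criteria level is exhausted:
 *
 *	for (cr = 0; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
 *		ac->ac_criteria = cr;
 *		group = ac->ac_g_ex.fe_group;
 *		for (i = 0; i < ngroups; i++) {
 *			int new_cr;
 *
 *			ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
 *			if (new_cr != cr)
 *				break;		// retry at the higher CR level
 *			// ... load the buddy and scan this group ...
 *		}
 *	}
 */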
1020196e402aSHarshad Shirwadkar */ 1021196e402aSHarshad Shirwadkar WARN_ON(1); 1022196e402aSHarshad Shirwadkar } 1023196e402aSHarshad Shirwadkar } 1024196e402aSHarshad Shirwadkar 10258a57d9d6SCurt Wohlgemuth /* 10268a57d9d6SCurt Wohlgemuth * Cache the order of the largest free extent we have available in this block 10278a57d9d6SCurt Wohlgemuth * group. 10288a57d9d6SCurt Wohlgemuth */ 10298a57d9d6SCurt Wohlgemuth static void 10308a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 10318a57d9d6SCurt Wohlgemuth { 1032196e402aSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 10338a57d9d6SCurt Wohlgemuth int i; 10348a57d9d6SCurt Wohlgemuth 10351940265eSJan Kara for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) 10361940265eSJan Kara if (grp->bb_counters[i] > 0) 10371940265eSJan Kara break; 10381940265eSJan Kara /* No need to move between order lists? */ 10391940265eSJan Kara if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || 10401940265eSJan Kara i == grp->bb_largest_free_order) { 10411940265eSJan Kara grp->bb_largest_free_order = i; 10421940265eSJan Kara return; 10431940265eSJan Kara } 10441940265eSJan Kara 10451940265eSJan Kara if (grp->bb_largest_free_order >= 0) { 1046196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1047196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1048196e402aSHarshad Shirwadkar list_del_init(&grp->bb_largest_free_order_node); 1049196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1050196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1051196e402aSHarshad Shirwadkar } 10528a57d9d6SCurt Wohlgemuth grp->bb_largest_free_order = i; 10531940265eSJan Kara if (grp->bb_largest_free_order >= 0 && grp->bb_free) { 1054196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1055196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1056196e402aSHarshad Shirwadkar list_add_tail(&grp->bb_largest_free_order_node, 1057196e402aSHarshad Shirwadkar &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1058196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1059196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1060196e402aSHarshad Shirwadkar } 10618a57d9d6SCurt Wohlgemuth } 10628a57d9d6SCurt Wohlgemuth 1063089ceeccSEric Sandeen static noinline_for_stack 1064089ceeccSEric Sandeen void ext4_mb_generate_buddy(struct super_block *sb, 10655354b2afSTheodore Ts'o void *buddy, void *bitmap, ext4_group_t group, 10665354b2afSTheodore Ts'o struct ext4_group_info *grp) 1067c9de560dSAlex Tomas { 1068e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 10697137d7a4STheodore Ts'o ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1070a36b4498SEric Sandeen ext4_grpblk_t i = 0; 1071a36b4498SEric Sandeen ext4_grpblk_t first; 1072a36b4498SEric Sandeen ext4_grpblk_t len; 1073c9de560dSAlex Tomas unsigned free = 0; 1074c9de560dSAlex Tomas unsigned fragments = 0; 1075c9de560dSAlex Tomas unsigned long long period = get_cycles(); 1076c9de560dSAlex Tomas 1077c9de560dSAlex Tomas /* initialize buddy from bitmap which is aggregation 1078c9de560dSAlex Tomas * of on-disk bitmap and preallocations */ 1079ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, 0); 1080c9de560dSAlex Tomas grp->bb_first_free = i; 1081c9de560dSAlex Tomas while (i < max) { 1082c9de560dSAlex Tomas fragments++; 1083c9de560dSAlex Tomas first = i; 1084ffad0a44SAneesh Kumar K.V i = mb_find_next_bit(bitmap, max, i); 1085c9de560dSAlex Tomas len = i - 
first; 1086c9de560dSAlex Tomas free += len; 1087c9de560dSAlex Tomas if (len > 1) 1088c9de560dSAlex Tomas ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1089c9de560dSAlex Tomas else 1090c9de560dSAlex Tomas grp->bb_counters[0]++; 1091c9de560dSAlex Tomas if (i < max) 1092ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, i); 1093c9de560dSAlex Tomas } 1094c9de560dSAlex Tomas grp->bb_fragments = fragments; 1095c9de560dSAlex Tomas 1096c9de560dSAlex Tomas if (free != grp->bb_free) { 1097e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, 109894d4c066STheodore Ts'o "block bitmap and bg descriptor " 109994d4c066STheodore Ts'o "inconsistent: %u vs %u free clusters", 1100e29136f8STheodore Ts'o free, grp->bb_free); 1101e56eb659SAneesh Kumar K.V /* 1102163a203dSDarrick J. Wong * If we intend to continue, we consider group descriptor 1103e56eb659SAneesh Kumar K.V * corrupt and update bb_free using bitmap value 1104e56eb659SAneesh Kumar K.V */ 1105c9de560dSAlex Tomas grp->bb_free = free; 1106db79e6d1SWang Shilong ext4_mark_group_bitmap_corrupted(sb, group, 1107db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1108c9de560dSAlex Tomas } 11098a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, grp); 111083e80a6eSJan Kara mb_update_avg_fragment_size(sb, grp); 1111c9de560dSAlex Tomas 1112c9de560dSAlex Tomas clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1113c9de560dSAlex Tomas 1114c9de560dSAlex Tomas period = get_cycles() - period; 111567d25186SHarshad Shirwadkar atomic_inc(&sbi->s_mb_buddies_generated); 111667d25186SHarshad Shirwadkar atomic64_add(period, &sbi->s_mb_generation_time); 1117c9de560dSAlex Tomas } 1118c9de560dSAlex Tomas 1119c9de560dSAlex Tomas /* The buddy information is attached the buddy cache inode 1120c9de560dSAlex Tomas * for convenience. The information regarding each group 1121c9de560dSAlex Tomas * is loaded via ext4_mb_load_buddy. The information involve 1122c9de560dSAlex Tomas * block bitmap and buddy information. The information are 1123c9de560dSAlex Tomas * stored in the inode as 1124c9de560dSAlex Tomas * 1125c9de560dSAlex Tomas * { page } 1126c3a326a6SAneesh Kumar K.V * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1127c9de560dSAlex Tomas * 1128c9de560dSAlex Tomas * 1129c9de560dSAlex Tomas * one block each for bitmap and buddy information. 1130c9de560dSAlex Tomas * So for each group we take up 2 blocks. A page can 1131ea1754a0SKirill A. Shutemov * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 1132c9de560dSAlex Tomas * So it can have information regarding groups_per_page which 1133c9de560dSAlex Tomas * is blocks_per_page/2 11348a57d9d6SCurt Wohlgemuth * 11358a57d9d6SCurt Wohlgemuth * Locking note: This routine takes the block group lock of all groups 11368a57d9d6SCurt Wohlgemuth * for this page; do not hold this lock when calling this routine! 1137c9de560dSAlex Tomas */ 1138c9de560dSAlex Tomas 1139adb7ef60SKonstantin Khlebnikov static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1140c9de560dSAlex Tomas { 11418df9675fSTheodore Ts'o ext4_group_t ngroups; 1142c9de560dSAlex Tomas int blocksize; 1143c9de560dSAlex Tomas int blocks_per_page; 1144c9de560dSAlex Tomas int groups_per_page; 1145c9de560dSAlex Tomas int err = 0; 1146c9de560dSAlex Tomas int i; 1147813e5727STheodore Ts'o ext4_group_t first_group, group; 1148c9de560dSAlex Tomas int first_block; 1149c9de560dSAlex Tomas struct super_block *sb; 1150c9de560dSAlex Tomas struct buffer_head *bhs; 1151fa77dcfaSDarrick J. 
Wong struct buffer_head **bh = NULL; 1152c9de560dSAlex Tomas struct inode *inode; 1153c9de560dSAlex Tomas char *data; 1154c9de560dSAlex Tomas char *bitmap; 11559b8b7d35SAmir Goldstein struct ext4_group_info *grinfo; 1156c9de560dSAlex Tomas 1157c9de560dSAlex Tomas inode = page->mapping->host; 1158c9de560dSAlex Tomas sb = inode->i_sb; 11598df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 116093407472SFabian Frederick blocksize = i_blocksize(inode); 116109cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / blocksize; 1162c9de560dSAlex Tomas 1163d3df1453SRitesh Harjani mb_debug(sb, "init page %lu\n", page->index); 1164d3df1453SRitesh Harjani 1165c9de560dSAlex Tomas groups_per_page = blocks_per_page >> 1; 1166c9de560dSAlex Tomas if (groups_per_page == 0) 1167c9de560dSAlex Tomas groups_per_page = 1; 1168c9de560dSAlex Tomas 1169c9de560dSAlex Tomas /* allocate buffer_heads to read bitmaps */ 1170c9de560dSAlex Tomas if (groups_per_page > 1) { 1171c9de560dSAlex Tomas i = sizeof(struct buffer_head *) * groups_per_page; 1172adb7ef60SKonstantin Khlebnikov bh = kzalloc(i, gfp); 1173139f46d3SKemeng Shi if (bh == NULL) 1174139f46d3SKemeng Shi return -ENOMEM; 1175c9de560dSAlex Tomas } else 1176c9de560dSAlex Tomas bh = &bhs; 1177c9de560dSAlex Tomas 1178c9de560dSAlex Tomas first_group = page->index * blocks_per_page / 2; 1179c9de560dSAlex Tomas 1180c9de560dSAlex Tomas /* read all groups the page covers into the cache */ 1181813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1182813e5727STheodore Ts'o if (group >= ngroups) 1183c9de560dSAlex Tomas break; 1184c9de560dSAlex Tomas 1185813e5727STheodore Ts'o grinfo = ext4_get_group_info(sb, group); 11865354b2afSTheodore Ts'o if (!grinfo) 11875354b2afSTheodore Ts'o continue; 11889b8b7d35SAmir Goldstein /* 11899b8b7d35SAmir Goldstein * If page is uptodate then we came here after online resize 11909b8b7d35SAmir Goldstein * which added some new uninitialized group info structs, so 11919b8b7d35SAmir Goldstein * we must skip all initialized uptodate buddies on the page, 11929b8b7d35SAmir Goldstein * which may be currently in use by an allocating task. 11939b8b7d35SAmir Goldstein */ 11949b8b7d35SAmir Goldstein if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 11959b8b7d35SAmir Goldstein bh[i] = NULL; 11969b8b7d35SAmir Goldstein continue; 11979b8b7d35SAmir Goldstein } 1198cfd73237SAlex Zhuravlev bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 11999008a58eSDarrick J. Wong if (IS_ERR(bh[i])) { 12009008a58eSDarrick J. Wong err = PTR_ERR(bh[i]); 12019008a58eSDarrick J. Wong bh[i] = NULL; 1202c9de560dSAlex Tomas goto out; 12032ccb5fb9SAneesh Kumar K.V } 1204d3df1453SRitesh Harjani mb_debug(sb, "read bitmap for group %u\n", group); 1205c9de560dSAlex Tomas } 1206c9de560dSAlex Tomas 1207c9de560dSAlex Tomas /* wait for I/O completion */ 1208813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 12099008a58eSDarrick J. Wong int err2; 12109008a58eSDarrick J. Wong 12119008a58eSDarrick J. Wong if (!bh[i]) 12129008a58eSDarrick J. Wong continue; 12139008a58eSDarrick J. Wong err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 12149008a58eSDarrick J. Wong if (!err) 12159008a58eSDarrick J. 
Wong err = err2; 1216813e5727STheodore Ts'o } 1217c9de560dSAlex Tomas 1218c9de560dSAlex Tomas first_block = page->index * blocks_per_page; 1219c9de560dSAlex Tomas for (i = 0; i < blocks_per_page; i++) { 1220c9de560dSAlex Tomas group = (first_block + i) >> 1; 12218df9675fSTheodore Ts'o if (group >= ngroups) 1222c9de560dSAlex Tomas break; 1223c9de560dSAlex Tomas 12249b8b7d35SAmir Goldstein if (!bh[group - first_group]) 12259b8b7d35SAmir Goldstein /* skip initialized uptodate buddy */ 12269b8b7d35SAmir Goldstein continue; 12279b8b7d35SAmir Goldstein 1228bbdc322fSLukas Czerner if (!buffer_verified(bh[group - first_group])) 1229bbdc322fSLukas Czerner /* Skip faulty bitmaps */ 1230bbdc322fSLukas Czerner continue; 1231bbdc322fSLukas Czerner err = 0; 1232bbdc322fSLukas Czerner 1233c9de560dSAlex Tomas /* 1234c9de560dSAlex Tomas * data carry information regarding this 1235c9de560dSAlex Tomas * particular group in the format specified 1236c9de560dSAlex Tomas * above 1237c9de560dSAlex Tomas * 1238c9de560dSAlex Tomas */ 1239c9de560dSAlex Tomas data = page_address(page) + (i * blocksize); 1240c9de560dSAlex Tomas bitmap = bh[group - first_group]->b_data; 1241c9de560dSAlex Tomas 1242c9de560dSAlex Tomas /* 1243c9de560dSAlex Tomas * We place the buddy block and bitmap block 1244c9de560dSAlex Tomas * close together 1245c9de560dSAlex Tomas */ 1246c9de560dSAlex Tomas if ((first_block + i) & 1) { 1247c9de560dSAlex Tomas /* this is block of buddy */ 1248c9de560dSAlex Tomas BUG_ON(incore == NULL); 1249d3df1453SRitesh Harjani mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1250c9de560dSAlex Tomas group, page->index, i * blocksize); 1251f307333eSTheodore Ts'o trace_ext4_mb_buddy_bitmap_load(sb, group); 1252c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, group); 12535354b2afSTheodore Ts'o if (!grinfo) { 12545354b2afSTheodore Ts'o err = -EFSCORRUPTED; 12555354b2afSTheodore Ts'o goto out; 12565354b2afSTheodore Ts'o } 1257c9de560dSAlex Tomas grinfo->bb_fragments = 0; 1258c9de560dSAlex Tomas memset(grinfo->bb_counters, 0, 12591927805eSEric Sandeen sizeof(*grinfo->bb_counters) * 12604b68f6dfSHarshad Shirwadkar (MB_NUM_ORDERS(sb))); 1261c9de560dSAlex Tomas /* 1262c9de560dSAlex Tomas * incore got set to the group block bitmap below 1263c9de560dSAlex Tomas */ 12647a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, group); 12659b8b7d35SAmir Goldstein /* init the buddy */ 12669b8b7d35SAmir Goldstein memset(data, 0xff, blocksize); 12675354b2afSTheodore Ts'o ext4_mb_generate_buddy(sb, data, incore, group, grinfo); 12687a2fcbf7SAneesh Kumar K.V ext4_unlock_group(sb, group); 1269c9de560dSAlex Tomas incore = NULL; 1270c9de560dSAlex Tomas } else { 1271c9de560dSAlex Tomas /* this is block of bitmap */ 1272c9de560dSAlex Tomas BUG_ON(incore != NULL); 1273d3df1453SRitesh Harjani mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1274c9de560dSAlex Tomas group, page->index, i * blocksize); 1275f307333eSTheodore Ts'o trace_ext4_mb_bitmap_load(sb, group); 1276c9de560dSAlex Tomas 1277c9de560dSAlex Tomas /* see comments in ext4_mb_put_pa() */ 1278c9de560dSAlex Tomas ext4_lock_group(sb, group); 1279c9de560dSAlex Tomas memcpy(data, bitmap, blocksize); 1280c9de560dSAlex Tomas 1281c9de560dSAlex Tomas /* mark all preallocated blks used in in-core bitmap */ 1282c9de560dSAlex Tomas ext4_mb_generate_from_pa(sb, data, group); 12837a2fcbf7SAneesh Kumar K.V ext4_mb_generate_from_freelist(sb, data, group); 1284c9de560dSAlex Tomas ext4_unlock_group(sb, group); 1285c9de560dSAlex Tomas 1286c9de560dSAlex Tomas /* set incore so that the buddy 
information can be
1287c9de560dSAlex Tomas * generated using this
1288c9de560dSAlex Tomas */
1289c9de560dSAlex Tomas incore = data;
1290c9de560dSAlex Tomas }
1291c9de560dSAlex Tomas }
1292c9de560dSAlex Tomas SetPageUptodate(page);
1293c9de560dSAlex Tomas
1294c9de560dSAlex Tomas out:
1295c9de560dSAlex Tomas if (bh) {
12969b8b7d35SAmir Goldstein for (i = 0; i < groups_per_page; i++)
1297c9de560dSAlex Tomas brelse(bh[i]);
1298c9de560dSAlex Tomas if (bh != &bhs)
1299c9de560dSAlex Tomas kfree(bh);
1300c9de560dSAlex Tomas }
1301c9de560dSAlex Tomas return err;
1302c9de560dSAlex Tomas }
1303c9de560dSAlex Tomas
13048a57d9d6SCurt Wohlgemuth /*
13052de8807bSAmir Goldstein * Lock the buddy and bitmap pages. This makes sure that a parallel init_group
13062de8807bSAmir Goldstein * on the same buddy page doesn't happen while holding the buddy page lock.
13072de8807bSAmir Goldstein * Return locked buddy and bitmap pages on the e4b struct. If buddy and bitmap
13082de8807bSAmir Goldstein * are on the same page e4b->bd_buddy_page is NULL and the return value is 0.
1309eee4adc7SEric Sandeen */
13102de8807bSAmir Goldstein static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1311adb7ef60SKonstantin Khlebnikov ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1312eee4adc7SEric Sandeen {
13132de8807bSAmir Goldstein struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
13142de8807bSAmir Goldstein int block, pnum, poff;
1315eee4adc7SEric Sandeen int blocks_per_page;
13162de8807bSAmir Goldstein struct page *page;
13172de8807bSAmir Goldstein
13182de8807bSAmir Goldstein e4b->bd_buddy_page = NULL;
13192de8807bSAmir Goldstein e4b->bd_bitmap_page = NULL;
1320eee4adc7SEric Sandeen
132109cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1322eee4adc7SEric Sandeen /*
1323eee4adc7SEric Sandeen * the buddy cache inode stores the block bitmap
1324eee4adc7SEric Sandeen * and buddy information in consecutive blocks.
1325eee4adc7SEric Sandeen * So for each group we need two blocks; the sketch below works an example.
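/*
 * A userspace sketch of the layout just described: the bitmap and buddy of
 * group g live at logical blocks 2g and 2g+1 of the buddy cache inode, so
 * the owning page and the offset inside it fall out of simple division.
 * The 4KiB page / 1KiB block geometry is only an example.
 */
#include <stdio.h>

int main(void)
{
	int page_size = 4096, blocksize = 1024;
	int blocks_per_page = page_size / blocksize; /* 4 */
	int group;

	for (group = 0; group < 4; group++) {
		int block = group * 2; /* bitmap block; buddy is block + 1 */
		printf("group %d: bitmap page %d off %d, buddy page %d off %d\n",
		       group,
		       block / blocks_per_page, block % blocks_per_page,
		       (block + 1) / blocks_per_page,
		       (block + 1) % blocks_per_page);
	}
	return 0;
}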
1326eee4adc7SEric Sandeen */ 1327eee4adc7SEric Sandeen block = group * 2; 1328eee4adc7SEric Sandeen pnum = block / blocks_per_page; 13292de8807bSAmir Goldstein poff = block % blocks_per_page; 1330adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13312de8807bSAmir Goldstein if (!page) 1332c57ab39bSYounger Liu return -ENOMEM; 13332de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13342de8807bSAmir Goldstein e4b->bd_bitmap_page = page; 13352de8807bSAmir Goldstein e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1336eee4adc7SEric Sandeen 13372de8807bSAmir Goldstein if (blocks_per_page >= 2) { 13382de8807bSAmir Goldstein /* buddy and bitmap are on the same page */ 13392de8807bSAmir Goldstein return 0; 1340eee4adc7SEric Sandeen } 1341eee4adc7SEric Sandeen 13422de8807bSAmir Goldstein block++; 1343eee4adc7SEric Sandeen pnum = block / blocks_per_page; 1344adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13452de8807bSAmir Goldstein if (!page) 1346c57ab39bSYounger Liu return -ENOMEM; 13472de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13482de8807bSAmir Goldstein e4b->bd_buddy_page = page; 13492de8807bSAmir Goldstein return 0; 1350eee4adc7SEric Sandeen } 1351eee4adc7SEric Sandeen 13522de8807bSAmir Goldstein static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 13532de8807bSAmir Goldstein { 13542de8807bSAmir Goldstein if (e4b->bd_bitmap_page) { 13552de8807bSAmir Goldstein unlock_page(e4b->bd_bitmap_page); 135609cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 13572de8807bSAmir Goldstein } 13582de8807bSAmir Goldstein if (e4b->bd_buddy_page) { 13592de8807bSAmir Goldstein unlock_page(e4b->bd_buddy_page); 136009cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page); 13612de8807bSAmir Goldstein } 1362eee4adc7SEric Sandeen } 1363eee4adc7SEric Sandeen 1364eee4adc7SEric Sandeen /* 13658a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 13668a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 13678a57d9d6SCurt Wohlgemuth * calling this routine! 13688a57d9d6SCurt Wohlgemuth */ 1369b6a758ecSAneesh Kumar K.V static noinline_for_stack 1370adb7ef60SKonstantin Khlebnikov int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1371b6a758ecSAneesh Kumar K.V { 1372b6a758ecSAneesh Kumar K.V 1373b6a758ecSAneesh Kumar K.V struct ext4_group_info *this_grp; 13742de8807bSAmir Goldstein struct ext4_buddy e4b; 13752de8807bSAmir Goldstein struct page *page; 13762de8807bSAmir Goldstein int ret = 0; 1377b6a758ecSAneesh Kumar K.V 1378b10a44c3STheodore Ts'o might_sleep(); 1379d3df1453SRitesh Harjani mb_debug(sb, "init group %u\n", group); 1380b6a758ecSAneesh Kumar K.V this_grp = ext4_get_group_info(sb, group); 13815354b2afSTheodore Ts'o if (!this_grp) 13825354b2afSTheodore Ts'o return -EFSCORRUPTED; 13835354b2afSTheodore Ts'o 1384b6a758ecSAneesh Kumar K.V /* 138508c3a813SAneesh Kumar K.V * This ensures that we don't reinit the buddy cache 138608c3a813SAneesh Kumar K.V * page which map to the group from which we are already 138708c3a813SAneesh Kumar K.V * allocating. If we are looking at the buddy cache we would 138808c3a813SAneesh Kumar K.V * have taken a reference using ext4_mb_load_buddy and that 13892de8807bSAmir Goldstein * would have pinned buddy page to page cache. 
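/*
 * For reference, a userspace model of the bitmap walk that
 * ext4_mb_generate_buddy() (above) performs when a group is initialized:
 * count free clusters and free fragments by scanning runs of zero bits.
 * The bitmap contents and helpers here are hypothetical.
 */
#include <stdio.h>

static int test_bit(const unsigned char *bm, int i)
{
	return (bm[i >> 3] >> (i & 7)) & 1; /* 1 = cluster in use */
}

int main(void)
{
	/* 16 clusters: 0-3 and 12-15 allocated, 4-11 free */
	unsigned char bitmap[2] = { 0x0f, 0xf0 };
	int max = 16, i = 0, free = 0, fragments = 0;

	while (i < max) {
		if (test_bit(bitmap, i)) {
			i++;
			continue;
		}
		fragments++; /* a new free extent starts here */
		while (i < max && !test_bit(bitmap, i)) {
			free++;
			i++;
		}
	}
	printf("free=%d fragments=%d\n", free, fragments); /* free=8 fragments=1 */
	return 0;
}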
13902457aec6SMel Gorman * The call to ext4_mb_get_buddy_page_lock will mark the 13912457aec6SMel Gorman * page accessed. 1392b6a758ecSAneesh Kumar K.V */ 1393adb7ef60SKonstantin Khlebnikov ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 13942de8807bSAmir Goldstein if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1395b6a758ecSAneesh Kumar K.V /* 1396b6a758ecSAneesh Kumar K.V * somebody initialized the group 1397b6a758ecSAneesh Kumar K.V * return without doing anything 1398b6a758ecSAneesh Kumar K.V */ 1399b6a758ecSAneesh Kumar K.V goto err; 1400b6a758ecSAneesh Kumar K.V } 14012de8807bSAmir Goldstein 14022de8807bSAmir Goldstein page = e4b.bd_bitmap_page; 1403adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 14042de8807bSAmir Goldstein if (ret) 1405b6a758ecSAneesh Kumar K.V goto err; 14062de8807bSAmir Goldstein if (!PageUptodate(page)) { 1407b6a758ecSAneesh Kumar K.V ret = -EIO; 1408b6a758ecSAneesh Kumar K.V goto err; 1409b6a758ecSAneesh Kumar K.V } 1410b6a758ecSAneesh Kumar K.V 14112de8807bSAmir Goldstein if (e4b.bd_buddy_page == NULL) { 1412b6a758ecSAneesh Kumar K.V /* 1413b6a758ecSAneesh Kumar K.V * If both the bitmap and buddy are in 1414b6a758ecSAneesh Kumar K.V * the same page we don't need to force 1415b6a758ecSAneesh Kumar K.V * init the buddy 1416b6a758ecSAneesh Kumar K.V */ 14172de8807bSAmir Goldstein ret = 0; 1418b6a758ecSAneesh Kumar K.V goto err; 1419b6a758ecSAneesh Kumar K.V } 14202de8807bSAmir Goldstein /* init buddy cache */ 14212de8807bSAmir Goldstein page = e4b.bd_buddy_page; 1422adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 14232de8807bSAmir Goldstein if (ret) 14242de8807bSAmir Goldstein goto err; 14252de8807bSAmir Goldstein if (!PageUptodate(page)) { 1426b6a758ecSAneesh Kumar K.V ret = -EIO; 1427b6a758ecSAneesh Kumar K.V goto err; 1428b6a758ecSAneesh Kumar K.V } 1429b6a758ecSAneesh Kumar K.V err: 14302de8807bSAmir Goldstein ext4_mb_put_buddy_page_lock(&e4b); 1431b6a758ecSAneesh Kumar K.V return ret; 1432b6a758ecSAneesh Kumar K.V } 1433b6a758ecSAneesh Kumar K.V 14348a57d9d6SCurt Wohlgemuth /* 14358a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 14368a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 14378a57d9d6SCurt Wohlgemuth * calling this routine! 14388a57d9d6SCurt Wohlgemuth */ 14394ddfef7bSEric Sandeen static noinline_for_stack int 1440adb7ef60SKonstantin Khlebnikov ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1441adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b, gfp_t gfp) 1442c9de560dSAlex Tomas { 1443c9de560dSAlex Tomas int blocks_per_page; 1444c9de560dSAlex Tomas int block; 1445c9de560dSAlex Tomas int pnum; 1446c9de560dSAlex Tomas int poff; 1447c9de560dSAlex Tomas struct page *page; 1448fdf6c7a7SShen Feng int ret; 1449920313a7SAneesh Kumar K.V struct ext4_group_info *grp; 1450920313a7SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 1451920313a7SAneesh Kumar K.V struct inode *inode = sbi->s_buddy_cache; 1452c9de560dSAlex Tomas 1453b10a44c3STheodore Ts'o might_sleep(); 1454d3df1453SRitesh Harjani mb_debug(sb, "load group %u\n", group); 1455c9de560dSAlex Tomas 145609cbfeafSKirill A. 
Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1457920313a7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 14585354b2afSTheodore Ts'o if (!grp) 14595354b2afSTheodore Ts'o return -EFSCORRUPTED; 1460c9de560dSAlex Tomas 1461c9de560dSAlex Tomas e4b->bd_blkbits = sb->s_blocksize_bits; 1462529da704STao Ma e4b->bd_info = grp; 1463c9de560dSAlex Tomas e4b->bd_sb = sb; 1464c9de560dSAlex Tomas e4b->bd_group = group; 1465c9de560dSAlex Tomas e4b->bd_buddy_page = NULL; 1466c9de560dSAlex Tomas e4b->bd_bitmap_page = NULL; 1467c9de560dSAlex Tomas 1468f41c0750SAneesh Kumar K.V if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1469f41c0750SAneesh Kumar K.V /* 1470f41c0750SAneesh Kumar K.V * we need full data about the group 1471f41c0750SAneesh Kumar K.V * to make a good selection 1472f41c0750SAneesh Kumar K.V */ 1473adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, gfp); 1474f41c0750SAneesh Kumar K.V if (ret) 1475f41c0750SAneesh Kumar K.V return ret; 1476f41c0750SAneesh Kumar K.V } 1477f41c0750SAneesh Kumar K.V 1478c9de560dSAlex Tomas /* 1479c9de560dSAlex Tomas * the buddy cache inode stores the block bitmap 1480c9de560dSAlex Tomas * and buddy information in consecutive blocks. 1481c9de560dSAlex Tomas * So for each group we need two blocks. 1482c9de560dSAlex Tomas */ 1483c9de560dSAlex Tomas block = group * 2; 1484c9de560dSAlex Tomas pnum = block / blocks_per_page; 1485c9de560dSAlex Tomas poff = block % blocks_per_page; 1486c9de560dSAlex Tomas 1487c9de560dSAlex Tomas /* we could use find_or_create_page(), but it locks page 1488c9de560dSAlex Tomas * what we'd like to avoid in fast path ... */ 14892457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1490c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1491c9de560dSAlex Tomas if (page) 1492920313a7SAneesh Kumar K.V /* 1493920313a7SAneesh Kumar K.V * drop the page reference and try 1494920313a7SAneesh Kumar K.V * to get the page with lock. If we 1495920313a7SAneesh Kumar K.V * are not uptodate that implies 1496920313a7SAneesh Kumar K.V * somebody just created the page but 1497920313a7SAneesh Kumar K.V * is yet to initialize the same. So 1498920313a7SAneesh Kumar K.V * wait for it to initialize. 1499920313a7SAneesh Kumar K.V */ 150009cbfeafSKirill A. 
Shutemov put_page(page); 1501adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1502c9de560dSAlex Tomas if (page) { 150319b8b035STheodore Ts'o if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 150419b8b035STheodore Ts'o "ext4: bitmap's paging->mapping != inode->i_mapping\n")) { 150519b8b035STheodore Ts'o /* should never happen */ 150619b8b035STheodore Ts'o unlock_page(page); 150719b8b035STheodore Ts'o ret = -EINVAL; 150819b8b035STheodore Ts'o goto err; 150919b8b035STheodore Ts'o } 1510c9de560dSAlex Tomas if (!PageUptodate(page)) { 1511adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 1512fdf6c7a7SShen Feng if (ret) { 1513fdf6c7a7SShen Feng unlock_page(page); 1514fdf6c7a7SShen Feng goto err; 1515fdf6c7a7SShen Feng } 1516c9de560dSAlex Tomas mb_cmp_bitmaps(e4b, page_address(page) + 1517c9de560dSAlex Tomas (poff * sb->s_blocksize)); 1518c9de560dSAlex Tomas } 1519c9de560dSAlex Tomas unlock_page(page); 1520c9de560dSAlex Tomas } 1521c9de560dSAlex Tomas } 1522c57ab39bSYounger Liu if (page == NULL) { 1523c57ab39bSYounger Liu ret = -ENOMEM; 1524c57ab39bSYounger Liu goto err; 1525c57ab39bSYounger Liu } 1526c57ab39bSYounger Liu if (!PageUptodate(page)) { 1527fdf6c7a7SShen Feng ret = -EIO; 1528c9de560dSAlex Tomas goto err; 1529fdf6c7a7SShen Feng } 15302457aec6SMel Gorman 15312457aec6SMel Gorman /* Pages marked accessed already */ 1532c9de560dSAlex Tomas e4b->bd_bitmap_page = page; 1533c9de560dSAlex Tomas e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1534c9de560dSAlex Tomas 1535c9de560dSAlex Tomas block++; 1536c9de560dSAlex Tomas pnum = block / blocks_per_page; 1537c9de560dSAlex Tomas poff = block % blocks_per_page; 1538c9de560dSAlex Tomas 15392457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1540c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1541c9de560dSAlex Tomas if (page) 154209cbfeafSKirill A. 
Shutemov put_page(page); 1543adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1544c9de560dSAlex Tomas if (page) { 154519b8b035STheodore Ts'o if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 154619b8b035STheodore Ts'o "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { 154719b8b035STheodore Ts'o /* should never happen */ 154819b8b035STheodore Ts'o unlock_page(page); 154919b8b035STheodore Ts'o ret = -EINVAL; 155019b8b035STheodore Ts'o goto err; 155119b8b035STheodore Ts'o } 1552fdf6c7a7SShen Feng if (!PageUptodate(page)) { 1553adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1554adb7ef60SKonstantin Khlebnikov gfp); 1555fdf6c7a7SShen Feng if (ret) { 1556fdf6c7a7SShen Feng unlock_page(page); 1557fdf6c7a7SShen Feng goto err; 1558fdf6c7a7SShen Feng } 1559fdf6c7a7SShen Feng } 1560c9de560dSAlex Tomas unlock_page(page); 1561c9de560dSAlex Tomas } 1562c9de560dSAlex Tomas } 1563c57ab39bSYounger Liu if (page == NULL) { 1564c57ab39bSYounger Liu ret = -ENOMEM; 1565c57ab39bSYounger Liu goto err; 1566c57ab39bSYounger Liu } 1567c57ab39bSYounger Liu if (!PageUptodate(page)) { 1568fdf6c7a7SShen Feng ret = -EIO; 1569c9de560dSAlex Tomas goto err; 1570fdf6c7a7SShen Feng } 15712457aec6SMel Gorman 15722457aec6SMel Gorman /* Pages marked accessed already */ 1573c9de560dSAlex Tomas e4b->bd_buddy_page = page; 1574c9de560dSAlex Tomas e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1575c9de560dSAlex Tomas 1576c9de560dSAlex Tomas return 0; 1577c9de560dSAlex Tomas 1578c9de560dSAlex Tomas err: 157926626f11SYang Ruirui if (page) 158009cbfeafSKirill A. Shutemov put_page(page); 1581c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 158209cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1583285164b8SKemeng Shi 1584c9de560dSAlex Tomas e4b->bd_buddy = NULL; 1585c9de560dSAlex Tomas e4b->bd_bitmap = NULL; 1586fdf6c7a7SShen Feng return ret; 1587c9de560dSAlex Tomas } 1588c9de560dSAlex Tomas 1589adb7ef60SKonstantin Khlebnikov static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1590adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b) 1591adb7ef60SKonstantin Khlebnikov { 1592adb7ef60SKonstantin Khlebnikov return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1593adb7ef60SKonstantin Khlebnikov } 1594adb7ef60SKonstantin Khlebnikov 1595e39e07fdSJing Zhang static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1596c9de560dSAlex Tomas { 1597c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 159809cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1599c9de560dSAlex Tomas if (e4b->bd_buddy_page) 160009cbfeafSKirill A. 
Shutemov put_page(e4b->bd_buddy_page); 1601c9de560dSAlex Tomas } 1602c9de560dSAlex Tomas 1603c9de560dSAlex Tomas 1604c9de560dSAlex Tomas static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1605c9de560dSAlex Tomas { 1606ce3cca33SChunguang Xu int order = 1, max; 1607c9de560dSAlex Tomas void *bb; 1608c9de560dSAlex Tomas 1609c5e8f3f3STheodore Ts'o BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1610c9de560dSAlex Tomas BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1611c9de560dSAlex Tomas 1612c9de560dSAlex Tomas while (order <= e4b->bd_blkbits + 1) { 1613ce3cca33SChunguang Xu bb = mb_find_buddy(e4b, order, &max); 1614ce3cca33SChunguang Xu if (!mb_test_bit(block >> order, bb)) { 1615c9de560dSAlex Tomas /* this block is part of buddy of order 'order' */ 1616c9de560dSAlex Tomas return order; 1617c9de560dSAlex Tomas } 1618c9de560dSAlex Tomas order++; 1619c9de560dSAlex Tomas } 1620c9de560dSAlex Tomas return 0; 1621c9de560dSAlex Tomas } 1622c9de560dSAlex Tomas 1623955ce5f5SAneesh Kumar K.V static void mb_clear_bits(void *bm, int cur, int len) 1624c9de560dSAlex Tomas { 1625c9de560dSAlex Tomas __u32 *addr; 1626c9de560dSAlex Tomas 1627c9de560dSAlex Tomas len = cur + len; 1628c9de560dSAlex Tomas while (cur < len) { 1629c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1630c9de560dSAlex Tomas /* fast path: clear whole word at once */ 1631c9de560dSAlex Tomas addr = bm + (cur >> 3); 1632c9de560dSAlex Tomas *addr = 0; 1633c9de560dSAlex Tomas cur += 32; 1634c9de560dSAlex Tomas continue; 1635c9de560dSAlex Tomas } 1636e8134b27SAneesh Kumar K.V mb_clear_bit(cur, bm); 1637c9de560dSAlex Tomas cur++; 1638c9de560dSAlex Tomas } 1639c9de560dSAlex Tomas } 1640c9de560dSAlex Tomas 1641eabe0444SAndrey Sidorov /* clear bits in given range 1642eabe0444SAndrey Sidorov * will return first found zero bit if any, -1 otherwise 1643eabe0444SAndrey Sidorov */ 1644eabe0444SAndrey Sidorov static int mb_test_and_clear_bits(void *bm, int cur, int len) 1645eabe0444SAndrey Sidorov { 1646eabe0444SAndrey Sidorov __u32 *addr; 1647eabe0444SAndrey Sidorov int zero_bit = -1; 1648eabe0444SAndrey Sidorov 1649eabe0444SAndrey Sidorov len = cur + len; 1650eabe0444SAndrey Sidorov while (cur < len) { 1651eabe0444SAndrey Sidorov if ((cur & 31) == 0 && (len - cur) >= 32) { 1652eabe0444SAndrey Sidorov /* fast path: clear whole word at once */ 1653eabe0444SAndrey Sidorov addr = bm + (cur >> 3); 1654eabe0444SAndrey Sidorov if (*addr != (__u32)(-1) && zero_bit == -1) 1655eabe0444SAndrey Sidorov zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1656eabe0444SAndrey Sidorov *addr = 0; 1657eabe0444SAndrey Sidorov cur += 32; 1658eabe0444SAndrey Sidorov continue; 1659eabe0444SAndrey Sidorov } 1660eabe0444SAndrey Sidorov if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1661eabe0444SAndrey Sidorov zero_bit = cur; 1662eabe0444SAndrey Sidorov cur++; 1663eabe0444SAndrey Sidorov } 1664eabe0444SAndrey Sidorov 1665eabe0444SAndrey Sidorov return zero_bit; 1666eabe0444SAndrey Sidorov } 1667eabe0444SAndrey Sidorov 1668123e3016SRitesh Harjani void mb_set_bits(void *bm, int cur, int len) 1669c9de560dSAlex Tomas { 1670c9de560dSAlex Tomas __u32 *addr; 1671c9de560dSAlex Tomas 1672c9de560dSAlex Tomas len = cur + len; 1673c9de560dSAlex Tomas while (cur < len) { 1674c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1675c9de560dSAlex Tomas /* fast path: set whole word at once */ 1676c9de560dSAlex Tomas addr = bm + (cur >> 3); 1677c9de560dSAlex Tomas *addr = 0xffffffff; 1678c9de560dSAlex Tomas cur += 32; 1679c9de560dSAlex Tomas 
continue;
1680c9de560dSAlex Tomas }
1681e8134b27SAneesh Kumar K.V mb_set_bit(cur, bm);
1682c9de560dSAlex Tomas cur++;
1683c9de560dSAlex Tomas }
1684c9de560dSAlex Tomas }
1685c9de560dSAlex Tomas
1686eabe0444SAndrey Sidorov static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1687eabe0444SAndrey Sidorov {
1688eabe0444SAndrey Sidorov if (mb_test_bit(*bit + side, bitmap)) {
1689eabe0444SAndrey Sidorov mb_clear_bit(*bit, bitmap);
1690eabe0444SAndrey Sidorov (*bit) -= side;
1691eabe0444SAndrey Sidorov return 1;
1692eabe0444SAndrey Sidorov }
1693eabe0444SAndrey Sidorov else {
1694eabe0444SAndrey Sidorov (*bit) += side;
1695eabe0444SAndrey Sidorov mb_set_bit(*bit, bitmap);
1696eabe0444SAndrey Sidorov return -1;
1697eabe0444SAndrey Sidorov }
1698eabe0444SAndrey Sidorov }
1699eabe0444SAndrey Sidorov
1700eabe0444SAndrey Sidorov static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1701eabe0444SAndrey Sidorov {
1702eabe0444SAndrey Sidorov int max;
1703eabe0444SAndrey Sidorov int order = 1;
1704eabe0444SAndrey Sidorov void *buddy = mb_find_buddy(e4b, order, &max);
1705eabe0444SAndrey Sidorov
1706eabe0444SAndrey Sidorov while (buddy) {
1707eabe0444SAndrey Sidorov void *buddy2;
1708eabe0444SAndrey Sidorov
1709eabe0444SAndrey Sidorov /* Bits in range [first; last] are known to be set since
1710eabe0444SAndrey Sidorov * corresponding blocks were allocated. Bits in range
1711eabe0444SAndrey Sidorov * (first; last) will stay set because they form buddies on
1712eabe0444SAndrey Sidorov * the upper layer. We just deal with the borders if they don't
1713eabe0444SAndrey Sidorov * align with the upper layer and then go up.
1714eabe0444SAndrey Sidorov * Releasing the entire group is all about clearing a
1715eabe0444SAndrey Sidorov * single bit of the highest-order buddy.
1716eabe0444SAndrey Sidorov */
1717eabe0444SAndrey Sidorov
1718eabe0444SAndrey Sidorov /* Example:
1719eabe0444SAndrey Sidorov * ---------------------------------
1720eabe0444SAndrey Sidorov * | 1 | 1 | 1 | 1 |
1721eabe0444SAndrey Sidorov * ---------------------------------
1722eabe0444SAndrey Sidorov * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1723eabe0444SAndrey Sidorov * ---------------------------------
1724eabe0444SAndrey Sidorov * 0 1 2 3 4 5 6 7
1725eabe0444SAndrey Sidorov * \_____________________/
1726eabe0444SAndrey Sidorov *
1727eabe0444SAndrey Sidorov * Neither [1] nor [6] is aligned to the above layer.
1728eabe0444SAndrey Sidorov * Left neighbour [0] is free, so mark it busy,
1729eabe0444SAndrey Sidorov * decrease bb_counters and extend the range to
1730eabe0444SAndrey Sidorov * [0; 6].
1731eabe0444SAndrey Sidorov * Right neighbour [7] is busy. It can't be coalesced with [6], so
1732eabe0444SAndrey Sidorov * mark [6] free, increase bb_counters and shrink the range to
1733eabe0444SAndrey Sidorov * [0; 5].
1734eabe0444SAndrey Sidorov * Then shift the range to [0; 2], go up and do the same.
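/*
 * A toy model of the border accounting used when a run of clusters is freed
 * (compare mb_free_blocks() below): the bits flanking the run decide whether
 * the freed extent becomes a new fragment or merges with its neighbours.
 * The bitmap contents and the 8-cluster size are arbitrary assumptions.
 */
#include <stdio.h>

static int test_bit(const unsigned char *bm, int i)
{
	return (bm[i >> 3] >> (i & 7)) & 1; /* 1 = in use */
}

static void clear_bit(unsigned char *bm, int i)
{
	bm[i >> 3] &= ~(1 << (i & 7));
}

int main(void)
{
	unsigned char bitmap[1] = { 0xff }; /* 8 clusters, all in use */
	int first = 2, count = 3, last = first + count - 1;
	int left_free, right_free, i, delta = 0;

	left_free = first > 0 && !test_bit(bitmap, first - 1);
	right_free = last < 7 && !test_bit(bitmap, last + 1);

	for (i = first; i <= last; i++)
		clear_bit(bitmap, i);

	if (!left_free && !right_free)
		delta = 1;  /* freed run is a brand-new fragment */
	else if (left_free && right_free)
		delta = -1; /* freed run fuses two old fragments into one */

	printf("fragment delta: %+d\n", delta); /* +1 here */
	return 0;
}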
1735eabe0444SAndrey Sidorov */ 1736eabe0444SAndrey Sidorov 1737eabe0444SAndrey Sidorov 1738eabe0444SAndrey Sidorov if (first & 1) 1739eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1740eabe0444SAndrey Sidorov if (!(last & 1)) 1741eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1742eabe0444SAndrey Sidorov if (first > last) 1743eabe0444SAndrey Sidorov break; 1744eabe0444SAndrey Sidorov order++; 1745eabe0444SAndrey Sidorov 1746976620bdSKemeng Shi buddy2 = mb_find_buddy(e4b, order, &max); 1747976620bdSKemeng Shi if (!buddy2) { 1748eabe0444SAndrey Sidorov mb_clear_bits(buddy, first, last - first + 1); 1749eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1750eabe0444SAndrey Sidorov break; 1751eabe0444SAndrey Sidorov } 1752eabe0444SAndrey Sidorov first >>= 1; 1753eabe0444SAndrey Sidorov last >>= 1; 1754eabe0444SAndrey Sidorov buddy = buddy2; 1755eabe0444SAndrey Sidorov } 1756eabe0444SAndrey Sidorov } 1757eabe0444SAndrey Sidorov 17587e5a8cddSShen Feng static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1759c9de560dSAlex Tomas int first, int count) 1760c9de560dSAlex Tomas { 1761eabe0444SAndrey Sidorov int left_is_free = 0; 1762eabe0444SAndrey Sidorov int right_is_free = 0; 1763eabe0444SAndrey Sidorov int block; 1764eabe0444SAndrey Sidorov int last = first + count - 1; 1765c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 1766c9de560dSAlex Tomas 1767c99d1e6eSTheodore Ts'o if (WARN_ON(count == 0)) 1768c99d1e6eSTheodore Ts'o return; 1769eabe0444SAndrey Sidorov BUG_ON(last >= (sb->s_blocksize << 3)); 1770bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1771163a203dSDarrick J. Wong /* Don't bother if the block group is corrupt. */ 1772163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1773163a203dSDarrick J. Wong return; 1774163a203dSDarrick J. Wong 1775c9de560dSAlex Tomas mb_check_buddy(e4b); 1776c9de560dSAlex Tomas mb_free_blocks_double(inode, e4b, first, count); 1777c9de560dSAlex Tomas 177807b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1779c9de560dSAlex Tomas e4b->bd_info->bb_free += count; 1780c9de560dSAlex Tomas if (first < e4b->bd_info->bb_first_free) 1781c9de560dSAlex Tomas e4b->bd_info->bb_first_free = first; 1782c9de560dSAlex Tomas 1783eabe0444SAndrey Sidorov /* access memory sequentially: check left neighbour, 1784eabe0444SAndrey Sidorov * clear range and then check right neighbour 1785eabe0444SAndrey Sidorov */ 1786c9de560dSAlex Tomas if (first != 0) 1787eabe0444SAndrey Sidorov left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1788eabe0444SAndrey Sidorov block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1789eabe0444SAndrey Sidorov if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1790eabe0444SAndrey Sidorov right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1791c9de560dSAlex Tomas 1792eabe0444SAndrey Sidorov if (unlikely(block != -1)) { 1793e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 1794c9de560dSAlex Tomas ext4_fsblk_t blocknr; 17955661bd68SAkinobu Mita 17965661bd68SAkinobu Mita blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 179749598e04SJun Piao blocknr += EXT4_C2B(sbi, block); 17988016e29fSHarshad Shirwadkar if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 17995d1b1b3fSAneesh Kumar K.V ext4_grp_locked_error(sb, e4b->bd_group, 1800e29136f8STheodore Ts'o inode ? 
inode->i_ino : 0, 1801e29136f8STheodore Ts'o blocknr, 18028016e29fSHarshad Shirwadkar "freeing already freed block (bit %u); block bitmap corrupt.", 1803163a203dSDarrick J. Wong block); 18048016e29fSHarshad Shirwadkar ext4_mark_group_bitmap_corrupted( 18058016e29fSHarshad Shirwadkar sb, e4b->bd_group, 1806db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 18078016e29fSHarshad Shirwadkar } 1808eabe0444SAndrey Sidorov goto done; 1809c9de560dSAlex Tomas } 1810c9de560dSAlex Tomas 1811eabe0444SAndrey Sidorov /* let's maintain fragments counter */ 1812eabe0444SAndrey Sidorov if (left_is_free && right_is_free) 1813eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments--; 1814eabe0444SAndrey Sidorov else if (!left_is_free && !right_is_free) 1815eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments++; 1816c9de560dSAlex Tomas 1817eabe0444SAndrey Sidorov /* buddy[0] == bd_bitmap is a special case, so handle 1818eabe0444SAndrey Sidorov * it right away and let mb_buddy_mark_free stay free of 1819eabe0444SAndrey Sidorov * zero order checks. 1820eabe0444SAndrey Sidorov * Check if neighbours are to be coaleasced, 1821eabe0444SAndrey Sidorov * adjust bitmap bb_counters and borders appropriately. 1822eabe0444SAndrey Sidorov */ 1823eabe0444SAndrey Sidorov if (first & 1) { 1824eabe0444SAndrey Sidorov first += !left_is_free; 1825eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1826c9de560dSAlex Tomas } 1827eabe0444SAndrey Sidorov if (!(last & 1)) { 1828eabe0444SAndrey Sidorov last -= !right_is_free; 1829eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; 1830c9de560dSAlex Tomas } 1831eabe0444SAndrey Sidorov 1832eabe0444SAndrey Sidorov if (first <= last) 1833eabe0444SAndrey Sidorov mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1834eabe0444SAndrey Sidorov 1835eabe0444SAndrey Sidorov done: 18368a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, e4b->bd_info); 1837196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(sb, e4b->bd_info); 1838c9de560dSAlex Tomas mb_check_buddy(e4b); 1839c9de560dSAlex Tomas } 1840c9de560dSAlex Tomas 184115c006a2SRobin Dong static int mb_find_extent(struct ext4_buddy *e4b, int block, 1842c9de560dSAlex Tomas int needed, struct ext4_free_extent *ex) 1843c9de560dSAlex Tomas { 1844c9de560dSAlex Tomas int next = block; 184515c006a2SRobin Dong int max, order; 1846c9de560dSAlex Tomas void *buddy; 1847c9de560dSAlex Tomas 1848bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1849c9de560dSAlex Tomas BUG_ON(ex == NULL); 1850c9de560dSAlex Tomas 185115c006a2SRobin Dong buddy = mb_find_buddy(e4b, 0, &max); 1852c9de560dSAlex Tomas BUG_ON(buddy == NULL); 1853c9de560dSAlex Tomas BUG_ON(block >= max); 1854c9de560dSAlex Tomas if (mb_test_bit(block, buddy)) { 1855c9de560dSAlex Tomas ex->fe_len = 0; 1856c9de560dSAlex Tomas ex->fe_start = 0; 1857c9de560dSAlex Tomas ex->fe_group = 0; 1858c9de560dSAlex Tomas return 0; 1859c9de560dSAlex Tomas } 1860c9de560dSAlex Tomas 1861c9de560dSAlex Tomas /* find actual order */ 1862c9de560dSAlex Tomas order = mb_find_order_for_block(e4b, block); 1863c9de560dSAlex Tomas block = block >> order; 1864c9de560dSAlex Tomas 1865c9de560dSAlex Tomas ex->fe_len = 1 << order; 1866c9de560dSAlex Tomas ex->fe_start = block << order; 1867c9de560dSAlex Tomas ex->fe_group = e4b->bd_group; 1868c9de560dSAlex Tomas 1869c9de560dSAlex Tomas /* calc difference from given start */ 1870c9de560dSAlex Tomas next = next - ex->fe_start; 1871c9de560dSAlex Tomas ex->fe_len -= next; 1872c9de560dSAlex 
Tomas ex->fe_start += next; 1873c9de560dSAlex Tomas 1874c9de560dSAlex Tomas while (needed > ex->fe_len && 1875d8ec0c39SAlan Cox mb_find_buddy(e4b, order, &max)) { 1876c9de560dSAlex Tomas 1877c9de560dSAlex Tomas if (block + 1 >= max) 1878c9de560dSAlex Tomas break; 1879c9de560dSAlex Tomas 1880c9de560dSAlex Tomas next = (block + 1) * (1 << order); 1881c5e8f3f3STheodore Ts'o if (mb_test_bit(next, e4b->bd_bitmap)) 1882c9de560dSAlex Tomas break; 1883c9de560dSAlex Tomas 1884b051d8dcSRobin Dong order = mb_find_order_for_block(e4b, next); 1885c9de560dSAlex Tomas 1886c9de560dSAlex Tomas block = next >> order; 1887c9de560dSAlex Tomas ex->fe_len += 1 << order; 1888c9de560dSAlex Tomas } 1889c9de560dSAlex Tomas 189031562b95SJan Kara if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 189143c73221STheodore Ts'o /* Should never happen! (but apparently sometimes does?!?) */ 189243c73221STheodore Ts'o WARN_ON(1); 1893cd84bbbaSStephen Brennan ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1894cd84bbbaSStephen Brennan "corruption or bug in mb_find_extent " 189543c73221STheodore Ts'o "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 189643c73221STheodore Ts'o block, order, needed, ex->fe_group, ex->fe_start, 189743c73221STheodore Ts'o ex->fe_len, ex->fe_logical); 189843c73221STheodore Ts'o ex->fe_len = 0; 189943c73221STheodore Ts'o ex->fe_start = 0; 190043c73221STheodore Ts'o ex->fe_group = 0; 190143c73221STheodore Ts'o } 1902c9de560dSAlex Tomas return ex->fe_len; 1903c9de560dSAlex Tomas } 1904c9de560dSAlex Tomas 1905c9de560dSAlex Tomas static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1906c9de560dSAlex Tomas { 1907c9de560dSAlex Tomas int ord; 1908c9de560dSAlex Tomas int mlen = 0; 1909c9de560dSAlex Tomas int max = 0; 1910c9de560dSAlex Tomas int cur; 1911c9de560dSAlex Tomas int start = ex->fe_start; 1912c9de560dSAlex Tomas int len = ex->fe_len; 1913c9de560dSAlex Tomas unsigned ret = 0; 1914c9de560dSAlex Tomas int len0 = len; 1915c9de560dSAlex Tomas void *buddy; 1916218a6944Shanjinke bool split = false; 1917c9de560dSAlex Tomas 1918c9de560dSAlex Tomas BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1919c9de560dSAlex Tomas BUG_ON(e4b->bd_group != ex->fe_group); 1920bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1921c9de560dSAlex Tomas mb_check_buddy(e4b); 1922c9de560dSAlex Tomas mb_mark_used_double(e4b, start, len); 1923c9de560dSAlex Tomas 192407b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1925c9de560dSAlex Tomas e4b->bd_info->bb_free -= len; 1926c9de560dSAlex Tomas if (e4b->bd_info->bb_first_free == start) 1927c9de560dSAlex Tomas e4b->bd_info->bb_first_free += len; 1928c9de560dSAlex Tomas 1929c9de560dSAlex Tomas /* let's maintain fragments counter */ 1930c9de560dSAlex Tomas if (start != 0) 1931c5e8f3f3STheodore Ts'o mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1932c9de560dSAlex Tomas if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1933c5e8f3f3STheodore Ts'o max = !mb_test_bit(start + len, e4b->bd_bitmap); 1934c9de560dSAlex Tomas if (mlen && max) 1935c9de560dSAlex Tomas e4b->bd_info->bb_fragments++; 1936c9de560dSAlex Tomas else if (!mlen && !max) 1937c9de560dSAlex Tomas e4b->bd_info->bb_fragments--; 1938c9de560dSAlex Tomas 1939c9de560dSAlex Tomas /* let's maintain buddy itself */ 1940c9de560dSAlex Tomas while (len) { 1941218a6944Shanjinke if (!split) 1942c9de560dSAlex Tomas ord = mb_find_order_for_block(e4b, start); 1943c9de560dSAlex Tomas 1944c9de560dSAlex Tomas if (((start >> ord) << ord) == start 
&& len >= (1 << ord)) { 1945c9de560dSAlex Tomas /* the whole chunk may be allocated at once! */ 1946c9de560dSAlex Tomas mlen = 1 << ord; 1947218a6944Shanjinke if (!split) 1948c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1949218a6944Shanjinke else 1950218a6944Shanjinke split = false; 1951c9de560dSAlex Tomas BUG_ON((start >> ord) >= max); 1952c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1953c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1954c9de560dSAlex Tomas start += mlen; 1955c9de560dSAlex Tomas len -= mlen; 1956c9de560dSAlex Tomas BUG_ON(len < 0); 1957c9de560dSAlex Tomas continue; 1958c9de560dSAlex Tomas } 1959c9de560dSAlex Tomas 1960c9de560dSAlex Tomas /* store for history */ 1961c9de560dSAlex Tomas if (ret == 0) 1962c9de560dSAlex Tomas ret = len | (ord << 16); 1963c9de560dSAlex Tomas 1964c9de560dSAlex Tomas /* we have to split large buddy */ 1965c9de560dSAlex Tomas BUG_ON(ord <= 0); 1966c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1967c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1968c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1969c9de560dSAlex Tomas 1970c9de560dSAlex Tomas ord--; 1971c9de560dSAlex Tomas cur = (start >> ord) & ~1U; 1972c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1973c9de560dSAlex Tomas mb_clear_bit(cur, buddy); 1974c9de560dSAlex Tomas mb_clear_bit(cur + 1, buddy); 1975c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1976c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1977218a6944Shanjinke split = true; 1978c9de560dSAlex Tomas } 19798a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1980c9de560dSAlex Tomas 1981196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 1982123e3016SRitesh Harjani mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1983c9de560dSAlex Tomas mb_check_buddy(e4b); 1984c9de560dSAlex Tomas 1985c9de560dSAlex Tomas return ret; 1986c9de560dSAlex Tomas } 1987c9de560dSAlex Tomas 1988c9de560dSAlex Tomas /* 1989c9de560dSAlex Tomas * Must be called under group lock! 1990c9de560dSAlex Tomas */ 1991c9de560dSAlex Tomas static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1992c9de560dSAlex Tomas struct ext4_buddy *e4b) 1993c9de560dSAlex Tomas { 1994c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1995c9de560dSAlex Tomas int ret; 1996c9de560dSAlex Tomas 1997c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1998c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1999c9de560dSAlex Tomas 2000c9de560dSAlex Tomas ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2001c9de560dSAlex Tomas ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2002c9de560dSAlex Tomas ret = mb_mark_used(e4b, &ac->ac_b_ex); 2003c9de560dSAlex Tomas 2004c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 2005c9de560dSAlex Tomas * allocated blocks for history */ 2006c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 2007c9de560dSAlex Tomas 2008c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 2009c9de560dSAlex Tomas ac->ac_tail = ret & 0xffff; 2010c9de560dSAlex Tomas ac->ac_buddy = ret >> 16; 2011c9de560dSAlex Tomas 2012c3a326a6SAneesh Kumar K.V /* 2013c3a326a6SAneesh Kumar K.V * take the page reference. We want the page to be pinned 2014c3a326a6SAneesh Kumar K.V * so that we don't get a ext4_mb_init_cache_call for this 2015c3a326a6SAneesh Kumar K.V * group until we update the bitmap. That would mean we 2016c3a326a6SAneesh Kumar K.V * double allocate blocks. 
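/*
 * A userspace model of the split loop in mb_mark_used() above: when the
 * remaining request cannot consume a whole free chunk, one chunk of order
 * `ord` is taken off the free counters and replaced by two halves of order
 * `ord - 1` until the remainder fits. Counter values are invented for the
 * demo.
 */
#include <stdio.h>

#define MAX_ORDER 4

int main(void)
{
	/* per-order free-chunk counts: one free order-3 chunk (8 blocks) */
	int counters[MAX_ORDER] = { 0, 0, 0, 1 };
	int need = 3, ord = MAX_ORDER - 1;

	while (need > 0) {
		if (need >= (1 << ord)) { /* the whole chunk is allocated at once */
			counters[ord]--;
			need -= 1 << ord;
			continue;
		}
		/* split: one order-ord chunk becomes two order-(ord-1) halves */
		counters[ord]--;
		ord--;
		counters[ord] += 2;
	}
	for (ord = 0; ord < MAX_ORDER; ord++)
		printf("order %d: %d free\n", ord, counters[ord]);
	return 0;
}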
The reference is dropped 2017c3a326a6SAneesh Kumar K.V * in ext4_mb_release_context 2018c3a326a6SAneesh Kumar K.V */ 2019c9de560dSAlex Tomas ac->ac_bitmap_page = e4b->bd_bitmap_page; 2020c9de560dSAlex Tomas get_page(ac->ac_bitmap_page); 2021c9de560dSAlex Tomas ac->ac_buddy_page = e4b->bd_buddy_page; 2022c9de560dSAlex Tomas get_page(ac->ac_buddy_page); 2023c9de560dSAlex Tomas /* store last allocated for subsequent stream allocation */ 20244ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2025c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2026c9de560dSAlex Tomas sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2027c9de560dSAlex Tomas sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2028c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2029c9de560dSAlex Tomas } 203053f86b17SRitesh Harjani /* 203153f86b17SRitesh Harjani * As we've just preallocated more space than 203253f86b17SRitesh Harjani * user requested originally, we store allocated 203353f86b17SRitesh Harjani * space in a special descriptor. 203453f86b17SRitesh Harjani */ 203553f86b17SRitesh Harjani if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 203653f86b17SRitesh Harjani ext4_mb_new_preallocation(ac); 203753f86b17SRitesh Harjani 2038c9de560dSAlex Tomas } 2039c9de560dSAlex Tomas 2040c9de560dSAlex Tomas static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2041c9de560dSAlex Tomas struct ext4_buddy *e4b, 2042c9de560dSAlex Tomas int finish_group) 2043c9de560dSAlex Tomas { 2044c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2045c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2046c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2047c9de560dSAlex Tomas 2048032115fcSAneesh Kumar K.V if (ac->ac_status == AC_STATUS_FOUND) 2049032115fcSAneesh Kumar K.V return; 2050c9de560dSAlex Tomas /* 2051c9de560dSAlex Tomas * We don't want to scan for a whole year 2052c9de560dSAlex Tomas */ 2053c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan && 2054c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2055c9de560dSAlex Tomas ac->ac_status = AC_STATUS_BREAK; 2056c9de560dSAlex Tomas return; 2057c9de560dSAlex Tomas } 2058c9de560dSAlex Tomas 2059c9de560dSAlex Tomas /* 2060c9de560dSAlex Tomas * Haven't found good chunk so far, let's continue 2061c9de560dSAlex Tomas */ 2062c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) 2063c9de560dSAlex Tomas return; 2064c9de560dSAlex Tomas 20653582e745SOjaswin Mujoo if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2066c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2067c9de560dSAlex Tomas } 2068c9de560dSAlex Tomas 2069c9de560dSAlex Tomas /* 2070c9de560dSAlex Tomas * The routine checks whether found extent is good enough. If it is, 2071c9de560dSAlex Tomas * then the extent gets marked used and flag is set to the context 2072c9de560dSAlex Tomas * to stop scanning. Otherwise, the extent is compared with the 2073c9de560dSAlex Tomas * previous found extent and if new one is better, then it's stored 2074c9de560dSAlex Tomas * in the context. Later, the best found extent will be used, if 2075c9de560dSAlex Tomas * mballoc can't find good enough extent. 
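/*
 * A compact userspace model of this keep-the-best policy (the list that
 * follows spells out the same rules): take an exact fit immediately; while
 * undersized, prefer the largest extent seen; once the goal is met, prefer
 * the smallest extent that still covers it. The scan-count cutoffs
 * (s_mb_max_to_scan / s_mb_min_to_scan) are deliberately left out of this
 * sketch.
 */
#include <stdio.h>

struct extent { int len; };

/* return 1 when the newly found extent should replace the current best */
static int better(struct extent best, struct extent found, int goal)
{
	if (best.len == 0)
		return 1; /* first candidate */
	if (best.len < goal)
		return found.len > best.len; /* grow toward the goal */
	return found.len >= goal && found.len < best.len; /* shrink toward it */
}

int main(void)
{
	struct extent best = { 0 }, found[] = { {3}, {9}, {7}, {12} };
	int goal = 6, i;

	for (i = 0; i < 4; i++) {
		if (found[i].len == goal) { /* exact fit: stop scanning */
			best = found[i];
			break;
		}
		if (better(best, found[i], goal))
			best = found[i];
	}
	printf("best len = %d (goal %d)\n", best.len, goal); /* 7 */
	return 0;
}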
2076c9de560dSAlex Tomas *
20773582e745SOjaswin Mujoo * The algorithm used is roughly as follows:
20783582e745SOjaswin Mujoo *
20793582e745SOjaswin Mujoo * * If the free extent found is exactly as big as the goal, then
20803582e745SOjaswin Mujoo * stop the scan and use it immediately
20813582e745SOjaswin Mujoo *
20823582e745SOjaswin Mujoo * * If the free extent found is smaller than the goal, then keep retrying
20833582e745SOjaswin Mujoo * up to a max of sbi->s_mb_max_to_scan times (default 200). After
20843582e745SOjaswin Mujoo * that stop scanning and use whatever we have.
20853582e745SOjaswin Mujoo *
20863582e745SOjaswin Mujoo * * If the free extent found is bigger than the goal, then keep retrying
20873582e745SOjaswin Mujoo * up to a max of sbi->s_mb_min_to_scan times (default 10) before
20883582e745SOjaswin Mujoo * stopping the scan and using the extent.
20893582e745SOjaswin Mujoo *
20903582e745SOjaswin Mujoo *
2091c9de560dSAlex Tomas * FIXME: the real allocation policy is yet to be designed!
2092c9de560dSAlex Tomas */
2093c9de560dSAlex Tomas static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2094c9de560dSAlex Tomas struct ext4_free_extent *ex,
2095c9de560dSAlex Tomas struct ext4_buddy *e4b)
2096c9de560dSAlex Tomas {
2097c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex;
2098c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex;
2099c9de560dSAlex Tomas
2100c9de560dSAlex Tomas BUG_ON(ex->fe_len <= 0);
21017137d7a4STheodore Ts'o BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
21027137d7a4STheodore Ts'o BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2103c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2104c9de560dSAlex Tomas
2105c9de560dSAlex Tomas ac->ac_found++;
2106c9de560dSAlex Tomas
2107c9de560dSAlex Tomas /*
2108c9de560dSAlex Tomas * The special case - take what you catch first
2109c9de560dSAlex Tomas */
2110c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2111c9de560dSAlex Tomas *bex = *ex;
2112c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2113c9de560dSAlex Tomas return;
2114c9de560dSAlex Tomas }
2115c9de560dSAlex Tomas
2116c9de560dSAlex Tomas /*
2117c9de560dSAlex Tomas * Let's check whether the chunk is good enough
2118c9de560dSAlex Tomas */
2119c9de560dSAlex Tomas if (ex->fe_len == gex->fe_len) {
2120c9de560dSAlex Tomas *bex = *ex;
2121c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2122c9de560dSAlex Tomas return;
2123c9de560dSAlex Tomas }
2124c9de560dSAlex Tomas
2125c9de560dSAlex Tomas /*
2126c9de560dSAlex Tomas * If this is the first found extent, just store it in the context
2127c9de560dSAlex Tomas */
2128c9de560dSAlex Tomas if (bex->fe_len == 0) {
2129c9de560dSAlex Tomas *bex = *ex;
2130c9de560dSAlex Tomas return;
2131c9de560dSAlex Tomas }
2132c9de560dSAlex Tomas
2133c9de560dSAlex Tomas /*
2134c9de560dSAlex Tomas * If the newly found extent is better, store it in the context
2135c9de560dSAlex Tomas */
2136c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) {
2137c9de560dSAlex Tomas /* if the request isn't satisfied, any found extent
2138c9de560dSAlex Tomas * larger than the previous best one is better */
2139c9de560dSAlex Tomas if (ex->fe_len > bex->fe_len)
2140c9de560dSAlex Tomas *bex = *ex;
2141c9de560dSAlex Tomas } else if (ex->fe_len > gex->fe_len) {
2142c9de560dSAlex Tomas /* if the request is satisfied, then we try to find
2143c9de560dSAlex Tomas * an extent that still satisfies the request, but is
2144c9de560dSAlex Tomas * smaller than the previous one */
2145c9de560dSAlex Tomas if (ex->fe_len <
bex->fe_len) 2146c9de560dSAlex Tomas *bex = *ex; 2147c9de560dSAlex Tomas } 2148c9de560dSAlex Tomas 2149c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 0); 2150c9de560dSAlex Tomas } 2151c9de560dSAlex Tomas 2152089ceeccSEric Sandeen static noinline_for_stack 215385b67ffbSKemeng Shi void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2154c9de560dSAlex Tomas struct ext4_buddy *e4b) 2155c9de560dSAlex Tomas { 2156c9de560dSAlex Tomas struct ext4_free_extent ex = ac->ac_b_ex; 2157c9de560dSAlex Tomas ext4_group_t group = ex.fe_group; 2158c9de560dSAlex Tomas int max; 2159c9de560dSAlex Tomas int err; 2160c9de560dSAlex Tomas 2161c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2162c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2163c9de560dSAlex Tomas if (err) 216485b67ffbSKemeng Shi return; 2165c9de560dSAlex Tomas 2166c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 216715c006a2SRobin Dong max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2168c9de560dSAlex Tomas 2169c9de560dSAlex Tomas if (max > 0) { 2170c9de560dSAlex Tomas ac->ac_b_ex = ex; 2171c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2172c9de560dSAlex Tomas } 2173c9de560dSAlex Tomas 2174c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2175e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2176c9de560dSAlex Tomas } 2177c9de560dSAlex Tomas 2178089ceeccSEric Sandeen static noinline_for_stack 2179089ceeccSEric Sandeen int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2180c9de560dSAlex Tomas struct ext4_buddy *e4b) 2181c9de560dSAlex Tomas { 2182c9de560dSAlex Tomas ext4_group_t group = ac->ac_g_ex.fe_group; 2183c9de560dSAlex Tomas int max; 2184c9de560dSAlex Tomas int err; 2185c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2186838cd0cfSYongqiang Yang struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2187c9de560dSAlex Tomas struct ext4_free_extent ex; 2188c9de560dSAlex Tomas 21895354b2afSTheodore Ts'o if (!grp) 21905354b2afSTheodore Ts'o return -EFSCORRUPTED; 219101e4ca29SKemeng Shi if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2192c9de560dSAlex Tomas return 0; 2193838cd0cfSYongqiang Yang if (grp->bb_free == 0) 2194838cd0cfSYongqiang Yang return 0; 2195c9de560dSAlex Tomas 2196c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2197c9de560dSAlex Tomas if (err) 2198c9de560dSAlex Tomas return err; 2199c9de560dSAlex Tomas 2200163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2201163a203dSDarrick J. Wong ext4_mb_unload_buddy(e4b); 2202163a203dSDarrick J. Wong return 0; 2203163a203dSDarrick J. Wong } 2204163a203dSDarrick J. 
Wong 2205c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 220615c006a2SRobin Dong max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2207c9de560dSAlex Tomas ac->ac_g_ex.fe_len, &ex); 2208ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADFA11; /* debug value */ 2209c9de560dSAlex Tomas 2210c9de560dSAlex Tomas if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 2211c9de560dSAlex Tomas ext4_fsblk_t start; 2212c9de560dSAlex Tomas 22135661bd68SAkinobu Mita start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 22145661bd68SAkinobu Mita ex.fe_start; 2215c9de560dSAlex Tomas /* use do_div to get remainder (would be 64-bit modulo) */ 2216c9de560dSAlex Tomas if (do_div(start, sbi->s_stripe) == 0) { 2217c9de560dSAlex Tomas ac->ac_found++; 2218c9de560dSAlex Tomas ac->ac_b_ex = ex; 2219c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2220c9de560dSAlex Tomas } 2221c9de560dSAlex Tomas } else if (max >= ac->ac_g_ex.fe_len) { 2222c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2223c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2224c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2225c9de560dSAlex Tomas ac->ac_found++; 2226c9de560dSAlex Tomas ac->ac_b_ex = ex; 2227c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2228c9de560dSAlex Tomas } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2229c9de560dSAlex Tomas /* Sometimes, a caller may want to merge even a small 2230c9de560dSAlex Tomas * number of blocks into an existing extent */ 2231c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2232c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2233c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2234c9de560dSAlex Tomas ac->ac_found++; 2235c9de560dSAlex Tomas ac->ac_b_ex = ex; 2236c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2237c9de560dSAlex Tomas } 2238c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2239e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2240c9de560dSAlex Tomas 2241c9de560dSAlex Tomas return 0; 2242c9de560dSAlex Tomas } 2243c9de560dSAlex Tomas 2244c9de560dSAlex Tomas /* 2245c9de560dSAlex Tomas * The routine scans buddy structures (not bitmap!) 
from the given order 2246c9de560dSAlex Tomas * up to the max order and tries to find a big enough chunk to satisfy the request 2247c9de560dSAlex Tomas */ 2248089ceeccSEric Sandeen static noinline_for_stack 2249089ceeccSEric Sandeen void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2250c9de560dSAlex Tomas struct ext4_buddy *e4b) 2251c9de560dSAlex Tomas { 2252c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2253c9de560dSAlex Tomas struct ext4_group_info *grp = e4b->bd_info; 2254c9de560dSAlex Tomas void *buddy; 2255c9de560dSAlex Tomas int i; 2256c9de560dSAlex Tomas int k; 2257c9de560dSAlex Tomas int max; 2258c9de560dSAlex Tomas 2259c9de560dSAlex Tomas BUG_ON(ac->ac_2order <= 0); 22604b68f6dfSHarshad Shirwadkar for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2261c9de560dSAlex Tomas if (grp->bb_counters[i] == 0) 2262c9de560dSAlex Tomas continue; 2263c9de560dSAlex Tomas 2264c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, i, &max); 226519b8b035STheodore Ts'o if (WARN_RATELIMIT(buddy == NULL, 226619b8b035STheodore Ts'o "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 226719b8b035STheodore Ts'o continue; 2268c9de560dSAlex Tomas 2269ffad0a44SAneesh Kumar K.V k = mb_find_next_zero_bit(buddy, max, 0); 2270eb576086SDmitry Monakhov if (k >= max) { 2271eb576086SDmitry Monakhov ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2272eb576086SDmitry Monakhov "%d free clusters of order %d. But found 0", 2273eb576086SDmitry Monakhov grp->bb_counters[i], i); 2274eb576086SDmitry Monakhov ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2275eb576086SDmitry Monakhov e4b->bd_group, 2276eb576086SDmitry Monakhov EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2277eb576086SDmitry Monakhov break; 2278eb576086SDmitry Monakhov } 2279c9de560dSAlex Tomas ac->ac_found++; 2280c9de560dSAlex Tomas 2281c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 1 << i; 2282c9de560dSAlex Tomas ac->ac_b_ex.fe_start = k << i; 2283c9de560dSAlex Tomas ac->ac_b_ex.fe_group = e4b->bd_group; 2284c9de560dSAlex Tomas 2285c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2286c9de560dSAlex Tomas 228753f86b17SRitesh Harjani BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2288c9de560dSAlex Tomas 2289c9de560dSAlex Tomas if (EXT4_SB(sb)->s_mb_stats) 2290c9de560dSAlex Tomas atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2291c9de560dSAlex Tomas 2292c9de560dSAlex Tomas break; 2293c9de560dSAlex Tomas } 2294c9de560dSAlex Tomas } 2295c9de560dSAlex Tomas 2296c9de560dSAlex Tomas /* 2297c9de560dSAlex Tomas * The routine scans the group and measures all found extents. 2298c9de560dSAlex Tomas * In order to optimize scanning, the caller must pass the number of 2299c9de560dSAlex Tomas * free blocks in the group, so the routine can know the upper limit. 
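 *
 * Editor's sketch, not kernel code: the walk below is essentially
 *
 *	i = first_free;
 *	while (free > 0 && status == CONTINUE) {
 *		i = next_zero_bit(bitmap, clusters_per_group, i);
 *		len = length_of_free_extent_at(i);
 *		measure(i, len);
 *		i += len;
 *		free -= len;
 *	}
 *
 * where next_zero_bit(), length_of_free_extent_at() and measure()
 * are stand-ins for mb_find_next_zero_bit(), mb_find_extent() and
 * ext4_mb_measure_extent().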
2300c9de560dSAlex Tomas */ 2301089ceeccSEric Sandeen static noinline_for_stack 2302089ceeccSEric Sandeen void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2303c9de560dSAlex Tomas struct ext4_buddy *e4b) 2304c9de560dSAlex Tomas { 2305c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2306c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2307c9de560dSAlex Tomas struct ext4_free_extent ex; 2308c9de560dSAlex Tomas int i; 2309c9de560dSAlex Tomas int free; 2310c9de560dSAlex Tomas 2311c9de560dSAlex Tomas free = e4b->bd_info->bb_free; 2312907ea529STheodore Ts'o if (WARN_ON(free <= 0)) 2313907ea529STheodore Ts'o return; 2314c9de560dSAlex Tomas 2315c9de560dSAlex Tomas i = e4b->bd_info->bb_first_free; 2316c9de560dSAlex Tomas 2317c9de560dSAlex Tomas while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2318ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, 23197137d7a4STheodore Ts'o EXT4_CLUSTERS_PER_GROUP(sb), i); 23207137d7a4STheodore Ts'o if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 232126346ff6SAneesh Kumar K.V /* 2322e56eb659SAneesh Kumar K.V * If we have a corrupt bitmap, we won't find any 232326346ff6SAneesh Kumar K.V * free blocks even though group info says we 2324b483bb77SRandy Dunlap * have free blocks 232526346ff6SAneesh Kumar K.V */ 2326e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 232753accfa9STheodore Ts'o "%d free clusters as per " 2328fde4d95aSTheodore Ts'o "group info. But bitmap says 0", 232926346ff6SAneesh Kumar K.V free); 2330736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2331736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2332c9de560dSAlex Tomas break; 2333c9de560dSAlex Tomas } 2334c9de560dSAlex Tomas 233515c006a2SRobin Dong mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2336907ea529STheodore Ts'o if (WARN_ON(ex.fe_len <= 0)) 2337907ea529STheodore Ts'o break; 233826346ff6SAneesh Kumar K.V if (free < ex.fe_len) { 2339e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 234053accfa9STheodore Ts'o "%d free clusters as per " 2341fde4d95aSTheodore Ts'o "group info. But got %d blocks", 234226346ff6SAneesh Kumar K.V free, ex.fe_len); 2343736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2344736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2345e56eb659SAneesh Kumar K.V /* 2346e56eb659SAneesh Kumar K.V * The number of free blocks differs. This mostly 2347e56eb659SAneesh Kumar K.V * indicates that the bitmap is corrupt. So exit 2348e56eb659SAneesh Kumar K.V * without claiming the space. 
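 *
 * Once EXT4_GROUP_INFO_BBITMAP_CORRUPT is set here, subsequent
 * calls to ext4_mb_good_group() reject this group up front, so a
 * corrupt bitmap is not rescanned on every allocation.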
2349e56eb659SAneesh Kumar K.V */ 2350e56eb659SAneesh Kumar K.V break; 235126346ff6SAneesh Kumar K.V } 2352ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADC0DE; /* debug value */ 2353c9de560dSAlex Tomas ext4_mb_measure_extent(ac, &ex, e4b); 2354c9de560dSAlex Tomas 2355c9de560dSAlex Tomas i += ex.fe_len; 2356c9de560dSAlex Tomas free -= ex.fe_len; 2357c9de560dSAlex Tomas } 2358c9de560dSAlex Tomas 2359c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 1); 2360c9de560dSAlex Tomas } 2361c9de560dSAlex Tomas 2362c9de560dSAlex Tomas /* 2363c9de560dSAlex Tomas * This is a special case for storage devices like raid5; 2364506bf2d8SEric Sandeen * we try to find stripe-aligned chunks for stripe-size-multiple requests 2365c9de560dSAlex Tomas */ 2366089ceeccSEric Sandeen static noinline_for_stack 2367089ceeccSEric Sandeen void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2368c9de560dSAlex Tomas struct ext4_buddy *e4b) 2369c9de560dSAlex Tomas { 2370c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2371c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2372c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2373c9de560dSAlex Tomas struct ext4_free_extent ex; 2374c9de560dSAlex Tomas ext4_fsblk_t first_group_block; 2375c9de560dSAlex Tomas ext4_fsblk_t a; 2376c9de560dSAlex Tomas ext4_grpblk_t i; 2377c9de560dSAlex Tomas int max; 2378c9de560dSAlex Tomas 2379c9de560dSAlex Tomas BUG_ON(sbi->s_stripe == 0); 2380c9de560dSAlex Tomas 2381c9de560dSAlex Tomas /* find first stripe-aligned block in group */ 23825661bd68SAkinobu Mita first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 23835661bd68SAkinobu Mita 2384c9de560dSAlex Tomas a = first_group_block + sbi->s_stripe - 1; 2385c9de560dSAlex Tomas do_div(a, sbi->s_stripe); 2386c9de560dSAlex Tomas i = (a * sbi->s_stripe) - first_group_block; 2387c9de560dSAlex Tomas 23887137d7a4STheodore Ts'o while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2389c9de560dSAlex Tomas if (!mb_test_bit(i, bitmap)) { 239015c006a2SRobin Dong max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2391c9de560dSAlex Tomas if (max >= sbi->s_stripe) { 2392c9de560dSAlex Tomas ac->ac_found++; 2393ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADF00D; /* debug value */ 2394c9de560dSAlex Tomas ac->ac_b_ex = ex; 2395c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2396c9de560dSAlex Tomas break; 2397c9de560dSAlex Tomas } 2398c9de560dSAlex Tomas } 2399c9de560dSAlex Tomas i += sbi->s_stripe; 2400c9de560dSAlex Tomas } 2401c9de560dSAlex Tomas } 2402c9de560dSAlex Tomas 240342ac1848SLukas Czerner /* 24048ef123feSRitesh Harjani * This is also called BEFORE we load the buddy bitmap. 240542ac1848SLukas Czerner * Returns true if the group is suitable 24068ef123feSRitesh Harjani * for the allocation, false otherwise. 
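 *
 * For reference, the criteria checked below are, from strictest to
 * loosest:
 *
 *	cr 0 - only groups whose largest free order can satisfy the
 *	       power-of-two request
 *	cr 1 - groups whose average fragment size (free/fragments)
 *	       reaches the goal length
 *	cr 2 - groups with at least ac_g_ex.fe_len free clusters
 *	cr 3 - any group with free space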
240742ac1848SLukas Czerner */ 24088ef123feSRitesh Harjani static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2409c9de560dSAlex Tomas ext4_group_t group, int cr) 2410c9de560dSAlex Tomas { 24118ef123feSRitesh Harjani ext4_grpblk_t free, fragments; 2412a4912123STheodore Ts'o int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2413c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2414c9de560dSAlex Tomas 2415c9de560dSAlex Tomas BUG_ON(cr < 0 || cr >= 4); 24168a57d9d6SCurt Wohlgemuth 24175354b2afSTheodore Ts'o if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp)) 24188ef123feSRitesh Harjani return false; 241901fc48e8STheodore Ts'o 2420dddcd2f9Sbrookxu free = grp->bb_free; 2421dddcd2f9Sbrookxu if (free == 0) 24228ef123feSRitesh Harjani return false; 2423c9de560dSAlex Tomas 2424c9de560dSAlex Tomas fragments = grp->bb_fragments; 2425c9de560dSAlex Tomas if (fragments == 0) 24268ef123feSRitesh Harjani return false; 2427c9de560dSAlex Tomas 2428c9de560dSAlex Tomas switch (cr) { 2429c9de560dSAlex Tomas case 0: 2430c9de560dSAlex Tomas BUG_ON(ac->ac_2order == 0); 2431c9de560dSAlex Tomas 2432a4912123STheodore Ts'o /* Avoid using the first bg of a flexgroup for data files */ 2433a4912123STheodore Ts'o if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2434a4912123STheodore Ts'o (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2435a4912123STheodore Ts'o ((group % flex_size) == 0)) 24368ef123feSRitesh Harjani return false; 2437a4912123STheodore Ts'o 2438dddcd2f9Sbrookxu if (free < ac->ac_g_ex.fe_len) 2439dddcd2f9Sbrookxu return false; 2440dddcd2f9Sbrookxu 24414b68f6dfSHarshad Shirwadkar if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 24428ef123feSRitesh Harjani return true; 244340ae3487STheodore Ts'o 244440ae3487STheodore Ts'o if (grp->bb_largest_free_order < ac->ac_2order) 24458ef123feSRitesh Harjani return false; 244640ae3487STheodore Ts'o 24478ef123feSRitesh Harjani return true; 2448c9de560dSAlex Tomas case 1: 2449c9de560dSAlex Tomas if ((free / fragments) >= ac->ac_g_ex.fe_len) 24508ef123feSRitesh Harjani return true; 2451c9de560dSAlex Tomas break; 2452c9de560dSAlex Tomas case 2: 2453c9de560dSAlex Tomas if (free >= ac->ac_g_ex.fe_len) 24548ef123feSRitesh Harjani return true; 2455c9de560dSAlex Tomas break; 2456c9de560dSAlex Tomas case 3: 24578ef123feSRitesh Harjani return true; 2458c9de560dSAlex Tomas default: 2459c9de560dSAlex Tomas BUG(); 2460c9de560dSAlex Tomas } 2461c9de560dSAlex Tomas 24628ef123feSRitesh Harjani return false; 24638ef123feSRitesh Harjani } 24648ef123feSRitesh Harjani 24658ef123feSRitesh Harjani /* 24668ef123feSRitesh Harjani * This could return negative error code if something goes wrong 24678ef123feSRitesh Harjani * during ext4_mb_init_group(). This should not be called with 24688ef123feSRitesh Harjani * ext4_lock_group() held. 2469a5fda113STheodore Ts'o * 2470a5fda113STheodore Ts'o * Note: because we are conditionally operating with the group lock in 2471a5fda113STheodore Ts'o * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2472a5fda113STheodore Ts'o * function using __acquire and __release. This means we need to be 2473a5fda113STheodore Ts'o * super careful before messing with the error path handling via "goto 2474a5fda113STheodore Ts'o * out"! 
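 *
 * Editor's note on the idiom: the pattern used below is
 *
 *	if (should_lock) {
 *		ext4_lock_group(sb, group);
 *		__release(ext4_group_lock_ptr(sb, group));
 *	}
 *
 * (and the mirror image with __acquire() before unlocking); the
 * __acquire/__release annotations generate no code, they only
 * rebalance sparse's lock-context tracking across the conditional.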
24758ef123feSRitesh Harjani */ 24768ef123feSRitesh Harjani static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 24778ef123feSRitesh Harjani ext4_group_t group, int cr) 24788ef123feSRitesh Harjani { 24798ef123feSRitesh Harjani struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 248099377830SRitesh Harjani struct super_block *sb = ac->ac_sb; 2481c1d2c7d4SAlex Zhuravlev struct ext4_sb_info *sbi = EXT4_SB(sb); 248299377830SRitesh Harjani bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 24838ef123feSRitesh Harjani ext4_grpblk_t free; 24848ef123feSRitesh Harjani int ret = 0; 24858ef123feSRitesh Harjani 24865354b2afSTheodore Ts'o if (!grp) 24875354b2afSTheodore Ts'o return -EFSCORRUPTED; 2488a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats) 2489a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2490a5fda113STheodore Ts'o if (should_lock) { 249199377830SRitesh Harjani ext4_lock_group(sb, group); 2492a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2493a5fda113STheodore Ts'o } 24948ef123feSRitesh Harjani free = grp->bb_free; 24958ef123feSRitesh Harjani if (free == 0) 24968ef123feSRitesh Harjani goto out; 24978ef123feSRitesh Harjani if (cr <= 2 && free < ac->ac_g_ex.fe_len) 24988ef123feSRitesh Harjani goto out; 24998ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 25008ef123feSRitesh Harjani goto out; 2501a5fda113STheodore Ts'o if (should_lock) { 2502a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 250399377830SRitesh Harjani ext4_unlock_group(sb, group); 2504a5fda113STheodore Ts'o } 25058ef123feSRitesh Harjani 25068ef123feSRitesh Harjani /* We only do this if the grp has never been initialized */ 25078ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2508c1d2c7d4SAlex Zhuravlev struct ext4_group_desc *gdp = 2509c1d2c7d4SAlex Zhuravlev ext4_get_group_desc(sb, group, NULL); 2510c1d2c7d4SAlex Zhuravlev int ret; 2511c1d2c7d4SAlex Zhuravlev 2512c1d2c7d4SAlex Zhuravlev /* cr=0/1 is a very optimistic search to find large 2513c1d2c7d4SAlex Zhuravlev * good chunks almost for free. If buddy data is not 2514c1d2c7d4SAlex Zhuravlev * ready, then this optimization makes no sense. But 2515c1d2c7d4SAlex Zhuravlev * we never skip the first block group in a flex_bg, 2516c1d2c7d4SAlex Zhuravlev * since this gets used for metadata block allocation, 2517c1d2c7d4SAlex Zhuravlev * and we want to make sure we locate metadata blocks 2518c1d2c7d4SAlex Zhuravlev * in the first block group in the flex_bg if possible. 
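 *
 * For reference: with s_log_groups_per_flex == N, the first group
 * of a flex_bg is exactly the one satisfying
 *
 *	(group & ((1 << N) - 1)) == 0
 *
 * which is the test used below to decide whether skipping an
 * uninitialized group is allowed.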
2519c1d2c7d4SAlex Zhuravlev */ 2520c1d2c7d4SAlex Zhuravlev if (cr < 2 && 2521c1d2c7d4SAlex Zhuravlev (!sbi->s_log_groups_per_flex || 2522c1d2c7d4SAlex Zhuravlev ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2523c1d2c7d4SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2524c1d2c7d4SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2525c1d2c7d4SAlex Zhuravlev return 0; 2526c1d2c7d4SAlex Zhuravlev ret = ext4_mb_init_group(sb, group, GFP_NOFS); 25278ef123feSRitesh Harjani if (ret) 25288ef123feSRitesh Harjani return ret; 25298ef123feSRitesh Harjani } 25308ef123feSRitesh Harjani 2531a5fda113STheodore Ts'o if (should_lock) { 253299377830SRitesh Harjani ext4_lock_group(sb, group); 2533a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2534a5fda113STheodore Ts'o } 25358ef123feSRitesh Harjani ret = ext4_mb_good_group(ac, group, cr); 25368ef123feSRitesh Harjani out: 2537a5fda113STheodore Ts'o if (should_lock) { 2538a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 253999377830SRitesh Harjani ext4_unlock_group(sb, group); 2540a5fda113STheodore Ts'o } 25418ef123feSRitesh Harjani return ret; 2542c9de560dSAlex Tomas } 2543c9de560dSAlex Tomas 2544cfd73237SAlex Zhuravlev /* 2545cfd73237SAlex Zhuravlev * Start prefetching @nr block bitmaps starting at @group. 2546cfd73237SAlex Zhuravlev * Return the next group which needs to be prefetched. 2547cfd73237SAlex Zhuravlev */ 25483d392b26STheodore Ts'o ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2549cfd73237SAlex Zhuravlev unsigned int nr, int *cnt) 2550cfd73237SAlex Zhuravlev { 2551cfd73237SAlex Zhuravlev ext4_group_t ngroups = ext4_get_groups_count(sb); 2552cfd73237SAlex Zhuravlev struct buffer_head *bh; 2553cfd73237SAlex Zhuravlev struct blk_plug plug; 2554cfd73237SAlex Zhuravlev 2555cfd73237SAlex Zhuravlev blk_start_plug(&plug); 2556cfd73237SAlex Zhuravlev while (nr-- > 0) { 2557cfd73237SAlex Zhuravlev struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2558cfd73237SAlex Zhuravlev NULL); 2559cfd73237SAlex Zhuravlev struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2560cfd73237SAlex Zhuravlev 2561cfd73237SAlex Zhuravlev /* 2562cfd73237SAlex Zhuravlev * Prefetch block groups with free blocks; but don't 2563cfd73237SAlex Zhuravlev * bother if it is marked uninitialized on disk, since 2564cfd73237SAlex Zhuravlev * it won't require I/O to read. Also only try to 2565cfd73237SAlex Zhuravlev * prefetch once, so we avoid getblk() call, which can 2566cfd73237SAlex Zhuravlev * be expensive. 
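 *
 * Usage sketch (editor's illustration): callers typically prefetch
 * a flex_bg-aligned window and remember where to continue, e.g.
 *
 *	nr = 1 << sbi->s_log_groups_per_flex;
 *	nr -= group & (nr - 1);
 *	nr = min(nr, sbi->s_mb_prefetch);
 *	prefetch_grp = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
 *
 * which is how ext4_mb_regular_allocator() drives this routine.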
2567cfd73237SAlex Zhuravlev */ 25685354b2afSTheodore Ts'o if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2569cfd73237SAlex Zhuravlev EXT4_MB_GRP_NEED_INIT(grp) && 2570cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 && 2571cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2572cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2573cfd73237SAlex Zhuravlev bh = ext4_read_block_bitmap_nowait(sb, group, true); 2574cfd73237SAlex Zhuravlev if (bh && !IS_ERR(bh)) { 2575cfd73237SAlex Zhuravlev if (!buffer_uptodate(bh) && cnt) 2576cfd73237SAlex Zhuravlev (*cnt)++; 2577cfd73237SAlex Zhuravlev brelse(bh); 2578cfd73237SAlex Zhuravlev } 2579cfd73237SAlex Zhuravlev } 2580cfd73237SAlex Zhuravlev if (++group >= ngroups) 2581cfd73237SAlex Zhuravlev group = 0; 2582cfd73237SAlex Zhuravlev } 2583cfd73237SAlex Zhuravlev blk_finish_plug(&plug); 2584cfd73237SAlex Zhuravlev return group; 2585cfd73237SAlex Zhuravlev } 2586cfd73237SAlex Zhuravlev 2587cfd73237SAlex Zhuravlev /* 2588cfd73237SAlex Zhuravlev * Prefetching reads the block bitmap into the buffer cache; but we 2589cfd73237SAlex Zhuravlev * need to make sure that the buddy bitmap in the page cache has been 2590cfd73237SAlex Zhuravlev * initialized. Note that ext4_mb_init_group() will block if the I/O 2591cfd73237SAlex Zhuravlev * is not yet completed, or indeed if the read was never initiated, 2592cfd73237SAlex Zhuravlev * i.e. if ext4_mb_prefetch did not start the I/O. 2593cfd73237SAlex Zhuravlev * 2594cfd73237SAlex Zhuravlev * TODO: We should actually kick off the buddy bitmap setup in a work 2595cfd73237SAlex Zhuravlev * queue when the buffer I/O is completed, so that we don't block 2596cfd73237SAlex Zhuravlev * waiting for the block allocation bitmap read to finish when 2597cfd73237SAlex Zhuravlev * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
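 *
 * Note that the loop below walks *backwards* from @group over the
 * @nr most recently prefetched groups, the mirror image of the
 * forward walk in ext4_mb_prefetch(), so both calls can share the
 * same (group, nr) cursor returned by the prefetch pass.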
2598cfd73237SAlex Zhuravlev */ 25993d392b26STheodore Ts'o void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2600cfd73237SAlex Zhuravlev unsigned int nr) 2601cfd73237SAlex Zhuravlev { 260222fab984SKemeng Shi struct ext4_group_desc *gdp; 260322fab984SKemeng Shi struct ext4_group_info *grp; 2604cfd73237SAlex Zhuravlev 260522fab984SKemeng Shi while (nr-- > 0) { 2606cfd73237SAlex Zhuravlev if (!group) 2607cfd73237SAlex Zhuravlev group = ext4_get_groups_count(sb); 2608cfd73237SAlex Zhuravlev group--; 260922fab984SKemeng Shi gdp = ext4_get_group_desc(sb, group, NULL); 2610cfd73237SAlex Zhuravlev grp = ext4_get_group_info(sb, group); 2611cfd73237SAlex Zhuravlev 26125354b2afSTheodore Ts'o if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && 2613cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 && 2614cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2615cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2616cfd73237SAlex Zhuravlev if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2617cfd73237SAlex Zhuravlev break; 2618cfd73237SAlex Zhuravlev } 2619cfd73237SAlex Zhuravlev } 2620cfd73237SAlex Zhuravlev } 2621cfd73237SAlex Zhuravlev 26224ddfef7bSEric Sandeen static noinline_for_stack int 26234ddfef7bSEric Sandeen ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2624c9de560dSAlex Tomas { 2625cfd73237SAlex Zhuravlev ext4_group_t prefetch_grp = 0, ngroups, group, i; 26264fca50d4SJan Kara int cr = -1, new_cr; 262742ac1848SLukas Czerner int err = 0, first_err = 0; 2628cfd73237SAlex Zhuravlev unsigned int nr = 0, prefetch_ios = 0; 2629c9de560dSAlex Tomas struct ext4_sb_info *sbi; 2630c9de560dSAlex Tomas struct super_block *sb; 2631c9de560dSAlex Tomas struct ext4_buddy e4b; 263266d5e027Sbrookxu int lost; 2633c9de560dSAlex Tomas 2634c9de560dSAlex Tomas sb = ac->ac_sb; 2635c9de560dSAlex Tomas sbi = EXT4_SB(sb); 26368df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 2637fb0a387dSEric Sandeen /* non-extent files are limited to low blocks/groups */ 263812e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2639fb0a387dSEric Sandeen ngroups = sbi->s_blockfile_groups; 2640fb0a387dSEric Sandeen 2641c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2642c9de560dSAlex Tomas 2643c9de560dSAlex Tomas /* first, try the goal */ 2644c9de560dSAlex Tomas err = ext4_mb_find_by_goal(ac, &e4b); 2645c9de560dSAlex Tomas if (err || ac->ac_status == AC_STATUS_FOUND) 2646c9de560dSAlex Tomas goto out; 2647c9de560dSAlex Tomas 2648c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2649c9de560dSAlex Tomas goto out; 2650c9de560dSAlex Tomas 2651c9de560dSAlex Tomas /* 2652e9a3cd48Sbrookxu * ac->ac_2order is set only if the fe_len is a power of 2. 2653e9a3cd48Sbrookxu * If ac->ac_2order is set, we also set the criteria to 0 so that we 2654c9de560dSAlex Tomas * try exact allocation using the buddy. 2655c9de560dSAlex Tomas */ 2656c9de560dSAlex Tomas i = fls(ac->ac_g_ex.fe_len); 2657c9de560dSAlex Tomas ac->ac_2order = 0; 2658c9de560dSAlex Tomas /* 2659c9de560dSAlex Tomas * We search using buddy data only if the order of the request 2660c9de560dSAlex Tomas * is greater than or equal to sbi->s_mb_order2_reqs 2661b713a5ecSTheodore Ts'o * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2662d9b22cf9SJan Kara * We also support searching for power-of-two requests only for 2663d9b22cf9SJan Kara * requests up to the maximum buddy size we have constructed. 
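 *
 * For reference, the bit trick used below: with i = fls(len), len
 * is an exact power of two iff clearing bit i-1 leaves nothing:
 *
 *	(len & ~(1 << (i - 1))) == 0
 *
 * e.g. len = 8 gives i = 4 and 8 & ~8 == 0, while len = 12 gives
 * i = 4 and 12 & ~8 == 4 != 0.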
2664c9de560dSAlex Tomas */ 26654b68f6dfSHarshad Shirwadkar if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 2666c9de560dSAlex Tomas /* 2667c9de560dSAlex Tomas * This should tell if fe_len is exactly a power of 2 2668c9de560dSAlex Tomas */ 2669c9de560dSAlex Tomas if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 26701a5d5e5dSJeremy Cline ac->ac_2order = array_index_nospec(i - 1, 26714b68f6dfSHarshad Shirwadkar MB_NUM_ORDERS(sb)); 2672c9de560dSAlex Tomas } 2673c9de560dSAlex Tomas 26744ba74d00STheodore Ts'o /* if stream allocation is enabled, use global goal */ 26754ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2676c9de560dSAlex Tomas /* TBD: may be hot point */ 2677c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2678c9de560dSAlex Tomas ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2679c9de560dSAlex Tomas ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2680c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2681c9de560dSAlex Tomas } 26824ba74d00STheodore Ts'o 2683c9de560dSAlex Tomas /* Let's just scan groups to find more or less suitable blocks */ 2684c9de560dSAlex Tomas cr = ac->ac_2order ? 0 : 1; 2685c9de560dSAlex Tomas /* 2686c9de560dSAlex Tomas * cr == 0 try to get exact allocation, 2687c9de560dSAlex Tomas * cr == 3 try to get anything 2688c9de560dSAlex Tomas */ 2689c9de560dSAlex Tomas repeat: 2690c9de560dSAlex Tomas for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2691c9de560dSAlex Tomas ac->ac_criteria = cr; 2692ed8f9c75SAneesh Kumar K.V /* 2693ed8f9c75SAneesh Kumar K.V * searching for the right group start 2694ed8f9c75SAneesh Kumar K.V * from the goal value specified 2695ed8f9c75SAneesh Kumar K.V */ 2696ed8f9c75SAneesh Kumar K.V group = ac->ac_g_ex.fe_group; 2697196e402aSHarshad Shirwadkar ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2698cfd73237SAlex Zhuravlev prefetch_grp = group; 2699ed8f9c75SAneesh Kumar K.V 27004fca50d4SJan Kara for (i = 0, new_cr = cr; i < ngroups; i++, 27014fca50d4SJan Kara ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 27024fca50d4SJan Kara int ret = 0; 2703196e402aSHarshad Shirwadkar 27042ed5724dSTheodore Ts'o cond_resched(); 2705196e402aSHarshad Shirwadkar if (new_cr != cr) { 2706196e402aSHarshad Shirwadkar cr = new_cr; 2707196e402aSHarshad Shirwadkar goto repeat; 2708196e402aSHarshad Shirwadkar } 2709c9de560dSAlex Tomas 2710cfd73237SAlex Zhuravlev /* 2711cfd73237SAlex Zhuravlev * Batch reads of the block allocation bitmaps 2712cfd73237SAlex Zhuravlev * to get multiple READs in flight; limit 2713cfd73237SAlex Zhuravlev * prefetching at cr=0/1, otherwise mballoc can 2714cfd73237SAlex Zhuravlev * spend a lot of time loading imperfect groups 2715cfd73237SAlex Zhuravlev */ 2716cfd73237SAlex Zhuravlev if ((prefetch_grp == group) && 2717cfd73237SAlex Zhuravlev (cr > 1 || 2718cfd73237SAlex Zhuravlev prefetch_ios < sbi->s_mb_prefetch_limit)) { 2719cfd73237SAlex Zhuravlev unsigned int curr_ios = prefetch_ios; 2720cfd73237SAlex Zhuravlev 2721cfd73237SAlex Zhuravlev nr = sbi->s_mb_prefetch; 2722cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 272382ef1370SChunguang Xu nr = 1 << sbi->s_log_groups_per_flex; 272482ef1370SChunguang Xu nr -= group & (nr - 1); 272582ef1370SChunguang Xu nr = min(nr, sbi->s_mb_prefetch); 2726cfd73237SAlex Zhuravlev } 2727cfd73237SAlex Zhuravlev prefetch_grp = ext4_mb_prefetch(sb, group, 2728cfd73237SAlex Zhuravlev nr, &prefetch_ios); 2729cfd73237SAlex Zhuravlev if (prefetch_ios == curr_ios) 2730cfd73237SAlex Zhuravlev nr = 0; 2731cfd73237SAlex Zhuravlev } 2732cfd73237SAlex 
Zhuravlev 27338a57d9d6SCurt Wohlgemuth /* This now checks without needing the buddy page */ 27348ef123feSRitesh Harjani ret = ext4_mb_good_group_nolock(ac, group, cr); 273542ac1848SLukas Czerner if (ret <= 0) { 273642ac1848SLukas Czerner if (!first_err) 273742ac1848SLukas Czerner first_err = ret; 2738c9de560dSAlex Tomas continue; 273942ac1848SLukas Czerner } 2740c9de560dSAlex Tomas 2741c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2742c9de560dSAlex Tomas if (err) 2743c9de560dSAlex Tomas goto out; 2744c9de560dSAlex Tomas 2745c9de560dSAlex Tomas ext4_lock_group(sb, group); 27468a57d9d6SCurt Wohlgemuth 27478a57d9d6SCurt Wohlgemuth /* 27488a57d9d6SCurt Wohlgemuth * We need to check again after locking the 27498a57d9d6SCurt Wohlgemuth * block group 27508a57d9d6SCurt Wohlgemuth */ 275142ac1848SLukas Czerner ret = ext4_mb_good_group(ac, group, cr); 27528ef123feSRitesh Harjani if (ret == 0) { 2753c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2754e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2755c9de560dSAlex Tomas continue; 2756c9de560dSAlex Tomas } 2757c9de560dSAlex Tomas 2758c9de560dSAlex Tomas ac->ac_groups_scanned++; 2759d9b22cf9SJan Kara if (cr == 0) 2760c9de560dSAlex Tomas ext4_mb_simple_scan_group(ac, &e4b); 2761506bf2d8SEric Sandeen else if (cr == 1 && sbi->s_stripe && 2762506bf2d8SEric Sandeen !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2763c9de560dSAlex Tomas ext4_mb_scan_aligned(ac, &e4b); 2764c9de560dSAlex Tomas else 2765c9de560dSAlex Tomas ext4_mb_complex_scan_group(ac, &e4b); 2766c9de560dSAlex Tomas 2767c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2768e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2769c9de560dSAlex Tomas 2770c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_CONTINUE) 2771c9de560dSAlex Tomas break; 2772c9de560dSAlex Tomas } 2773a6c75eafSHarshad Shirwadkar /* Processed all groups and haven't found blocks */ 2774a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && i == ngroups) 2775a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2776c9de560dSAlex Tomas } 2777c9de560dSAlex Tomas 2778c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2779c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2780c9de560dSAlex Tomas /* 2781c9de560dSAlex Tomas * We've been searching too long. Let's try to allocate 2782c9de560dSAlex Tomas * the best chunk we've found so far 2783c9de560dSAlex Tomas */ 2784c9de560dSAlex Tomas ext4_mb_try_best_found(ac, &e4b); 2785c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_FOUND) { 2786c9de560dSAlex Tomas /* 2787c9de560dSAlex Tomas * Someone luckier has already allocated it. 
2788c9de560dSAlex Tomas * The only thing we can do is just take first 2789c9de560dSAlex Tomas * found block(s) 2790c9de560dSAlex Tomas */ 279166d5e027Sbrookxu lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 279266d5e027Sbrookxu mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2793c55ee7d2Sbrookxu ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2794c55ee7d2Sbrookxu ac->ac_b_ex.fe_len, lost); 2795c55ee7d2Sbrookxu 2796c9de560dSAlex Tomas ac->ac_b_ex.fe_group = 0; 2797c9de560dSAlex Tomas ac->ac_b_ex.fe_start = 0; 2798c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 0; 2799c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 2800c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_FIRST; 2801c9de560dSAlex Tomas cr = 3; 2802c9de560dSAlex Tomas goto repeat; 2803c9de560dSAlex Tomas } 2804c9de560dSAlex Tomas } 2805a6c75eafSHarshad Shirwadkar 2806a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2807a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2808c9de560dSAlex Tomas out: 280942ac1848SLukas Czerner if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 281042ac1848SLukas Czerner err = first_err; 2811bbc4ec77SRitesh Harjani 2812d3df1453SRitesh Harjani mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2813bbc4ec77SRitesh Harjani ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2814bbc4ec77SRitesh Harjani ac->ac_flags, cr, err); 2815cfd73237SAlex Zhuravlev 2816cfd73237SAlex Zhuravlev if (nr) 2817cfd73237SAlex Zhuravlev ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2818cfd73237SAlex Zhuravlev 2819c9de560dSAlex Tomas return err; 2820c9de560dSAlex Tomas } 2821c9de560dSAlex Tomas 2822c9de560dSAlex Tomas static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2823c9de560dSAlex Tomas { 2824359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2825c9de560dSAlex Tomas ext4_group_t group; 2826c9de560dSAlex Tomas 28278df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2828c9de560dSAlex Tomas return NULL; 2829c9de560dSAlex Tomas group = *pos + 1; 2830a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2831c9de560dSAlex Tomas } 2832c9de560dSAlex Tomas 2833c9de560dSAlex Tomas static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2834c9de560dSAlex Tomas { 2835359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2836c9de560dSAlex Tomas ext4_group_t group; 2837c9de560dSAlex Tomas 2838c9de560dSAlex Tomas ++*pos; 28398df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2840c9de560dSAlex Tomas return NULL; 2841c9de560dSAlex Tomas group = *pos + 1; 2842a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2843c9de560dSAlex Tomas } 2844c9de560dSAlex Tomas 2845c9de560dSAlex Tomas static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2846c9de560dSAlex Tomas { 2847359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2848a9df9a49STheodore Ts'o ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2849c9de560dSAlex Tomas int i; 28501c8457caSAditya Kali int err, buddy_loaded = 0; 2851c9de560dSAlex Tomas struct ext4_buddy e4b; 28521c8457caSAditya Kali struct ext4_group_info *grinfo; 28532df2c340SArnd Bergmann unsigned char blocksize_bits = min_t(unsigned char, 28542df2c340SArnd Bergmann sb->s_blocksize_bits, 28552df2c340SArnd Bergmann EXT4_MAX_BLOCK_LOG_SIZE); 2856c9de560dSAlex Tomas struct sg { 
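/*
 * Scratch copy of one group's info for the mb_groups seq file.
 * Editor's illustration of the output produced below (values
 * invented):
 *
 *	#group: free  frags first [ 2^0 2^1 ... 2^13 ]
 *	#0    : 24544 2     14    [ 0   1   ... 0    ]
 */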
2857c9de560dSAlex Tomas struct ext4_group_info info; 2858b80b32b6STheodore Ts'o ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2859c9de560dSAlex Tomas } sg; 2860c9de560dSAlex Tomas 2861c9de560dSAlex Tomas group--; 2862c9de560dSAlex Tomas if (group == 0) 286397b4af2fSRasmus Villemoes seq_puts(seq, "#group: free frags first [" 286497b4af2fSRasmus Villemoes " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2865802cf1f9SHuaitong Han " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2866c9de560dSAlex Tomas 2867b80b32b6STheodore Ts'o i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2868b80b32b6STheodore Ts'o sizeof(struct ext4_group_info); 2869b80b32b6STheodore Ts'o 28701c8457caSAditya Kali grinfo = ext4_get_group_info(sb, group); 28715354b2afSTheodore Ts'o if (!grinfo) 28725354b2afSTheodore Ts'o return 0; 28731c8457caSAditya Kali /* Load the group info in memory only if not already loaded. */ 28741c8457caSAditya Kali if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2875c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2876c9de560dSAlex Tomas if (err) { 2877a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: I/O error\n", group); 2878c9de560dSAlex Tomas return 0; 2879c9de560dSAlex Tomas } 28801c8457caSAditya Kali buddy_loaded = 1; 28811c8457caSAditya Kali } 28821c8457caSAditya Kali 28835354b2afSTheodore Ts'o memcpy(&sg, grinfo, i); 28841c8457caSAditya Kali 28851c8457caSAditya Kali if (buddy_loaded) 2886e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2887c9de560dSAlex Tomas 2888a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2889c9de560dSAlex Tomas sg.info.bb_fragments, sg.info.bb_first_free); 2890c9de560dSAlex Tomas for (i = 0; i <= 13; i++) 28912df2c340SArnd Bergmann seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
2892c9de560dSAlex Tomas sg.info.bb_counters[i] : 0); 2893e0d438c7SXu Wang seq_puts(seq, " ]\n"); 2894c9de560dSAlex Tomas 2895c9de560dSAlex Tomas return 0; 2896c9de560dSAlex Tomas } 2897c9de560dSAlex Tomas 2898c9de560dSAlex Tomas static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2899c9de560dSAlex Tomas { 2900c9de560dSAlex Tomas } 2901c9de560dSAlex Tomas 2902247dbed8SChristoph Hellwig const struct seq_operations ext4_mb_seq_groups_ops = { 2903c9de560dSAlex Tomas .start = ext4_mb_seq_groups_start, 2904c9de560dSAlex Tomas .next = ext4_mb_seq_groups_next, 2905c9de560dSAlex Tomas .stop = ext4_mb_seq_groups_stop, 2906c9de560dSAlex Tomas .show = ext4_mb_seq_groups_show, 2907c9de560dSAlex Tomas }; 2908c9de560dSAlex Tomas 2909a6c75eafSHarshad Shirwadkar int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2910a6c75eafSHarshad Shirwadkar { 2911c30365b9SYu Zhe struct super_block *sb = seq->private; 2912a6c75eafSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2913a6c75eafSHarshad Shirwadkar 2914a6c75eafSHarshad Shirwadkar seq_puts(seq, "mballoc:\n"); 2915a6c75eafSHarshad Shirwadkar if (!sbi->s_mb_stats) { 2916a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tmb stats collection turned off.\n"); 2917a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2918a6c75eafSHarshad Shirwadkar return 0; 2919a6c75eafSHarshad Shirwadkar } 2920a6c75eafSHarshad Shirwadkar seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2921a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2922a6c75eafSHarshad Shirwadkar 2923a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2924a6c75eafSHarshad Shirwadkar 2925a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr0_stats:\n"); 2926a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); 2927a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2928a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[0])); 2929a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2930a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[0])); 2931196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2932196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2933a6c75eafSHarshad Shirwadkar 2934a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr1_stats:\n"); 2935a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); 2936a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2937a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[1])); 2938a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2939a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[1])); 2940196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2941196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2942a6c75eafSHarshad Shirwadkar 2943a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr2_stats:\n"); 2944a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); 2945a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2946a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[2])); 2947a6c75eafSHarshad Shirwadkar seq_printf(seq, 
"\t\tuseless_loops: %llu\n", 2948a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[2])); 2949a6c75eafSHarshad Shirwadkar 2950a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr3_stats:\n"); 2951a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); 2952a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2953a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[3])); 2954a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2955a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[3])); 2956a6c75eafSHarshad Shirwadkar seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2957a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 2958a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2959a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2960a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2961a6c75eafSHarshad Shirwadkar 2962a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2963a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 2964a6c75eafSHarshad Shirwadkar ext4_get_groups_count(sb)); 2965a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_time_used: %llu\n", 2966a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 2967a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tpreallocated: %u\n", 2968a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_preallocated)); 2969a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tdiscarded: %u\n", 2970a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_discarded)); 2971a6c75eafSHarshad Shirwadkar return 0; 2972a6c75eafSHarshad Shirwadkar } 2973a6c75eafSHarshad Shirwadkar 2974f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 2975a5fda113STheodore Ts'o __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 2976f68f4063SHarshad Shirwadkar { 2977359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2978f68f4063SHarshad Shirwadkar unsigned long position; 2979f68f4063SHarshad Shirwadkar 298083e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2981f68f4063SHarshad Shirwadkar return NULL; 2982f68f4063SHarshad Shirwadkar position = *pos + 1; 2983f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2984f68f4063SHarshad Shirwadkar } 2985f68f4063SHarshad Shirwadkar 2986f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 2987f68f4063SHarshad Shirwadkar { 2988359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2989f68f4063SHarshad Shirwadkar unsigned long position; 2990f68f4063SHarshad Shirwadkar 2991f68f4063SHarshad Shirwadkar ++*pos; 299283e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2993f68f4063SHarshad Shirwadkar return NULL; 2994f68f4063SHarshad Shirwadkar position = *pos + 1; 2995f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2996f68f4063SHarshad Shirwadkar } 2997f68f4063SHarshad Shirwadkar 2998f68f4063SHarshad Shirwadkar static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 2999f68f4063SHarshad Shirwadkar { 3000359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3001f68f4063SHarshad 
Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 3002f68f4063SHarshad Shirwadkar unsigned long position = ((unsigned long) v); 3003f68f4063SHarshad Shirwadkar struct ext4_group_info *grp; 300483e80a6eSJan Kara unsigned int count; 3005f68f4063SHarshad Shirwadkar 3006f68f4063SHarshad Shirwadkar position--; 3007f68f4063SHarshad Shirwadkar if (position >= MB_NUM_ORDERS(sb)) { 300883e80a6eSJan Kara position -= MB_NUM_ORDERS(sb); 300983e80a6eSJan Kara if (position == 0) 301083e80a6eSJan Kara seq_puts(seq, "avg_fragment_size_lists:\n"); 3011f68f4063SHarshad Shirwadkar 301283e80a6eSJan Kara count = 0; 301383e80a6eSJan Kara read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 301483e80a6eSJan Kara list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 301583e80a6eSJan Kara bb_avg_fragment_size_node) 301683e80a6eSJan Kara count++; 301783e80a6eSJan Kara read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 301883e80a6eSJan Kara seq_printf(seq, "\tlist_order_%u_groups: %u\n", 301983e80a6eSJan Kara (unsigned int)position, count); 3020f68f4063SHarshad Shirwadkar return 0; 3021f68f4063SHarshad Shirwadkar } 3022f68f4063SHarshad Shirwadkar 3023f68f4063SHarshad Shirwadkar if (position == 0) { 3024f68f4063SHarshad Shirwadkar seq_printf(seq, "optimize_scan: %d\n", 3025f68f4063SHarshad Shirwadkar test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); 3026f68f4063SHarshad Shirwadkar seq_puts(seq, "max_free_order_lists:\n"); 3027f68f4063SHarshad Shirwadkar } 3028f68f4063SHarshad Shirwadkar count = 0; 302983e80a6eSJan Kara read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 3030f68f4063SHarshad Shirwadkar list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3031f68f4063SHarshad Shirwadkar bb_largest_free_order_node) 3032f68f4063SHarshad Shirwadkar count++; 303383e80a6eSJan Kara read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3034f68f4063SHarshad Shirwadkar seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3035f68f4063SHarshad Shirwadkar (unsigned int)position, count); 3036f68f4063SHarshad Shirwadkar 3037f68f4063SHarshad Shirwadkar return 0; 3038f68f4063SHarshad Shirwadkar } 3039f68f4063SHarshad Shirwadkar 3040f68f4063SHarshad Shirwadkar static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3041f68f4063SHarshad Shirwadkar { 3042f68f4063SHarshad Shirwadkar } 3043f68f4063SHarshad Shirwadkar 3044f68f4063SHarshad Shirwadkar const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3045f68f4063SHarshad Shirwadkar .start = ext4_mb_seq_structs_summary_start, 3046f68f4063SHarshad Shirwadkar .next = ext4_mb_seq_structs_summary_next, 3047f68f4063SHarshad Shirwadkar .stop = ext4_mb_seq_structs_summary_stop, 3048f68f4063SHarshad Shirwadkar .show = ext4_mb_seq_structs_summary_show, 3049f68f4063SHarshad Shirwadkar }; 3050f68f4063SHarshad Shirwadkar 3051fb1813f4SCurt Wohlgemuth static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3052fb1813f4SCurt Wohlgemuth { 3053fb1813f4SCurt Wohlgemuth int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3054fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3055fb1813f4SCurt Wohlgemuth 3056fb1813f4SCurt Wohlgemuth BUG_ON(!cachep); 3057fb1813f4SCurt Wohlgemuth return cachep; 3058fb1813f4SCurt Wohlgemuth } 30595f21b0e6SFrederic Bohe 306028623c2fSTheodore Ts'o /* 306128623c2fSTheodore Ts'o * Allocate the top-level s_group_info array for the specified number 306228623c2fSTheodore Ts'o * of groups 306328623c2fSTheodore Ts'o */ 306428623c2fSTheodore Ts'o int 
ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 306528623c2fSTheodore Ts'o { 306628623c2fSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 306728623c2fSTheodore Ts'o unsigned size; 3068df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 306928623c2fSTheodore Ts'o 307028623c2fSTheodore Ts'o size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 307128623c2fSTheodore Ts'o EXT4_DESC_PER_BLOCK_BITS(sb); 307228623c2fSTheodore Ts'o if (size <= sbi->s_group_info_size) 307328623c2fSTheodore Ts'o return 0; 307428623c2fSTheodore Ts'o 307528623c2fSTheodore Ts'o size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3076a7c3e901SMichal Hocko new_groupinfo = kvzalloc(size, GFP_KERNEL); 307728623c2fSTheodore Ts'o if (!new_groupinfo) { 307828623c2fSTheodore Ts'o ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 307928623c2fSTheodore Ts'o return -ENOMEM; 308028623c2fSTheodore Ts'o } 3081df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3082df3da4eaSSuraj Jitindar Singh old_groupinfo = rcu_dereference(sbi->s_group_info); 3083df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3084df3da4eaSSuraj Jitindar Singh memcpy(new_groupinfo, old_groupinfo, 308528623c2fSTheodore Ts'o sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3086df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3087df3da4eaSSuraj Jitindar Singh rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 308828623c2fSTheodore Ts'o sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3089df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3090df3da4eaSSuraj Jitindar Singh ext4_kvfree_array_rcu(old_groupinfo); 309128623c2fSTheodore Ts'o ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 309228623c2fSTheodore Ts'o sbi->s_group_info_size); 309328623c2fSTheodore Ts'o return 0; 309428623c2fSTheodore Ts'o } 309528623c2fSTheodore Ts'o 30965f21b0e6SFrederic Bohe /* Create and initialize ext4_group_info data for the given group. */ 3097920313a7SAneesh Kumar K.V int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 30985f21b0e6SFrederic Bohe struct ext4_group_desc *desc) 30995f21b0e6SFrederic Bohe { 3100fb1813f4SCurt Wohlgemuth int i; 31015f21b0e6SFrederic Bohe int metalen = 0; 3102df3da4eaSSuraj Jitindar Singh int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 31035f21b0e6SFrederic Bohe struct ext4_sb_info *sbi = EXT4_SB(sb); 31045f21b0e6SFrederic Bohe struct ext4_group_info **meta_group_info; 3105fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 31065f21b0e6SFrederic Bohe 31075f21b0e6SFrederic Bohe /* 31085f21b0e6SFrederic Bohe * First check if this group is the first of a reserved block. 
31095f21b0e6SFrederic Bohe * If it's true, we have to allocate a new table of pointers 31105f21b0e6SFrederic Bohe * to ext4_group_info structures 31115f21b0e6SFrederic Bohe */ 31125f21b0e6SFrederic Bohe if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 31135f21b0e6SFrederic Bohe metalen = sizeof(*meta_group_info) << 31145f21b0e6SFrederic Bohe EXT4_DESC_PER_BLOCK_BITS(sb); 31154fdb5543SDmitry Monakhov meta_group_info = kmalloc(metalen, GFP_NOFS); 31165f21b0e6SFrederic Bohe if (meta_group_info == NULL) { 31177f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate mem " 31189d8b9ec4STheodore Ts'o "for a buddy group"); 3119df119095SKemeng Shi return -ENOMEM; 31205f21b0e6SFrederic Bohe } 3121df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3122df3da4eaSSuraj Jitindar Singh rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3123df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 31245f21b0e6SFrederic Bohe } 31255f21b0e6SFrederic Bohe 3126df3da4eaSSuraj Jitindar Singh meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 31275f21b0e6SFrederic Bohe i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 31285f21b0e6SFrederic Bohe 31294fdb5543SDmitry Monakhov meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 31305f21b0e6SFrederic Bohe if (meta_group_info[i] == NULL) { 31317f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 31325f21b0e6SFrederic Bohe goto exit_group_info; 31335f21b0e6SFrederic Bohe } 31345f21b0e6SFrederic Bohe set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 31355f21b0e6SFrederic Bohe &(meta_group_info[i]->bb_state)); 31365f21b0e6SFrederic Bohe 31375f21b0e6SFrederic Bohe /* 31385f21b0e6SFrederic Bohe * initialize bb_free to be able to skip 31395f21b0e6SFrederic Bohe * empty groups without initialization 31405f21b0e6SFrederic Bohe */ 31418844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 31428844618dSTheodore Ts'o (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 31435f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3144cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, group, desc); 31455f21b0e6SFrederic Bohe } else { 31465f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3147021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, desc); 31485f21b0e6SFrederic Bohe } 31495f21b0e6SFrederic Bohe 31505f21b0e6SFrederic Bohe INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3151920313a7SAneesh Kumar K.V init_rwsem(&meta_group_info[i]->alloc_sem); 315264e290ecSVenkatesh Pallipadi meta_group_info[i]->bb_free_root = RB_ROOT; 3153196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 315483e80a6eSJan Kara INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 31558a57d9d6SCurt Wohlgemuth meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 315683e80a6eSJan Kara meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3157196e402aSHarshad Shirwadkar meta_group_info[i]->bb_group = group; 31585f21b0e6SFrederic Bohe 3159a3450215SRitesh Harjani mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 31605f21b0e6SFrederic Bohe return 0; 31615f21b0e6SFrederic Bohe 31625f21b0e6SFrederic Bohe exit_group_info: 31635f21b0e6SFrederic Bohe /* If a meta_group_info table has been allocated, release it now */ 3164caaf7a29STao Ma if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3165df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3166df3da4eaSSuraj Jitindar Singh 3167df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3168df3da4eaSSuraj Jitindar Singh group_info = 
rcu_dereference(sbi->s_group_info); 3169df3da4eaSSuraj Jitindar Singh kfree(group_info[idx]); 3170df3da4eaSSuraj Jitindar Singh group_info[idx] = NULL; 3171df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3172caaf7a29STao Ma } 31735f21b0e6SFrederic Bohe return -ENOMEM; 31745f21b0e6SFrederic Bohe } /* ext4_mb_add_groupinfo */ 31755f21b0e6SFrederic Bohe 3176c9de560dSAlex Tomas static int ext4_mb_init_backend(struct super_block *sb) 3177c9de560dSAlex Tomas { 31788df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb); 3179c9de560dSAlex Tomas ext4_group_t i; 3180c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 318128623c2fSTheodore Ts'o int err; 31825f21b0e6SFrederic Bohe struct ext4_group_desc *desc; 3183df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3184fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep; 3185c9de560dSAlex Tomas 318628623c2fSTheodore Ts'o err = ext4_mb_alloc_groupinfo(sb, ngroups); 318728623c2fSTheodore Ts'o if (err) 318828623c2fSTheodore Ts'o return err; 31895f21b0e6SFrederic Bohe 3190c9de560dSAlex Tomas sbi->s_buddy_cache = new_inode(sb); 3191c9de560dSAlex Tomas if (sbi->s_buddy_cache == NULL) { 31929d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't get new inode"); 3193c9de560dSAlex Tomas goto err_freesgi; 3194c9de560dSAlex Tomas } 319548e6061bSYu Jian /* To avoid potentially colliding with a valid on-disk inode number, 319648e6061bSYu Jian * use EXT4_BAD_INO for the buddy cache inode number. This inode is 319748e6061bSYu Jian * not in the inode hash, so it should never be found by iget(), but 319848e6061bSYu Jian * this will avoid confusion if it ever shows up during debugging. */ 319948e6061bSYu Jian sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3200c9de560dSAlex Tomas EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 32018df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 32024b99faa2SKhazhismel Kumykov cond_resched(); 3203c9de560dSAlex Tomas desc = ext4_get_group_desc(sb, i, NULL); 3204c9de560dSAlex Tomas if (desc == NULL) { 32059d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3206c9de560dSAlex Tomas goto err_freebuddy; 3207c9de560dSAlex Tomas } 32085f21b0e6SFrederic Bohe if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 32095f21b0e6SFrederic Bohe goto err_freebuddy; 3210c9de560dSAlex Tomas } 3211c9de560dSAlex Tomas 3212cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 3213f91436d5SSabyrzhan Tasbolatov /* a single flex group is supposed to be read by a single IO. 3214f91436d5SSabyrzhan Tasbolatov * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3215f91436d5SSabyrzhan Tasbolatov * an unsigned integer, so the maximum shift is 32. 
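 *
 * Worked example (editor's addition, assuming BLK_MAX_SEGMENT_SIZE
 * is 64 KiB): with 4k blocks and s_log_groups_per_flex = 4, the
 * min_t() below yields min(1 << 4, 65536 >> (12 - 9)) = 16 groups,
 * and the subsequent "*= 8" makes s_mb_prefetch = 128.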
3216f91436d5SSabyrzhan Tasbolatov */ 3217f91436d5SSabyrzhan Tasbolatov if (sbi->s_es->s_log_groups_per_flex >= 32) { 3218f91436d5SSabyrzhan Tasbolatov ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3219a8867f4eSPhillip Potter goto err_freebuddy; 3220f91436d5SSabyrzhan Tasbolatov } 3221f91436d5SSabyrzhan Tasbolatov sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 322282ef1370SChunguang Xu BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3223cfd73237SAlex Zhuravlev sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3224cfd73237SAlex Zhuravlev } else { 3225cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = 32; 3226cfd73237SAlex Zhuravlev } 3227cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3228cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3229cfd73237SAlex Zhuravlev /* how many real IOs to prefetch within a single allocation at cr=0; 3230cfd73237SAlex Zhuravlev * given cr=0 is a CPU-related optimization we shouldn't try to 3231cfd73237SAlex Zhuravlev * load too many groups; at some point we should start to use what 3232cfd73237SAlex Zhuravlev * we've got in memory. 3233cfd73237SAlex Zhuravlev * with an average random access time of 5ms, it'd take a second to get 3234cfd73237SAlex Zhuravlev * 200 groups (* N with flex_bg), so let's make this limit 4 3235cfd73237SAlex Zhuravlev */ 3236cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3237cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3238cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3239cfd73237SAlex Zhuravlev 3240c9de560dSAlex Tomas return 0; 3241c9de560dSAlex Tomas 3242c9de560dSAlex Tomas err_freebuddy: 3243fb1813f4SCurt Wohlgemuth cachep = get_groupinfo_cache(sb->s_blocksize_bits); 32445354b2afSTheodore Ts'o while (i-- > 0) { 32455354b2afSTheodore Ts'o struct ext4_group_info *grp = ext4_get_group_info(sb, i); 32465354b2afSTheodore Ts'o 32475354b2afSTheodore Ts'o if (grp) 32485354b2afSTheodore Ts'o kmem_cache_free(cachep, grp); 32495354b2afSTheodore Ts'o } 325028623c2fSTheodore Ts'o i = sbi->s_group_info_size; 3251df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3252df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3253f1fa3342SRoel Kluin while (i-- > 0) 3254df3da4eaSSuraj Jitindar Singh kfree(group_info[i]); 3255df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3256c9de560dSAlex Tomas iput(sbi->s_buddy_cache); 3257c9de560dSAlex Tomas err_freesgi: 3258df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3259df3da4eaSSuraj Jitindar Singh kvfree(rcu_dereference(sbi->s_group_info)); 3260df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3261c9de560dSAlex Tomas return -ENOMEM; 3262c9de560dSAlex Tomas } 3263c9de560dSAlex Tomas 32642892c15dSEric Sandeen static void ext4_groupinfo_destroy_slabs(void) 32652892c15dSEric Sandeen { 32662892c15dSEric Sandeen int i; 32672892c15dSEric Sandeen 32682892c15dSEric Sandeen for (i = 0; i < NR_GRPINFO_CACHES; i++) { 32692892c15dSEric Sandeen kmem_cache_destroy(ext4_groupinfo_caches[i]); 32702892c15dSEric Sandeen ext4_groupinfo_caches[i] = NULL; 32712892c15dSEric Sandeen } 32722892c15dSEric Sandeen } 32732892c15dSEric Sandeen 32742892c15dSEric Sandeen static int ext4_groupinfo_create_slab(size_t size) 32752892c15dSEric Sandeen { 32762892c15dSEric Sandeen static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 32772892c15dSEric Sandeen int slab_size; 32782892c15dSEric Sandeen int blocksize_bits = 
order_base_2(size);
32792892c15dSEric Sandeen int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
32802892c15dSEric Sandeen struct kmem_cache *cachep;
32812892c15dSEric Sandeen
32822892c15dSEric Sandeen if (cache_index >= NR_GRPINFO_CACHES)
32832892c15dSEric Sandeen return -EINVAL;
32842892c15dSEric Sandeen
32852892c15dSEric Sandeen if (unlikely(cache_index < 0))
32862892c15dSEric Sandeen cache_index = 0;
32872892c15dSEric Sandeen
32882892c15dSEric Sandeen mutex_lock(&ext4_grpinfo_slab_create_mutex);
32892892c15dSEric Sandeen if (ext4_groupinfo_caches[cache_index]) {
32902892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex);
32912892c15dSEric Sandeen return 0; /* Already created */
32922892c15dSEric Sandeen }
32932892c15dSEric Sandeen
32942892c15dSEric Sandeen slab_size = offsetof(struct ext4_group_info,
32952892c15dSEric Sandeen bb_counters[blocksize_bits + 2]);
32962892c15dSEric Sandeen
32972892c15dSEric Sandeen cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
32982892c15dSEric Sandeen slab_size, 0, SLAB_RECLAIM_ACCOUNT,
32992892c15dSEric Sandeen NULL);
33002892c15dSEric Sandeen
3301823ba01fSTao Ma ext4_groupinfo_caches[cache_index] = cachep;
3302823ba01fSTao Ma
33032892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex);
33042892c15dSEric Sandeen if (!cachep) {
33059d8b9ec4STheodore Ts'o printk(KERN_EMERG
33069d8b9ec4STheodore Ts'o "EXT4-fs: no memory for groupinfo slab cache\n");
33072892c15dSEric Sandeen return -ENOMEM;
33082892c15dSEric Sandeen }
33092892c15dSEric Sandeen
33102892c15dSEric Sandeen return 0;
33112892c15dSEric Sandeen }
33122892c15dSEric Sandeen
331355cdd0afSWang Jianchao static void ext4_discard_work(struct work_struct *work)
331455cdd0afSWang Jianchao {
331555cdd0afSWang Jianchao struct ext4_sb_info *sbi = container_of(work,
331655cdd0afSWang Jianchao struct ext4_sb_info, s_discard_work);
331755cdd0afSWang Jianchao struct super_block *sb = sbi->s_sb;
331855cdd0afSWang Jianchao struct ext4_free_data *fd, *nfd;
331955cdd0afSWang Jianchao struct ext4_buddy e4b;
332055cdd0afSWang Jianchao struct list_head discard_list;
332155cdd0afSWang Jianchao ext4_group_t grp, load_grp;
332255cdd0afSWang Jianchao int err = 0;
332355cdd0afSWang Jianchao
332455cdd0afSWang Jianchao INIT_LIST_HEAD(&discard_list);
332555cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock);
332655cdd0afSWang Jianchao list_splice_init(&sbi->s_discard_list, &discard_list);
332755cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock);
332855cdd0afSWang Jianchao
332955cdd0afSWang Jianchao load_grp = UINT_MAX;
333055cdd0afSWang Jianchao list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
333155cdd0afSWang Jianchao /*
33325036ab8dSWang Jianchao * If the filesystem is unmounting, we are out of memory, or we
33335036ab8dSWang Jianchao * are suffering from no space, give up the discard
333455cdd0afSWang Jianchao */
33355036ab8dSWang Jianchao if ((sb->s_flags & SB_ACTIVE) && !err &&
33365036ab8dSWang Jianchao !atomic_read(&sbi->s_retry_alloc_pending)) {
333755cdd0afSWang Jianchao grp = fd->efd_group;
333855cdd0afSWang Jianchao if (grp != load_grp) {
333955cdd0afSWang Jianchao if (load_grp != UINT_MAX)
334055cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b);
334155cdd0afSWang Jianchao
334255cdd0afSWang Jianchao err = ext4_mb_load_buddy(sb, grp, &e4b);
334355cdd0afSWang Jianchao if (err) {
334455cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd);
334555cdd0afSWang Jianchao load_grp = UINT_MAX;
334655cdd0afSWang Jianchao continue;
334755cdd0afSWang Jianchao } else {
334855cdd0afSWang Jianchao load_grp = grp;
334955cdd0afSWang Jianchao }
335055cdd0afSWang Jianchao }
335155cdd0afSWang Jianchao
335255cdd0afSWang Jianchao ext4_lock_group(sb, grp);
335355cdd0afSWang Jianchao ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
335455cdd0afSWang Jianchao fd->efd_start_cluster + fd->efd_count - 1, 1);
335555cdd0afSWang Jianchao ext4_unlock_group(sb, grp);
335655cdd0afSWang Jianchao }
335755cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd);
335855cdd0afSWang Jianchao }
335955cdd0afSWang Jianchao
336055cdd0afSWang Jianchao if (load_grp != UINT_MAX)
336155cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b);
336255cdd0afSWang Jianchao }
336355cdd0afSWang Jianchao
33649d99012fSAkira Fujita int ext4_mb_init(struct super_block *sb)
3365c9de560dSAlex Tomas {
3366c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
33676be2ded1SAneesh Kumar K.V unsigned i, j;
3368935244cdSNicolai Stange unsigned offset, offset_incr;
3369c9de560dSAlex Tomas unsigned max;
337074767c5aSShen Feng int ret;
3371c9de560dSAlex Tomas
33724b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3373c9de560dSAlex Tomas
3374c9de560dSAlex Tomas sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3375c9de560dSAlex Tomas if (sbi->s_mb_offsets == NULL) {
3376fb1813f4SCurt Wohlgemuth ret = -ENOMEM;
3377fb1813f4SCurt Wohlgemuth goto out;
3378c9de560dSAlex Tomas }
3379ff7ef329SYasunori Goto
33804b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3381c9de560dSAlex Tomas sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3382c9de560dSAlex Tomas if (sbi->s_mb_maxs == NULL) {
3383fb1813f4SCurt Wohlgemuth ret = -ENOMEM;
3384fb1813f4SCurt Wohlgemuth goto out;
3385fb1813f4SCurt Wohlgemuth }
3386fb1813f4SCurt Wohlgemuth
33872892c15dSEric Sandeen ret = ext4_groupinfo_create_slab(sb->s_blocksize);
33882892c15dSEric Sandeen if (ret < 0)
3389fb1813f4SCurt Wohlgemuth goto out;
3390c9de560dSAlex Tomas
3391c9de560dSAlex Tomas /* order 0 is regular bitmap */
3392c9de560dSAlex Tomas sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3393c9de560dSAlex Tomas sbi->s_mb_offsets[0] = 0;
3394c9de560dSAlex Tomas
3395c9de560dSAlex Tomas i = 1;
3396c9de560dSAlex Tomas offset = 0;
3397935244cdSNicolai Stange offset_incr = 1 << (sb->s_blocksize_bits - 1);
3398c9de560dSAlex Tomas max = sb->s_blocksize << 2;
3399c9de560dSAlex Tomas do {
3400c9de560dSAlex Tomas sbi->s_mb_offsets[i] = offset;
3401c9de560dSAlex Tomas sbi->s_mb_maxs[i] = max;
3402935244cdSNicolai Stange offset += offset_incr;
3403935244cdSNicolai Stange offset_incr = offset_incr >> 1;
3404c9de560dSAlex Tomas max = max >> 1;
3405c9de560dSAlex Tomas i++;
34064b68f6dfSHarshad Shirwadkar } while (i < MB_NUM_ORDERS(sb));
34074b68f6dfSHarshad Shirwadkar
340883e80a6eSJan Kara sbi->s_mb_avg_fragment_size =
340983e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
341083e80a6eSJan Kara GFP_KERNEL);
341183e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size) {
341283e80a6eSJan Kara ret = -ENOMEM;
341383e80a6eSJan Kara goto out;
341483e80a6eSJan Kara }
341583e80a6eSJan Kara sbi->s_mb_avg_fragment_size_locks =
341683e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
341783e80a6eSJan Kara GFP_KERNEL);
341883e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size_locks) {
341983e80a6eSJan Kara ret = -ENOMEM;
342083e80a6eSJan Kara goto out;
342183e80a6eSJan Kara }
342283e80a6eSJan Kara for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
342383e80a6eSJan Kara INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
342483e80a6eSJan
Kara rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
342583e80a6eSJan Kara }
3426196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders =
3427196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3428196e402aSHarshad Shirwadkar GFP_KERNEL);
3429196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders) {
3430196e402aSHarshad Shirwadkar ret = -ENOMEM;
3431196e402aSHarshad Shirwadkar goto out;
3432196e402aSHarshad Shirwadkar }
3433196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders_locks =
3434196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3435196e402aSHarshad Shirwadkar GFP_KERNEL);
3436196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders_locks) {
3437196e402aSHarshad Shirwadkar ret = -ENOMEM;
3438196e402aSHarshad Shirwadkar goto out;
3439196e402aSHarshad Shirwadkar }
3440196e402aSHarshad Shirwadkar for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3441196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3442196e402aSHarshad Shirwadkar rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3443196e402aSHarshad Shirwadkar }
3444c9de560dSAlex Tomas
3445c9de560dSAlex Tomas spin_lock_init(&sbi->s_md_lock);
3446d08854f5STheodore Ts'o sbi->s_mb_free_pending = 0;
3447a0154344SDaeho Jeong INIT_LIST_HEAD(&sbi->s_freed_data_list);
344855cdd0afSWang Jianchao INIT_LIST_HEAD(&sbi->s_discard_list);
344955cdd0afSWang Jianchao INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
34505036ab8dSWang Jianchao atomic_set(&sbi->s_retry_alloc_pending, 0);
3451c9de560dSAlex Tomas
3452c9de560dSAlex Tomas sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3453c9de560dSAlex Tomas sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3454c9de560dSAlex Tomas sbi->s_mb_stats = MB_DEFAULT_STATS;
3455c9de560dSAlex Tomas sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3456c9de560dSAlex Tomas sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
345727baebb8STheodore Ts'o /*
345827baebb8STheodore Ts'o * The default group preallocation is 512, which for 4k block
345927baebb8STheodore Ts'o * sizes translates to 2 megabytes. However for bigalloc file
346027baebb8STheodore Ts'o * systems, this is probably too big (i.e., if the cluster size
346127baebb8STheodore Ts'o * is 1 megabyte, then group preallocation size becomes half a
346227baebb8STheodore Ts'o * gigabyte!). As a default, we will keep a two megabyte
346327baebb8STheodore Ts'o * group prealloc size for cluster sizes up to 64k, and after
346427baebb8STheodore Ts'o * that, we will force a minimum group preallocation size of
346527baebb8STheodore Ts'o * 32 clusters. This translates to 8 megs when the cluster
346627baebb8STheodore Ts'o * size is 256k, and 32 megs when the cluster size is 1 meg,
346727baebb8STheodore Ts'o * which seems reasonable as a default.
346827baebb8STheodore Ts'o */
346927baebb8STheodore Ts'o sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
347027baebb8STheodore Ts'o sbi->s_cluster_bits, 32);
3471d7a1fee1SDan Ehrenberg /*
3472d7a1fee1SDan Ehrenberg * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3473d7a1fee1SDan Ehrenberg * to the lowest multiple of s_stripe which is bigger than
3474d7a1fee1SDan Ehrenberg * the s_mb_group_prealloc as determined above. We want
3475d7a1fee1SDan Ehrenberg * the preallocation size to be an exact multiple of the
3476d7a1fee1SDan Ehrenberg * RAID stripe size so that preallocations don't fragment
3477d7a1fee1SDan Ehrenberg * the stripes.
3478d7a1fee1SDan Ehrenberg */
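/*
 * Illustration: how the two comments above combine, as a stand-alone
 * sketch. MB_DEFAULT_GROUP_PREALLOC is 512 blocks; cluster_bits and
 * stripe are stand-ins for sbi->s_cluster_bits and sbi->s_stripe, and
 * the final step mirrors what roundup() does below.
 */
static unsigned int mb_group_prealloc_sketch(unsigned int cluster_bits,
			unsigned int stripe)
{
	unsigned int prealloc = 512 >> cluster_bits;	/* in clusters */

	if (prealloc < 32)
		prealloc = 32;	/* floor of 32 clusters for big clusters */
	if (stripe > 1)		/* align to the RAID stripe, if any */
		prealloc = ((prealloc + stripe - 1) / stripe) * stripe;
	return prealloc;
}
/* e.g. cluster_bits=8 (1MiB clusters on 4k blocks) gives 32 clusters */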
3479d7a1fee1SDan Ehrenberg if (sbi->s_stripe > 1) {
3480d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc = roundup(
3481d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc, sbi->s_stripe);
3482d7a1fee1SDan Ehrenberg }
3483c9de560dSAlex Tomas
3484730c213cSEric Sandeen sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3485c9de560dSAlex Tomas if (sbi->s_locality_groups == NULL) {
3486fb1813f4SCurt Wohlgemuth ret = -ENOMEM;
3487029b10c5SAndrey Tsyvarev goto out;
3488c9de560dSAlex Tomas }
3489730c213cSEric Sandeen for_each_possible_cpu(i) {
3490c9de560dSAlex Tomas struct ext4_locality_group *lg;
3491730c213cSEric Sandeen lg = per_cpu_ptr(sbi->s_locality_groups, i);
3492c9de560dSAlex Tomas mutex_init(&lg->lg_mutex);
34936be2ded1SAneesh Kumar K.V for (j = 0; j < PREALLOC_TB_SIZE; j++)
34946be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3495c9de560dSAlex Tomas spin_lock_init(&lg->lg_prealloc_lock);
3496c9de560dSAlex Tomas }
3497c9de560dSAlex Tomas
349810f0d2a5SChristoph Hellwig if (bdev_nonrot(sb->s_bdev))
3499196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = 0;
3500196e402aSHarshad Shirwadkar else
3501196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
350279a77c5aSYu Jian /* init file for buddy data */
350379a77c5aSYu Jian ret = ext4_mb_init_backend(sb);
35047aa0baeaSTao Ma if (ret != 0)
35057aa0baeaSTao Ma goto out_free_locality_groups;
350679a77c5aSYu Jian
35077aa0baeaSTao Ma return 0;
35087aa0baeaSTao Ma
35097aa0baeaSTao Ma out_free_locality_groups:
35107aa0baeaSTao Ma free_percpu(sbi->s_locality_groups);
35117aa0baeaSTao Ma sbi->s_locality_groups = NULL;
3512fb1813f4SCurt Wohlgemuth out:
351383e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size);
351483e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks);
3515196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders);
3516196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks);
3517fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_offsets);
35187aa0baeaSTao Ma sbi->s_mb_offsets = NULL;
3519fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_maxs);
35207aa0baeaSTao Ma sbi->s_mb_maxs = NULL;
3521fb1813f4SCurt Wohlgemuth return ret;
3522c9de560dSAlex Tomas }
3523c9de560dSAlex Tomas
3524955ce5f5SAneesh Kumar K.V /* needs to be called with the ext4 group lock held */
3525d3df1453SRitesh Harjani static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3526c9de560dSAlex Tomas {
3527c9de560dSAlex Tomas struct ext4_prealloc_space *pa;
3528c9de560dSAlex Tomas struct list_head *cur, *tmp;
3529c9de560dSAlex Tomas int count = 0;
3530c9de560dSAlex Tomas
3531c9de560dSAlex Tomas list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3532c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3533c9de560dSAlex Tomas list_del(&pa->pa_group_list);
3534c9de560dSAlex Tomas count++;
3535688f05a0SAneesh Kumar K.V kmem_cache_free(ext4_pspace_cachep, pa);
3536c9de560dSAlex Tomas }
3537d3df1453SRitesh Harjani return count;
3538c9de560dSAlex Tomas }
3539c9de560dSAlex Tomas
3540c9de560dSAlex Tomas int ext4_mb_release(struct super_block *sb)
3541c9de560dSAlex Tomas {
35428df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb);
3543c9de560dSAlex Tomas ext4_group_t i;
3544c9de560dSAlex Tomas int num_meta_group_infos;
3545df3da4eaSSuraj Jitindar Singh struct ext4_group_info *grinfo, ***group_info;
3546c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
3547fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep =
get_groupinfo_cache(sb->s_blocksize_bits);
3548d3df1453SRitesh Harjani int count;
3549c9de560dSAlex Tomas
355055cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) {
355155cdd0afSWang Jianchao /*
355255cdd0afSWang Jianchao * wait for the discard work to drain all of ext4_free_data
355355cdd0afSWang Jianchao */
355455cdd0afSWang Jianchao flush_work(&sbi->s_discard_work);
355555cdd0afSWang Jianchao WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
355655cdd0afSWang Jianchao }
355755cdd0afSWang Jianchao
3558c9de560dSAlex Tomas if (sbi->s_group_info) {
35598df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) {
35604b99faa2SKhazhismel Kumykov cond_resched();
3561c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, i);
35625354b2afSTheodore Ts'o if (!grinfo)
35635354b2afSTheodore Ts'o continue;
3564a3450215SRitesh Harjani mb_group_bb_bitmap_free(grinfo);
3565c9de560dSAlex Tomas ext4_lock_group(sb, i);
3566d3df1453SRitesh Harjani count = ext4_mb_cleanup_pa(grinfo);
3567d3df1453SRitesh Harjani if (count)
3568d3df1453SRitesh Harjani mb_debug(sb, "mballoc: %d PAs left\n",
3569d3df1453SRitesh Harjani count);
3570c9de560dSAlex Tomas ext4_unlock_group(sb, i);
3571fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, grinfo);
3572c9de560dSAlex Tomas }
35738df9675fSTheodore Ts'o num_meta_group_infos = (ngroups +
3574c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK(sb) - 1) >>
3575c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK_BITS(sb);
3576df3da4eaSSuraj Jitindar Singh rcu_read_lock();
3577df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info);
3578c9de560dSAlex Tomas for (i = 0; i < num_meta_group_infos; i++)
3579df3da4eaSSuraj Jitindar Singh kfree(group_info[i]);
3580df3da4eaSSuraj Jitindar Singh kvfree(group_info);
3581df3da4eaSSuraj Jitindar Singh rcu_read_unlock();
3582c9de560dSAlex Tomas }
358383e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size);
358483e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks);
3585196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders);
3586196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks);
3587c9de560dSAlex Tomas kfree(sbi->s_mb_offsets);
3588c9de560dSAlex Tomas kfree(sbi->s_mb_maxs);
3589c9de560dSAlex Tomas iput(sbi->s_buddy_cache);
3590c9de560dSAlex Tomas if (sbi->s_mb_stats) {
35919d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
35929d8b9ec4STheodore Ts'o "mballoc: %u blocks %u reqs (%u success)",
3593c9de560dSAlex Tomas atomic_read(&sbi->s_bal_allocated),
3594c9de560dSAlex Tomas atomic_read(&sbi->s_bal_reqs),
3595c9de560dSAlex Tomas atomic_read(&sbi->s_bal_success));
35969d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
3597a6c75eafSHarshad Shirwadkar "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
35989d8b9ec4STheodore Ts'o "%u 2^N hits, %u breaks, %u lost",
3599c9de560dSAlex Tomas atomic_read(&sbi->s_bal_ex_scanned),
3600a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_bal_groups_scanned),
3601c9de560dSAlex Tomas atomic_read(&sbi->s_bal_goals),
3602c9de560dSAlex Tomas atomic_read(&sbi->s_bal_2orders),
3603c9de560dSAlex Tomas atomic_read(&sbi->s_bal_breaks),
3604c9de560dSAlex Tomas atomic_read(&sbi->s_mb_lost_chunks));
36059d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
360667d25186SHarshad Shirwadkar "mballoc: %u generated and it took %llu",
360767d25186SHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated),
360867d25186SHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time));
36099d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
36109d8b9ec4STheodore Ts'o "mballoc: %u preallocated, %u discarded",
3611c9de560dSAlex
Tomas atomic_read(&sbi->s_mb_preallocated),
3612c9de560dSAlex Tomas atomic_read(&sbi->s_mb_discarded));
3613c9de560dSAlex Tomas }
3614c9de560dSAlex Tomas
3615730c213cSEric Sandeen free_percpu(sbi->s_locality_groups);
3616c9de560dSAlex Tomas
3617c9de560dSAlex Tomas return 0;
3618c9de560dSAlex Tomas }
3619c9de560dSAlex Tomas
362077ca6cdfSLukas Czerner static inline int ext4_issue_discard(struct super_block *sb,
3621a0154344SDaeho Jeong ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3622a0154344SDaeho Jeong struct bio **biop)
36235c521830SJiaying Zhang {
36245c521830SJiaying Zhang ext4_fsblk_t discard_block;
36255c521830SJiaying Zhang
362684130193STheodore Ts'o discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
362784130193STheodore Ts'o ext4_group_first_block_no(sb, block_group));
362884130193STheodore Ts'o count = EXT4_C2B(EXT4_SB(sb), count);
36295c521830SJiaying Zhang trace_ext4_discard_blocks(sb,
36305c521830SJiaying Zhang (unsigned long long) discard_block, count);
3631a0154344SDaeho Jeong if (biop) {
3632a0154344SDaeho Jeong return __blkdev_issue_discard(sb->s_bdev,
3633a0154344SDaeho Jeong (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3634a0154344SDaeho Jeong (sector_t)count << (sb->s_blocksize_bits - 9),
363544abff2cSChristoph Hellwig GFP_NOFS, biop);
3636a0154344SDaeho Jeong } else
363793259636SLukas Czerner return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
36385c521830SJiaying Zhang }
36395c521830SJiaying Zhang
3640a0154344SDaeho Jeong static void ext4_free_data_in_buddy(struct super_block *sb,
3641a0154344SDaeho Jeong struct ext4_free_data *entry)
3642c9de560dSAlex Tomas {
3643c9de560dSAlex Tomas struct ext4_buddy e4b;
3644c894058dSAneesh Kumar K.V struct ext4_group_info *db;
3645c7f2bafaSKemeng Shi int err, count = 0;
3646c9de560dSAlex Tomas
3647d3df1453SRitesh Harjani mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
364818aadd47SBobi Jam entry->efd_count, entry->efd_group, entry);
3649c9de560dSAlex Tomas
365018aadd47SBobi Jam err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3651c9de560dSAlex Tomas /* we expect to find existing buddy because it's pinned */
3652c9de560dSAlex Tomas BUG_ON(err != 0);
3653c9de560dSAlex Tomas
3654d08854f5STheodore Ts'o spin_lock(&EXT4_SB(sb)->s_md_lock);
3655d08854f5STheodore Ts'o EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3656d08854f5STheodore Ts'o spin_unlock(&EXT4_SB(sb)->s_md_lock);
365718aadd47SBobi Jam
3658c894058dSAneesh Kumar K.V db = e4b.bd_info;
3659c9de560dSAlex Tomas /* there are blocks to put in buddy to make them really free */
366018aadd47SBobi Jam count += entry->efd_count;
366118aadd47SBobi Jam ext4_lock_group(sb, entry->efd_group);
3662c894058dSAneesh Kumar K.V /* Take it out of per group rb tree */
366318aadd47SBobi Jam rb_erase(&entry->efd_node, &(db->bb_free_root));
366418aadd47SBobi Jam mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3665c9de560dSAlex Tomas
36663d56b8d2STao Ma /*
36673d56b8d2STao Ma * Clear the trimmed flag for the group so that the next
36683d56b8d2STao Ma * ext4_trim_fs can trim it.
36693d56b8d2STao Ma * If the volume is mounted with -o discard, online discard
36703d56b8d2STao Ma * is supported and the free blocks will be trimmed online.
36713d56b8d2STao Ma */
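/*
 * Illustration: the unit conversions ext4_issue_discard() performs
 * above, as plain arithmetic. A cluster is 2^cluster_bits filesystem
 * blocks, a block is 2^blocksize_bits bytes, and the block layer
 * addresses 512-byte sectors; first_block is a hypothetical stand-in
 * for ext4_group_first_block_no().
 */
static unsigned long long discard_sector_sketch(unsigned long long first_block,
			unsigned int cluster, unsigned int cluster_bits,
			unsigned int blocksize_bits)
{
	unsigned long long block = first_block +
			((unsigned long long)cluster << cluster_bits);

	return block << (blocksize_bits - 9);	/* fs block -> 512B sector */
}
/* e.g. 4k blocks (blocksize_bits=12): each block spans 8 sectors */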
36723d56b8d2STao Ma if (!test_opt(sb, DISCARD))
36733d56b8d2STao Ma EXT4_MB_GRP_CLEAR_TRIMMED(db);
36743d56b8d2STao Ma
3675c894058dSAneesh Kumar K.V if (!db->bb_free_root.rb_node) {
3676c894058dSAneesh Kumar K.V /* No more items in the per group rb tree
3677c894058dSAneesh Kumar K.V * balance refcounts from ext4_mb_free_metadata()
3678c894058dSAneesh Kumar K.V */
367909cbfeafSKirill A. Shutemov put_page(e4b.bd_buddy_page);
368009cbfeafSKirill A. Shutemov put_page(e4b.bd_bitmap_page);
3681c894058dSAneesh Kumar K.V }
368218aadd47SBobi Jam ext4_unlock_group(sb, entry->efd_group);
3683e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b);
3684c9de560dSAlex Tomas
3685c7f2bafaSKemeng Shi mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3686c9de560dSAlex Tomas }
3687c9de560dSAlex Tomas
3688a0154344SDaeho Jeong /*
3689a0154344SDaeho Jeong * This function is called by the jbd2 layer once the commit has finished,
3690a0154344SDaeho Jeong * so we know we can free the blocks that were released with that commit.
3691a0154344SDaeho Jeong */
3692a0154344SDaeho Jeong void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3693a0154344SDaeho Jeong {
3694a0154344SDaeho Jeong struct ext4_sb_info *sbi = EXT4_SB(sb);
3695a0154344SDaeho Jeong struct ext4_free_data *entry, *tmp;
3696a0154344SDaeho Jeong struct list_head freed_data_list;
3697a0154344SDaeho Jeong struct list_head *cut_pos = NULL;
369855cdd0afSWang Jianchao bool wake;
3699a0154344SDaeho Jeong
3700a0154344SDaeho Jeong INIT_LIST_HEAD(&freed_data_list);
3701a0154344SDaeho Jeong
3702a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock);
3703a0154344SDaeho Jeong list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3704a0154344SDaeho Jeong if (entry->efd_tid != commit_tid)
3705a0154344SDaeho Jeong break;
3706a0154344SDaeho Jeong cut_pos = &entry->efd_list;
3707a0154344SDaeho Jeong }
3708a0154344SDaeho Jeong if (cut_pos)
3709a0154344SDaeho Jeong list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3710a0154344SDaeho Jeong cut_pos);
3711a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock);
3712a0154344SDaeho Jeong
371355cdd0afSWang Jianchao list_for_each_entry(entry, &freed_data_list, efd_list)
3714a0154344SDaeho Jeong ext4_free_data_in_buddy(sb, entry);
371555cdd0afSWang Jianchao
371655cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) {
371755cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock);
371855cdd0afSWang Jianchao wake = list_empty(&sbi->s_discard_list);
371955cdd0afSWang Jianchao list_splice_tail(&freed_data_list, &sbi->s_discard_list);
372055cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock);
372155cdd0afSWang Jianchao if (wake)
372255cdd0afSWang Jianchao queue_work(system_unbound_wq, &sbi->s_discard_work);
372355cdd0afSWang Jianchao } else {
372455cdd0afSWang Jianchao list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
372555cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, entry);
372655cdd0afSWang Jianchao }
3727a0154344SDaeho Jeong }
3728a0154344SDaeho Jeong
37295dabfc78STheodore Ts'o int __init ext4_init_mballoc(void)
3730c9de560dSAlex Tomas {
373116828088STheodore Ts'o ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
373216828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT);
3733c9de560dSAlex Tomas if (ext4_pspace_cachep == NULL)
3734f283529aSRitesh Harjani goto out;
3735c9de560dSAlex Tomas
373616828088STheodore Ts'o ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
373716828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT);
3738f283529aSRitesh Harjani if (ext4_ac_cachep == NULL)
3739f283529aSRitesh Harjani goto out_pa_free;
3740c894058dSAneesh Kumar K.V
374118aadd47SBobi Jam ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
374216828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT);
3743f283529aSRitesh Harjani if (ext4_free_data_cachep == NULL)
3744f283529aSRitesh Harjani goto out_ac_free;
3745f283529aSRitesh Harjani
3746c9de560dSAlex Tomas return 0;
3747f283529aSRitesh Harjani
3748f283529aSRitesh Harjani out_ac_free:
3749f283529aSRitesh Harjani kmem_cache_destroy(ext4_ac_cachep);
3750f283529aSRitesh Harjani out_pa_free:
3751f283529aSRitesh Harjani kmem_cache_destroy(ext4_pspace_cachep);
3752f283529aSRitesh Harjani out:
3753f283529aSRitesh Harjani return -ENOMEM;
3754c9de560dSAlex Tomas }
3755c9de560dSAlex Tomas
37565dabfc78STheodore Ts'o void ext4_exit_mballoc(void)
3757c9de560dSAlex Tomas {
37583e03f9caSJesper Dangaard Brouer /*
37593e03f9caSJesper Dangaard Brouer * Wait for completion of call_rcu()'s on ext4_pspace_cachep
37603e03f9caSJesper Dangaard Brouer * before destroying the slab cache.
37613e03f9caSJesper Dangaard Brouer */
37623e03f9caSJesper Dangaard Brouer rcu_barrier();
3763c9de560dSAlex Tomas kmem_cache_destroy(ext4_pspace_cachep);
3764256bdb49SEric Sandeen kmem_cache_destroy(ext4_ac_cachep);
376518aadd47SBobi Jam kmem_cache_destroy(ext4_free_data_cachep);
37662892c15dSEric Sandeen ext4_groupinfo_destroy_slabs();
3767c9de560dSAlex Tomas }
3768c9de560dSAlex Tomas
3769c9de560dSAlex Tomas
3770c9de560dSAlex Tomas /*
377173b2c716SUwe Kleine-König * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3772c9de560dSAlex Tomas * Returns 0 on success or an error code
3773c9de560dSAlex Tomas */
37744ddfef7bSEric Sandeen static noinline_for_stack int
37754ddfef7bSEric Sandeen ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
377653accfa9STheodore Ts'o handle_t *handle, unsigned int reserv_clstrs)
3777c9de560dSAlex Tomas {
3778c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL;
3779c9de560dSAlex Tomas struct ext4_group_desc *gdp;
3780c9de560dSAlex Tomas struct buffer_head *gdp_bh;
3781c9de560dSAlex Tomas struct ext4_sb_info *sbi;
3782c9de560dSAlex Tomas struct super_block *sb;
3783c9de560dSAlex Tomas ext4_fsblk_t block;
3784519deca0SAneesh Kumar K.V int err, len;
3785c9de560dSAlex Tomas
3786c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3787c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_len <= 0);
3788c9de560dSAlex Tomas
3789c9de560dSAlex Tomas sb = ac->ac_sb;
3790c9de560dSAlex Tomas sbi = EXT4_SB(sb);
3791c9de560dSAlex Tomas
3792574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
37939008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) {
3794fb28f9ceSKemeng Shi return PTR_ERR(bitmap_bh);
37959008a58eSDarrick J.
Wong }
3796c9de560dSAlex Tomas
37975d601255Sliang xie BUFFER_TRACE(bitmap_bh, "getting write access");
3798188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3799188c299eSJan Kara EXT4_JTR_NONE);
3800c9de560dSAlex Tomas if (err)
3801c9de560dSAlex Tomas goto out_err;
3802c9de560dSAlex Tomas
3803c9de560dSAlex Tomas err = -EIO;
3804c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3805c9de560dSAlex Tomas if (!gdp)
3806c9de560dSAlex Tomas goto out_err;
3807c9de560dSAlex Tomas
3808a9df9a49STheodore Ts'o ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3809021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, gdp));
381003cddb80SAneesh Kumar K.V
38115d601255Sliang xie BUFFER_TRACE(gdp_bh, "get_write_access");
3812188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3813c9de560dSAlex Tomas if (err)
3814c9de560dSAlex Tomas goto out_err;
3815c9de560dSAlex Tomas
3816bda00de7SAkinobu Mita block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3817c9de560dSAlex Tomas
381853accfa9STheodore Ts'o len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3819ce9f24ccSJan Kara if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
382012062dddSEric Sandeen ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
38211084f252STheodore Ts'o "fs metadata", block, block+len);
3822519deca0SAneesh Kumar K.V /* The file system is mounted not to panic on error, so
3823554a5cccSVegard Nossum * fix the bitmap and return EFSCORRUPTED.
3824519deca0SAneesh Kumar K.V * We leak some of the blocks here.
3825519deca0SAneesh Kumar K.V */
3826955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3827123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3828519deca0SAneesh Kumar K.V ac->ac_b_ex.fe_len);
3829955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
38300390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3831519deca0SAneesh Kumar K.V if (!err)
3832554a5cccSVegard Nossum err = -EFSCORRUPTED;
3833519deca0SAneesh Kumar K.V goto out_err;
3834c9de560dSAlex Tomas }
3835955ce5f5SAneesh Kumar K.V
3836955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3837c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK
3838c9de560dSAlex Tomas {
3839c9de560dSAlex Tomas int i;
3840c9de560dSAlex Tomas for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3841c9de560dSAlex Tomas BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3842c9de560dSAlex Tomas bitmap_bh->b_data));
3843c9de560dSAlex Tomas }
3844c9de560dSAlex Tomas }
3845c9de560dSAlex Tomas #endif
3846123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3847c3e94d1dSYongqiang Yang ac->ac_b_ex.fe_len);
38488844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) &&
38498844618dSTheodore Ts'o (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3850c9de560dSAlex Tomas gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3851021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp,
3852cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb,
3853560671a0SAneesh Kumar K.V ac->ac_b_ex.fe_group, gdp));
3854c9de560dSAlex Tomas }
3855021b65bbSTheodore Ts'o len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3856021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, len);
38571df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
3858feb0ab32SDarrick J.
Wong ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3859955ce5f5SAneesh Kumar K.V
3860955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
386157042651STheodore Ts'o percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3862d2a17637SMingming Cao /*
38636bc6e63fSAneesh Kumar K.V * Now reduce the dirty block count also. Should not go negative
3864d2a17637SMingming Cao */
38656bc6e63fSAneesh Kumar K.V if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
38666bc6e63fSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */
386757042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter,
386857042651STheodore Ts'o reserv_clstrs);
3869c9de560dSAlex Tomas
3870772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) {
3871772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi,
3872772cb7c8SJose R. Santos ac->ac_b_ex.fe_group);
387390ba983fSTheodore Ts'o atomic64_sub(ac->ac_b_ex.fe_len,
38747c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups,
38757c990728SSuraj Jitindar Singh flex_group)->free_clusters);
3876772cb7c8SJose R. Santos }
3877772cb7c8SJose R. Santos
38780390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3879c9de560dSAlex Tomas if (err)
3880c9de560dSAlex Tomas goto out_err;
38810390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3882c9de560dSAlex Tomas
3883c9de560dSAlex Tomas out_err:
388442a10addSAneesh Kumar K.V brelse(bitmap_bh);
3885c9de560dSAlex Tomas return err;
3886c9de560dSAlex Tomas }
3887c9de560dSAlex Tomas
3888c9de560dSAlex Tomas /*
38898016e29fSHarshad Shirwadkar * Idempotent helper for Ext4 fast commit replay path to set the state of
38908016e29fSHarshad Shirwadkar * blocks in bitmaps and update counters.
38918016e29fSHarshad Shirwadkar */
38928016e29fSHarshad Shirwadkar void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
38938016e29fSHarshad Shirwadkar int len, int state)
38948016e29fSHarshad Shirwadkar {
38958016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh = NULL;
38968016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp;
38978016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh;
38988016e29fSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb);
38998016e29fSHarshad Shirwadkar ext4_group_t group;
39008016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff;
3901a5c0e2fdSRitesh Harjani int i, err;
39028016e29fSHarshad Shirwadkar int already;
3903bfdc502aSRitesh Harjani unsigned int clen, clen_changed, thisgrp_len;
39048016e29fSHarshad Shirwadkar
3905bfdc502aSRitesh Harjani while (len > 0) {
39068016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3907bfdc502aSRitesh Harjani
3908bfdc502aSRitesh Harjani /*
3909bfdc502aSRitesh Harjani * Check to see if we are freeing blocks across a group
3910bfdc502aSRitesh Harjani * boundary.
3911bfdc502aSRitesh Harjani * In case of flex_bg, it can happen that (block, len) spans
3912bfdc502aSRitesh Harjani * across more than one group. In that case we need to
3913bfdc502aSRitesh Harjani * get the corresponding group metadata to work with.
3914bfdc502aSRitesh Harjani * For this we process the range one group per loop iteration.
3915bfdc502aSRitesh Harjani */
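/*
 * Illustration: the per-group chunking performed by the loop here, as
 * a self-contained sketch. blocks_per_group is a stand-in for
 * EXT4_BLOCKS_PER_GROUP(); the actual bitmap marking is elided.
 */
static void mark_range_sketch(unsigned long long block, unsigned int len,
			unsigned int blocks_per_group)
{
	while (len > 0) {
		unsigned int offset = block % blocks_per_group;
		unsigned int thisgrp_len = blocks_per_group - offset;

		if (thisgrp_len > len)
			thisgrp_len = len;	/* last (or only) group */
		/* ... mark thisgrp_len blocks starting at block ... */
		block += thisgrp_len;
		len -= thisgrp_len;
	}
}
/* e.g. block=100, len=50, 64 blocks/group: chunks of 28 and 22 blocks */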
3916bfdc502aSRitesh Harjani thisgrp_len = min_t(unsigned int, (unsigned int)len,
3917bfdc502aSRitesh Harjani EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3918bfdc502aSRitesh Harjani clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3919bfdc502aSRitesh Harjani
39208c91c579SRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
39218c91c579SRitesh Harjani ext4_error(sb, "Marking blocks in system zone - "
39228c91c579SRitesh Harjani "Block = %llu, len = %u",
39238c91c579SRitesh Harjani block, thisgrp_len);
39248c91c579SRitesh Harjani bitmap_bh = NULL;
39258c91c579SRitesh Harjani break;
39268c91c579SRitesh Harjani }
39278c91c579SRitesh Harjani
39288016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group);
39298016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) {
39308016e29fSHarshad Shirwadkar err = PTR_ERR(bitmap_bh);
39318016e29fSHarshad Shirwadkar bitmap_bh = NULL;
3932bfdc502aSRitesh Harjani break;
39338016e29fSHarshad Shirwadkar }
39348016e29fSHarshad Shirwadkar
39358016e29fSHarshad Shirwadkar err = -EIO;
39368016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh);
39378016e29fSHarshad Shirwadkar if (!gdp)
3938bfdc502aSRitesh Harjani break;
39398016e29fSHarshad Shirwadkar
39408016e29fSHarshad Shirwadkar ext4_lock_group(sb, group);
39418016e29fSHarshad Shirwadkar already = 0;
39428016e29fSHarshad Shirwadkar for (i = 0; i < clen; i++)
3943bfdc502aSRitesh Harjani if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3944bfdc502aSRitesh Harjani !state)
39458016e29fSHarshad Shirwadkar already++;
39468016e29fSHarshad Shirwadkar
3947a5c0e2fdSRitesh Harjani clen_changed = clen - already;
39488016e29fSHarshad Shirwadkar if (state)
3949123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, blkoff, clen);
39508016e29fSHarshad Shirwadkar else
3951bd8247eeSRitesh Harjani mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
39528016e29fSHarshad Shirwadkar if (ext4_has_group_desc_csum(sb) &&
39538016e29fSHarshad Shirwadkar (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
39548016e29fSHarshad Shirwadkar gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
39558016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp,
3956bfdc502aSRitesh Harjani ext4_free_clusters_after_init(sb, group, gdp));
39578016e29fSHarshad Shirwadkar }
39588016e29fSHarshad Shirwadkar if (state)
3959a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
39608016e29fSHarshad Shirwadkar else
3961a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
39628016e29fSHarshad Shirwadkar
39638016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, clen);
39641df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
39658016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp);
39668016e29fSHarshad Shirwadkar
39678016e29fSHarshad Shirwadkar ext4_unlock_group(sb, group);
39688016e29fSHarshad Shirwadkar
39698016e29fSHarshad Shirwadkar if (sbi->s_log_groups_per_flex) {
39708016e29fSHarshad Shirwadkar ext4_group_t flex_group = ext4_flex_group(sbi, group);
3971a5c0e2fdSRitesh Harjani struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3972a5c0e2fdSRitesh Harjani s_flex_groups, flex_group);
39738016e29fSHarshad Shirwadkar
3974a5c0e2fdSRitesh Harjani if (state)
3975a5c0e2fdSRitesh Harjani atomic64_sub(clen_changed, &fg->free_clusters);
3976a5c0e2fdSRitesh Harjani else
3977a5c0e2fdSRitesh Harjani atomic64_add(clen_changed, &fg->free_clusters);
3978bfdc502aSRitesh Harjani
39798016e29fSHarshad Shirwadkar }
39808016e29fSHarshad Shirwadkar
39818016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
39828016e29fSHarshad Shirwadkar if (err)
3983bfdc502aSRitesh Harjani break;
39848016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh);
39858016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
39868016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh);
3987bfdc502aSRitesh Harjani if (err)
3988bfdc502aSRitesh Harjani break;
39898016e29fSHarshad Shirwadkar
3990bfdc502aSRitesh Harjani block += thisgrp_len;
3991bfdc502aSRitesh Harjani len -= thisgrp_len;
3992bfdc502aSRitesh Harjani brelse(bitmap_bh);
3993bfdc502aSRitesh Harjani BUG_ON(len < 0);
3994bfdc502aSRitesh Harjani }
3995bfdc502aSRitesh Harjani
3996bfdc502aSRitesh Harjani if (err)
39978016e29fSHarshad Shirwadkar brelse(bitmap_bh);
39988016e29fSHarshad Shirwadkar }
39998016e29fSHarshad Shirwadkar
40008016e29fSHarshad Shirwadkar /*
4001c9de560dSAlex Tomas * here we normalize request for locality group
4002d7a1fee1SDan Ehrenberg * Group requests are normalized to s_mb_group_prealloc, which goes to
4003d7a1fee1SDan Ehrenberg * s_stripe if we set the same via mount option.
4004d7a1fee1SDan Ehrenberg * s_mb_group_prealloc can be configured via
4005b713a5ecSTheodore Ts'o * /sys/fs/ext4/<partition>/mb_group_prealloc
4006c9de560dSAlex Tomas *
4007c9de560dSAlex Tomas * XXX: should we try to preallocate more than the group has now?
4008c9de560dSAlex Tomas */
4009c9de560dSAlex Tomas static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4010c9de560dSAlex Tomas {
4011c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
4012c9de560dSAlex Tomas struct ext4_locality_group *lg = ac->ac_lg;
4013c9de560dSAlex Tomas
4014c9de560dSAlex Tomas BUG_ON(lg == NULL);
4015c9de560dSAlex Tomas ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4016d3df1453SRitesh Harjani mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4017c9de560dSAlex Tomas }
4018c9de560dSAlex Tomas
401938727786SOjaswin Mujoo /*
402038727786SOjaswin Mujoo * This function returns the next element to look at during inode
402138727786SOjaswin Mujoo * PA rbtree walk.
We assume that we hold the inode PA rbtree lock
402238727786SOjaswin Mujoo * (ei->i_prealloc_lock)
402338727786SOjaswin Mujoo *
402438727786SOjaswin Mujoo * new_start The start of the range we want to compare
402538727786SOjaswin Mujoo * cur_start The existing start that we are comparing against
402638727786SOjaswin Mujoo * node The node of the rb_tree
402738727786SOjaswin Mujoo */
402838727786SOjaswin Mujoo static inline struct rb_node*
402938727786SOjaswin Mujoo ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
403038727786SOjaswin Mujoo {
403138727786SOjaswin Mujoo if (new_start < cur_start)
403238727786SOjaswin Mujoo return node->rb_left;
403338727786SOjaswin Mujoo else
403438727786SOjaswin Mujoo return node->rb_right;
403538727786SOjaswin Mujoo }
403638727786SOjaswin Mujoo
40377692094aSOjaswin Mujoo static inline void
40387692094aSOjaswin Mujoo ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
40397692094aSOjaswin Mujoo ext4_lblk_t start, ext4_lblk_t end)
40407692094aSOjaswin Mujoo {
40417692094aSOjaswin Mujoo struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
40427692094aSOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
40437692094aSOjaswin Mujoo struct ext4_prealloc_space *tmp_pa;
40447692094aSOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end;
404538727786SOjaswin Mujoo struct rb_node *iter;
40467692094aSOjaswin Mujoo
404738727786SOjaswin Mujoo read_lock(&ei->i_prealloc_lock);
404838727786SOjaswin Mujoo for (iter = ei->i_prealloc_node.rb_node; iter;
404938727786SOjaswin Mujoo iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
405038727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
405138727786SOjaswin Mujoo pa_node.inode_node);
40527692094aSOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart;
40537692094aSOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
40547692094aSOjaswin Mujoo
405538727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock);
405638727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0)
40577692094aSOjaswin Mujoo BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
40587692094aSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
40597692094aSOjaswin Mujoo }
406038727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock);
40617692094aSOjaswin Mujoo }
40627692094aSOjaswin Mujoo
4063c9de560dSAlex Tomas /*
40640830344cSOjaswin Mujoo * Given an allocation context "ac" and a range "start", "end", check
40650830344cSOjaswin Mujoo * and adjust boundaries if the range overlaps with any of the existing
40660830344cSOjaswin Mujoo * preallocations stored in the corresponding inode of the allocation context.
40670830344cSOjaswin Mujoo *
40680830344cSOjaswin Mujoo * Parameters:
40690830344cSOjaswin Mujoo * ac allocation context
40700830344cSOjaswin Mujoo * start start of the new range
40710830344cSOjaswin Mujoo * end end of the new range
40720830344cSOjaswin Mujoo */
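/*
 * Illustration: the boundary adjustment described above, reduced to
 * its core. left_pa_end and right_pa_start are the nearest
 * non-deleted neighbours' boundaries (hypothetical inputs here, with
 * ~0U meaning "no such neighbour"); the normalized range [start, end)
 * is shrunk so it can overlap neither of them.
 */
static void pa_trim_sketch(unsigned int *start, unsigned int *end,
			unsigned int left_pa_end, unsigned int right_pa_start)
{
	if (left_pa_end != ~0U && left_pa_end > *start)
		*start = left_pa_end;		/* keep clear of left PA */
	if (right_pa_start != ~0U && right_pa_start < *end)
		*end = right_pa_start;		/* keep clear of right PA */
}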
40730830344cSOjaswin Mujoo static inline void
40740830344cSOjaswin Mujoo ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
40750830344cSOjaswin Mujoo ext4_lblk_t *start, ext4_lblk_t *end)
40760830344cSOjaswin Mujoo {
40770830344cSOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
40780830344cSOjaswin Mujoo struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
407938727786SOjaswin Mujoo struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
408038727786SOjaswin Mujoo struct rb_node *iter;
40810830344cSOjaswin Mujoo ext4_lblk_t new_start, new_end;
408238727786SOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1;
40830830344cSOjaswin Mujoo
40840830344cSOjaswin Mujoo new_start = *start;
40850830344cSOjaswin Mujoo new_end = *end;
40860830344cSOjaswin Mujoo
408738727786SOjaswin Mujoo /*
408838727786SOjaswin Mujoo * Adjust the normalized range so that it doesn't overlap with any
408938727786SOjaswin Mujoo * existing preallocated blocks (PAs). Make sure to hold the rbtree lock
409038727786SOjaswin Mujoo * so it doesn't change underneath us.
409138727786SOjaswin Mujoo */
409238727786SOjaswin Mujoo read_lock(&ei->i_prealloc_lock);
40930830344cSOjaswin Mujoo
409438727786SOjaswin Mujoo /* Step 1: find any one immediate neighboring PA of the normalized range */
409538727786SOjaswin Mujoo for (iter = ei->i_prealloc_node.rb_node; iter;
409638727786SOjaswin Mujoo iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
409738727786SOjaswin Mujoo tmp_pa_start, iter)) {
409838727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
409938727786SOjaswin Mujoo pa_node.inode_node);
41000830344cSOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart;
41010830344cSOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
41020830344cSOjaswin Mujoo
41030830344cSOjaswin Mujoo /* PA must not overlap original request */
410438727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock);
410538727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0)
41060830344cSOjaswin Mujoo BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
41070830344cSOjaswin Mujoo ac->ac_o_ex.fe_logical < tmp_pa_start));
41080830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
41090830344cSOjaswin Mujoo }
41100830344cSOjaswin Mujoo
411138727786SOjaswin Mujoo /*
411238727786SOjaswin Mujoo * Step 2: check if the found PA is left or right neighbor and
411338727786SOjaswin Mujoo * get the other neighbor
411438727786SOjaswin Mujoo */
411538727786SOjaswin Mujoo if (tmp_pa) {
411638727786SOjaswin Mujoo if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
411738727786SOjaswin Mujoo struct rb_node *tmp;
411838727786SOjaswin Mujoo
411938727786SOjaswin Mujoo left_pa = tmp_pa;
412038727786SOjaswin Mujoo tmp = rb_next(&left_pa->pa_node.inode_node);
412138727786SOjaswin Mujoo if (tmp) {
412238727786SOjaswin Mujoo right_pa = rb_entry(tmp,
412338727786SOjaswin Mujoo struct ext4_prealloc_space,
412438727786SOjaswin Mujoo pa_node.inode_node);
412538727786SOjaswin Mujoo }
412638727786SOjaswin Mujoo } else {
412738727786SOjaswin Mujoo struct rb_node *tmp;
412838727786SOjaswin Mujoo
412938727786SOjaswin Mujoo right_pa = tmp_pa;
413038727786SOjaswin Mujoo tmp = rb_prev(&right_pa->pa_node.inode_node);
413138727786SOjaswin Mujoo
if (tmp) {
413238727786SOjaswin Mujoo left_pa = rb_entry(tmp,
413338727786SOjaswin Mujoo struct ext4_prealloc_space,
413438727786SOjaswin Mujoo pa_node.inode_node);
413538727786SOjaswin Mujoo }
413638727786SOjaswin Mujoo }
413738727786SOjaswin Mujoo }
413838727786SOjaswin Mujoo
413938727786SOjaswin Mujoo /* Step 3: get the non-deleted neighbors */
414038727786SOjaswin Mujoo if (left_pa) {
414138727786SOjaswin Mujoo for (iter = &left_pa->pa_node.inode_node;;
414238727786SOjaswin Mujoo iter = rb_prev(iter)) {
414338727786SOjaswin Mujoo if (!iter) {
414438727786SOjaswin Mujoo left_pa = NULL;
414538727786SOjaswin Mujoo break;
414638727786SOjaswin Mujoo }
414738727786SOjaswin Mujoo
414838727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
414938727786SOjaswin Mujoo pa_node.inode_node);
415038727786SOjaswin Mujoo left_pa = tmp_pa;
415138727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock);
415238727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0) {
415338727786SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
415438727786SOjaswin Mujoo break;
41550830344cSOjaswin Mujoo }
41560830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
41570830344cSOjaswin Mujoo }
415838727786SOjaswin Mujoo }
415938727786SOjaswin Mujoo
416038727786SOjaswin Mujoo if (right_pa) {
416138727786SOjaswin Mujoo for (iter = &right_pa->pa_node.inode_node;;
416238727786SOjaswin Mujoo iter = rb_next(iter)) {
416338727786SOjaswin Mujoo if (!iter) {
416438727786SOjaswin Mujoo right_pa = NULL;
416538727786SOjaswin Mujoo break;
416638727786SOjaswin Mujoo }
416738727786SOjaswin Mujoo
416838727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
416938727786SOjaswin Mujoo pa_node.inode_node);
417038727786SOjaswin Mujoo right_pa = tmp_pa;
417138727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock);
417238727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0) {
417338727786SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
417438727786SOjaswin Mujoo break;
417538727786SOjaswin Mujoo }
417638727786SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
417738727786SOjaswin Mujoo }
417838727786SOjaswin Mujoo }
417938727786SOjaswin Mujoo
418038727786SOjaswin Mujoo if (left_pa) {
418138727786SOjaswin Mujoo left_pa_end =
418238727786SOjaswin Mujoo left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len);
418338727786SOjaswin Mujoo BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
418438727786SOjaswin Mujoo }
418538727786SOjaswin Mujoo
418638727786SOjaswin Mujoo if (right_pa) {
418738727786SOjaswin Mujoo right_pa_start = right_pa->pa_lstart;
418838727786SOjaswin Mujoo BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
418938727786SOjaswin Mujoo }
419038727786SOjaswin Mujoo
419138727786SOjaswin Mujoo /* Step 4: trim our normalized range to not overlap with the neighbors */
419238727786SOjaswin Mujoo if (left_pa) {
419338727786SOjaswin Mujoo if (left_pa_end > new_start)
419438727786SOjaswin Mujoo new_start = left_pa_end;
419538727786SOjaswin Mujoo }
419638727786SOjaswin Mujoo
419738727786SOjaswin Mujoo if (right_pa) {
419838727786SOjaswin Mujoo if (right_pa_start < new_end)
419938727786SOjaswin Mujoo new_end = right_pa_start;
420038727786SOjaswin Mujoo }
420138727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock);
42020830344cSOjaswin Mujoo
42030830344cSOjaswin Mujoo /* XXX: extra loop to check we really don't overlap preallocations */
42040830344cSOjaswin Mujoo ext4_mb_pa_assert_overlap(ac, new_start, new_end);
42050830344cSOjaswin Mujoo
42060830344cSOjaswin Mujoo *start = new_start;
42070830344cSOjaswin Mujoo *end = new_end;
42080830344cSOjaswin
Mujoo }
42090830344cSOjaswin Mujoo
42100830344cSOjaswin Mujoo /*
4211c9de560dSAlex Tomas * Normalization means making request better in terms of
4212c9de560dSAlex Tomas * size and alignment
4213c9de560dSAlex Tomas */
42144ddfef7bSEric Sandeen static noinline_for_stack void
42154ddfef7bSEric Sandeen ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4216c9de560dSAlex Tomas struct ext4_allocation_request *ar)
4217c9de560dSAlex Tomas {
421853accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4219b07ffe69SKemeng Shi struct ext4_super_block *es = sbi->s_es;
4220c9de560dSAlex Tomas int bsbits, max;
4221c9de560dSAlex Tomas ext4_lblk_t end;
42221592d2c5SCurt Wohlgemuth loff_t size, start_off;
42231592d2c5SCurt Wohlgemuth loff_t orig_size __maybe_unused;
42245a0790c2SAndi Kleen ext4_lblk_t start;
4225c9de560dSAlex Tomas
4226c9de560dSAlex Tomas /* only normalize data requests; metadata requests
4227c9de560dSAlex Tomas do not need preallocation */
4228c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4229c9de560dSAlex Tomas return;
4230c9de560dSAlex Tomas
4231c9de560dSAlex Tomas /* sometimes the caller may want exact blocks */
4232c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4233c9de560dSAlex Tomas return;
4234c9de560dSAlex Tomas
4235c9de560dSAlex Tomas /* the caller may indicate that preallocation isn't
4236c9de560dSAlex Tomas * required (it's a tail, for example) */
4237c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4238c9de560dSAlex Tomas return;
4239c9de560dSAlex Tomas
4240c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4241c9de560dSAlex Tomas ext4_mb_normalize_group_request(ac);
4242c9de560dSAlex Tomas return;
4243c9de560dSAlex Tomas }
4244c9de560dSAlex Tomas
4245c9de560dSAlex Tomas bsbits = ac->ac_sb->s_blocksize_bits;
4246c9de560dSAlex Tomas
4247c9de560dSAlex Tomas /* first, let's learn the actual file size
4248c9de560dSAlex Tomas * assuming the current request is allocated */
424953accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4250c9de560dSAlex Tomas size = size << bsbits;
4251c9de560dSAlex Tomas if (size < i_size_read(ac->ac_inode))
4252c9de560dSAlex Tomas size = i_size_read(ac->ac_inode);
42535a0790c2SAndi Kleen orig_size = size;
4254c9de560dSAlex Tomas
42551930479cSValerie Clement /* max size of free chunks */
42561930479cSValerie Clement max = 2 << bsbits;
4257c9de560dSAlex Tomas
42581930479cSValerie Clement #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
42591930479cSValerie Clement (req <= (size) || max <= (chunk_size))
4260c9de560dSAlex Tomas
4261c9de560dSAlex Tomas /* first, try to predict filesize */
4262c9de560dSAlex Tomas /* XXX: should this table be tunable? */
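/*
 * Illustration: the size-prediction ladder that follows, collapsed
 * into one table-driven helper. It rounds the projected file size (in
 * bytes) up to the next fixed bucket from 16KiB to 1MiB; the larger
 * NRL_CHECK_SIZE() arms, which also realign start_off, are elided
 * from this sketch.
 */
static unsigned long predict_size_sketch(unsigned long size)
{
	static const unsigned long buckets[] = {
		16 * 1024, 32 * 1024, 64 * 1024, 128 * 1024,
		256 * 1024, 512 * 1024, 1024 * 1024,
	};
	unsigned int i;

	for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
		if (size <= buckets[i])
			return buckets[i];	/* round up to the bucket */
	return size;	/* 2MiB and up: handled by the elided arms */
}
/* e.g. predict_size_sketch(10000) == 16384 */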
4263c9de560dSAlex Tomas start_off = 0;
4264c9de560dSAlex Tomas if (size <= 16 * 1024) {
4265c9de560dSAlex Tomas size = 16 * 1024;
4266c9de560dSAlex Tomas } else if (size <= 32 * 1024) {
4267c9de560dSAlex Tomas size = 32 * 1024;
4268c9de560dSAlex Tomas } else if (size <= 64 * 1024) {
4269c9de560dSAlex Tomas size = 64 * 1024;
4270c9de560dSAlex Tomas } else if (size <= 128 * 1024) {
4271c9de560dSAlex Tomas size = 128 * 1024;
4272c9de560dSAlex Tomas } else if (size <= 256 * 1024) {
4273c9de560dSAlex Tomas size = 256 * 1024;
4274c9de560dSAlex Tomas } else if (size <= 512 * 1024) {
4275c9de560dSAlex Tomas size = 512 * 1024;
4276c9de560dSAlex Tomas } else if (size <= 1024 * 1024) {
4277c9de560dSAlex Tomas size = 1024 * 1024;
42781930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4279c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
42801930479cSValerie Clement (21 - bsbits)) << 21;
42811930479cSValerie Clement size = 2 * 1024 * 1024;
42821930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4283c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4284c9de560dSAlex Tomas (22 - bsbits)) << 22;
4285c9de560dSAlex Tomas size = 4 * 1024 * 1024;
4286*b3916da0SKemeng Shi } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
42871930479cSValerie Clement (8<<20)>>bsbits, max, 8 * 1024)) {
4288c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4289c9de560dSAlex Tomas (23 - bsbits)) << 23;
4290c9de560dSAlex Tomas size = 8 * 1024 * 1024;
4291c9de560dSAlex Tomas } else {
4292c9de560dSAlex Tomas start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
429391a48aafSKemeng Shi size = (loff_t) EXT4_C2B(sbi,
4294b27b1535SXiaoguang Wang ac->ac_o_ex.fe_len) << bsbits;
4295c9de560dSAlex Tomas }
42965a0790c2SAndi Kleen size = size >> bsbits;
42975a0790c2SAndi Kleen start = start_off >> bsbits;
4298c9de560dSAlex Tomas
4299a08f789dSBaokun Li /*
4300a08f789dSBaokun Li * For tiny groups (smaller than 8MB) the chosen allocation
4301a08f789dSBaokun Li * alignment may be larger than group size. Make sure the
4302a08f789dSBaokun Li * alignment does not move allocation to a different group which
4303a08f789dSBaokun Li * makes mballoc fail assertions later.
4304a08f789dSBaokun Li */
4305a08f789dSBaokun Li start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4306a08f789dSBaokun Li (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4307a08f789dSBaokun Li
4308c9de560dSAlex Tomas /* don't cover already allocated blocks in selected range */
4309c9de560dSAlex Tomas if (ar->pleft && start <= ar->lleft) {
4310c9de560dSAlex Tomas size -= ar->lleft + 1 - start;
4311c9de560dSAlex Tomas start = ar->lleft + 1;
4312c9de560dSAlex Tomas }
4313c9de560dSAlex Tomas if (ar->pright && start + size - 1 >= ar->lright)
4314c9de560dSAlex Tomas size -= start + size - ar->lright;
4315c9de560dSAlex Tomas
4316cd648b8aSJan Kara /*
4317cd648b8aSJan Kara * Trim allocation request for filesystems with artificially small
4318cd648b8aSJan Kara * groups.
4319cd648b8aSJan Kara */
4320cd648b8aSJan Kara if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4321cd648b8aSJan Kara size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4322cd648b8aSJan Kara
4323c9de560dSAlex Tomas end = start + size;
4324c9de560dSAlex Tomas
43250830344cSOjaswin Mujoo ext4_mb_pa_adjust_overlap(ac, &start, &end);
4326c9de560dSAlex Tomas
4327c9de560dSAlex Tomas size = end - start;
4328c9de560dSAlex Tomas
4329cf4ff938SBaokun Li /*
4330cf4ff938SBaokun Li * In this function "start" and "size" are normalized for better
4331cf4ff938SBaokun Li * alignment and length such that we could preallocate more blocks.
4332cf4ff938SBaokun Li * This normalization is done such that original request of
4333cf4ff938SBaokun Li * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4334cf4ff938SBaokun Li * "size" boundaries.
4335cf4ff938SBaokun Li * (Note fe_len can be relaxed since FS block allocation API does not
4336cf4ff938SBaokun Li * provide a guarantee on the number of contiguous blocks allocated,
4337cf4ff938SBaokun Li * since that depends upon free space left, etc).
4338cf4ff938SBaokun Li * In case of inode pa, later we use the allocated blocks
43391221b235SKemeng Shi * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4340cf4ff938SBaokun Li * range of goal/best blocks [start, size] to put it at the
4341cf4ff938SBaokun Li * ac_o_ex.fe_logical extent of this inode.
4342cf4ff938SBaokun Li * (See ext4_mb_use_inode_pa() for more details)
4343cf4ff938SBaokun Li */
4344cf4ff938SBaokun Li if (start + size <= ac->ac_o_ex.fe_logical ||
4345c9de560dSAlex Tomas start > ac->ac_o_ex.fe_logical) {
43469d8b9ec4STheodore Ts'o ext4_msg(ac->ac_sb, KERN_ERR,
43479d8b9ec4STheodore Ts'o "start %lu, size %lu, fe_logical %lu",
4348c9de560dSAlex Tomas (unsigned long) start, (unsigned long) size,
4349c9de560dSAlex Tomas (unsigned long) ac->ac_o_ex.fe_logical);
4350dfe076c1SDmitry Monakhov BUG();
4351c9de560dSAlex Tomas }
4352b5b60778SMaurizio Lombardi BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4353c9de560dSAlex Tomas
4354c9de560dSAlex Tomas /* now prepare goal request */
4355c9de560dSAlex Tomas
4356c9de560dSAlex Tomas /* XXX: is it better to align blocks WRT to logical
4357c9de560dSAlex Tomas * placement or satisfy big request as is */
4358c9de560dSAlex Tomas ac->ac_g_ex.fe_logical = start;
435953accfa9STheodore Ts'o ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4360c9de560dSAlex Tomas
4361c9de560dSAlex Tomas /* define goal start in order to merge */
4362b07ffe69SKemeng Shi if (ar->pright && (ar->lright == (start + size)) &&
4363b07ffe69SKemeng Shi ar->pright >= size &&
4364b07ffe69SKemeng Shi ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4365c9de560dSAlex Tomas /* merge to the right */
4366c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4367b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group,
4368b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start);
4369c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4370c9de560dSAlex Tomas }
4371b07ffe69SKemeng Shi if (ar->pleft && (ar->lleft + 1 == start) &&
4372b07ffe69SKemeng Shi ar->pleft + 1 < ext4_blocks_count(es)) {
4373c9de560dSAlex Tomas /* merge to the left */
4374c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4375b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group,
4376b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start);
4377c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4378c9de560dSAlex Tomas }
4379c9de560dSAlex Tomas
4380d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "goal: 
%lld(was %lld) blocks at %u\n", size, 4381d3df1453SRitesh Harjani orig_size, start); 4382c9de560dSAlex Tomas } 4383c9de560dSAlex Tomas 4384c9de560dSAlex Tomas static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4385c9de560dSAlex Tomas { 4386c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4387c9de560dSAlex Tomas 4388a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4389c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_reqs); 4390c9de560dSAlex Tomas atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4391291dae47SCurt Wohlgemuth if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4392c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_success); 4393c9de560dSAlex Tomas atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4394a6c75eafSHarshad Shirwadkar atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4395c9de560dSAlex Tomas if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4396c9de560dSAlex Tomas ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4397c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_goals); 4398c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan) 4399c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_breaks); 4400c9de560dSAlex Tomas } 4401c9de560dSAlex Tomas 4402296c355cSTheodore Ts'o if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4403296c355cSTheodore Ts'o trace_ext4_mballoc_alloc(ac); 4404296c355cSTheodore Ts'o else 4405296c355cSTheodore Ts'o trace_ext4_mballoc_prealloc(ac); 4406c9de560dSAlex Tomas } 4407c9de560dSAlex Tomas 4408c9de560dSAlex Tomas /* 4409b844167eSCurt Wohlgemuth * Called on failure; free up any blocks from the inode PA for this 4410b844167eSCurt Wohlgemuth * context. We don't need this for MB_GROUP_PA because we only change 4411b844167eSCurt Wohlgemuth * pa_free in ext4_mb_release_context(), but on failure, we've already 4412b844167eSCurt Wohlgemuth * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4413b844167eSCurt Wohlgemuth */ 4414b844167eSCurt Wohlgemuth static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4415b844167eSCurt Wohlgemuth { 4416b844167eSCurt Wohlgemuth struct ext4_prealloc_space *pa = ac->ac_pa; 441786f0afd4STheodore Ts'o struct ext4_buddy e4b; 441886f0afd4STheodore Ts'o int err; 4419b844167eSCurt Wohlgemuth 442086f0afd4STheodore Ts'o if (pa == NULL) { 4421c99d1e6eSTheodore Ts'o if (ac->ac_f_ex.fe_len == 0) 4422c99d1e6eSTheodore Ts'o return; 442386f0afd4STheodore Ts'o err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 442419b8b035STheodore Ts'o if (WARN_RATELIMIT(err, 442519b8b035STheodore Ts'o "ext4: mb_load_buddy failed (%d)", err)) 442686f0afd4STheodore Ts'o /* 442786f0afd4STheodore Ts'o * This should never happen since we pin the 442886f0afd4STheodore Ts'o * pages in the ext4_allocation_context so 442986f0afd4STheodore Ts'o * ext4_mb_load_buddy() should never fail. 
443086f0afd4STheodore Ts'o */ 443186f0afd4STheodore Ts'o return; 443286f0afd4STheodore Ts'o ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 443386f0afd4STheodore Ts'o mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 443486f0afd4STheodore Ts'o ac->ac_f_ex.fe_len); 443586f0afd4STheodore Ts'o ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4436c99d1e6eSTheodore Ts'o ext4_mb_unload_buddy(&e4b); 443786f0afd4STheodore Ts'o return; 443886f0afd4STheodore Ts'o } 443936cb0f52SKemeng Shi if (pa->pa_type == MB_INODE_PA) { 444036cb0f52SKemeng Shi spin_lock(&pa->pa_lock); 4441400db9d3SZheng Liu pa->pa_free += ac->ac_b_ex.fe_len; 444236cb0f52SKemeng Shi spin_unlock(&pa->pa_lock); 444336cb0f52SKemeng Shi } 4444b844167eSCurt Wohlgemuth } 4445b844167eSCurt Wohlgemuth 4446b844167eSCurt Wohlgemuth /* 4447c9de560dSAlex Tomas * use blocks preallocated to inode 4448c9de560dSAlex Tomas */ 4449c9de560dSAlex Tomas static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4450c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4451c9de560dSAlex Tomas { 445253accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4453c9de560dSAlex Tomas ext4_fsblk_t start; 4454c9de560dSAlex Tomas ext4_fsblk_t end; 4455c9de560dSAlex Tomas int len; 4456c9de560dSAlex Tomas 4457c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4458c9de560dSAlex Tomas start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 445953accfa9STheodore Ts'o end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 446053accfa9STheodore Ts'o start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 446153accfa9STheodore Ts'o len = EXT4_NUM_B2C(sbi, end - start); 4462c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4463c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4464c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4465c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4466c9de560dSAlex Tomas ac->ac_pa = pa; 4467c9de560dSAlex Tomas 4468c9de560dSAlex Tomas BUG_ON(start < pa->pa_pstart); 446953accfa9STheodore Ts'o BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4470c9de560dSAlex Tomas BUG_ON(pa->pa_free < len); 447193cdf49fSOjaswin Mujoo BUG_ON(ac->ac_b_ex.fe_len <= 0); 4472c9de560dSAlex Tomas pa->pa_free -= len; 4473c9de560dSAlex Tomas 4474d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4475c9de560dSAlex Tomas } 4476c9de560dSAlex Tomas 4477c9de560dSAlex Tomas /* 4478c9de560dSAlex Tomas * use blocks preallocated to locality group 4479c9de560dSAlex Tomas */ 4480c9de560dSAlex Tomas static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4481c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4482c9de560dSAlex Tomas { 448303cddb80SAneesh Kumar K.V unsigned int len = ac->ac_o_ex.fe_len; 44846be2ded1SAneesh Kumar K.V 4485c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4486c9de560dSAlex Tomas &ac->ac_b_ex.fe_group, 4487c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4488c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4489c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4490c9de560dSAlex Tomas ac->ac_pa = pa; 4491c9de560dSAlex Tomas 44921221b235SKemeng Shi /* we don't correct pa_pstart or pa_len here to avoid 449326346ff6SAneesh Kumar K.V * possible race when the group is being loaded concurrently 4494c9de560dSAlex Tomas * instead we correct pa later, after blocks are marked 449526346ff6SAneesh Kumar K.V * in on-disk bitmap -- see ext4_mb_release_context() 449626346ff6SAneesh Kumar K.V * Other CPUs are 
prevented from allocating from this pa by lg_mutex
4497c9de560dSAlex Tomas */
4498d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
44991afdc588SKemeng Shi pa->pa_lstart, len, pa);
4500c9de560dSAlex Tomas }
4501c9de560dSAlex Tomas 
4502c9de560dSAlex Tomas /*
45035e745b04SAneesh Kumar K.V * Return the prealloc space that has the minimal distance
45045e745b04SAneesh Kumar K.V * from the goal block. @cpa is the prealloc
45055e745b04SAneesh Kumar K.V * space with the currently known minimal distance
45065e745b04SAneesh Kumar K.V * from the goal block.
45075e745b04SAneesh Kumar K.V */
45085e745b04SAneesh Kumar K.V static struct ext4_prealloc_space *
45095e745b04SAneesh Kumar K.V ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
45105e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa,
45115e745b04SAneesh Kumar K.V struct ext4_prealloc_space *cpa)
45125e745b04SAneesh Kumar K.V {
45135e745b04SAneesh Kumar K.V ext4_fsblk_t cur_distance, new_distance;
45145e745b04SAneesh Kumar K.V 
45155e745b04SAneesh Kumar K.V if (cpa == NULL) {
45165e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count);
45175e745b04SAneesh Kumar K.V return pa;
45185e745b04SAneesh Kumar K.V }
451979211c8eSAndrew Morton cur_distance = abs(goal_block - cpa->pa_pstart);
452079211c8eSAndrew Morton new_distance = abs(goal_block - pa->pa_pstart);
45215e745b04SAneesh Kumar K.V 
45225a54b2f1SColy Li if (cur_distance <= new_distance)
45235e745b04SAneesh Kumar K.V return cpa;
45245e745b04SAneesh Kumar K.V 
45255e745b04SAneesh Kumar K.V /* drop the previous reference */
45265e745b04SAneesh Kumar K.V atomic_dec(&cpa->pa_count);
45275e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count);
45285e745b04SAneesh Kumar K.V return pa;
45295e745b04SAneesh Kumar K.V }
45305e745b04SAneesh Kumar K.V 
45315e745b04SAneesh Kumar K.V /*
4532c9de560dSAlex Tomas * search goal blocks in preallocated space
4533c9de560dSAlex Tomas */
45344fca8f07SRitesh Harjani static noinline_for_stack bool
45354ddfef7bSEric Sandeen ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4536c9de560dSAlex Tomas {
453753accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45386be2ded1SAneesh Kumar K.V int order, i;
4539c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4540c9de560dSAlex Tomas struct ext4_locality_group *lg;
4541bcf43499SOjaswin Mujoo struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
4542bcf43499SOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end;
454338727786SOjaswin Mujoo struct rb_node *iter;
45445e745b04SAneesh Kumar K.V ext4_fsblk_t goal_block;
4545c9de560dSAlex Tomas 
4546c9de560dSAlex Tomas /* only data can be preallocated */
4547c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
45484fca8f07SRitesh Harjani return false;
4549c9de560dSAlex Tomas 
4550c9de560dSAlex Tomas /* first, try per-file preallocation */
455138727786SOjaswin Mujoo read_lock(&ei->i_prealloc_lock);
455238727786SOjaswin Mujoo for (iter = ei->i_prealloc_node.rb_node; iter;
455338727786SOjaswin Mujoo iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
455438727786SOjaswin Mujoo tmp_pa_start, iter)) {
455538727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
455638727786SOjaswin Mujoo pa_node.inode_node);
4557c9de560dSAlex Tomas 
4558c9de560dSAlex Tomas /* all fields in this condition don't change,
4559c9de560dSAlex Tomas * so we can skip locking for them */
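/*
 * Illustrative example (hypothetical numbers, one block per cluster,
 * added here): a PA with pa_lstart = 100 and pa_len = 8 covers the
 * logical window [100, 108); a request at fe_logical = 103 can be
 * served from it, while requests at 99 or 108 fall outside and are
 * skipped by the containment check below.
 */
4560bcf43499SOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart;
4561bcf43499SOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, 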
tmp_pa->pa_len); 4562bcf43499SOjaswin Mujoo 456338727786SOjaswin Mujoo /* original request start doesn't lie in this PA */ 4564bcf43499SOjaswin Mujoo if (ac->ac_o_ex.fe_logical < tmp_pa_start || 4565bcf43499SOjaswin Mujoo ac->ac_o_ex.fe_logical >= tmp_pa_end) 4566c9de560dSAlex Tomas continue; 4567c9de560dSAlex Tomas 4568fb0a387dSEric Sandeen /* non-extent files can't have physical blocks past 2^32 */ 456912e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4570bcf43499SOjaswin Mujoo (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4571e86a7182SOjaswin Mujoo EXT4_MAX_BLOCK_FILE_PHYS)) { 4572e86a7182SOjaswin Mujoo /* 4573e86a7182SOjaswin Mujoo * Since PAs don't overlap, we won't find any 4574e86a7182SOjaswin Mujoo * other PA to satisfy this. 4575e86a7182SOjaswin Mujoo */ 4576e86a7182SOjaswin Mujoo break; 4577e86a7182SOjaswin Mujoo } 4578fb0a387dSEric Sandeen 4579c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4580bcf43499SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 4581bcf43499SOjaswin Mujoo if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) { 4582bcf43499SOjaswin Mujoo atomic_inc(&tmp_pa->pa_count); 4583bcf43499SOjaswin Mujoo ext4_mb_use_inode_pa(ac, tmp_pa); 4584bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 4585c9de560dSAlex Tomas ac->ac_criteria = 10; 458638727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock); 45874fca8f07SRitesh Harjani return true; 4588c9de560dSAlex Tomas } 4589bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 4590c9de560dSAlex Tomas } 459138727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock); 4592c9de560dSAlex Tomas 4593c9de560dSAlex Tomas /* can we use group allocation? */ 4594c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 45954fca8f07SRitesh Harjani return false; 4596c9de560dSAlex Tomas 4597c9de560dSAlex Tomas /* inode may have no locality group for some reason */ 4598c9de560dSAlex Tomas lg = ac->ac_lg; 4599c9de560dSAlex Tomas if (lg == NULL) 46004fca8f07SRitesh Harjani return false; 46016be2ded1SAneesh Kumar K.V order = fls(ac->ac_o_ex.fe_len) - 1; 46026be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 46036be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 46046be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 4605c9de560dSAlex Tomas 4606bda00de7SAkinobu Mita goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 46075e745b04SAneesh Kumar K.V /* 46085e745b04SAneesh Kumar K.V * search for the prealloc space that is having 46095e745b04SAneesh Kumar K.V * minimal distance from the goal block. 
46105e745b04SAneesh Kumar K.V */
46116be2ded1SAneesh Kumar K.V for (i = order; i < PREALLOC_TB_SIZE; i++) {
4612c9de560dSAlex Tomas rcu_read_lock();
4613bcf43499SOjaswin Mujoo list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4614a8e38fd3SOjaswin Mujoo pa_node.lg_list) {
4615bcf43499SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock);
4616bcf43499SOjaswin Mujoo if (tmp_pa->pa_deleted == 0 &&
4617bcf43499SOjaswin Mujoo tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
46185e745b04SAneesh Kumar K.V 
46195e745b04SAneesh Kumar K.V cpa = ext4_mb_check_group_pa(goal_block,
4620bcf43499SOjaswin Mujoo tmp_pa, cpa);
46215e745b04SAneesh Kumar K.V }
4622bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
46235e745b04SAneesh Kumar K.V }
46245e745b04SAneesh Kumar K.V rcu_read_unlock();
46255e745b04SAneesh Kumar K.V }
46265e745b04SAneesh Kumar K.V if (cpa) {
46275e745b04SAneesh Kumar K.V ext4_mb_use_group_pa(ac, cpa);
4628c9de560dSAlex Tomas ac->ac_criteria = 20;
46294fca8f07SRitesh Harjani return true;
4630c9de560dSAlex Tomas }
46314fca8f07SRitesh Harjani return false;
4632c9de560dSAlex Tomas }
4633c9de560dSAlex Tomas 
4634c9de560dSAlex Tomas /*
46357a2fcbf7SAneesh Kumar K.V * the function goes through all blocks freed in the group
46367a2fcbf7SAneesh Kumar K.V * but not yet committed and marks them used in the in-core bitmap.
46377a2fcbf7SAneesh Kumar K.V * buddy must be generated from this bitmap
4638955ce5f5SAneesh Kumar K.V * Need to be called with the ext4 group lock held
46397a2fcbf7SAneesh Kumar K.V */
46407a2fcbf7SAneesh Kumar K.V static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
46417a2fcbf7SAneesh Kumar K.V ext4_group_t group)
46427a2fcbf7SAneesh Kumar K.V {
46437a2fcbf7SAneesh Kumar K.V struct rb_node *n;
46447a2fcbf7SAneesh Kumar K.V struct ext4_group_info *grp;
46457a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry;
46467a2fcbf7SAneesh Kumar K.V 
46477a2fcbf7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group);
46485354b2afSTheodore Ts'o if (!grp)
46495354b2afSTheodore Ts'o return;
46507a2fcbf7SAneesh Kumar K.V n = rb_first(&(grp->bb_free_root));
46517a2fcbf7SAneesh Kumar K.V 
46527a2fcbf7SAneesh Kumar K.V while (n) {
465318aadd47SBobi Jam entry = rb_entry(n, struct ext4_free_data, efd_node);
4654123e3016SRitesh Harjani mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
46557a2fcbf7SAneesh Kumar K.V n = rb_next(n);
46567a2fcbf7SAneesh Kumar K.V }
46577a2fcbf7SAneesh Kumar K.V return;
46587a2fcbf7SAneesh Kumar K.V }
46597a2fcbf7SAneesh Kumar K.V 
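/*
 * Added note: grp->bb_free_root holds extents freed in transactions
 * that have not committed yet. Marking them in-use in the in-core
 * bitmap above keeps the buddy allocator from re-issuing those blocks
 * before the commit makes the free permanent on disk.
 */
46607a2fcbf7SAneesh Kumar K.V /*
4661c9de560dSAlex Tomas * the function goes through all preallocations in this group and marks them
4662c9de560dSAlex Tomas * used in in-core bitmap. 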
buddy must be generated from this bitmap
4663955ce5f5SAneesh Kumar K.V * Need to be called with ext4 group lock held
4664c9de560dSAlex Tomas */
4665089ceeccSEric Sandeen static noinline_for_stack
4666089ceeccSEric Sandeen void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4667c9de560dSAlex Tomas ext4_group_t group)
4668c9de560dSAlex Tomas {
4669c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4670c9de560dSAlex Tomas struct ext4_prealloc_space *pa;
4671c9de560dSAlex Tomas struct list_head *cur;
4672c9de560dSAlex Tomas ext4_group_t groupnr;
4673c9de560dSAlex Tomas ext4_grpblk_t start;
4674c9de560dSAlex Tomas int preallocated = 0;
4675c9de560dSAlex Tomas int len;
4676c9de560dSAlex Tomas 
46775354b2afSTheodore Ts'o if (!grp)
46785354b2afSTheodore Ts'o return;
46795354b2afSTheodore Ts'o 
4680c9de560dSAlex Tomas /* every form of preallocation discard loads the group first,
4681c9de560dSAlex Tomas * so the only competing code is preallocation use.
4682c9de560dSAlex Tomas * we don't need any locking here
4683c9de560dSAlex Tomas * notice we do NOT ignore preallocations with pa_deleted
4684c9de560dSAlex Tomas * otherwise we could leave used blocks available for
4685c9de560dSAlex Tomas * allocation in buddy when concurrent ext4_mb_put_pa()
4686c9de560dSAlex Tomas * is dropping preallocation
4687c9de560dSAlex Tomas */
4688c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) {
4689c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4690c9de560dSAlex Tomas spin_lock(&pa->pa_lock);
4691c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4692c9de560dSAlex Tomas &groupnr, &start);
4693c9de560dSAlex Tomas len = pa->pa_len;
4694c9de560dSAlex Tomas spin_unlock(&pa->pa_lock);
4695c9de560dSAlex Tomas if (unlikely(len == 0))
4696c9de560dSAlex Tomas continue;
4697c9de560dSAlex Tomas BUG_ON(groupnr != group);
4698123e3016SRitesh Harjani mb_set_bits(bitmap, start, len);
4699c9de560dSAlex Tomas preallocated += len;
4700c9de560dSAlex Tomas }
4701d3df1453SRitesh Harjani mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4702c9de560dSAlex Tomas }
4703c9de560dSAlex Tomas 
470427bc446eSbrookxu static void ext4_mb_mark_pa_deleted(struct super_block *sb,
470527bc446eSbrookxu struct ext4_prealloc_space *pa)
470627bc446eSbrookxu {
470727bc446eSbrookxu struct ext4_inode_info *ei;
470827bc446eSbrookxu 
470927bc446eSbrookxu if (pa->pa_deleted) {
471027bc446eSbrookxu ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
471127bc446eSbrookxu pa->pa_type, pa->pa_pstart, pa->pa_lstart,
471227bc446eSbrookxu pa->pa_len);
471327bc446eSbrookxu return;
471427bc446eSbrookxu }
471527bc446eSbrookxu 
471627bc446eSbrookxu pa->pa_deleted = 1;
471727bc446eSbrookxu 
471827bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) {
471927bc446eSbrookxu ei = EXT4_I(pa->pa_inode);
472027bc446eSbrookxu atomic_dec(&ei->i_prealloc_active);
472127bc446eSbrookxu }
472227bc446eSbrookxu }
472327bc446eSbrookxu 
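/*
 * Added note: PA freeing is type-dependent. MB_GROUP_PA entries are
 * walked under rcu_read_lock() via lg_prealloc_list, so they are freed
 * through call_rcu() and ext4_mb_pa_callback() below; MB_INODE_PA
 * entries live in the inode rb-tree protected by i_prealloc_lock and
 * can be freed directly with ext4_mb_pa_free().
 */
472482089725SOjaswin Mujoo static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
4725c9de560dSAlex Tomas {
472682089725SOjaswin Mujoo BUG_ON(!pa);
47274e8d2139SJunho Ryu BUG_ON(atomic_read(&pa->pa_count));
47284e8d2139SJunho Ryu BUG_ON(pa->pa_deleted == 0);
4729c9de560dSAlex Tomas kmem_cache_free(ext4_pspace_cachep, pa);
4730c9de560dSAlex Tomas }
4731c9de560dSAlex Tomas 
473282089725SOjaswin Mujoo static void ext4_mb_pa_callback(struct rcu_head *head)
473382089725SOjaswin Mujoo {
473482089725SOjaswin Mujoo struct 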
ext4_prealloc_space *pa; 473582089725SOjaswin Mujoo 473682089725SOjaswin Mujoo pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 473782089725SOjaswin Mujoo ext4_mb_pa_free(pa); 473882089725SOjaswin Mujoo } 473982089725SOjaswin Mujoo 4740c9de560dSAlex Tomas /* 4741c9de560dSAlex Tomas * drops a reference to preallocated space descriptor 4742c9de560dSAlex Tomas * if this was the last reference and the space is consumed 4743c9de560dSAlex Tomas */ 4744c9de560dSAlex Tomas static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4745c9de560dSAlex Tomas struct super_block *sb, struct ext4_prealloc_space *pa) 4746c9de560dSAlex Tomas { 4747a9df9a49STheodore Ts'o ext4_group_t grp; 4748d33a1976SEric Sandeen ext4_fsblk_t grp_blk; 474938727786SOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4750c9de560dSAlex Tomas 4751c9de560dSAlex Tomas /* in this short window concurrent discard can set pa_deleted */ 4752c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 47534e8d2139SJunho Ryu if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 47544e8d2139SJunho Ryu spin_unlock(&pa->pa_lock); 47554e8d2139SJunho Ryu return; 47564e8d2139SJunho Ryu } 47574e8d2139SJunho Ryu 4758c9de560dSAlex Tomas if (pa->pa_deleted == 1) { 4759c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4760c9de560dSAlex Tomas return; 4761c9de560dSAlex Tomas } 4762c9de560dSAlex Tomas 476327bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4764c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4765c9de560dSAlex Tomas 4766d33a1976SEric Sandeen grp_blk = pa->pa_pstart; 4767cc0fb9adSAneesh Kumar K.V /* 4768cc0fb9adSAneesh Kumar K.V * If doing group-based preallocation, pa_pstart may be in the 4769cc0fb9adSAneesh Kumar K.V * next group when pa is used up 4770cc0fb9adSAneesh Kumar K.V */ 4771cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 4772d33a1976SEric Sandeen grp_blk--; 4773d33a1976SEric Sandeen 4774bd86298eSLukas Czerner grp = ext4_get_group_number(sb, grp_blk); 4775c9de560dSAlex Tomas 4776c9de560dSAlex Tomas /* 4777c9de560dSAlex Tomas * possible race: 4778c9de560dSAlex Tomas * 4779c9de560dSAlex Tomas * P1 (buddy init) P2 (regular allocation) 4780c9de560dSAlex Tomas * find block B in PA 4781c9de560dSAlex Tomas * copy on-disk bitmap to buddy 4782c9de560dSAlex Tomas * mark B in on-disk bitmap 4783c9de560dSAlex Tomas * drop PA from group 4784c9de560dSAlex Tomas * mark all PAs in buddy 4785c9de560dSAlex Tomas * 4786c9de560dSAlex Tomas * thus, P1 initializes buddy with B available. 
To prevent this
4787c9de560dSAlex Tomas * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4788c9de560dSAlex Tomas * against that pair
4789c9de560dSAlex Tomas */
4790c9de560dSAlex Tomas ext4_lock_group(sb, grp);
4791c9de560dSAlex Tomas list_del(&pa->pa_group_list);
4792c9de560dSAlex Tomas ext4_unlock_group(sb, grp);
4793c9de560dSAlex Tomas 
4794a8e38fd3SOjaswin Mujoo if (pa->pa_type == MB_INODE_PA) {
479538727786SOjaswin Mujoo write_lock(pa->pa_node_lock.inode_lock);
479638727786SOjaswin Mujoo rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
479738727786SOjaswin Mujoo write_unlock(pa->pa_node_lock.inode_lock);
479838727786SOjaswin Mujoo ext4_mb_pa_free(pa);
4799a8e38fd3SOjaswin Mujoo } else {
4800a8e38fd3SOjaswin Mujoo spin_lock(pa->pa_node_lock.lg_lock);
4801a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list);
4802a8e38fd3SOjaswin Mujoo spin_unlock(pa->pa_node_lock.lg_lock);
480338727786SOjaswin Mujoo call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
480438727786SOjaswin Mujoo }
4805a8e38fd3SOjaswin Mujoo }
4806c9de560dSAlex Tomas 
480738727786SOjaswin Mujoo static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
480838727786SOjaswin Mujoo {
480938727786SOjaswin Mujoo struct rb_node **iter = &root->rb_node, *parent = NULL;
481038727786SOjaswin Mujoo struct ext4_prealloc_space *iter_pa, *new_pa;
481138727786SOjaswin Mujoo ext4_lblk_t iter_start, new_start;
481238727786SOjaswin Mujoo 
481338727786SOjaswin Mujoo while (*iter) {
481438727786SOjaswin Mujoo iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
481538727786SOjaswin Mujoo pa_node.inode_node);
481638727786SOjaswin Mujoo new_pa = rb_entry(new, struct ext4_prealloc_space,
481738727786SOjaswin Mujoo pa_node.inode_node);
481838727786SOjaswin Mujoo iter_start = iter_pa->pa_lstart;
481938727786SOjaswin Mujoo new_start = new_pa->pa_lstart;
482038727786SOjaswin Mujoo 
482138727786SOjaswin Mujoo parent = *iter;
482238727786SOjaswin Mujoo if (new_start < iter_start)
482338727786SOjaswin Mujoo iter = &((*iter)->rb_left);
482438727786SOjaswin Mujoo else
482538727786SOjaswin Mujoo iter = &((*iter)->rb_right);
482638727786SOjaswin Mujoo }
482738727786SOjaswin Mujoo 
482838727786SOjaswin Mujoo rb_link_node(new, parent, iter);
482938727786SOjaswin Mujoo rb_insert_color(new, root);
4830c9de560dSAlex Tomas }
4831c9de560dSAlex Tomas 
4832c9de560dSAlex Tomas /*
4833c9de560dSAlex Tomas * creates new preallocated space for given inode
4834c9de560dSAlex Tomas */
483553f86b17SRitesh Harjani static noinline_for_stack void
48364ddfef7bSEric Sandeen ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4837c9de560dSAlex Tomas {
4838c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
483953accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb);
4840c9de560dSAlex Tomas struct ext4_prealloc_space *pa;
4841c9de560dSAlex Tomas struct ext4_group_info *grp;
4842c9de560dSAlex Tomas struct ext4_inode_info *ei;
4843c9de560dSAlex Tomas 
4844c9de560dSAlex Tomas /* preallocate only when found space is larger than requested */
4845c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4846c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4847c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
484853f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL);
4849c9de560dSAlex Tomas 
485053f86b17SRitesh Harjani pa = ac->ac_pa;
4851c9de560dSAlex Tomas 
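/*
 * Worked example (illustrative numbers, one block per cluster, added
 * here): with a goal extent at logical [100, 164) and a best extent of
 * only 32 blocks found for an original request at logical 150, the
 * logic below keeps the best extent at the tail of the goal, logical
 * [132, 164), so the originally requested block still falls inside the
 * preallocated range.
 */
4852c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
485393cdf49fSOjaswin Mujoo int new_bex_start;
485493cdf49fSOjaswin Mujoo int 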
new_bex_end; 4855c9de560dSAlex Tomas 4856c9de560dSAlex Tomas /* we can't allocate as much as normalizer wants. 4857c9de560dSAlex Tomas * so, found space must get proper lstart 4858c9de560dSAlex Tomas * to cover original request */ 4859c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 4860c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4861c9de560dSAlex Tomas 486293cdf49fSOjaswin Mujoo /* 486393cdf49fSOjaswin Mujoo * Use the below logic for adjusting best extent as it keeps 486493cdf49fSOjaswin Mujoo * fragmentation in check while ensuring logical range of best 486593cdf49fSOjaswin Mujoo * extent doesn't overflow out of goal extent: 486693cdf49fSOjaswin Mujoo * 486793cdf49fSOjaswin Mujoo * 1. Check if best ex can be kept at end of goal and still 486893cdf49fSOjaswin Mujoo * cover original start 486993cdf49fSOjaswin Mujoo * 2. Else, check if best ex can be kept at start of goal and 487093cdf49fSOjaswin Mujoo * still cover original start 487193cdf49fSOjaswin Mujoo * 3. Else, keep the best ex at start of original request. 487293cdf49fSOjaswin Mujoo */ 487393cdf49fSOjaswin Mujoo new_bex_end = ac->ac_g_ex.fe_logical + 487493cdf49fSOjaswin Mujoo EXT4_C2B(sbi, ac->ac_g_ex.fe_len); 487593cdf49fSOjaswin Mujoo new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 487693cdf49fSOjaswin Mujoo if (ac->ac_o_ex.fe_logical >= new_bex_start) 487793cdf49fSOjaswin Mujoo goto adjust_bex; 4878c9de560dSAlex Tomas 487993cdf49fSOjaswin Mujoo new_bex_start = ac->ac_g_ex.fe_logical; 488093cdf49fSOjaswin Mujoo new_bex_end = 488193cdf49fSOjaswin Mujoo new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 488293cdf49fSOjaswin Mujoo if (ac->ac_o_ex.fe_logical < new_bex_end) 488393cdf49fSOjaswin Mujoo goto adjust_bex; 4884c9de560dSAlex Tomas 488593cdf49fSOjaswin Mujoo new_bex_start = ac->ac_o_ex.fe_logical; 488693cdf49fSOjaswin Mujoo new_bex_end = 488793cdf49fSOjaswin Mujoo new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4888c9de560dSAlex Tomas 488993cdf49fSOjaswin Mujoo adjust_bex: 489093cdf49fSOjaswin Mujoo ac->ac_b_ex.fe_logical = new_bex_start; 4891c9de560dSAlex Tomas 4892c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4893c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 489493cdf49fSOjaswin Mujoo BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical + 489593cdf49fSOjaswin Mujoo EXT4_C2B(sbi, ac->ac_g_ex.fe_len))); 4896c9de560dSAlex Tomas } 4897c9de560dSAlex Tomas 4898c9de560dSAlex Tomas pa->pa_lstart = ac->ac_b_ex.fe_logical; 4899c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4900c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4901c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4902c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 4903d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4904c9de560dSAlex Tomas pa->pa_deleted = 0; 4905cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_INODE_PA; 4906c9de560dSAlex Tomas 4907d3df1453SRitesh Harjani mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4908d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 49099bffad1eSTheodore Ts'o trace_ext4_mb_new_inode_pa(ac, pa); 4910c9de560dSAlex Tomas 491153accfa9STheodore Ts'o atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4912abc075d4SKemeng Shi ext4_mb_use_inode_pa(ac, pa); 4913c9de560dSAlex Tomas 4914c9de560dSAlex Tomas ei = EXT4_I(ac->ac_inode); 4915c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 49165354b2afSTheodore Ts'o if (!grp) 
49175354b2afSTheodore Ts'o return;
4918c9de560dSAlex Tomas 
4919a8e38fd3SOjaswin Mujoo pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
4920c9de560dSAlex Tomas pa->pa_inode = ac->ac_inode;
4921c9de560dSAlex Tomas 
4922c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4923c9de560dSAlex Tomas 
492438727786SOjaswin Mujoo write_lock(pa->pa_node_lock.inode_lock);
492538727786SOjaswin Mujoo ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
492638727786SOjaswin Mujoo write_unlock(pa->pa_node_lock.inode_lock);
492727bc446eSbrookxu atomic_inc(&ei->i_prealloc_active);
4928c9de560dSAlex Tomas }
4929c9de560dSAlex Tomas 
4930c9de560dSAlex Tomas /*
4931c9de560dSAlex Tomas * creates new preallocated space for the locality group the inode belongs to
4932c9de560dSAlex Tomas */
493353f86b17SRitesh Harjani static noinline_for_stack void
49344ddfef7bSEric Sandeen ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4935c9de560dSAlex Tomas {
4936c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
4937c9de560dSAlex Tomas struct ext4_locality_group *lg;
4938c9de560dSAlex Tomas struct ext4_prealloc_space *pa;
4939c9de560dSAlex Tomas struct ext4_group_info *grp;
4940c9de560dSAlex Tomas 
4941c9de560dSAlex Tomas /* preallocate only when found space is larger than requested */
4942c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4943c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4944c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
494553f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL);
4946c9de560dSAlex Tomas 
494753f86b17SRitesh Harjani pa = ac->ac_pa;
4948c9de560dSAlex Tomas 
4949c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4950c9de560dSAlex Tomas pa->pa_lstart = pa->pa_pstart;
4951c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len;
4952c9de560dSAlex Tomas pa->pa_free = pa->pa_len;
4953c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock);
4954a8e38fd3SOjaswin Mujoo INIT_LIST_HEAD(&pa->pa_node.lg_list);
4955d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list);
4956c9de560dSAlex Tomas pa->pa_deleted = 0;
4957cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_GROUP_PA;
4958c9de560dSAlex Tomas 
4959d3df1453SRitesh Harjani mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4960d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart);
49619bffad1eSTheodore Ts'o trace_ext4_mb_new_group_pa(ac, pa);
4962c9de560dSAlex Tomas 
4963c9de560dSAlex Tomas ext4_mb_use_group_pa(ac, pa);
4964c9de560dSAlex Tomas atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4965c9de560dSAlex Tomas 
4966c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49675354b2afSTheodore Ts'o if (!grp)
49685354b2afSTheodore Ts'o return;
4969c9de560dSAlex Tomas lg = ac->ac_lg;
4970c9de560dSAlex Tomas BUG_ON(lg == NULL);
4971c9de560dSAlex Tomas 
4972a8e38fd3SOjaswin Mujoo pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
4973c9de560dSAlex Tomas pa->pa_inode = NULL;
4974c9de560dSAlex Tomas 
4975c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4976c9de560dSAlex Tomas 
49776be2ded1SAneesh Kumar K.V /*
49786be2ded1SAneesh Kumar K.V * We will later add the new pa to the right bucket
49796be2ded1SAneesh Kumar K.V * after updating the pa_free in ext4_mb_release_context
49806be2ded1SAneesh Kumar K.V */
4981c9de560dSAlex Tomas }
4982c9de560dSAlex Tomas 
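/*
 * Added note: the function below is the single policy point choosing
 * the PA flavour; EXT4_MB_HINT_GROUP_ALLOC is set by
 * ext4_mb_group_or_file() when the file size stays at or below
 * sbi->s_mb_stream_request.
 */
498353f86b17SRitesh Harjani static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4984c9de560dSAlex Tomas {
4985c9de560dSAlex 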
Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 498653f86b17SRitesh Harjani ext4_mb_new_group_pa(ac); 4987c9de560dSAlex Tomas else 498853f86b17SRitesh Harjani ext4_mb_new_inode_pa(ac); 4989c9de560dSAlex Tomas } 4990c9de560dSAlex Tomas 4991c9de560dSAlex Tomas /* 4992c9de560dSAlex Tomas * finds all unused blocks in on-disk bitmap, frees them in 4993c9de560dSAlex Tomas * in-core bitmap and buddy. 4994c9de560dSAlex Tomas * @pa must be unlinked from inode and group lists, so that 4995c9de560dSAlex Tomas * nobody else can find/use it. 4996c9de560dSAlex Tomas * the caller MUST hold group/inode locks. 4997c9de560dSAlex Tomas * TODO: optimize the case when there are no in-core structures yet 4998c9de560dSAlex Tomas */ 49994ddfef7bSEric Sandeen static noinline_for_stack int 50004ddfef7bSEric Sandeen ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 50013e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 5002c9de560dSAlex Tomas { 5003c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5004c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5005498e5f24STheodore Ts'o unsigned int end; 5006498e5f24STheodore Ts'o unsigned int next; 5007c9de560dSAlex Tomas ext4_group_t group; 5008c9de560dSAlex Tomas ext4_grpblk_t bit; 5009ba80b101STheodore Ts'o unsigned long long grp_blk_start; 5010c9de560dSAlex Tomas int free = 0; 5011c9de560dSAlex Tomas 5012c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 5013c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 501453accfa9STheodore Ts'o grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5015c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5016c9de560dSAlex Tomas end = bit + pa->pa_len; 5017c9de560dSAlex Tomas 5018c9de560dSAlex Tomas while (bit < end) { 5019ffad0a44SAneesh Kumar K.V bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5020c9de560dSAlex Tomas if (bit >= end) 5021c9de560dSAlex Tomas break; 5022ffad0a44SAneesh Kumar K.V next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5023d3df1453SRitesh Harjani mb_debug(sb, "free preallocated %u/%u in group %u\n", 50245a0790c2SAndi Kleen (unsigned) ext4_group_first_block_no(sb, group) + bit, 50255a0790c2SAndi Kleen (unsigned) next - bit, (unsigned) group); 5026c9de560dSAlex Tomas free += next - bit; 5027c9de560dSAlex Tomas 50283e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 502953accfa9STheodore Ts'o trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 503053accfa9STheodore Ts'o EXT4_C2B(sbi, bit)), 5031a9c667f8SLukas Czerner next - bit); 5032c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5033c9de560dSAlex Tomas bit = next + 1; 5034c9de560dSAlex Tomas } 5035c9de560dSAlex Tomas if (free != pa->pa_free) { 50369d8b9ec4STheodore Ts'o ext4_msg(e4b->bd_sb, KERN_CRIT, 503736bad423SRitesh Harjani "pa %p: logic %lu, phys. %lu, len %d", 5038c9de560dSAlex Tomas pa, (unsigned long) pa->pa_lstart, 5039c9de560dSAlex Tomas (unsigned long) pa->pa_pstart, 504036bad423SRitesh Harjani pa->pa_len); 5041e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 504226346ff6SAneesh Kumar K.V free, pa->pa_free); 5043e56eb659SAneesh Kumar K.V /* 5044e56eb659SAneesh Kumar K.V * pa is already deleted so we use the value obtained 5045e56eb659SAneesh Kumar K.V * from the bitmap and continue. 
5046e56eb659SAneesh Kumar K.V */ 5047c9de560dSAlex Tomas } 5048c9de560dSAlex Tomas atomic_add(free, &sbi->s_mb_discarded); 5049c9de560dSAlex Tomas 5050863c37fcSzhong jiang return 0; 5051c9de560dSAlex Tomas } 5052c9de560dSAlex Tomas 50534ddfef7bSEric Sandeen static noinline_for_stack int 50544ddfef7bSEric Sandeen ext4_mb_release_group_pa(struct ext4_buddy *e4b, 50553e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 5056c9de560dSAlex Tomas { 5057c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5058c9de560dSAlex Tomas ext4_group_t group; 5059c9de560dSAlex Tomas ext4_grpblk_t bit; 5060c9de560dSAlex Tomas 506160e07cf5SYongqiang Yang trace_ext4_mb_release_group_pa(sb, pa); 5062c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 5063c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5064463808f2STheodore Ts'o if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5065463808f2STheodore Ts'o ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5066463808f2STheodore Ts'o e4b->bd_group, group, pa->pa_pstart); 5067463808f2STheodore Ts'o return 0; 5068463808f2STheodore Ts'o } 5069c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5070c9de560dSAlex Tomas atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 50713e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5072c9de560dSAlex Tomas 5073c9de560dSAlex Tomas return 0; 5074c9de560dSAlex Tomas } 5075c9de560dSAlex Tomas 5076c9de560dSAlex Tomas /* 5077c9de560dSAlex Tomas * releases all preallocations in given group 5078c9de560dSAlex Tomas * 5079c9de560dSAlex Tomas * first, we need to decide discard policy: 5080c9de560dSAlex Tomas * - when do we discard 5081c9de560dSAlex Tomas * 1) ENOSPC 5082c9de560dSAlex Tomas * - how many do we discard 5083c9de560dSAlex Tomas * 1) how many requested 5084c9de560dSAlex Tomas */ 50854ddfef7bSEric Sandeen static noinline_for_stack int 50864ddfef7bSEric Sandeen ext4_mb_discard_group_preallocations(struct super_block *sb, 50878c80fb31SChunguang Xu ext4_group_t group, int *busy) 5088c9de560dSAlex Tomas { 5089c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5090c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 5091c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 5092c9de560dSAlex Tomas struct list_head list; 5093c9de560dSAlex Tomas struct ext4_buddy e4b; 509438727786SOjaswin Mujoo struct ext4_inode_info *ei; 5095c9de560dSAlex Tomas int err; 50968c80fb31SChunguang Xu int free = 0; 5097c9de560dSAlex Tomas 50985354b2afSTheodore Ts'o if (!grp) 50995354b2afSTheodore Ts'o return 0; 5100d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for group %u\n", group); 5101c9de560dSAlex Tomas if (list_empty(&grp->bb_prealloc_list)) 5102bbc4ec77SRitesh Harjani goto out_dbg; 5103c9de560dSAlex Tomas 5104574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 51059008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 51069008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 510754d3adbcSTheodore Ts'o ext4_error_err(sb, -err, 510854d3adbcSTheodore Ts'o "Error %d reading block bitmap for %u", 51099008a58eSDarrick J. 
Wong err, group); 5110bbc4ec77SRitesh Harjani goto out_dbg; 5111c9de560dSAlex Tomas } 5112c9de560dSAlex Tomas 5113c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 5114ce89f46cSAneesh Kumar K.V if (err) { 51159651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 51169651e6b2SKonstantin Khlebnikov err, group); 5117ce89f46cSAneesh Kumar K.V put_bh(bitmap_bh); 5118bbc4ec77SRitesh Harjani goto out_dbg; 5119ce89f46cSAneesh Kumar K.V } 5120c9de560dSAlex Tomas 5121c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 5122c9de560dSAlex Tomas ext4_lock_group(sb, group); 5123c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, 5124c9de560dSAlex Tomas &grp->bb_prealloc_list, pa_group_list) { 5125c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5126c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 5127c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 51288c80fb31SChunguang Xu *busy = 1; 5129c9de560dSAlex Tomas continue; 5130c9de560dSAlex Tomas } 5131c9de560dSAlex Tomas if (pa->pa_deleted) { 5132c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5133c9de560dSAlex Tomas continue; 5134c9de560dSAlex Tomas } 5135c9de560dSAlex Tomas 5136c9de560dSAlex Tomas /* seems this one can be freed ... */ 513727bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 5138c9de560dSAlex Tomas 513970022da8SYe Bin if (!free) 514070022da8SYe Bin this_cpu_inc(discard_pa_seq); 514170022da8SYe Bin 5142c9de560dSAlex Tomas /* we can trust pa_free ... */ 5143c9de560dSAlex Tomas free += pa->pa_free; 5144c9de560dSAlex Tomas 5145c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5146c9de560dSAlex Tomas 5147c9de560dSAlex Tomas list_del(&pa->pa_group_list); 5148c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 5149c9de560dSAlex Tomas } 5150c9de560dSAlex Tomas 5151c9de560dSAlex Tomas /* now free all selected PAs */ 5152c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5153c9de560dSAlex Tomas 5154c9de560dSAlex Tomas /* remove from object (inode or locality group) */ 5155a8e38fd3SOjaswin Mujoo if (pa->pa_type == MB_GROUP_PA) { 5156a8e38fd3SOjaswin Mujoo spin_lock(pa->pa_node_lock.lg_lock); 5157a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 5158a8e38fd3SOjaswin Mujoo spin_unlock(pa->pa_node_lock.lg_lock); 5159a8e38fd3SOjaswin Mujoo } else { 516038727786SOjaswin Mujoo write_lock(pa->pa_node_lock.inode_lock); 516138727786SOjaswin Mujoo ei = EXT4_I(pa->pa_inode); 516238727786SOjaswin Mujoo rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 516338727786SOjaswin Mujoo write_unlock(pa->pa_node_lock.inode_lock); 5164a8e38fd3SOjaswin Mujoo } 5165c9de560dSAlex Tomas 5166c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 516738727786SOjaswin Mujoo 516838727786SOjaswin Mujoo if (pa->pa_type == MB_GROUP_PA) { 516938727786SOjaswin Mujoo ext4_mb_release_group_pa(&e4b, pa); 5170c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 517138727786SOjaswin Mujoo } else { 517238727786SOjaswin Mujoo ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 517338727786SOjaswin Mujoo ext4_mb_pa_free(pa); 517438727786SOjaswin Mujoo } 5175c9de560dSAlex Tomas } 5176c9de560dSAlex Tomas 5177c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5178e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5179c9de560dSAlex Tomas put_bh(bitmap_bh); 5180bbc4ec77SRitesh Harjani out_dbg: 5181d3df1453SRitesh Harjani mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 51828c80fb31SChunguang Xu free, group, grp->bb_free); 51838c80fb31SChunguang Xu return free; 5184c9de560dSAlex Tomas } 
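/*
 * Added note: the routine above follows mballoc's usual two-phase
 * discard: PAs are marked deleted under pa_lock and collected on a
 * private list while walking bb_prealloc_list, then released with the
 * buddy already loaded. PAs still referenced (pa_count != 0) are
 * skipped and reported via *busy so the caller can retry the group.
 */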
5185c9de560dSAlex Tomas 5186c9de560dSAlex Tomas /* 5187c9de560dSAlex Tomas * releases all non-used preallocated blocks for given inode 5188c9de560dSAlex Tomas * 5189c9de560dSAlex Tomas * It's important to discard preallocations under i_data_sem 5190c9de560dSAlex Tomas * We don't want another block to be served from the prealloc 5191c9de560dSAlex Tomas * space when we are discarding the inode prealloc space. 5192c9de560dSAlex Tomas * 5193c9de560dSAlex Tomas * FIXME!! Make sure it is valid at all the call sites 5194c9de560dSAlex Tomas */ 519527bc446eSbrookxu void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 5196c9de560dSAlex Tomas { 5197c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(inode); 5198c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 5199c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 5200c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 5201c9de560dSAlex Tomas ext4_group_t group = 0; 5202c9de560dSAlex Tomas struct list_head list; 5203c9de560dSAlex Tomas struct ext4_buddy e4b; 520438727786SOjaswin Mujoo struct rb_node *iter; 5205c9de560dSAlex Tomas int err; 5206c9de560dSAlex Tomas 5207c2ea3fdeSTheodore Ts'o if (!S_ISREG(inode->i_mode)) { 5208c9de560dSAlex Tomas return; 5209c9de560dSAlex Tomas } 5210c9de560dSAlex Tomas 52118016e29fSHarshad Shirwadkar if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 52128016e29fSHarshad Shirwadkar return; 52138016e29fSHarshad Shirwadkar 5214d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for inode %lu\n", 5215d3df1453SRitesh Harjani inode->i_ino); 521627bc446eSbrookxu trace_ext4_discard_preallocations(inode, 521727bc446eSbrookxu atomic_read(&ei->i_prealloc_active), needed); 5218c9de560dSAlex Tomas 5219c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 5220c9de560dSAlex Tomas 522127bc446eSbrookxu if (needed == 0) 522227bc446eSbrookxu needed = UINT_MAX; 522327bc446eSbrookxu 5224c9de560dSAlex Tomas repeat: 5225c9de560dSAlex Tomas /* first, collect all pa's in the inode */ 522638727786SOjaswin Mujoo write_lock(&ei->i_prealloc_lock); 522738727786SOjaswin Mujoo for (iter = rb_first(&ei->i_prealloc_node); iter && needed; 522838727786SOjaswin Mujoo iter = rb_next(iter)) { 522938727786SOjaswin Mujoo pa = rb_entry(iter, struct ext4_prealloc_space, 523038727786SOjaswin Mujoo pa_node.inode_node); 5231a8e38fd3SOjaswin Mujoo BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); 523238727786SOjaswin Mujoo 5233c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5234c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 5235c9de560dSAlex Tomas /* this shouldn't happen often - nobody should 5236c9de560dSAlex Tomas * use preallocation while we're discarding it */ 5237c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 523838727786SOjaswin Mujoo write_unlock(&ei->i_prealloc_lock); 52399d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, 52409d8b9ec4STheodore Ts'o "uh-oh! 
used pa while discarding"); 5241c9de560dSAlex Tomas WARN_ON(1); 5242c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5243c9de560dSAlex Tomas goto repeat; 5244c9de560dSAlex Tomas 5245c9de560dSAlex Tomas } 5246c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 524727bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 5248c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 524938727786SOjaswin Mujoo rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5250c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 525127bc446eSbrookxu needed--; 5252c9de560dSAlex Tomas continue; 5253c9de560dSAlex Tomas } 5254c9de560dSAlex Tomas 5255c9de560dSAlex Tomas /* someone is deleting pa right now */ 5256c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 525738727786SOjaswin Mujoo write_unlock(&ei->i_prealloc_lock); 5258c9de560dSAlex Tomas 5259c9de560dSAlex Tomas /* we have to wait here because pa_deleted 5260c9de560dSAlex Tomas * doesn't mean pa is already unlinked from 5261c9de560dSAlex Tomas * the list. as we might be called from 5262c9de560dSAlex Tomas * ->clear_inode() the inode will get freed 5263c9de560dSAlex Tomas * and concurrent thread which is unlinking 5264c9de560dSAlex Tomas * pa from inode's list may access already 5265c9de560dSAlex Tomas * freed memory, bad-bad-bad */ 5266c9de560dSAlex Tomas 5267c9de560dSAlex Tomas /* XXX: if this happens too often, we can 5268c9de560dSAlex Tomas * add a flag to force wait only in case 5269c9de560dSAlex Tomas * of ->clear_inode(), but not in case of 5270c9de560dSAlex Tomas * regular truncate */ 5271c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5272c9de560dSAlex Tomas goto repeat; 5273c9de560dSAlex Tomas } 527438727786SOjaswin Mujoo write_unlock(&ei->i_prealloc_lock); 5275c9de560dSAlex Tomas 5276c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5277cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_INODE_PA); 5278bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 5279c9de560dSAlex Tomas 52809651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 52819651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5282ce89f46cSAneesh Kumar K.V if (err) { 528354d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 52849651e6b2SKonstantin Khlebnikov err, group); 5285ce89f46cSAneesh Kumar K.V continue; 5286ce89f46cSAneesh Kumar K.V } 5287c9de560dSAlex Tomas 5288574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 52899008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 52909008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 529154d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 52929008a58eSDarrick J. 
Wong err, group); 5293e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5294ce89f46cSAneesh Kumar K.V continue; 5295c9de560dSAlex Tomas } 5296c9de560dSAlex Tomas 5297c9de560dSAlex Tomas ext4_lock_group(sb, group); 5298c9de560dSAlex Tomas list_del(&pa->pa_group_list); 52993e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5300c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5301c9de560dSAlex Tomas 5302e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5303c9de560dSAlex Tomas put_bh(bitmap_bh); 5304c9de560dSAlex Tomas 5305c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 530638727786SOjaswin Mujoo ext4_mb_pa_free(pa); 5307c9de560dSAlex Tomas } 5308c9de560dSAlex Tomas } 5309c9de560dSAlex Tomas 531053f86b17SRitesh Harjani static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 531153f86b17SRitesh Harjani { 531253f86b17SRitesh Harjani struct ext4_prealloc_space *pa; 531353f86b17SRitesh Harjani 531453f86b17SRitesh Harjani BUG_ON(ext4_pspace_cachep == NULL); 531553f86b17SRitesh Harjani pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 531653f86b17SRitesh Harjani if (!pa) 531753f86b17SRitesh Harjani return -ENOMEM; 531853f86b17SRitesh Harjani atomic_set(&pa->pa_count, 1); 531953f86b17SRitesh Harjani ac->ac_pa = pa; 532053f86b17SRitesh Harjani return 0; 532153f86b17SRitesh Harjani } 532253f86b17SRitesh Harjani 532382089725SOjaswin Mujoo static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 532453f86b17SRitesh Harjani { 532553f86b17SRitesh Harjani struct ext4_prealloc_space *pa = ac->ac_pa; 532653f86b17SRitesh Harjani 532753f86b17SRitesh Harjani BUG_ON(!pa); 532853f86b17SRitesh Harjani ac->ac_pa = NULL; 532953f86b17SRitesh Harjani WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 533082089725SOjaswin Mujoo /* 533182089725SOjaswin Mujoo * current function is only called due to an error or due to 533282089725SOjaswin Mujoo * len of found blocks < len of requested blocks hence the PA has not 533382089725SOjaswin Mujoo * been added to grp->bb_prealloc_list. 
So we don't need to lock it 533482089725SOjaswin Mujoo */ 533582089725SOjaswin Mujoo pa->pa_deleted = 1; 533682089725SOjaswin Mujoo ext4_mb_pa_free(pa); 533753f86b17SRitesh Harjani } 533853f86b17SRitesh Harjani 53396ba495e9STheodore Ts'o #ifdef CONFIG_EXT4_DEBUG 5340e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb) 5341c9de560dSAlex Tomas { 5342e68cf40cSRitesh Harjani ext4_group_t i, ngroups; 5343c9de560dSAlex Tomas 53449b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5345e3570639SEric Sandeen return; 5346e3570639SEric Sandeen 53478df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 5348d3df1453SRitesh Harjani mb_debug(sb, "groups: "); 53498df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 5350c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5351c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 5352c9de560dSAlex Tomas ext4_grpblk_t start; 5353c9de560dSAlex Tomas struct list_head *cur; 53545354b2afSTheodore Ts'o 53555354b2afSTheodore Ts'o if (!grp) 53565354b2afSTheodore Ts'o continue; 5357c9de560dSAlex Tomas ext4_lock_group(sb, i); 5358c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 5359c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, 5360c9de560dSAlex Tomas pa_group_list); 5361c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5362c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5363c9de560dSAlex Tomas NULL, &start); 5364c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5365d3df1453SRitesh Harjani mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5366d3df1453SRitesh Harjani pa->pa_len); 5367c9de560dSAlex Tomas } 536860bd63d1SSolofo Ramangalahy ext4_unlock_group(sb, i); 5369d3df1453SRitesh Harjani mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5370d3df1453SRitesh Harjani grp->bb_fragments); 5371c9de560dSAlex Tomas } 5372c9de560dSAlex Tomas } 5373e68cf40cSRitesh Harjani 5374e68cf40cSRitesh Harjani static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5375e68cf40cSRitesh Harjani { 5376e68cf40cSRitesh Harjani struct super_block *sb = ac->ac_sb; 5377e68cf40cSRitesh Harjani 53789b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5379e68cf40cSRitesh Harjani return; 5380e68cf40cSRitesh Harjani 5381d3df1453SRitesh Harjani mb_debug(sb, "Can't allocate:" 5382e68cf40cSRitesh Harjani " Allocation context details:"); 5383d3df1453SRitesh Harjani mb_debug(sb, "status %u flags 0x%x", 5384e68cf40cSRitesh Harjani ac->ac_status, ac->ac_flags); 5385d3df1453SRitesh Harjani mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5386e68cf40cSRitesh Harjani "goal %lu/%lu/%lu@%lu, " 5387e68cf40cSRitesh Harjani "best %lu/%lu/%lu@%lu cr %d", 5388e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_group, 5389e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_start, 5390e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_len, 5391e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_logical, 5392e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_group, 5393e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_start, 5394e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_len, 5395e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_logical, 5396e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_group, 5397e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_start, 5398e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_len, 5399e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_logical, 5400e68cf40cSRitesh Harjani (int)ac->ac_criteria); 
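/*
 * Added note: each triple printed above is fe_group/fe_start/fe_len
 * @fe_logical for the original request (orig), the normalized request
 * (goal) and the best extent found so far (best).
 */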
5401d3df1453SRitesh Harjani mb_debug(sb, "%u found", ac->ac_found);
5402e68cf40cSRitesh Harjani ext4_mb_show_pa(sb);
5403e68cf40cSRitesh Harjani }
5404c9de560dSAlex Tomas #else
5405e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb)
5406e68cf40cSRitesh Harjani {
5407e68cf40cSRitesh Harjani return;
5408e68cf40cSRitesh Harjani }
5409c9de560dSAlex Tomas static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5410c9de560dSAlex Tomas {
5411e68cf40cSRitesh Harjani ext4_mb_show_pa(ac->ac_sb);
5412c9de560dSAlex Tomas return;
5413c9de560dSAlex Tomas }
5414c9de560dSAlex Tomas #endif
5415c9de560dSAlex Tomas 
5416c9de560dSAlex Tomas /*
5417c9de560dSAlex Tomas * We use locality group preallocation for small files. The size of the
5418c9de560dSAlex Tomas * file is determined by the current size or the resulting size after
5419c9de560dSAlex Tomas * allocation, whichever is larger.
5420c9de560dSAlex Tomas *
5421b713a5ecSTheodore Ts'o * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5422c9de560dSAlex Tomas */
5423c9de560dSAlex Tomas static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5424c9de560dSAlex Tomas {
5425c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5426c9de560dSAlex Tomas int bsbits = ac->ac_sb->s_blocksize_bits;
5427c9de560dSAlex Tomas loff_t size, isize;
5428a9f2a293SJan Kara bool inode_pa_eligible, group_pa_eligible;
5429c9de560dSAlex Tomas 
5430c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5431c9de560dSAlex Tomas return;
5432c9de560dSAlex Tomas 
54334ba74d00STheodore Ts'o if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
54344ba74d00STheodore Ts'o return;
54354ba74d00STheodore Ts'o 
5436a9f2a293SJan Kara group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5437a9f2a293SJan Kara inode_pa_eligible = true;
543853accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
543950797481STheodore Ts'o isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
544050797481STheodore Ts'o >> bsbits;
5441c9de560dSAlex Tomas 
5442a9f2a293SJan Kara /* No point in using inode preallocation for closed files */
544382dd124cSNikolay Borisov if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5444a9f2a293SJan Kara !inode_is_open_for_write(ac->ac_inode))
5445a9f2a293SJan Kara inode_pa_eligible = false;
544650797481STheodore Ts'o 
544771780577STheodore Ts'o size = max(size, isize);
5448a9f2a293SJan Kara /* Don't use group allocation for large files */
5449a9f2a293SJan Kara if (size > sbi->s_mb_stream_request)
5450a9f2a293SJan Kara group_pa_eligible = false;
5451a9f2a293SJan Kara 
5452a9f2a293SJan Kara if (!group_pa_eligible) {
5453a9f2a293SJan Kara if (inode_pa_eligible)
54544ba74d00STheodore Ts'o ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5455a9f2a293SJan Kara else
5456a9f2a293SJan Kara ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5457c9de560dSAlex Tomas return;
54584ba74d00STheodore Ts'o }
5459c9de560dSAlex Tomas 
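/*
 * Worked example (illustrative numbers, default tunables, added here):
 * with s_mb_stream_request = 16 blocks, appending 4 blocks at logical
 * block 10 of an open file gives size = 14 <= 16, so the group
 * (per-CPU) preallocation below is used. The same append at logical
 * block 100 exceeds the threshold, group PA is skipped and the request
 * is tagged EXT4_MB_STREAM_ALLOC for inode PA instead.
 */
5460c9de560dSAlex Tomas BUG_ON(ac->ac_lg != NULL);
5461c9de560dSAlex Tomas /*
5462c9de560dSAlex Tomas * locality group prealloc space is per cpu. The reason for having
5463c9de560dSAlex Tomas * per cpu locality group is to reduce the contention between block
5464c9de560dSAlex Tomas * requests from multiple CPUs. 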
5465c9de560dSAlex Tomas */ 5466a0b6bc63SChristoph Lameter ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5467c9de560dSAlex Tomas 5468c9de560dSAlex Tomas /* we're going to use group allocation */ 5469c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5470c9de560dSAlex Tomas 5471c9de560dSAlex Tomas /* serialize all allocations in the group */ 5472c9de560dSAlex Tomas mutex_lock(&ac->ac_lg->lg_mutex); 5473c9de560dSAlex Tomas } 5474c9de560dSAlex Tomas 5475d73eff68SGuoqing Jiang static noinline_for_stack void 54764ddfef7bSEric Sandeen ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5477c9de560dSAlex Tomas struct ext4_allocation_request *ar) 5478c9de560dSAlex Tomas { 5479c9de560dSAlex Tomas struct super_block *sb = ar->inode->i_sb; 5480c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5481c9de560dSAlex Tomas struct ext4_super_block *es = sbi->s_es; 5482c9de560dSAlex Tomas ext4_group_t group; 5483498e5f24STheodore Ts'o unsigned int len; 5484498e5f24STheodore Ts'o ext4_fsblk_t goal; 5485c9de560dSAlex Tomas ext4_grpblk_t block; 5486c9de560dSAlex Tomas 5487c9de560dSAlex Tomas /* we can't allocate > group size */ 5488c9de560dSAlex Tomas len = ar->len; 5489c9de560dSAlex Tomas 5490c9de560dSAlex Tomas /* just a dirty hack to filter too big requests */ 549140ae3487STheodore Ts'o if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 549240ae3487STheodore Ts'o len = EXT4_CLUSTERS_PER_GROUP(sb); 5493c9de560dSAlex Tomas 5494c9de560dSAlex Tomas /* start searching from the goal */ 5495c9de560dSAlex Tomas goal = ar->goal; 5496c9de560dSAlex Tomas if (goal < le32_to_cpu(es->s_first_data_block) || 5497c9de560dSAlex Tomas goal >= ext4_blocks_count(es)) 5498c9de560dSAlex Tomas goal = le32_to_cpu(es->s_first_data_block); 5499c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, goal, &group, &block); 5500c9de560dSAlex Tomas 5501c9de560dSAlex Tomas /* set up allocation goals */ 5502f5a44db5STheodore Ts'o ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5503c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 5504c9de560dSAlex Tomas ac->ac_sb = sb; 5505c9de560dSAlex Tomas ac->ac_inode = ar->inode; 550653accfa9STheodore Ts'o ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5507c9de560dSAlex Tomas ac->ac_o_ex.fe_group = group; 5508c9de560dSAlex Tomas ac->ac_o_ex.fe_start = block; 5509c9de560dSAlex Tomas ac->ac_o_ex.fe_len = len; 551053accfa9STheodore Ts'o ac->ac_g_ex = ac->ac_o_ex; 5511c9de560dSAlex Tomas ac->ac_flags = ar->flags; 5512c9de560dSAlex Tomas 55133cb77bd2Sbrookxu /* we have to define context: we'll work with a file or 5514c9de560dSAlex Tomas * locality group. this is a policy, actually */ 5515c9de560dSAlex Tomas ext4_mb_group_or_file(ac); 5516c9de560dSAlex Tomas 5517d3df1453SRitesh Harjani mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5518c9de560dSAlex Tomas "left: %u/%u, right %u/%u to %swritable\n", 5519c9de560dSAlex Tomas (unsigned) ar->len, (unsigned) ar->logical, 5520c9de560dSAlex Tomas (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5521c9de560dSAlex Tomas (unsigned) ar->lleft, (unsigned) ar->pleft, 5522c9de560dSAlex Tomas (unsigned) ar->lright, (unsigned) ar->pright, 552382dd124cSNikolay Borisov inode_is_open_for_write(ar->inode) ? 
"" : "non-"); 5524c9de560dSAlex Tomas } 5525c9de560dSAlex Tomas 55266be2ded1SAneesh Kumar K.V static noinline_for_stack void 55276be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(struct super_block *sb, 55286be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg, 55296be2ded1SAneesh Kumar K.V int order, int total_entries) 55306be2ded1SAneesh Kumar K.V { 55316be2ded1SAneesh Kumar K.V ext4_group_t group = 0; 55326be2ded1SAneesh Kumar K.V struct ext4_buddy e4b; 55336be2ded1SAneesh Kumar K.V struct list_head discard_list; 55346be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa, *tmp; 55356be2ded1SAneesh Kumar K.V 5536d3df1453SRitesh Harjani mb_debug(sb, "discard locality group preallocation\n"); 55376be2ded1SAneesh Kumar K.V 55386be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&discard_list); 55396be2ded1SAneesh Kumar K.V 55406be2ded1SAneesh Kumar K.V spin_lock(&lg->lg_prealloc_lock); 55416be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 5542a8e38fd3SOjaswin Mujoo pa_node.lg_list, 554392e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 55446be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 55456be2ded1SAneesh Kumar K.V if (atomic_read(&pa->pa_count)) { 55466be2ded1SAneesh Kumar K.V /* 55476be2ded1SAneesh Kumar K.V * This is the pa that we just used 55486be2ded1SAneesh Kumar K.V * for block allocation. So don't 55496be2ded1SAneesh Kumar K.V * free that 55506be2ded1SAneesh Kumar K.V */ 55516be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 55526be2ded1SAneesh Kumar K.V continue; 55536be2ded1SAneesh Kumar K.V } 55546be2ded1SAneesh Kumar K.V if (pa->pa_deleted) { 55556be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 55566be2ded1SAneesh Kumar K.V continue; 55576be2ded1SAneesh Kumar K.V } 55586be2ded1SAneesh Kumar K.V /* only lg prealloc space */ 5559cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_GROUP_PA); 55606be2ded1SAneesh Kumar K.V 55616be2ded1SAneesh Kumar K.V /* seems this one can be freed ... */ 556227bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 55636be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 55646be2ded1SAneesh Kumar K.V 5565a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 55666be2ded1SAneesh Kumar K.V list_add(&pa->u.pa_tmp_list, &discard_list); 55676be2ded1SAneesh Kumar K.V 55686be2ded1SAneesh Kumar K.V total_entries--; 55696be2ded1SAneesh Kumar K.V if (total_entries <= 5) { 55706be2ded1SAneesh Kumar K.V /* 55716be2ded1SAneesh Kumar K.V * we want to keep only 5 entries 55726be2ded1SAneesh Kumar K.V * allowing it to grow to 8. This 55736be2ded1SAneesh Kumar K.V * mak sure we don't call discard 55746be2ded1SAneesh Kumar K.V * soon for this list. 
55756be2ded1SAneesh Kumar K.V */ 55766be2ded1SAneesh Kumar K.V break; 55776be2ded1SAneesh Kumar K.V } 55786be2ded1SAneesh Kumar K.V } 55796be2ded1SAneesh Kumar K.V spin_unlock(&lg->lg_prealloc_lock); 55806be2ded1SAneesh Kumar K.V 55816be2ded1SAneesh Kumar K.V list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 55829651e6b2SKonstantin Khlebnikov int err; 55836be2ded1SAneesh Kumar K.V 5584bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 55859651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 55869651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 55879651e6b2SKonstantin Khlebnikov if (err) { 558854d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 55899651e6b2SKonstantin Khlebnikov err, group); 55906be2ded1SAneesh Kumar K.V continue; 55916be2ded1SAneesh Kumar K.V } 55926be2ded1SAneesh Kumar K.V ext4_lock_group(sb, group); 55936be2ded1SAneesh Kumar K.V list_del(&pa->pa_group_list); 55943e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 55956be2ded1SAneesh Kumar K.V ext4_unlock_group(sb, group); 55966be2ded1SAneesh Kumar K.V 5597e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 55986be2ded1SAneesh Kumar K.V list_del(&pa->u.pa_tmp_list); 55996be2ded1SAneesh Kumar K.V call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 56006be2ded1SAneesh Kumar K.V } 56016be2ded1SAneesh Kumar K.V } 56026be2ded1SAneesh Kumar K.V 56036be2ded1SAneesh Kumar K.V /* 56046be2ded1SAneesh Kumar K.V * We have incremented pa_count. So it cannot be freed at this 56056be2ded1SAneesh Kumar K.V * point. Also we hold lg_mutex. So no parallel allocation is 56066be2ded1SAneesh Kumar K.V * possible from this lg. That means pa_free cannot be updated. 56076be2ded1SAneesh Kumar K.V * 56086be2ded1SAneesh Kumar K.V * A parallel ext4_mb_discard_group_preallocations is possible, 56096be2ded1SAneesh Kumar K.V * which can cause the lg_prealloc_list to be updated.
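 *
 * ext4_mb_add_n_trim() below keeps each per-order list sorted by
 * pa_free in ascending order. For example (hypothetical pa_free
 * values): inserting a pa with pa_free == 5 into a list holding
 * entries with pa_free 3, 7 and 9 links it in front of the "7"
 * entry, giving 3, 5, 7, 9.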
56106be2ded1SAneesh Kumar K.V */ 56116be2ded1SAneesh Kumar K.V 56126be2ded1SAneesh Kumar K.V static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 56136be2ded1SAneesh Kumar K.V { 56146be2ded1SAneesh Kumar K.V int order, added = 0, lg_prealloc_count = 1; 56156be2ded1SAneesh Kumar K.V struct super_block *sb = ac->ac_sb; 56166be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg = ac->ac_lg; 56176be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 56186be2ded1SAneesh Kumar K.V 56196be2ded1SAneesh Kumar K.V order = fls(pa->pa_free) - 1; 56206be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 56216be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 56226be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 56236be2ded1SAneesh Kumar K.V /* Add the prealloc space to lg */ 5624f1167009SNiu Yawei spin_lock(&lg->lg_prealloc_lock); 56256be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 5626a8e38fd3SOjaswin Mujoo pa_node.lg_list, 562792e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 56286be2ded1SAneesh Kumar K.V spin_lock(&tmp_pa->pa_lock); 56296be2ded1SAneesh Kumar K.V if (tmp_pa->pa_deleted) { 5630e7c9e3e9STheodore Ts'o spin_unlock(&tmp_pa->pa_lock); 56316be2ded1SAneesh Kumar K.V continue; 56326be2ded1SAneesh Kumar K.V } 56336be2ded1SAneesh Kumar K.V if (!added && pa->pa_free < tmp_pa->pa_free) { 56346be2ded1SAneesh Kumar K.V /* Add to the tail of the previous entry */ 5635a8e38fd3SOjaswin Mujoo list_add_tail_rcu(&pa->pa_node.lg_list, 5636a8e38fd3SOjaswin Mujoo &tmp_pa->pa_node.lg_list); 56376be2ded1SAneesh Kumar K.V added = 1; 56386be2ded1SAneesh Kumar K.V /* 56396be2ded1SAneesh Kumar K.V * we want to count the total 56406be2ded1SAneesh Kumar K.V * number of entries in the list 56416be2ded1SAneesh Kumar K.V */ 56426be2ded1SAneesh Kumar K.V } 56436be2ded1SAneesh Kumar K.V spin_unlock(&tmp_pa->pa_lock); 56446be2ded1SAneesh Kumar K.V lg_prealloc_count++; 56456be2ded1SAneesh Kumar K.V } 56466be2ded1SAneesh Kumar K.V if (!added) 5647a8e38fd3SOjaswin Mujoo list_add_tail_rcu(&pa->pa_node.lg_list, 56486be2ded1SAneesh Kumar K.V &lg->lg_prealloc_list[order]); 5649f1167009SNiu Yawei spin_unlock(&lg->lg_prealloc_lock); 56506be2ded1SAneesh Kumar K.V 56516be2ded1SAneesh Kumar K.V /* Now trim the list to be not more than 8 elements */ 56526be2ded1SAneesh Kumar K.V if (lg_prealloc_count > 8) { 56536be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(sb, lg, 56546be2ded1SAneesh Kumar K.V order, lg_prealloc_count); 56556be2ded1SAneesh Kumar K.V return; 56566be2ded1SAneesh Kumar K.V } 56576be2ded1SAneesh Kumar K.V return; 56586be2ded1SAneesh Kumar K.V } 56596be2ded1SAneesh Kumar K.V 5660c9de560dSAlex Tomas /* 5661c9de560dSAlex Tomas * release all resources we used in the allocation 5662c9de560dSAlex Tomas */ 5663c9de560dSAlex Tomas static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5664c9de560dSAlex Tomas { 566553accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 56666be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa = ac->ac_pa; 56676be2ded1SAneesh Kumar K.V if (pa) { 5668cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) { 5669c9de560dSAlex Tomas /* see comment in ext4_mb_use_group_pa() */ 56706be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 567153accfa9STheodore Ts'o pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 567253accfa9STheodore Ts'o pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 56736be2ded1SAneesh Kumar K.V
pa->pa_free -= ac->ac_b_ex.fe_len; 56746be2ded1SAneesh Kumar K.V pa->pa_len -= ac->ac_b_ex.fe_len; 56756be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 567666d5e027Sbrookxu 56776be2ded1SAneesh Kumar K.V /* 56786be2ded1SAneesh Kumar K.V * We want to add the pa to the right bucket. 56796be2ded1SAneesh Kumar K.V * Remove it from the list and while adding 56806be2ded1SAneesh Kumar K.V * make sure the list to which we are adding 568144183d42SAmir Goldstein * doesn't grow big. 56826be2ded1SAneesh Kumar K.V */ 568366d5e027Sbrookxu if (likely(pa->pa_free)) { 5684a8e38fd3SOjaswin Mujoo spin_lock(pa->pa_node_lock.lg_lock); 5685a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 5686a8e38fd3SOjaswin Mujoo spin_unlock(pa->pa_node_lock.lg_lock); 56876be2ded1SAneesh Kumar K.V ext4_mb_add_n_trim(ac); 5688c9de560dSAlex Tomas } 568966d5e027Sbrookxu } 569027bc446eSbrookxu 56916be2ded1SAneesh Kumar K.V ext4_mb_put_pa(ac, ac->ac_sb, pa); 5692c9de560dSAlex Tomas } 5693c9de560dSAlex Tomas if (ac->ac_bitmap_page) 569409cbfeafSKirill A. Shutemov put_page(ac->ac_bitmap_page); 5695c9de560dSAlex Tomas if (ac->ac_buddy_page) 569609cbfeafSKirill A. Shutemov put_page(ac->ac_buddy_page); 5697c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5698c9de560dSAlex Tomas mutex_unlock(&ac->ac_lg->lg_mutex); 5699c9de560dSAlex Tomas ext4_mb_collect_stats(ac); 5700c9de560dSAlex Tomas return 0; 5701c9de560dSAlex Tomas } 5702c9de560dSAlex Tomas 5703c9de560dSAlex Tomas static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5704c9de560dSAlex Tomas { 57058df9675fSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5706c9de560dSAlex Tomas int ret; 57078c80fb31SChunguang Xu int freed = 0, busy = 0; 57088c80fb31SChunguang Xu int retry = 0; 5709c9de560dSAlex Tomas 57109bffad1eSTheodore Ts'o trace_ext4_mb_discard_preallocations(sb, needed); 57118c80fb31SChunguang Xu 57128c80fb31SChunguang Xu if (needed == 0) 57138c80fb31SChunguang Xu needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 57148c80fb31SChunguang Xu repeat: 57158df9675fSTheodore Ts'o for (i = 0; i < ngroups && needed > 0; i++) { 57168c80fb31SChunguang Xu ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5717c9de560dSAlex Tomas freed += ret; 5718c9de560dSAlex Tomas needed -= ret; 57198c80fb31SChunguang Xu cond_resched(); 57208c80fb31SChunguang Xu } 57218c80fb31SChunguang Xu 57228c80fb31SChunguang Xu if (needed > 0 && busy && ++retry < 3) { 57238c80fb31SChunguang Xu busy = 0; 57248c80fb31SChunguang Xu goto repeat; 5725c9de560dSAlex Tomas } 5726c9de560dSAlex Tomas 5727c9de560dSAlex Tomas return freed; 5728c9de560dSAlex Tomas } 5729c9de560dSAlex Tomas 5730cf5e2ca6SRitesh Harjani static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 573107b5b8e1SRitesh Harjani struct ext4_allocation_context *ac, u64 *seq) 5732cf5e2ca6SRitesh Harjani { 5733cf5e2ca6SRitesh Harjani int freed; 573407b5b8e1SRitesh Harjani u64 seq_retry = 0; 573507b5b8e1SRitesh Harjani bool ret = false; 5736cf5e2ca6SRitesh Harjani 5737cf5e2ca6SRitesh Harjani freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 573807b5b8e1SRitesh Harjani if (freed) { 573907b5b8e1SRitesh Harjani ret = true; 574007b5b8e1SRitesh Harjani goto out_dbg; 574107b5b8e1SRitesh Harjani } 574207b5b8e1SRitesh Harjani seq_retry = ext4_get_discard_pa_seq_sum(); 574399377830SRitesh Harjani if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 574499377830SRitesh Harjani ac->ac_flags |= EXT4_MB_STRICT_CHECK; 574507b5b8e1SRitesh Harjani *seq = 
seq_retry; 574607b5b8e1SRitesh Harjani ret = true; 574707b5b8e1SRitesh Harjani } 574807b5b8e1SRitesh Harjani 574907b5b8e1SRitesh Harjani out_dbg: 575007b5b8e1SRitesh Harjani mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no"); 575107b5b8e1SRitesh Harjani return ret; 5752cf5e2ca6SRitesh Harjani } 5753cf5e2ca6SRitesh Harjani 57548016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 57558016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp); 57568016e29fSHarshad Shirwadkar 5757c9de560dSAlex Tomas /* 5758c9de560dSAlex Tomas * Main entry point into mballoc to allocate blocks. 5759c9de560dSAlex Tomas * It tries to use preallocation first, then falls back 5760c9de560dSAlex Tomas * to usual allocation. 5761c9de560dSAlex Tomas */ 5762c9de560dSAlex Tomas ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 5763c9de560dSAlex Tomas struct ext4_allocation_request *ar, int *errp) 5764c9de560dSAlex Tomas { 5765256bdb49SEric Sandeen struct ext4_allocation_context *ac = NULL; 5766c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5767c9de560dSAlex Tomas struct super_block *sb; 5768c9de560dSAlex Tomas ext4_fsblk_t block = 0; 576960e58e0fSMingming Cao unsigned int inquota = 0; 577053accfa9STheodore Ts'o unsigned int reserv_clstrs = 0; 577180fa46d6STheodore Ts'o int retries = 0; 577207b5b8e1SRitesh Harjani u64 seq; 5773c9de560dSAlex Tomas 5774b10a44c3STheodore Ts'o might_sleep(); 5775c9de560dSAlex Tomas sb = ar->inode->i_sb; 5776c9de560dSAlex Tomas sbi = EXT4_SB(sb); 5777c9de560dSAlex Tomas 57789bffad1eSTheodore Ts'o trace_ext4_request_blocks(ar); 57798016e29fSHarshad Shirwadkar if (sbi->s_mount_state & EXT4_FC_REPLAY) 57808016e29fSHarshad Shirwadkar return ext4_mb_new_blocks_simple(handle, ar, errp); 5781ba80b101STheodore Ts'o 578245dc63e7SDmitry Monakhov /* Allow use of the superuser reservation for the quota file */ 578302749a4cSTahsin Erdogan if (ext4_is_quota_file(ar->inode)) 578445dc63e7SDmitry Monakhov ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 578545dc63e7SDmitry Monakhov 5786e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 578760e58e0fSMingming Cao /* Without delayed allocation we need to verify 578860e58e0fSMingming Cao * there are enough free blocks to do block allocation 578960e58e0fSMingming Cao * and verify allocation doesn't exceed the quota limits.
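	 *
	 * For example (hypothetical request): a request for 16 clusters
	 * on a nearly full filesystem is retried as 16 -> 8 -> 4 -> 2
	 * -> 1; only if not even a single cluster can be claimed do we
	 * give up and return -ENOSPC.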
5790d2a17637SMingming Cao */ 579155f020dbSAllison Henderson while (ar->len && 5792e7d5f315STheodore Ts'o ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 579355f020dbSAllison Henderson 5794030ba6bcSAneesh Kumar K.V /* let others free the space */ 5795bb8b20edSLukas Czerner cond_resched(); 5796030ba6bcSAneesh Kumar K.V ar->len = ar->len >> 1; 5797030ba6bcSAneesh Kumar K.V } 5798030ba6bcSAneesh Kumar K.V if (!ar->len) { 5799bbc4ec77SRitesh Harjani ext4_mb_show_pa(sb); 580007031431SMingming Cao *errp = -ENOSPC; 580107031431SMingming Cao return 0; 580207031431SMingming Cao } 580353accfa9STheodore Ts'o reserv_clstrs = ar->len; 580455f020dbSAllison Henderson if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 580553accfa9STheodore Ts'o dquot_alloc_block_nofail(ar->inode, 580653accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len)); 580755f020dbSAllison Henderson } else { 580855f020dbSAllison Henderson while (ar->len && 580953accfa9STheodore Ts'o dquot_alloc_block(ar->inode, 581053accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len))) { 581155f020dbSAllison Henderson 5812c9de560dSAlex Tomas ar->flags |= EXT4_MB_HINT_NOPREALLOC; 5813c9de560dSAlex Tomas ar->len--; 5814c9de560dSAlex Tomas } 581555f020dbSAllison Henderson } 581660e58e0fSMingming Cao inquota = ar->len; 5817c9de560dSAlex Tomas if (ar->len == 0) { 5818c9de560dSAlex Tomas *errp = -EDQUOT; 58196c7a120aSAditya Kali goto out; 5820c9de560dSAlex Tomas } 582160e58e0fSMingming Cao } 5822d2a17637SMingming Cao 582385556c9aSWei Yongjun ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 5824833576b3STheodore Ts'o if (!ac) { 5825363d4251SShen Feng ar->len = 0; 5826256bdb49SEric Sandeen *errp = -ENOMEM; 58276c7a120aSAditya Kali goto out; 5828256bdb49SEric Sandeen } 5829256bdb49SEric Sandeen 5830d73eff68SGuoqing Jiang ext4_mb_initialize_context(ac, ar); 5831c9de560dSAlex Tomas 5832256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 583381198536SRitesh Harjani seq = this_cpu_read(discard_pa_seq); 5834256bdb49SEric Sandeen if (!ext4_mb_use_preallocated(ac)) { 5835256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_ALLOC; 5836256bdb49SEric Sandeen ext4_mb_normalize_request(ac, ar); 583753f86b17SRitesh Harjani 583853f86b17SRitesh Harjani *errp = ext4_mb_pa_alloc(ac); 583953f86b17SRitesh Harjani if (*errp) 584053f86b17SRitesh Harjani goto errout; 5841c9de560dSAlex Tomas repeat: 5842c9de560dSAlex Tomas /* allocate space in core */ 58436c7a120aSAditya Kali *errp = ext4_mb_regular_allocator(ac); 584453f86b17SRitesh Harjani /* 584553f86b17SRitesh Harjani * pa allocated above is added to grp->bb_prealloc_list only 584653f86b17SRitesh Harjani * when we were able to allocate some block i.e. when 584753f86b17SRitesh Harjani * ac->ac_status == AC_STATUS_FOUND. 584853f86b17SRitesh Harjani * An error from above means ac->ac_status != AC_STATUS_FOUND, 584953f86b17SRitesh Harjani * so we have to free this pa here itself.
585053f86b17SRitesh Harjani */ 58512c00ef3eSAlexey Khoroshilov if (*errp) { 585282089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 58532c00ef3eSAlexey Khoroshilov ext4_discard_allocated_blocks(ac); 58542c00ef3eSAlexey Khoroshilov goto errout; 58552c00ef3eSAlexey Khoroshilov } 585653f86b17SRitesh Harjani if (ac->ac_status == AC_STATUS_FOUND && 585753f86b17SRitesh Harjani ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 585882089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 5859c9de560dSAlex Tomas } 5860256bdb49SEric Sandeen if (likely(ac->ac_status == AC_STATUS_FOUND)) { 586153accfa9STheodore Ts'o *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 5862554a5cccSVegard Nossum if (*errp) { 5863b844167eSCurt Wohlgemuth ext4_discard_allocated_blocks(ac); 58646d138cedSEric Sandeen goto errout; 58656d138cedSEric Sandeen } else { 5866256bdb49SEric Sandeen block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5867256bdb49SEric Sandeen ar->len = ac->ac_b_ex.fe_len; 5868519deca0SAneesh Kumar K.V } 5869c9de560dSAlex Tomas } else { 587080fa46d6STheodore Ts'o if (++retries < 3 && 587180fa46d6STheodore Ts'o ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 5872c9de560dSAlex Tomas goto repeat; 587353f86b17SRitesh Harjani /* 587453f86b17SRitesh Harjani * If block allocation fails then the pa allocated above 587553f86b17SRitesh Harjani * needs to be freed here itself. 587653f86b17SRitesh Harjani */ 587782089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 5878c9de560dSAlex Tomas *errp = -ENOSPC; 58796c7a120aSAditya Kali } 58806c7a120aSAditya Kali 58816c7a120aSAditya Kali if (*errp) { 5882aaae558dSKemeng Shi errout: 5883256bdb49SEric Sandeen ac->ac_b_ex.fe_len = 0; 5884c9de560dSAlex Tomas ar->len = 0; 5885256bdb49SEric Sandeen ext4_mb_show_ac(ac); 5886c9de560dSAlex Tomas } 5887256bdb49SEric Sandeen ext4_mb_release_context(ac); 5888363d4251SShen Feng kmem_cache_free(ext4_ac_cachep, ac); 5889aaae558dSKemeng Shi out: 589060e58e0fSMingming Cao if (inquota && ar->len < inquota) 589153accfa9STheodore Ts'o dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 58920087d9fbSAneesh Kumar K.V if (!ar->len) { 5893e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 58940087d9fbSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 589557042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 589653accfa9STheodore Ts'o reserv_clstrs); 58970087d9fbSAneesh Kumar K.V } 5898c9de560dSAlex Tomas 58999bffad1eSTheodore Ts'o trace_ext4_allocate_blocks(ar, (unsigned long long)block); 5900ba80b101STheodore Ts'o 5901c9de560dSAlex Tomas return block; 5902c9de560dSAlex Tomas } 5903c9de560dSAlex Tomas 5904c894058dSAneesh Kumar K.V /* 5905c894058dSAneesh Kumar K.V * We can merge two free data extents only if the physical blocks 5906c894058dSAneesh Kumar K.V * are contiguous, AND the extents were freed by the same transaction, 5907c894058dSAneesh Kumar K.V * AND the blocks are associated with the same group. 
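 *
 * For example (hypothetical clusters): an entry covering clusters
 * 100-149 merges with a new entry covering 150-169 into a single
 * 100-169 entry, but only when both carry the same efd_tid and
 * efd_group; with different tids the entries stay separate, so each
 * range becomes reusable only after its own transaction commits.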
5908c894058dSAneesh Kumar K.V */ 5909a0154344SDaeho Jeong static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 5910a0154344SDaeho Jeong struct ext4_free_data *entry, 5911a0154344SDaeho Jeong struct ext4_free_data *new_entry, 5912a0154344SDaeho Jeong struct rb_root *entry_rb_root) 5913c894058dSAneesh Kumar K.V { 5914a0154344SDaeho Jeong if ((entry->efd_tid != new_entry->efd_tid) || 5915a0154344SDaeho Jeong (entry->efd_group != new_entry->efd_group)) 5916a0154344SDaeho Jeong return; 5917a0154344SDaeho Jeong if (entry->efd_start_cluster + entry->efd_count == 5918a0154344SDaeho Jeong new_entry->efd_start_cluster) { 5919a0154344SDaeho Jeong new_entry->efd_start_cluster = entry->efd_start_cluster; 5920a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5921a0154344SDaeho Jeong } else if (new_entry->efd_start_cluster + new_entry->efd_count == 5922a0154344SDaeho Jeong entry->efd_start_cluster) { 5923a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5924a0154344SDaeho Jeong } else 5925a0154344SDaeho Jeong return; 5926a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 5927a0154344SDaeho Jeong list_del(&entry->efd_list); 5928a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 5929a0154344SDaeho Jeong rb_erase(&entry->efd_node, entry_rb_root); 5930a0154344SDaeho Jeong kmem_cache_free(ext4_free_data_cachep, entry); 5931c894058dSAneesh Kumar K.V } 5932c894058dSAneesh Kumar K.V 593385b67ffbSKemeng Shi static noinline_for_stack void 59344ddfef7bSEric Sandeen ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 59357a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry) 5936c9de560dSAlex Tomas { 5937e29136f8STheodore Ts'o ext4_group_t group = e4b->bd_group; 593884130193STheodore Ts'o ext4_grpblk_t cluster; 5939d08854f5STheodore Ts'o ext4_grpblk_t clusters = new_entry->efd_count; 59407a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 5941c9de560dSAlex Tomas struct ext4_group_info *db = e4b->bd_info; 5942c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5943c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5944c894058dSAneesh Kumar K.V struct rb_node **n = &db->bb_free_root.rb_node, *node; 5945c894058dSAneesh Kumar K.V struct rb_node *parent = NULL, *new_node; 5946c894058dSAneesh Kumar K.V 59470390131bSFrank Mayhar BUG_ON(!ext4_handle_valid(handle)); 5948c9de560dSAlex Tomas BUG_ON(e4b->bd_bitmap_page == NULL); 5949c9de560dSAlex Tomas BUG_ON(e4b->bd_buddy_page == NULL); 5950c9de560dSAlex Tomas 595118aadd47SBobi Jam new_node = &new_entry->efd_node; 595218aadd47SBobi Jam cluster = new_entry->efd_start_cluster; 5953c9de560dSAlex Tomas 5954c894058dSAneesh Kumar K.V if (!*n) { 5955c894058dSAneesh Kumar K.V /* first free block extent. We need to 5956c894058dSAneesh Kumar K.V protect buddy cache from being freed, 5957c9de560dSAlex Tomas * otherwise we'll refresh it from 5958c9de560dSAlex Tomas * on-disk bitmap and lose not-yet-available 5959c9de560dSAlex Tomas * blocks */ 596009cbfeafSKirill A. Shutemov get_page(e4b->bd_buddy_page); 596109cbfeafSKirill A.
Shutemov get_page(e4b->bd_bitmap_page); 5962c894058dSAneesh Kumar K.V } 5963c894058dSAneesh Kumar K.V while (*n) { 5964c894058dSAneesh Kumar K.V parent = *n; 596518aadd47SBobi Jam entry = rb_entry(parent, struct ext4_free_data, efd_node); 596618aadd47SBobi Jam if (cluster < entry->efd_start_cluster) 5967c894058dSAneesh Kumar K.V n = &(*n)->rb_left; 596818aadd47SBobi Jam else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 5969c894058dSAneesh Kumar K.V n = &(*n)->rb_right; 5970c894058dSAneesh Kumar K.V else { 5971e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 597284130193STheodore Ts'o ext4_group_first_block_no(sb, group) + 597384130193STheodore Ts'o EXT4_C2B(sbi, cluster), 5974e29136f8STheodore Ts'o "Block already on to-be-freed list"); 5975cca41553SChunguang Xu kmem_cache_free(ext4_free_data_cachep, new_entry); 597685b67ffbSKemeng Shi return; 5977c9de560dSAlex Tomas } 5978c9de560dSAlex Tomas } 5979c9de560dSAlex Tomas 5980c894058dSAneesh Kumar K.V rb_link_node(new_node, parent, n); 5981c894058dSAneesh Kumar K.V rb_insert_color(new_node, &db->bb_free_root); 5982c894058dSAneesh Kumar K.V 5983c894058dSAneesh Kumar K.V /* Now try to see the extent can be merged to left and right */ 5984c894058dSAneesh Kumar K.V node = rb_prev(new_node); 5985c894058dSAneesh Kumar K.V if (node) { 598618aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5987a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5988a0154344SDaeho Jeong &(db->bb_free_root)); 5989c9de560dSAlex Tomas } 5990c894058dSAneesh Kumar K.V 5991c894058dSAneesh Kumar K.V node = rb_next(new_node); 5992c894058dSAneesh Kumar K.V if (node) { 599318aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5994a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5995a0154344SDaeho Jeong &(db->bb_free_root)); 5996c894058dSAneesh Kumar K.V } 5997a0154344SDaeho Jeong 5998d08854f5STheodore Ts'o spin_lock(&sbi->s_md_lock); 5999a0154344SDaeho Jeong list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 6000d08854f5STheodore Ts'o sbi->s_mb_free_pending += clusters; 6001d08854f5STheodore Ts'o spin_unlock(&sbi->s_md_lock); 6002c9de560dSAlex Tomas } 6003c9de560dSAlex Tomas 60048016e29fSHarshad Shirwadkar /* 60058016e29fSHarshad Shirwadkar * Simple allocator for Ext4 fast commit replay path. It searches for blocks 60068016e29fSHarshad Shirwadkar * linearly starting at the goal block and also excludes the blocks which 60078016e29fSHarshad Shirwadkar * are going to be in use after fast commit replay. 
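 *
 * For example (hypothetical layout): with a goal of block 1000, if
 * blocks 1000-1002 are in use or excluded by the replay state, the
 * scan simply advances and returns block 1003. Note this path hands
 * out exactly one block per call (ar->len is set to 1 below).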
60088016e29fSHarshad Shirwadkar */ 60098016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 60108016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp) 60118016e29fSHarshad Shirwadkar { 60128016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 60138016e29fSHarshad Shirwadkar struct super_block *sb = ar->inode->i_sb; 60148016e29fSHarshad Shirwadkar ext4_group_t group; 60158016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 601631a074a0SXin Yin ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 601731a074a0SXin Yin ext4_grpblk_t i = 0; 60188016e29fSHarshad Shirwadkar ext4_fsblk_t goal, block; 60198016e29fSHarshad Shirwadkar struct ext4_super_block *es = EXT4_SB(sb)->s_es; 60208016e29fSHarshad Shirwadkar 60218016e29fSHarshad Shirwadkar goal = ar->goal; 60228016e29fSHarshad Shirwadkar if (goal < le32_to_cpu(es->s_first_data_block) || 60238016e29fSHarshad Shirwadkar goal >= ext4_blocks_count(es)) 60248016e29fSHarshad Shirwadkar goal = le32_to_cpu(es->s_first_data_block); 60258016e29fSHarshad Shirwadkar 60268016e29fSHarshad Shirwadkar ar->len = 0; 60278016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 60288016e29fSHarshad Shirwadkar for (; group < ext4_get_groups_count(sb); group++) { 60298016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 60308016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 60318016e29fSHarshad Shirwadkar *errp = PTR_ERR(bitmap_bh); 60328016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 60338016e29fSHarshad Shirwadkar return 0; 60348016e29fSHarshad Shirwadkar } 60358016e29fSHarshad Shirwadkar 603631a074a0SXin Yin while (1) { 603731a074a0SXin Yin i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 60388016e29fSHarshad Shirwadkar blkoff); 603931a074a0SXin Yin if (i >= max) 604031a074a0SXin Yin break; 60418016e29fSHarshad Shirwadkar if (ext4_fc_replay_check_excluded(sb, 604231a074a0SXin Yin ext4_group_first_block_no(sb, group) + i)) { 604331a074a0SXin Yin blkoff = i + 1; 604431a074a0SXin Yin } else 604531a074a0SXin Yin break; 604631a074a0SXin Yin } 604731a074a0SXin Yin brelse(bitmap_bh); 604831a074a0SXin Yin if (i < max) 60498016e29fSHarshad Shirwadkar break; 6050253cacb0SKemeng Shi 6051253cacb0SKemeng Shi blkoff = 0; 60528016e29fSHarshad Shirwadkar } 60538016e29fSHarshad Shirwadkar 605431a074a0SXin Yin if (group >= ext4_get_groups_count(sb) || i >= max) { 605531a074a0SXin Yin *errp = -ENOSPC; 60568016e29fSHarshad Shirwadkar return 0; 605731a074a0SXin Yin } 60588016e29fSHarshad Shirwadkar 60598016e29fSHarshad Shirwadkar block = ext4_group_first_block_no(sb, group) + i; 60608016e29fSHarshad Shirwadkar ext4_mb_mark_bb(sb, block, 1, 1); 60618016e29fSHarshad Shirwadkar ar->len = 1; 60628016e29fSHarshad Shirwadkar 60638016e29fSHarshad Shirwadkar return block; 60648016e29fSHarshad Shirwadkar } 60658016e29fSHarshad Shirwadkar 60668016e29fSHarshad Shirwadkar static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 60678016e29fSHarshad Shirwadkar unsigned long count) 60688016e29fSHarshad Shirwadkar { 60698016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 60708016e29fSHarshad Shirwadkar struct super_block *sb = inode->i_sb; 60718016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 60728016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 60738016e29fSHarshad Shirwadkar ext4_group_t group; 60748016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 60758016e29fSHarshad Shirwadkar int already_freed = 0, err, i; 60768016e29fSHarshad 
Shirwadkar 60778016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 60788016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 60798016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 60808016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 60818016e29fSHarshad Shirwadkar return; 60828016e29fSHarshad Shirwadkar } 60838016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 60848016e29fSHarshad Shirwadkar if (!gdp) 60851b5c9d34SKemeng Shi goto err_out; 60868016e29fSHarshad Shirwadkar 60878016e29fSHarshad Shirwadkar for (i = 0; i < count; i++) { 60888016e29fSHarshad Shirwadkar if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 60898016e29fSHarshad Shirwadkar already_freed++; 60908016e29fSHarshad Shirwadkar } 60918016e29fSHarshad Shirwadkar mb_clear_bits(bitmap_bh->b_data, blkoff, count); 60928016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 60938016e29fSHarshad Shirwadkar if (err) 60941b5c9d34SKemeng Shi goto err_out; 60958016e29fSHarshad Shirwadkar ext4_free_group_clusters_set( 60968016e29fSHarshad Shirwadkar sb, gdp, ext4_free_group_clusters(sb, gdp) + 60978016e29fSHarshad Shirwadkar count - already_freed); 60981df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 60998016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 61008016e29fSHarshad Shirwadkar ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 61018016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 61028016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 61031b5c9d34SKemeng Shi 61041b5c9d34SKemeng Shi err_out: 61058016e29fSHarshad Shirwadkar brelse(bitmap_bh); 61068016e29fSHarshad Shirwadkar } 61078016e29fSHarshad Shirwadkar 610844338711STheodore Ts'o /** 61098ac3939dSRitesh Harjani * ext4_mb_clear_bb() -- helper function for freeing blocks. 
61108ac3939dSRitesh Harjani * Used by ext4_free_blocks() 611144338711STheodore Ts'o * @handle: handle for this transaction 611244338711STheodore Ts'o * @inode: inode 6113c60990b3STheodore Ts'o * @block: starting physical block to be freed 6114c60990b3STheodore Ts'o * @count: number of blocks to be freed 61155def1360SYongqiang Yang * @flags: flags used by ext4_free_blocks 6116c9de560dSAlex Tomas */ 61178ac3939dSRitesh Harjani static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 61188ac3939dSRitesh Harjani ext4_fsblk_t block, unsigned long count, 61198ac3939dSRitesh Harjani int flags) 6120c9de560dSAlex Tomas { 612126346ff6SAneesh Kumar K.V struct buffer_head *bitmap_bh = NULL; 6122c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 6123c9de560dSAlex Tomas struct ext4_group_desc *gdp; 61245354b2afSTheodore Ts'o struct ext4_group_info *grp; 6125498e5f24STheodore Ts'o unsigned int overflow; 6126c9de560dSAlex Tomas ext4_grpblk_t bit; 6127c9de560dSAlex Tomas struct buffer_head *gd_bh; 6128c9de560dSAlex Tomas ext4_group_t block_group; 6129c9de560dSAlex Tomas struct ext4_sb_info *sbi; 6130c9de560dSAlex Tomas struct ext4_buddy e4b; 613184130193STheodore Ts'o unsigned int count_clusters; 6132c9de560dSAlex Tomas int err = 0; 6133c9de560dSAlex Tomas int ret; 6134c9de560dSAlex Tomas 61358016e29fSHarshad Shirwadkar sbi = EXT4_SB(sb); 61368016e29fSHarshad Shirwadkar 61371e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 61381e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 61391e1c2b86SLukas Czerner ext4_error(sb, "Freeing blocks in system zone - " 61401e1c2b86SLukas Czerner "Block = %llu, count = %lu", block, count); 61411e1c2b86SLukas Czerner /* err = 0. ext4_std_error should be a no op */ 61421e1c2b86SLukas Czerner goto error_return; 61431e1c2b86SLukas Czerner } 61441e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 61451e1c2b86SLukas Czerner 6146c9de560dSAlex Tomas do_more: 6147c9de560dSAlex Tomas overflow = 0; 6148c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6149c9de560dSAlex Tomas 61505354b2afSTheodore Ts'o grp = ext4_get_group_info(sb, block_group); 61515354b2afSTheodore Ts'o if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 6152163a203dSDarrick J. Wong return; 6153163a203dSDarrick J. Wong 6154c9de560dSAlex Tomas /* 6155c9de560dSAlex Tomas * Check to see if we are freeing blocks across a group 6156c9de560dSAlex Tomas * boundary. 6157c9de560dSAlex Tomas */ 615884130193STheodore Ts'o if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 615984130193STheodore Ts'o overflow = EXT4_C2B(sbi, bit) + count - 616084130193STheodore Ts'o EXT4_BLOCKS_PER_GROUP(sb); 6161c9de560dSAlex Tomas count -= overflow; 61621e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 61631e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6164c9de560dSAlex Tomas } 6165810da240SLukas Czerner count_clusters = EXT4_NUM_B2C(sbi, count); 6166574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, block_group); 61679008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 61689008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 61699008a58eSDarrick J. 
Wong bitmap_bh = NULL; 6170c9de560dSAlex Tomas goto error_return; 6171ce89f46cSAneesh Kumar K.V } 6172c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 6173ce89f46cSAneesh Kumar K.V if (!gdp) { 6174ce89f46cSAneesh Kumar K.V err = -EIO; 6175c9de560dSAlex Tomas goto error_return; 6176ce89f46cSAneesh Kumar K.V } 6177c9de560dSAlex Tomas 61781e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 61791e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 618012062dddSEric Sandeen ext4_error(sb, "Freeing blocks in system zone - " 61810610b6e9STheodore Ts'o "Block = %llu, count = %lu", block, count); 6182519deca0SAneesh Kumar K.V /* err = 0. ext4_std_error should be a no op */ 6183519deca0SAneesh Kumar K.V goto error_return; 6184c9de560dSAlex Tomas } 6185c9de560dSAlex Tomas 6186c9de560dSAlex Tomas BUFFER_TRACE(bitmap_bh, "getting write access"); 6187188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6188188c299eSJan Kara EXT4_JTR_NONE); 6189c9de560dSAlex Tomas if (err) 6190c9de560dSAlex Tomas goto error_return; 6191c9de560dSAlex Tomas 6192c9de560dSAlex Tomas /* 6193c9de560dSAlex Tomas * We are about to modify some metadata. Call the journal APIs 6194c9de560dSAlex Tomas * to unshare ->b_data if a currently-committing transaction is 6195c9de560dSAlex Tomas * using it 6196c9de560dSAlex Tomas */ 6197c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "get_write_access"); 6198188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6199c9de560dSAlex Tomas if (err) 6200c9de560dSAlex Tomas goto error_return; 6201c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 6202c9de560dSAlex Tomas { 6203c9de560dSAlex Tomas int i; 620484130193STheodore Ts'o for (i = 0; i < count_clusters; i++) 6205c9de560dSAlex Tomas BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 6206c9de560dSAlex Tomas } 6207c9de560dSAlex Tomas #endif 620884130193STheodore Ts'o trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6209c9de560dSAlex Tomas 6210adb7ef60SKonstantin Khlebnikov /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6211adb7ef60SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6212adb7ef60SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 6213920313a7SAneesh Kumar K.V if (err) 6214920313a7SAneesh Kumar K.V goto error_return; 6215e6362609STheodore Ts'o 6216f96c450dSDaeho Jeong /* 6217f96c450dSDaeho Jeong * We need to make sure we don't reuse the freed block until after the 6218f96c450dSDaeho Jeong * transaction is committed. We make an exception if the inode is to be 6219f96c450dSDaeho Jeong * written in writeback mode since writeback mode has weak data 6220f96c450dSDaeho Jeong * consistency guarantees. 6221f96c450dSDaeho Jeong */ 6222f96c450dSDaeho Jeong if (ext4_handle_valid(handle) && 6223f96c450dSDaeho Jeong ((flags & EXT4_FREE_BLOCKS_METADATA) || 6224f96c450dSDaeho Jeong !ext4_should_writeback_data(inode))) { 62257a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry; 62267a2fcbf7SAneesh Kumar K.V /* 62277444a072SMichal Hocko * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 62287444a072SMichal Hocko * to fail. 
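		 *
		 * A minimal sketch of the idiom used below (illustrative
		 * only):
		 *
		 *	entry = kmem_cache_alloc(cachep,
		 *				 GFP_NOFS | __GFP_NOFAIL);
		 *
		 * GFP_NOFS keeps memory reclaim from recursing into the
		 * filesystem while we hold a transaction handle, and
		 * __GFP_NOFAIL makes the allocator retry internally instead
		 * of returning NULL, which is why the result is used below
		 * without a NULL check.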
62297a2fcbf7SAneesh Kumar K.V */ 62307444a072SMichal Hocko new_entry = kmem_cache_alloc(ext4_free_data_cachep, 62317444a072SMichal Hocko GFP_NOFS|__GFP_NOFAIL); 623218aadd47SBobi Jam new_entry->efd_start_cluster = bit; 623318aadd47SBobi Jam new_entry->efd_group = block_group; 623418aadd47SBobi Jam new_entry->efd_count = count_clusters; 623518aadd47SBobi Jam new_entry->efd_tid = handle->h_transaction->t_tid; 6236955ce5f5SAneesh Kumar K.V 62377a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, block_group); 623884130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 62397a2fcbf7SAneesh Kumar K.V ext4_mb_free_metadata(handle, &e4b, new_entry); 6240c9de560dSAlex Tomas } else { 62417a2fcbf7SAneesh Kumar K.V /* need to update group_info->bb_free and bitmap 62427a2fcbf7SAneesh Kumar K.V * with group lock held. generate_buddy look at 62437a2fcbf7SAneesh Kumar K.V * them with group lock_held 62447a2fcbf7SAneesh Kumar K.V */ 6245d71c1ae2SLukas Czerner if (test_opt(sb, DISCARD)) { 6246a0154344SDaeho Jeong err = ext4_issue_discard(sb, block_group, bit, count, 6247a0154344SDaeho Jeong NULL); 6248d71c1ae2SLukas Czerner if (err && err != -EOPNOTSUPP) 6249d71c1ae2SLukas Czerner ext4_msg(sb, KERN_WARNING, "discard request in" 6250a00b482bSRitesh Harjani " group:%u block:%d count:%lu failed" 6251d71c1ae2SLukas Czerner " with %d", block_group, bit, count, 6252d71c1ae2SLukas Czerner err); 62538f9ff189SLukas Czerner } else 62548f9ff189SLukas Czerner EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6255d71c1ae2SLukas Czerner 6256955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, block_group); 625784130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 625884130193STheodore Ts'o mb_free_blocks(inode, &e4b, bit, count_clusters); 6259c9de560dSAlex Tomas } 6260c9de560dSAlex Tomas 6261021b65bbSTheodore Ts'o ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6262021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, ret); 62631df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6264feb0ab32SDarrick J. Wong ext4_group_desc_csum_set(sb, block_group, gdp); 6265955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, block_group); 6266c9de560dSAlex Tomas 6267772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 6268772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 626990ba983fSTheodore Ts'o atomic64_add(count_clusters, 62707c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 62717c990728SSuraj Jitindar Singh flex_group)->free_clusters); 6272772cb7c8SJose R. Santos } 6273772cb7c8SJose R. 
Santos 62749fe67149SEric Whitney /* 62759fe67149SEric Whitney * on a bigalloc file system, defer the s_freeclusters_counter 62769fe67149SEric Whitney * update to the caller (ext4_remove_space and friends) so they 62779fe67149SEric Whitney * can determine if a cluster freed here should be rereserved 62789fe67149SEric Whitney */ 62799fe67149SEric Whitney if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 62807b415bf6SAditya Kali if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 62817b415bf6SAditya Kali dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 62829fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 62839fe67149SEric Whitney count_clusters); 62849fe67149SEric Whitney } 62857d734532SJan Kara 62867d734532SJan Kara ext4_mb_unload_buddy(&e4b); 62877b415bf6SAditya Kali 62887a2fcbf7SAneesh Kumar K.V /* We dirtied the bitmap block */ 62897a2fcbf7SAneesh Kumar K.V BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 62907a2fcbf7SAneesh Kumar K.V err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 62917a2fcbf7SAneesh Kumar K.V 6292c9de560dSAlex Tomas /* And the group descriptor block */ 6293c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 62940390131bSFrank Mayhar ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6295c9de560dSAlex Tomas if (!err) 6296c9de560dSAlex Tomas err = ret; 6297c9de560dSAlex Tomas 6298c9de560dSAlex Tomas if (overflow && !err) { 6299c9de560dSAlex Tomas block += count; 6300c9de560dSAlex Tomas count = overflow; 6301c9de560dSAlex Tomas put_bh(bitmap_bh); 63021e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 63031e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6304c9de560dSAlex Tomas goto do_more; 6305c9de560dSAlex Tomas } 6306c9de560dSAlex Tomas error_return: 6307c9de560dSAlex Tomas brelse(bitmap_bh); 6308c9de560dSAlex Tomas ext4_std_error(sb, err); 6309c9de560dSAlex Tomas return; 6310c9de560dSAlex Tomas } 63117360d173SLukas Czerner 63127360d173SLukas Czerner /** 63138ac3939dSRitesh Harjani * ext4_free_blocks() -- Free given blocks and update quota 63148ac3939dSRitesh Harjani * @handle: handle for this transaction 63158ac3939dSRitesh Harjani * @inode: inode 63168ac3939dSRitesh Harjani * @bh: optional buffer of the block to be freed 63178ac3939dSRitesh Harjani * @block: starting physical block to be freed 63188ac3939dSRitesh Harjani * @count: number of blocks to be freed 63198ac3939dSRitesh Harjani * @flags: flags used by ext4_free_blocks 63208ac3939dSRitesh Harjani */ 63218ac3939dSRitesh Harjani void ext4_free_blocks(handle_t *handle, struct inode *inode, 63228ac3939dSRitesh Harjani struct buffer_head *bh, ext4_fsblk_t block, 63238ac3939dSRitesh Harjani unsigned long count, int flags) 63248ac3939dSRitesh Harjani { 63258ac3939dSRitesh Harjani struct super_block *sb = inode->i_sb; 63268ac3939dSRitesh Harjani unsigned int overflow; 63278ac3939dSRitesh Harjani struct ext4_sb_info *sbi; 63288ac3939dSRitesh Harjani 63298ac3939dSRitesh Harjani sbi = EXT4_SB(sb); 63308ac3939dSRitesh Harjani 63318ac3939dSRitesh Harjani if (sbi->s_mount_state & EXT4_FC_REPLAY) { 63328ac3939dSRitesh Harjani ext4_free_blocks_simple(inode, block, count); 63338ac3939dSRitesh Harjani return; 63348ac3939dSRitesh Harjani } 63358ac3939dSRitesh Harjani 63368ac3939dSRitesh Harjani might_sleep(); 63378ac3939dSRitesh Harjani if (bh) { 63388ac3939dSRitesh Harjani if (block) 63398ac3939dSRitesh Harjani BUG_ON(block != bh->b_blocknr); 63408ac3939dSRitesh Harjani else 63418ac3939dSRitesh Harjani block = bh->b_blocknr; 
63428ac3939dSRitesh Harjani } 63438ac3939dSRitesh Harjani 63448ac3939dSRitesh Harjani if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 63458ac3939dSRitesh Harjani !ext4_inode_block_valid(inode, block, count)) { 63468ac3939dSRitesh Harjani ext4_error(sb, "Freeing blocks not in datazone - " 63478ac3939dSRitesh Harjani "block = %llu, count = %lu", block, count); 63488ac3939dSRitesh Harjani return; 63498ac3939dSRitesh Harjani } 63501e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 63518ac3939dSRitesh Harjani 63528ac3939dSRitesh Harjani ext4_debug("freeing block %llu\n", block); 63538ac3939dSRitesh Harjani trace_ext4_free_blocks(inode, block, count, flags); 63548ac3939dSRitesh Harjani 63558ac3939dSRitesh Harjani if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 63568ac3939dSRitesh Harjani BUG_ON(count > 1); 63578ac3939dSRitesh Harjani 63588ac3939dSRitesh Harjani ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 63598ac3939dSRitesh Harjani inode, bh, block); 63608ac3939dSRitesh Harjani } 63618ac3939dSRitesh Harjani 63628ac3939dSRitesh Harjani /* 63638ac3939dSRitesh Harjani * If the extent to be freed does not begin on a cluster 63648ac3939dSRitesh Harjani * boundary, we need to deal with partial clusters at the 63658ac3939dSRitesh Harjani * beginning and end of the extent. Normally we will free 63668ac3939dSRitesh Harjani * blocks at the beginning or the end unless we are explicitly 63678ac3939dSRitesh Harjani * requested to avoid doing so. 63688ac3939dSRitesh Harjani */ 63698ac3939dSRitesh Harjani overflow = EXT4_PBLK_COFF(sbi, block); 63708ac3939dSRitesh Harjani if (overflow) { 63718ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 63728ac3939dSRitesh Harjani overflow = sbi->s_cluster_ratio - overflow; 63738ac3939dSRitesh Harjani block += overflow; 63748ac3939dSRitesh Harjani if (count > overflow) 63758ac3939dSRitesh Harjani count -= overflow; 63768ac3939dSRitesh Harjani else 63778ac3939dSRitesh Harjani return; 63788ac3939dSRitesh Harjani } else { 63798ac3939dSRitesh Harjani block -= overflow; 63808ac3939dSRitesh Harjani count += overflow; 63818ac3939dSRitesh Harjani } 63821e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 63831e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 63848ac3939dSRitesh Harjani } 63858ac3939dSRitesh Harjani overflow = EXT4_LBLK_COFF(sbi, count); 63868ac3939dSRitesh Harjani if (overflow) { 63878ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 63888ac3939dSRitesh Harjani if (count > overflow) 63898ac3939dSRitesh Harjani count -= overflow; 63908ac3939dSRitesh Harjani else 63918ac3939dSRitesh Harjani return; 63928ac3939dSRitesh Harjani } else 63938ac3939dSRitesh Harjani count += sbi->s_cluster_ratio - overflow; 63941e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 63951e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 63968ac3939dSRitesh Harjani } 63978ac3939dSRitesh Harjani 63988ac3939dSRitesh Harjani if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 63998ac3939dSRitesh Harjani int i; 64008ac3939dSRitesh Harjani int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 64018ac3939dSRitesh Harjani 64028ac3939dSRitesh Harjani for (i = 0; i < count; i++) { 64038ac3939dSRitesh Harjani cond_resched(); 64048ac3939dSRitesh Harjani if (is_metadata) 64058ac3939dSRitesh Harjani bh = sb_find_get_block(inode->i_sb, block + i); 64068ac3939dSRitesh Harjani ext4_forget(handle, is_metadata, inode, bh, block + i); 64078ac3939dSRitesh Harjani } 64088ac3939dSRitesh 
Harjani } 64098ac3939dSRitesh Harjani 64108ac3939dSRitesh Harjani ext4_mb_clear_bb(handle, inode, block, count, flags); 64118ac3939dSRitesh Harjani return; 64128ac3939dSRitesh Harjani } 64138ac3939dSRitesh Harjani 64148ac3939dSRitesh Harjani /** 64150529155eSYongqiang Yang * ext4_group_add_blocks() -- Add given blocks to an existing group 64162846e820SAmir Goldstein * @handle: handle to this transaction 64172846e820SAmir Goldstein * @sb: super block 64184907cb7bSAnatol Pomozov * @block: start physical block to add to the block group 64192846e820SAmir Goldstein * @count: number of blocks to add 64202846e820SAmir Goldstein * 6421e73a347bSAmir Goldstein * This marks the blocks as free in the bitmap and buddy. 64222846e820SAmir Goldstein */ 6423cc7365dfSYongqiang Yang int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 64242846e820SAmir Goldstein ext4_fsblk_t block, unsigned long count) 64252846e820SAmir Goldstein { 64262846e820SAmir Goldstein struct buffer_head *bitmap_bh = NULL; 64272846e820SAmir Goldstein struct buffer_head *gd_bh; 64282846e820SAmir Goldstein ext4_group_t block_group; 64292846e820SAmir Goldstein ext4_grpblk_t bit; 64302846e820SAmir Goldstein unsigned int i; 64312846e820SAmir Goldstein struct ext4_group_desc *desc; 64322846e820SAmir Goldstein struct ext4_sb_info *sbi = EXT4_SB(sb); 6433e73a347bSAmir Goldstein struct ext4_buddy e4b; 6434d77147ffSharshads int err = 0, ret, free_clusters_count; 6435d77147ffSharshads ext4_grpblk_t clusters_freed; 6436d77147ffSharshads ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6437d77147ffSharshads ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6438d77147ffSharshads unsigned long cluster_count = last_cluster - first_cluster + 1; 64392846e820SAmir Goldstein 64402846e820SAmir Goldstein ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 64412846e820SAmir Goldstein 64424740b830SYongqiang Yang if (count == 0) 64434740b830SYongqiang Yang return 0; 64444740b830SYongqiang Yang 64452846e820SAmir Goldstein ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 64462846e820SAmir Goldstein /* 64472846e820SAmir Goldstein * Check to see if we are adding blocks across a group 64482846e820SAmir Goldstein * boundary. 64492846e820SAmir Goldstein */ 6450d77147ffSharshads if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6451d77147ffSharshads ext4_warning(sb, "too many blocks added to group %u", 6452cc7365dfSYongqiang Yang block_group); 6453cc7365dfSYongqiang Yang err = -EINVAL; 64542846e820SAmir Goldstein goto error_return; 6455cc7365dfSYongqiang Yang } 64562cd05cc3STheodore Ts'o 64572846e820SAmir Goldstein bitmap_bh = ext4_read_block_bitmap(sb, block_group); 64589008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 64599008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 64609008a58eSDarrick J.
Wong bitmap_bh = NULL; 64612846e820SAmir Goldstein goto error_return; 6462cc7365dfSYongqiang Yang } 6463cc7365dfSYongqiang Yang 64642846e820SAmir Goldstein desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6465cc7365dfSYongqiang Yang if (!desc) { 6466cc7365dfSYongqiang Yang err = -EIO; 64672846e820SAmir Goldstein goto error_return; 6468cc7365dfSYongqiang Yang } 64692846e820SAmir Goldstein 6470a00b482bSRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, count)) { 64712846e820SAmir Goldstein ext4_error(sb, "Adding blocks in system zones - " 64722846e820SAmir Goldstein "Block = %llu, count = %lu", 64732846e820SAmir Goldstein block, count); 6474cc7365dfSYongqiang Yang err = -EINVAL; 64752846e820SAmir Goldstein goto error_return; 64762846e820SAmir Goldstein } 64772846e820SAmir Goldstein 64782cd05cc3STheodore Ts'o BUFFER_TRACE(bitmap_bh, "getting write access"); 6479188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6480188c299eSJan Kara EXT4_JTR_NONE); 64812846e820SAmir Goldstein if (err) 64822846e820SAmir Goldstein goto error_return; 64832846e820SAmir Goldstein 64842846e820SAmir Goldstein /* 64852846e820SAmir Goldstein * We are about to modify some metadata. Call the journal APIs 64862846e820SAmir Goldstein * to unshare ->b_data if a currently-committing transaction is 64872846e820SAmir Goldstein * using it 64882846e820SAmir Goldstein */ 64892846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "get_write_access"); 6490188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 64912846e820SAmir Goldstein if (err) 64922846e820SAmir Goldstein goto error_return; 6493e73a347bSAmir Goldstein 6494d77147ffSharshads for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 64952846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "clear bit"); 6496e73a347bSAmir Goldstein if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 64972846e820SAmir Goldstein ext4_error(sb, "bit already cleared for block %llu", 64982846e820SAmir Goldstein (ext4_fsblk_t)(block + i)); 64992846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "bit already cleared"); 65002846e820SAmir Goldstein } else { 6501d77147ffSharshads clusters_freed++; 65022846e820SAmir Goldstein } 65032846e820SAmir Goldstein } 6504e73a347bSAmir Goldstein 6505e73a347bSAmir Goldstein err = ext4_mb_load_buddy(sb, block_group, &e4b); 6506e73a347bSAmir Goldstein if (err) 6507e73a347bSAmir Goldstein goto error_return; 6508e73a347bSAmir Goldstein 6509e73a347bSAmir Goldstein /* 6510e73a347bSAmir Goldstein * need to update group_info->bb_free and bitmap 6511e73a347bSAmir Goldstein * with group lock held. generate_buddy look at 6512e73a347bSAmir Goldstein * them with group lock_held 6513e73a347bSAmir Goldstein */ 65142846e820SAmir Goldstein ext4_lock_group(sb, block_group); 6515d77147ffSharshads mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6516d77147ffSharshads mb_free_blocks(NULL, &e4b, bit, cluster_count); 6517d77147ffSharshads free_clusters_count = clusters_freed + 6518d77147ffSharshads ext4_free_group_clusters(sb, desc); 6519d77147ffSharshads ext4_free_group_clusters_set(sb, desc, free_clusters_count); 65201df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, desc, bitmap_bh); 6521feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, block_group, desc); 65222846e820SAmir Goldstein ext4_unlock_group(sb, block_group); 652357042651STheodore Ts'o percpu_counter_add(&sbi->s_freeclusters_counter, 6524d77147ffSharshads clusters_freed); 65252846e820SAmir Goldstein 65262846e820SAmir Goldstein if (sbi->s_log_groups_per_flex) { 65272846e820SAmir Goldstein ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6528d77147ffSharshads atomic64_add(clusters_freed, 65297c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 65307c990728SSuraj Jitindar Singh flex_group)->free_clusters); 65312846e820SAmir Goldstein } 6532e73a347bSAmir Goldstein 6533e73a347bSAmir Goldstein ext4_mb_unload_buddy(&e4b); 65342846e820SAmir Goldstein 65352846e820SAmir Goldstein /* We dirtied the bitmap block */ 65362846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 65372846e820SAmir Goldstein err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 65382846e820SAmir Goldstein 65392846e820SAmir Goldstein /* And the group descriptor block */ 65402846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 65412846e820SAmir Goldstein ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 65422846e820SAmir Goldstein if (!err) 65432846e820SAmir Goldstein err = ret; 65442846e820SAmir Goldstein 65452846e820SAmir Goldstein error_return: 65462846e820SAmir Goldstein brelse(bitmap_bh); 65472846e820SAmir Goldstein ext4_std_error(sb, err); 6548cc7365dfSYongqiang Yang return err; 65492846e820SAmir Goldstein } 65502846e820SAmir Goldstein 65512846e820SAmir Goldstein /** 65527360d173SLukas Czerner * ext4_trim_extent -- function to TRIM one single free extent in the group 65537360d173SLukas Czerner * @sb: super block for the file system 65547360d173SLukas Czerner * @start: starting block of the free extent in the alloc. group 65557360d173SLukas Czerner * @count: number of blocks to TRIM 65567360d173SLukas Czerner * @e4b: ext4 buddy for the group 65577360d173SLukas Czerner * 65587360d173SLukas Czerner * Trim "count" blocks starting at "start" in the "group". To assure that no 65597360d173SLukas Czerner * one will allocate those blocks, mark them as used in the buddy bitmap. This 65607360d173SLukas Czerner * must be called under the group lock. 65617360d173SLukas Czerner */ 6562bd2eea8dSWang Jianchao static int ext4_trim_extent(struct super_block *sb, 6563bd2eea8dSWang Jianchao int start, int count, struct ext4_buddy *e4b) 6564e2cbd587Sjon ernst __releases(bitlock) 6565e2cbd587Sjon ernst __acquires(bitlock) 65667360d173SLukas Czerner { 65677360d173SLukas Czerner struct ext4_free_extent ex; 6568bd2eea8dSWang Jianchao ext4_group_t group = e4b->bd_group; 6569d71c1ae2SLukas Czerner int ret = 0; 65707360d173SLukas Czerner 6571b3d4c2b1STao Ma trace_ext4_trim_extent(sb, group, start, count); 6572b3d4c2b1STao Ma 65737360d173SLukas Czerner assert_spin_locked(ext4_group_lock_ptr(sb, group)); 65747360d173SLukas Czerner 65757360d173SLukas Czerner ex.fe_start = start; 65767360d173SLukas Czerner ex.fe_group = group; 65777360d173SLukas Czerner ex.fe_len = count; 65787360d173SLukas Czerner 65797360d173SLukas Czerner /* 65807360d173SLukas Czerner * Mark blocks used, so no one can reuse them while 65817360d173SLukas Czerner * being trimmed.
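	 *
	 * In outline, the pattern implemented below is (sketch of the
	 * code that follows):
	 *
	 *	mb_mark_used(e4b, &ex);		// reserve under the group lock
	 *	ext4_unlock_group(sb, group);	// discard may sleep, drop lock
	 *	ext4_issue_discard(...);	// send TRIM to the device
	 *	ext4_lock_group(sb, group);	// retake the lock
	 *	mb_free_blocks(...);		// hand the range back as free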
65827360d173SLukas Czerner */ 65837360d173SLukas Czerner mb_mark_used(e4b, &ex); 65847360d173SLukas Czerner ext4_unlock_group(sb, group); 6585a0154344SDaeho Jeong ret = ext4_issue_discard(sb, group, start, count, NULL); 65867360d173SLukas Czerner ext4_lock_group(sb, group); 65877360d173SLukas Czerner mb_free_blocks(NULL, e4b, start, ex.fe_len); 6588d71c1ae2SLukas Czerner return ret; 65897360d173SLukas Czerner } 65907360d173SLukas Czerner 65916920b391SWang Jianchao static int ext4_try_to_trim_range(struct super_block *sb, 65926920b391SWang Jianchao struct ext4_buddy *e4b, ext4_grpblk_t start, 65936920b391SWang Jianchao ext4_grpblk_t max, ext4_grpblk_t minblocks) 6594a5fda113STheodore Ts'o __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6595a5fda113STheodore Ts'o __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 65966920b391SWang Jianchao { 65976920b391SWang Jianchao ext4_grpblk_t next, count, free_count; 65986920b391SWang Jianchao void *bitmap; 65996920b391SWang Jianchao 66006920b391SWang Jianchao bitmap = e4b->bd_bitmap; 66016920b391SWang Jianchao start = (e4b->bd_info->bb_first_free > start) ? 66026920b391SWang Jianchao e4b->bd_info->bb_first_free : start; 66036920b391SWang Jianchao count = 0; 66046920b391SWang Jianchao free_count = 0; 66056920b391SWang Jianchao 66066920b391SWang Jianchao while (start <= max) { 66076920b391SWang Jianchao start = mb_find_next_zero_bit(bitmap, max + 1, start); 66086920b391SWang Jianchao if (start > max) 66096920b391SWang Jianchao break; 66106920b391SWang Jianchao next = mb_find_next_bit(bitmap, max + 1, start); 66116920b391SWang Jianchao 66126920b391SWang Jianchao if ((next - start) >= minblocks) { 6613afcc4e32SLukas Bulwahn int ret = ext4_trim_extent(sb, start, next - start, e4b); 6614afcc4e32SLukas Bulwahn 66156920b391SWang Jianchao if (ret && ret != -EOPNOTSUPP) 66166920b391SWang Jianchao break; 66176920b391SWang Jianchao count += next - start; 66186920b391SWang Jianchao } 66196920b391SWang Jianchao free_count += next - start; 66206920b391SWang Jianchao start = next + 1; 66216920b391SWang Jianchao 66226920b391SWang Jianchao if (fatal_signal_pending(current)) { 66236920b391SWang Jianchao count = -ERESTARTSYS; 66246920b391SWang Jianchao break; 66256920b391SWang Jianchao } 66266920b391SWang Jianchao 66276920b391SWang Jianchao if (need_resched()) { 66286920b391SWang Jianchao ext4_unlock_group(sb, e4b->bd_group); 66296920b391SWang Jianchao cond_resched(); 66306920b391SWang Jianchao ext4_lock_group(sb, e4b->bd_group); 66316920b391SWang Jianchao } 66326920b391SWang Jianchao 66336920b391SWang Jianchao if ((e4b->bd_info->bb_free - free_count) < minblocks) 66346920b391SWang Jianchao break; 66356920b391SWang Jianchao } 66366920b391SWang Jianchao 66376920b391SWang Jianchao return count; 66386920b391SWang Jianchao } 66396920b391SWang Jianchao 66407360d173SLukas Czerner /** 66417360d173SLukas Czerner * ext4_trim_all_free -- function to trim all free space in alloc. group 66427360d173SLukas Czerner * @sb: super block for file system 664322612283STao Ma * @group: group to be trimmed 66447360d173SLukas Czerner * @start: first group block to examine 66457360d173SLukas Czerner * @max: last group block to examine 66467360d173SLukas Czerner * @minblocks: minimum extent block count 6647d63c00eaSDmitry Monakhov * @set_trimmed: set the trimmed flag if at least one block is trimmed 66487360d173SLukas Czerner * 66497360d173SLukas Czerner * ext4_trim_all_free walks through group's block bitmap searching for free 66507360d173SLukas Czerner * extents. 
66407360d173SLukas Czerner /**
66417360d173SLukas Czerner  * ext4_trim_all_free -- function to trim all free space in alloc. group
66427360d173SLukas Czerner  * @sb:		super block for file system
664322612283STao Ma  * @group:	group to be trimmed
66447360d173SLukas Czerner  * @start:	first group block to examine
66457360d173SLukas Czerner  * @max:	last group block to examine
66467360d173SLukas Czerner  * @minblocks:	minimum extent block count
6647d63c00eaSDmitry Monakhov  * @set_trimmed: set the trimmed flag if at least one block is trimmed
66487360d173SLukas Czerner  *
66497360d173SLukas Czerner  * ext4_trim_all_free walks through group's block bitmap searching for free
66507360d173SLukas Czerner  * extents. When a free extent is found, it is marked as used in the group
66517360d173SLukas Czerner  * buddy bitmap. A TRIM command is then issued on this extent, and the
6652b6f5558cSWang Jianchao  * extent is freed again in the group buddy bitmap.
66537360d173SLukas Czerner  */
66540b75a840SLukas Czerner static ext4_grpblk_t
665578944086SLukas Czerner ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
665678944086SLukas Czerner 		   ext4_grpblk_t start, ext4_grpblk_t max,
6657d63c00eaSDmitry Monakhov 		   ext4_grpblk_t minblocks, bool set_trimmed)
66587360d173SLukas Czerner {
665978944086SLukas Czerner 	struct ext4_buddy e4b;
66606920b391SWang Jianchao 	int ret;
66617360d173SLukas Czerner 
6662b3d4c2b1STao Ma 	trace_ext4_trim_all_free(sb, group, start, max);
6663b3d4c2b1STao Ma 
666478944086SLukas Czerner 	ret = ext4_mb_load_buddy(sb, group, &e4b);
666578944086SLukas Czerner 	if (ret) {
66669651e6b2SKonstantin Khlebnikov 		ext4_warning(sb, "Error %d loading buddy information for %u",
66679651e6b2SKonstantin Khlebnikov 			     ret, group);
666878944086SLukas Czerner 		return ret;
666978944086SLukas Czerner 	}
667028739eeaSLukas Czerner 
667128739eeaSLukas Czerner 	ext4_lock_group(sb, group);
66723d56b8d2STao Ma 
66736920b391SWang Jianchao 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
66742327fb2eSLukas Czerner 	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
66756920b391SWang Jianchao 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6676d63c00eaSDmitry Monakhov 		if (ret >= 0 && set_trimmed)
66773d56b8d2STao Ma 			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
66786920b391SWang Jianchao 	} else {
66796920b391SWang Jianchao 		ret = 0;
6680d71c1ae2SLukas Czerner 	}
66816920b391SWang Jianchao 
66827360d173SLukas Czerner 	ext4_unlock_group(sb, group);
668378944086SLukas Czerner 	ext4_mb_unload_buddy(&e4b);
66847360d173SLukas Czerner 
66857360d173SLukas Czerner 	ext4_debug("trimmed %d blocks in the group %d\n",
66866920b391SWang Jianchao 		   ret, group);
66877360d173SLukas Czerner 
6688d71c1ae2SLukas Czerner 	return ret;
66897360d173SLukas Czerner }
66907360d173SLukas Czerner 
66917360d173SLukas Czerner /**
66927360d173SLukas Czerner  * ext4_trim_fs() -- trim ioctl handler function
66937360d173SLukas Czerner  * @sb:			superblock for filesystem
66947360d173SLukas Czerner  * @range:		fstrim_range structure
66957360d173SLukas Czerner  *
66967360d173SLukas Czerner  * start:	first byte to trim
66977360d173SLukas Czerner  * len:		number of bytes to trim from start
66987360d173SLukas Czerner  * minlen:	minimum extent length in bytes
66997360d173SLukas Czerner  * ext4_trim_fs goes through all allocation groups containing bytes from
67007360d173SLukas Czerner  * start to start+len. For each such group, the ext4_trim_all_free function
67017360d173SLukas Czerner  * is invoked to trim all free space.
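 *
 * For illustration only (userspace view, not part of this file): this
 * function backs the FITRIM ioctl, which can be exercised as
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * where fd refers to any open file or directory on the filesystem; on
 * return, range.len holds the number of bytes actually trimmed.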
67027360d173SLukas Czerner  */
67037360d173SLukas Czerner int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
67047360d173SLukas Czerner {
67057b47ef52SChristoph Hellwig 	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
670678944086SLukas Czerner 	struct ext4_group_info *grp;
6707913eed83SLukas Czerner 	ext4_group_t group, first_group, last_group;
67087137d7a4STheodore Ts'o 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6709913eed83SLukas Czerner 	uint64_t start, end, minlen, trimmed = 0;
67100f0a25bfSJan Kara 	ext4_fsblk_t first_data_blk =
67110f0a25bfSJan Kara 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6712913eed83SLukas Czerner 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6713d63c00eaSDmitry Monakhov 	bool whole_group, eof = false;
67147360d173SLukas Czerner 	int ret = 0;
67157360d173SLukas Czerner 
67167360d173SLukas Czerner 	start = range->start >> sb->s_blocksize_bits;
6717913eed83SLukas Czerner 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6718aaf7d73eSLukas Czerner 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6719aaf7d73eSLukas Czerner 			      range->minlen >> sb->s_blocksize_bits);
67207360d173SLukas Czerner 
67215de35e8dSLukas Czerner 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
67225de35e8dSLukas Czerner 	    start >= max_blks ||
67235de35e8dSLukas Czerner 	    range->len < sb->s_blocksize)
67247360d173SLukas Czerner 		return -EINVAL;
6725173b6e38SJan Kara 	/* No point in trying to trim less than the discard granularity */
67267b47ef52SChristoph Hellwig 	if (range->minlen < discard_granularity) {
6727173b6e38SJan Kara 		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
67287b47ef52SChristoph Hellwig 				      discard_granularity >> sb->s_blocksize_bits);
6729173b6e38SJan Kara 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6730173b6e38SJan Kara 			goto out;
6731173b6e38SJan Kara 	}
6732d63c00eaSDmitry Monakhov 	if (end >= max_blks - 1) {
6733913eed83SLukas Czerner 		end = max_blks - 1;
6734d63c00eaSDmitry Monakhov 		eof = true;
6735d63c00eaSDmitry Monakhov 	}
6736913eed83SLukas Czerner 	if (end <= first_data_blk)
673722f10457STao Ma 		goto out;
6738913eed83SLukas Czerner 	if (start < first_data_blk)
67390f0a25bfSJan Kara 		start = first_data_blk;
67407360d173SLukas Czerner 
6741913eed83SLukas Czerner 	/* Determine first and last group to examine based on start and end */
67427360d173SLukas Czerner 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
67437137d7a4STheodore Ts'o 				     &first_group, &first_cluster);
6744913eed83SLukas Czerner 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
67457137d7a4STheodore Ts'o 				     &last_group, &last_cluster);
67467360d173SLukas Czerner 
6747913eed83SLukas Czerner 	/* end now represents the last cluster to discard in this group */
6748913eed83SLukas Czerner 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6749d63c00eaSDmitry Monakhov 	whole_group = true;
67507360d173SLukas Czerner 
67517360d173SLukas Czerner 	for (group = first_group; group <= last_group; group++) {
675278944086SLukas Czerner 		grp = ext4_get_group_info(sb, group);
67535354b2afSTheodore Ts'o 		if (!grp)
67545354b2afSTheodore Ts'o 			continue;
675578944086SLukas Czerner 		/* We only do this if the grp has never been initialized */
675678944086SLukas Czerner 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6757adb7ef60SKonstantin Khlebnikov 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
675878944086SLukas Czerner 			if (ret)
67597360d173SLukas Czerner 				break;
67607360d173SLukas Czerner 		}
67617360d173SLukas Czerner 
67620ba08517STao Ma 		/*
6763913eed83SLukas Czerner 		 * For all groups except the last one, the last cluster will
6764913eed83SLukas Czerner 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6765913eed83SLukas Czerner 		 * change it for the last group; note that last_cluster was
6766913eed83SLukas Czerner 		 * already computed by ext4_get_group_no_and_offset()
67670ba08517STao Ma 		 */
6768d63c00eaSDmitry Monakhov 		if (group == last_group) {
6769913eed83SLukas Czerner 			end = last_cluster;
6770d63c00eaSDmitry Monakhov 			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6771d63c00eaSDmitry Monakhov 		}
677278944086SLukas Czerner 		if (grp->bb_free >= minlen) {
67737137d7a4STheodore Ts'o 			cnt = ext4_trim_all_free(sb, group, first_cluster,
6774d63c00eaSDmitry Monakhov 						 end, minlen, whole_group);
67757360d173SLukas Czerner 			if (cnt < 0) {
67767360d173SLukas Czerner 				ret = cnt;
67777360d173SLukas Czerner 				break;
67787360d173SLukas Czerner 			}
67797360d173SLukas Czerner 			trimmed += cnt;
678021e7fd22SLukas Czerner 		}
6781913eed83SLukas Czerner 
6782913eed83SLukas Czerner 		/*
6783913eed83SLukas Czerner 		 * For every group except the first one, we are sure
6784913eed83SLukas Czerner 		 * that the first cluster to discard will be cluster #0.
6785913eed83SLukas Czerner 		 */
67867137d7a4STheodore Ts'o 		first_cluster = 0;
67877360d173SLukas Czerner 	}
67887360d173SLukas Czerner 
67893d56b8d2STao Ma 	if (!ret)
67902327fb2eSLukas Czerner 		EXT4_SB(sb)->s_last_trim_minblks = minlen;
67913d56b8d2STao Ma 
679222f10457STao Ma out:
6793aaf7d73eSLukas Czerner 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
67947360d173SLukas Czerner 	return ret;
67957360d173SLukas Czerner }
67960c9ec4beSDarrick J. Wong 
67970c9ec4beSDarrick J. Wong /* Iterate all the free extents in the group. */
67980c9ec4beSDarrick J. Wong int
67990c9ec4beSDarrick J. Wong ext4_mballoc_query_range(
68000c9ec4beSDarrick J. Wong 	struct super_block		*sb,
68010c9ec4beSDarrick J. Wong 	ext4_group_t			group,
68020c9ec4beSDarrick J. Wong 	ext4_grpblk_t			start,
68030c9ec4beSDarrick J. Wong 	ext4_grpblk_t			end,
68040c9ec4beSDarrick J. Wong 	ext4_mballoc_query_range_fn	formatter,
68050c9ec4beSDarrick J. Wong 	void				*priv)
68060c9ec4beSDarrick J. Wong {
68070c9ec4beSDarrick J. Wong 	void				*bitmap;
68080c9ec4beSDarrick J. Wong 	ext4_grpblk_t			next;
68090c9ec4beSDarrick J. Wong 	struct ext4_buddy		e4b;
68100c9ec4beSDarrick J. Wong 	int				error;
68110c9ec4beSDarrick J. Wong 
68120c9ec4beSDarrick J. Wong 	error = ext4_mb_load_buddy(sb, group, &e4b);
68130c9ec4beSDarrick J. Wong 	if (error)
68140c9ec4beSDarrick J. Wong 		return error;
68150c9ec4beSDarrick J. Wong 	bitmap = e4b.bd_bitmap;
68160c9ec4beSDarrick J. Wong 
68170c9ec4beSDarrick J. Wong 	ext4_lock_group(sb, group);
68180c9ec4beSDarrick J. Wong 
68190c9ec4beSDarrick J. Wong 	start = (e4b.bd_info->bb_first_free > start) ?
68200c9ec4beSDarrick J. Wong 		e4b.bd_info->bb_first_free : start;
68210c9ec4beSDarrick J. Wong 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
68220c9ec4beSDarrick J. Wong 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
68230c9ec4beSDarrick J. Wong 
68240c9ec4beSDarrick J. Wong 	while (start <= end) {
68250c9ec4beSDarrick J. Wong 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
68260c9ec4beSDarrick J. Wong 		if (start > end)
68270c9ec4beSDarrick J. Wong 			break;
68280c9ec4beSDarrick J. Wong 		next = mb_find_next_bit(bitmap, end + 1, start);
68290c9ec4beSDarrick J. Wong 
68300c9ec4beSDarrick J. Wong 		ext4_unlock_group(sb, group);
68310c9ec4beSDarrick J. Wong 		error = formatter(sb, group, start, next - start, priv);
68320c9ec4beSDarrick J. Wong 		if (error)
68330c9ec4beSDarrick J. Wong 			goto out_unload;
68340c9ec4beSDarrick J. Wong 		ext4_lock_group(sb, group);
68350c9ec4beSDarrick J. Wong 
68360c9ec4beSDarrick J. Wong 		start = next + 1;
68370c9ec4beSDarrick J. Wong 	}
68380c9ec4beSDarrick J. Wong 
68390c9ec4beSDarrick J. Wong 	ext4_unlock_group(sb, group);
68400c9ec4beSDarrick J. Wong out_unload:
68410c9ec4beSDarrick J. Wong 	ext4_mb_unload_buddy(&e4b);
68420c9ec4beSDarrick J. Wong 
68430c9ec4beSDarrick J. Wong 	return error;
68440c9ec4beSDarrick J. Wong }
6845
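/*
 * Usage sketch for ext4_mballoc_query_range() (illustrative only; the
 * callback and counter below are hypothetical, not part of this file):
 *
 *	static int count_free_extents(struct super_block *sb,
 *				      ext4_group_t group, ext4_grpblk_t start,
 *				      ext4_grpblk_t len, void *priv)
 *	{
 *		unsigned int *nr = priv;
 *
 *		(*nr)++;		// called once per free extent
 *		return 0;		// nonzero aborts the walk
 *	}
 *
 *	unsigned int nr = 0;
 *	int err = ext4_mballoc_query_range(sb, group, 0,
 *					   EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *					   count_free_extents, &nr);
 *
 * Note that the group lock is dropped around each formatter invocation,
 * so the callback is allowed to sleep.
 */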