// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks near to
 * the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
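 *
 * For example (using the default, tunable numbers above): a write that
 * leaves a file at 12 blocks is below the 16-block s_mb_stream_request
 * threshold and is therefore served from the shared per-CPU locality group
 * preallocation, while a file growing past 16 blocks switches to its own
 * per-inode preallocation list.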
 *
 * First stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
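 *
 * An illustrative layout (the numbers depend on page and block size): with
 * 4KiB pages and a 1KiB block size, blocks_per_page = 4 and
 * groups_per_page = 2, so the first page of the buddy cache holds the bitmap
 * and buddy blocks of groups 0 and 1, the next page those of groups 2 and 3,
 * and so on. With larger block sizes a page may hold only the bitmap or only
 * the buddy block of a single group.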
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory group
 * info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty groups
 *    so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 * structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
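 *
 * As a rough example of how structure (1) is used: a request whose
 * normalized length is 16 clusters has order 4, so the CR0 scan starts at
 * sbi->s_mb_largest_free_orders[4] and, if that list is empty, walks the
 * higher-order lists; any group found there is guaranteed to have a free
 * buddy chunk at least as large as the request.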
 *
 * At CR0, we look for groups which have the largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform CR0 lookup in O(1) time.
 *
 * At CR1, we only consider groups where average fragment size > request
 * size. So, we look up a group which has average fragment size just above or
 * equal to request size using our average fragment size group lists (data
 * structure 2) in O(1) time.
 *
 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 * linear order which requires O(N) search time for each CR0 and CR1 phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long the mballoc __must__ look for a best
 * extent and max_to_scan indicates how long the mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups mballoc should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as described above. So for the
 * first request we will hit the buddy cache, which will result in this
 * prealloc space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from a descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within a descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
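 *
 * a small worked instance of the above (numbers only for illustration):
 * creating an 8-block inode PA marks 8 blocks in the buddy (buddy += 8,
 * PA = 8); writing 3 of them marks those blocks in the on-disk bitmap
 * (on-disk += 3, PA -= 3); discarding the PA at that point releases the
 * 5 never-used blocks back to the buddy and leaves PA = 0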
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - PA is referenced and while it is no discard is possible
 *  - PA is referenced until the blocks are marked in the on-disk bitmap
 *  - PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
 *
 */

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr0 lists lock		(cr0)
 *  - cr1 tree lock		(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
        "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
        "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
        "ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
                                                ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                               ext4_group_t group, enum criteria cr);

static int ext4_try_to_trim_range(struct super_block *sb,
                struct ext4_buddy *e4b, ext4_grpblk_t start,
                ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes below:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus, using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
        int __cpu;
        u64 __seq = 0;

        for_each_possible_cpu(__cpu)
                __seq += per_cpu(discard_pa_seq, __cpu);
        return __seq;
}
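
/*
 * Sketch of the intended use, restating the comment above (the real retry
 * logic lives in ext4_mb_new_blocks() and its helpers):
 *
 *	seq = this_cpu_read(discard_pa_seq);
 *	try the allocation;
 *	if it failed, no blocks were freed, and
 *	seq == ext4_get_discard_pa_seq_sum(), give up -- nobody freed or
 *	discarded PAs while we were trying; otherwise it is worth retrying.
 */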

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
        *bit += ((unsigned long) addr & 7UL) << 3;
        addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
        *bit += ((unsigned long) addr & 3UL) << 3;
        addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
        return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
        /*
         * ext4_test_bit on architecture like powerpc
         * needs unsigned long aligned address
         */
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}
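
/*
 * Worked example for the helpers above, assuming BITS_PER_LONG == 64 and a
 * bitmap buffer starting at an 8-byte-aligned base: for addr = base + 5 and
 * bit = 2, mb_correct_addr_and_bit() rounds addr down to base and turns bit
 * into 2 + 5 * 8 = 42, so the ext4_*_bit() call still touches the same bit
 * of the bitmap but through a suitably aligned address.
 */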

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
        char *bb;

        BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
        BUG_ON(max == NULL);

        if (order > e4b->bd_blkbits + 1) {
                *max = 0;
                return NULL;
        }

        /* at order 0 we see each particular block */
        if (order == 0) {
                *max = 1 << (e4b->bd_blkbits + 3);
                return e4b->bd_bitmap;
        }

        bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
        *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

        return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                           int first, int count)
{
        int i;
        struct super_block *sb = e4b->bd_sb;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;

                        blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing block already freed "
                                              "(bit %u)",
                                              first + i);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
        int i;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
                mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
                unsigned char *b1, *b2;
                int i;
                b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
                b2 = (unsigned char *) bitmap;
                for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
                        if (b1[i] != b2[i]) {
                                ext4_msg(e4b->bd_sb, KERN_ERR,
                                         "corruption in group %u "
                                         "at byte %u(%u): %x in copy != %x "
                                         "on disk/prealloc",
                                         e4b->bd_group, i, i * 8, b1[i], b2[i]);
                                BUG();
                        }
                }
        }
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        struct buffer_head *bh;

        grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
        if (!grp->bb_bitmap)
                return;

        bh = ext4_read_block_bitmap(sb, group);
        if (IS_ERR_OR_NULL(bh)) {
                kfree(grp->bb_bitmap);
                grp->bb_bitmap = NULL;
                return;
        }

        memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
        put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
                                struct ext4_buddy *e4b, int first, int count)
{
        return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
                                                int first, int count)
{
        return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
{
        struct super_block *sb = e4b->bd_sb;
        int order = e4b->bd_blkbits + 1;
        int max;
        int max2;
        int i;
        int j;
        int k;
        int count;
        struct ext4_group_info *grp;
        int fragments = 0;
        int fstart;
        struct list_head *cur;
        void *buddy;
        void *buddy2;

        if (e4b->bd_info->bb_check_counter++ % 10)
                return 0;

        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
                MB_CHECK_ASSERT(buddy);
                buddy2 = mb_find_buddy(e4b, order - 1, &max2);
                MB_CHECK_ASSERT(buddy2);
                MB_CHECK_ASSERT(buddy != buddy2);
                MB_CHECK_ASSERT(max * 2 == max2);

                count = 0;
                for (i = 0; i < max; i++) {

                        if (mb_test_bit(i, buddy)) {
                                /* only single bit in buddy2 may be 0 */
                                if (!mb_test_bit(i << 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit((i<<1)+1, buddy2));
                                }
                                continue;
                        }

                        /* both bits in buddy2 must be 1 */
                        MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
                        MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

                        for (j = 0; j < (1 << order); j++) {
                                k = (i * (1 << order)) + j;
                                MB_CHECK_ASSERT(
                                        !mb_test_bit(k, e4b->bd_bitmap));
                        }
                        count++;
                }
                MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
                order--;
        }

        fstart = -1;
        buddy = mb_find_buddy(e4b, 0, &max);
        for (i = 0; i < max; i++) {
                if (!mb_test_bit(i, buddy)) {
                        MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
                        if (fstart == -1) {
                                fragments++;
                                fstart = i;
                        }
                        continue;
                }
                fstart = -1;
                /* check used bits only */
                for (j = 0; j < e4b->bd_blkbits + 1; j++) {
                        buddy2 = mb_find_buddy(e4b, j, &max2);
                        k = i >> j;
                        MB_CHECK_ASSERT(k < max2);
                        MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
                }
        }
        MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

        grp = ext4_get_group_info(sb, e4b->bd_group);
        if (!grp)
                return 0;
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
                pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
        return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
                                        __FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
                                void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
                                        struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t min;
        ext4_grpblk_t max;
        ext4_grpblk_t chunk;
        unsigned int border;

        BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

        border = 2 << sb->s_blocksize_bits;

        while (len > 0) {
                /* find how many blocks can be covered since this position */
                max = ffs(first | border) - 1;

                /* find how many blocks of power 2 we need to mark */
                min = fls(len) - 1;

                if (max < min)
                        min = max;
                chunk = 1 << min;

                /* mark multiblock chunks only */
                grp->bb_counters[min]++;
                if (min > 0)
                        mb_clear_bit(first >> min,
                                     buddy + sbi->s_mb_offsets[min]);

                len -= chunk;
                first += chunk;
        }
}
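
/*
 * Worked example (values chosen only to illustrate the loop above): a free
 * range with first = 6, len = 6 (clusters 6..11) is split into a chunk of 2
 * at 6 and a chunk of 4 at 8, because 6 is only 2-aligned. That bumps
 * bb_counters[1] and bb_counters[2] and clears bit 6 >> 1 = 3 in the order-1
 * buddy and bit 8 >> 2 = 2 in the order-2 buddy. Single-cluster pieces only
 * bump bb_counters[0]; order-0 state lives in the block bitmap itself.
 */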

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
        int order;

        /*
         * We don't bother with a special list for groups whose only free
         * extents are 1 block long, nor for completely empty groups.
         */
        order = fls(len) - 2;
        if (order < 0)
                return 0;
        if (order == MB_NUM_ORDERS(sb))
                order--;
        return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int new_order;

        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
                return;

        new_order = mb_avg_fragment_size_order(sb,
                                        grp->bb_free / grp->bb_fragments);
        if (new_order == grp->bb_avg_fragment_size_order)
                return;

        if (grp->bb_avg_fragment_size_order != -1) {
                write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
                list_del(&grp->bb_avg_fragment_size_node);
                write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        }
        grp->bb_avg_fragment_size_order = new_order;
        write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        list_add_tail(&grp->bb_avg_fragment_size_node,
                &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
        write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
                        enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *iter, *grp;
        int i;

        if (ac->ac_status == AC_STATUS_FOUND)
                return;

        if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
                atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

        grp = NULL;
        for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                if (list_empty(&sbi->s_mb_largest_free_orders[i]))
                        continue;
                read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
                        read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                        continue;
                }
                grp = NULL;
                list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
                                    bb_largest_free_order_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[CR0]);
                        if (likely(ext4_mb_good_group(ac, iter->bb_group, CR0))) {
                                grp = iter;
                                break;
                        }
                }
                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (grp)
                        break;
        }

        if (!grp) {
                /* Increment cr and search again */
                *new_cr = CR1;
        } else {
                *group = grp->bb_group;
                ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
        }
}

/*
 * Find a suitable group of given order from the average fragments list.
 */
static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
        rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
        struct ext4_group_info *grp = NULL, *iter;
        enum criteria cr = ac->ac_criteria;

        if (list_empty(frag_list))
                return NULL;
        read_lock(frag_list_lock);
        if (list_empty(frag_list)) {
                read_unlock(frag_list_lock);
                return NULL;
        }
        list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
                if (sbi->s_mb_stats)
                        atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
                if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
                        grp = iter;
                        break;
                }
        }
        read_unlock(frag_list_lock);
        return grp;
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
        int i;

        if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
        }

        for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
             i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
                if (grp)
                        break;
        }

        if (grp) {
                *group = grp->bb_group;
                ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
        } else {
                *new_cr = CR2;
        }
}
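
/*
 * For instance, a CR1 request with ac_g_ex.fe_len = 100 clusters starts at
 * list index mb_avg_fragment_size_order() == fls(100) - 2 == 5 and walks the
 * higher-index average fragment size lists until
 * ext4_mb_find_good_group_avg_frag_lists() returns a group that passes
 * ext4_mb_good_group(); if none does, *new_cr is bumped to CR2.
 */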

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
        if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
                return 0;
        if (ac->ac_criteria >= CR2)
                return 0;
        if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
                return 0;
        return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
        if (!should_optimize_scan(ac))
                goto inc_and_return;

        if (ac->ac_groups_linear_remaining) {
                ac->ac_groups_linear_remaining--;
                goto inc_and_return;
        }

        return group;
inc_and_return:
        /*
         * Artificially restricted ngroups for non-extent
         * files makes group > ngroups possible on first loop.
         */
        return group + 1 >= ngroups ? 0 : group + 1;
}
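
/*
 * Illustration of the combined effect, based on the description at the top
 * of this file: with mb_optimize_scan enabled on a rotational device,
 * ac_groups_linear_remaining starts out at the mb_linear_limit value
 * (MB_DEFAULT_LINEAR_LIMIT by default), so the first few groups after the
 * goal are still probed in linear order; only after that budget is used up
 * does ext4_mb_choose_next_group() below switch to the CR0/CR1 list-driven
 * choice.
 */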

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at current CR level, this field is updated to indicate
 *            the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used as
 *            determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        *new_cr = ac->ac_criteria;

        if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
                *group = next_linear_group(ac, *group, ngroups);
                return;
        }

        if (*new_cr == CR0) {
                ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
        } else if (*new_cr == CR1) {
                ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
        } else {
                /*
                 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
                 * bb_free. But until that happens, we should never come here.
                 */
                WARN_ON(1);
        }
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;

        for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
                if (grp->bb_counters[i] > 0)
                        break;
*/ 10541940265eSJan Kara if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || 10551940265eSJan Kara i == grp->bb_largest_free_order) { 10561940265eSJan Kara grp->bb_largest_free_order = i; 10571940265eSJan Kara return; 10581940265eSJan Kara } 10591940265eSJan Kara 10601940265eSJan Kara if (grp->bb_largest_free_order >= 0) { 1061196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1062196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1063196e402aSHarshad Shirwadkar list_del_init(&grp->bb_largest_free_order_node); 1064196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1065196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1066196e402aSHarshad Shirwadkar } 10678a57d9d6SCurt Wohlgemuth grp->bb_largest_free_order = i; 10681940265eSJan Kara if (grp->bb_largest_free_order >= 0 && grp->bb_free) { 1069196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1070196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1071196e402aSHarshad Shirwadkar list_add_tail(&grp->bb_largest_free_order_node, 1072196e402aSHarshad Shirwadkar &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1073196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1074196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1075196e402aSHarshad Shirwadkar } 10768a57d9d6SCurt Wohlgemuth } 10778a57d9d6SCurt Wohlgemuth 1078089ceeccSEric Sandeen static noinline_for_stack 1079089ceeccSEric Sandeen void ext4_mb_generate_buddy(struct super_block *sb, 10805354b2afSTheodore Ts'o void *buddy, void *bitmap, ext4_group_t group, 10815354b2afSTheodore Ts'o struct ext4_group_info *grp) 1082c9de560dSAlex Tomas { 1083e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 10847137d7a4STheodore Ts'o ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1085a36b4498SEric Sandeen ext4_grpblk_t i = 0; 1086a36b4498SEric Sandeen ext4_grpblk_t first; 1087a36b4498SEric Sandeen ext4_grpblk_t len; 1088c9de560dSAlex Tomas unsigned free = 0; 1089c9de560dSAlex Tomas unsigned fragments = 0; 1090c9de560dSAlex Tomas unsigned long long period = get_cycles(); 1091c9de560dSAlex Tomas 1092c9de560dSAlex Tomas /* initialize buddy from bitmap which is aggregation 1093c9de560dSAlex Tomas * of on-disk bitmap and preallocations */ 1094ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, 0); 1095c9de560dSAlex Tomas grp->bb_first_free = i; 1096c9de560dSAlex Tomas while (i < max) { 1097c9de560dSAlex Tomas fragments++; 1098c9de560dSAlex Tomas first = i; 1099ffad0a44SAneesh Kumar K.V i = mb_find_next_bit(bitmap, max, i); 1100c9de560dSAlex Tomas len = i - first; 1101c9de560dSAlex Tomas free += len; 1102c9de560dSAlex Tomas if (len > 1) 1103c9de560dSAlex Tomas ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1104c9de560dSAlex Tomas else 1105c9de560dSAlex Tomas grp->bb_counters[0]++; 1106c9de560dSAlex Tomas if (i < max) 1107ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, i); 1108c9de560dSAlex Tomas } 1109c9de560dSAlex Tomas grp->bb_fragments = fragments; 1110c9de560dSAlex Tomas 1111c9de560dSAlex Tomas if (free != grp->bb_free) { 1112e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, 111394d4c066STheodore Ts'o "block bitmap and bg descriptor " 111494d4c066STheodore Ts'o "inconsistent: %u vs %u free clusters", 1115e29136f8STheodore Ts'o free, grp->bb_free); 1116e56eb659SAneesh Kumar K.V /* 1117163a203dSDarrick J. 
Wong * If we intend to continue, we consider group descriptor 1118e56eb659SAneesh Kumar K.V * corrupt and update bb_free using bitmap value 1119e56eb659SAneesh Kumar K.V */ 1120c9de560dSAlex Tomas grp->bb_free = free; 1121db79e6d1SWang Shilong ext4_mark_group_bitmap_corrupted(sb, group, 1122db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1123c9de560dSAlex Tomas } 11248a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, grp); 112583e80a6eSJan Kara mb_update_avg_fragment_size(sb, grp); 1126c9de560dSAlex Tomas 1127c9de560dSAlex Tomas clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1128c9de560dSAlex Tomas 1129c9de560dSAlex Tomas period = get_cycles() - period; 113067d25186SHarshad Shirwadkar atomic_inc(&sbi->s_mb_buddies_generated); 113167d25186SHarshad Shirwadkar atomic64_add(period, &sbi->s_mb_generation_time); 1132c9de560dSAlex Tomas } 1133c9de560dSAlex Tomas 1134c9de560dSAlex Tomas /* The buddy information is attached to the buddy cache inode 1135c9de560dSAlex Tomas * for convenience. The information regarding each group 1136c9de560dSAlex Tomas * is loaded via ext4_mb_load_buddy. The information involves 1137c9de560dSAlex Tomas * the block bitmap and buddy information. The information is 1138c9de560dSAlex Tomas * stored in the inode as 1139c9de560dSAlex Tomas * 1140c9de560dSAlex Tomas * { page } 1141c3a326a6SAneesh Kumar K.V * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1142c9de560dSAlex Tomas * 1143c9de560dSAlex Tomas * 1144c9de560dSAlex Tomas * one block each for bitmap and buddy information. 1145c9de560dSAlex Tomas * So for each group we take up 2 blocks. A page can 1146ea1754a0SKirill A. Shutemov * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 1147c9de560dSAlex Tomas * So it can have information regarding groups_per_page which 1148c9de560dSAlex Tomas * is blocks_per_page/2 11498a57d9d6SCurt Wohlgemuth * 11508a57d9d6SCurt Wohlgemuth * Locking note: This routine takes the block group lock of all groups 11518a57d9d6SCurt Wohlgemuth * for this page; do not hold this lock when calling this routine! 1152c9de560dSAlex Tomas */ 1153c9de560dSAlex Tomas 1154adb7ef60SKonstantin Khlebnikov static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1155c9de560dSAlex Tomas { 11568df9675fSTheodore Ts'o ext4_group_t ngroups; 1157c9de560dSAlex Tomas int blocksize; 1158c9de560dSAlex Tomas int blocks_per_page; 1159c9de560dSAlex Tomas int groups_per_page; 1160c9de560dSAlex Tomas int err = 0; 1161c9de560dSAlex Tomas int i; 1162813e5727STheodore Ts'o ext4_group_t first_group, group; 1163c9de560dSAlex Tomas int first_block; 1164c9de560dSAlex Tomas struct super_block *sb; 1165c9de560dSAlex Tomas struct buffer_head *bhs; 1166fa77dcfaSDarrick J. Wong struct buffer_head **bh = NULL; 1167c9de560dSAlex Tomas struct inode *inode; 1168c9de560dSAlex Tomas char *data; 1169c9de560dSAlex Tomas char *bitmap; 11709b8b7d35SAmir Goldstein struct ext4_group_info *grinfo; 1171c9de560dSAlex Tomas 1172c9de560dSAlex Tomas inode = page->mapping->host; 1173c9de560dSAlex Tomas sb = inode->i_sb; 11748df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 117593407472SFabian Frederick blocksize = i_blocksize(inode); 117609cbfeafSKirill A.
Shutemov blocks_per_page = PAGE_SIZE / blocksize; 1177c9de560dSAlex Tomas 1178d3df1453SRitesh Harjani mb_debug(sb, "init page %lu\n", page->index); 1179d3df1453SRitesh Harjani 1180c9de560dSAlex Tomas groups_per_page = blocks_per_page >> 1; 1181c9de560dSAlex Tomas if (groups_per_page == 0) 1182c9de560dSAlex Tomas groups_per_page = 1; 1183c9de560dSAlex Tomas 1184c9de560dSAlex Tomas /* allocate buffer_heads to read bitmaps */ 1185c9de560dSAlex Tomas if (groups_per_page > 1) { 1186c9de560dSAlex Tomas i = sizeof(struct buffer_head *) * groups_per_page; 1187adb7ef60SKonstantin Khlebnikov bh = kzalloc(i, gfp); 1188139f46d3SKemeng Shi if (bh == NULL) 1189139f46d3SKemeng Shi return -ENOMEM; 1190c9de560dSAlex Tomas } else 1191c9de560dSAlex Tomas bh = &bhs; 1192c9de560dSAlex Tomas 1193c9de560dSAlex Tomas first_group = page->index * blocks_per_page / 2; 1194c9de560dSAlex Tomas 1195c9de560dSAlex Tomas /* read all groups the page covers into the cache */ 1196813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1197813e5727STheodore Ts'o if (group >= ngroups) 1198c9de560dSAlex Tomas break; 1199c9de560dSAlex Tomas 1200813e5727STheodore Ts'o grinfo = ext4_get_group_info(sb, group); 12015354b2afSTheodore Ts'o if (!grinfo) 12025354b2afSTheodore Ts'o continue; 12039b8b7d35SAmir Goldstein /* 12049b8b7d35SAmir Goldstein * If page is uptodate then we came here after online resize 12059b8b7d35SAmir Goldstein * which added some new uninitialized group info structs, so 12069b8b7d35SAmir Goldstein * we must skip all initialized uptodate buddies on the page, 12079b8b7d35SAmir Goldstein * which may be currently in use by an allocating task. 12089b8b7d35SAmir Goldstein */ 12099b8b7d35SAmir Goldstein if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 12109b8b7d35SAmir Goldstein bh[i] = NULL; 12119b8b7d35SAmir Goldstein continue; 12129b8b7d35SAmir Goldstein } 1213cfd73237SAlex Zhuravlev bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 12149008a58eSDarrick J. Wong if (IS_ERR(bh[i])) { 12159008a58eSDarrick J. Wong err = PTR_ERR(bh[i]); 12169008a58eSDarrick J. Wong bh[i] = NULL; 1217c9de560dSAlex Tomas goto out; 12182ccb5fb9SAneesh Kumar K.V } 1219d3df1453SRitesh Harjani mb_debug(sb, "read bitmap for group %u\n", group); 1220c9de560dSAlex Tomas } 1221c9de560dSAlex Tomas 1222c9de560dSAlex Tomas /* wait for I/O completion */ 1223813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 12249008a58eSDarrick J. Wong int err2; 12259008a58eSDarrick J. Wong 12269008a58eSDarrick J. Wong if (!bh[i]) 12279008a58eSDarrick J. Wong continue; 12289008a58eSDarrick J. Wong err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 12299008a58eSDarrick J. Wong if (!err) 12309008a58eSDarrick J. 
Wong err = err2; 1231813e5727STheodore Ts'o } 1232c9de560dSAlex Tomas 1233c9de560dSAlex Tomas first_block = page->index * blocks_per_page; 1234c9de560dSAlex Tomas for (i = 0; i < blocks_per_page; i++) { 1235c9de560dSAlex Tomas group = (first_block + i) >> 1; 12368df9675fSTheodore Ts'o if (group >= ngroups) 1237c9de560dSAlex Tomas break; 1238c9de560dSAlex Tomas 12399b8b7d35SAmir Goldstein if (!bh[group - first_group]) 12409b8b7d35SAmir Goldstein /* skip initialized uptodate buddy */ 12419b8b7d35SAmir Goldstein continue; 12429b8b7d35SAmir Goldstein 1243bbdc322fSLukas Czerner if (!buffer_verified(bh[group - first_group])) 1244bbdc322fSLukas Czerner /* Skip faulty bitmaps */ 1245bbdc322fSLukas Czerner continue; 1246bbdc322fSLukas Czerner err = 0; 1247bbdc322fSLukas Czerner 1248c9de560dSAlex Tomas /* 1249c9de560dSAlex Tomas * data carry information regarding this 1250c9de560dSAlex Tomas * particular group in the format specified 1251c9de560dSAlex Tomas * above 1252c9de560dSAlex Tomas * 1253c9de560dSAlex Tomas */ 1254c9de560dSAlex Tomas data = page_address(page) + (i * blocksize); 1255c9de560dSAlex Tomas bitmap = bh[group - first_group]->b_data; 1256c9de560dSAlex Tomas 1257c9de560dSAlex Tomas /* 1258c9de560dSAlex Tomas * We place the buddy block and bitmap block 1259c9de560dSAlex Tomas * close together 1260c9de560dSAlex Tomas */ 1261c9de560dSAlex Tomas if ((first_block + i) & 1) { 1262c9de560dSAlex Tomas /* this is block of buddy */ 1263c9de560dSAlex Tomas BUG_ON(incore == NULL); 1264d3df1453SRitesh Harjani mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1265c9de560dSAlex Tomas group, page->index, i * blocksize); 1266f307333eSTheodore Ts'o trace_ext4_mb_buddy_bitmap_load(sb, group); 1267c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, group); 12685354b2afSTheodore Ts'o if (!grinfo) { 12695354b2afSTheodore Ts'o err = -EFSCORRUPTED; 12705354b2afSTheodore Ts'o goto out; 12715354b2afSTheodore Ts'o } 1272c9de560dSAlex Tomas grinfo->bb_fragments = 0; 1273c9de560dSAlex Tomas memset(grinfo->bb_counters, 0, 12741927805eSEric Sandeen sizeof(*grinfo->bb_counters) * 12754b68f6dfSHarshad Shirwadkar (MB_NUM_ORDERS(sb))); 1276c9de560dSAlex Tomas /* 1277c9de560dSAlex Tomas * incore got set to the group block bitmap below 1278c9de560dSAlex Tomas */ 12797a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, group); 12809b8b7d35SAmir Goldstein /* init the buddy */ 12819b8b7d35SAmir Goldstein memset(data, 0xff, blocksize); 12825354b2afSTheodore Ts'o ext4_mb_generate_buddy(sb, data, incore, group, grinfo); 12837a2fcbf7SAneesh Kumar K.V ext4_unlock_group(sb, group); 1284c9de560dSAlex Tomas incore = NULL; 1285c9de560dSAlex Tomas } else { 1286c9de560dSAlex Tomas /* this is block of bitmap */ 1287c9de560dSAlex Tomas BUG_ON(incore != NULL); 1288d3df1453SRitesh Harjani mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1289c9de560dSAlex Tomas group, page->index, i * blocksize); 1290f307333eSTheodore Ts'o trace_ext4_mb_bitmap_load(sb, group); 1291c9de560dSAlex Tomas 1292c9de560dSAlex Tomas /* see comments in ext4_mb_put_pa() */ 1293c9de560dSAlex Tomas ext4_lock_group(sb, group); 1294c9de560dSAlex Tomas memcpy(data, bitmap, blocksize); 1295c9de560dSAlex Tomas 1296c9de560dSAlex Tomas /* mark all preallocated blks used in in-core bitmap */ 1297c9de560dSAlex Tomas ext4_mb_generate_from_pa(sb, data, group); 12987a2fcbf7SAneesh Kumar K.V ext4_mb_generate_from_freelist(sb, data, group); 1299c9de560dSAlex Tomas ext4_unlock_group(sb, group); 1300c9de560dSAlex Tomas 1301c9de560dSAlex Tomas /* set incore so that the buddy 
information can be 1302c9de560dSAlex Tomas * generated using this 1303c9de560dSAlex Tomas */ 1304c9de560dSAlex Tomas incore = data; 1305c9de560dSAlex Tomas } 1306c9de560dSAlex Tomas } 1307c9de560dSAlex Tomas SetPageUptodate(page); 1308c9de560dSAlex Tomas 1309c9de560dSAlex Tomas out: 1310c9de560dSAlex Tomas if (bh) { 13119b8b7d35SAmir Goldstein for (i = 0; i < groups_per_page; i++) 1312c9de560dSAlex Tomas brelse(bh[i]); 1313c9de560dSAlex Tomas if (bh != &bhs) 1314c9de560dSAlex Tomas kfree(bh); 1315c9de560dSAlex Tomas } 1316c9de560dSAlex Tomas return err; 1317c9de560dSAlex Tomas } 1318c9de560dSAlex Tomas 13198a57d9d6SCurt Wohlgemuth /* 13202de8807bSAmir Goldstein * Lock the buddy and bitmap pages. This makes sure other parallel init_group 13212de8807bSAmir Goldstein * on the same buddy page doesn't happen while holding the buddy page lock. 13222de8807bSAmir Goldstein * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 13232de8807bSAmir Goldstein * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 1324eee4adc7SEric Sandeen */ 13252de8807bSAmir Goldstein static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1326adb7ef60SKonstantin Khlebnikov ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1327eee4adc7SEric Sandeen { 13282de8807bSAmir Goldstein struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 13292de8807bSAmir Goldstein int block, pnum, poff; 1330eee4adc7SEric Sandeen int blocks_per_page; 13312de8807bSAmir Goldstein struct page *page; 13322de8807bSAmir Goldstein 13332de8807bSAmir Goldstein e4b->bd_buddy_page = NULL; 13342de8807bSAmir Goldstein e4b->bd_bitmap_page = NULL; 1335eee4adc7SEric Sandeen 133609cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1337eee4adc7SEric Sandeen /* 1338eee4adc7SEric Sandeen * the buddy cache inode stores the block bitmap 1339eee4adc7SEric Sandeen * and buddy information in consecutive blocks. 1340eee4adc7SEric Sandeen * So for each group we need two blocks.
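 * For example (illustrative numbers only): with 4 KiB pages and a
 * 1 KiB block size, blocks_per_page is 4, so group 5 maps to block
 * 2 * 5 = 10 of the buddy cache inode, i.e. page 2 at offset 2 for
 * the bitmap, and block 11 (same page, offset 3) for the buddy.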
1341eee4adc7SEric Sandeen */ 1342eee4adc7SEric Sandeen block = group * 2; 1343eee4adc7SEric Sandeen pnum = block / blocks_per_page; 13442de8807bSAmir Goldstein poff = block % blocks_per_page; 1345adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13462de8807bSAmir Goldstein if (!page) 1347c57ab39bSYounger Liu return -ENOMEM; 13482de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13492de8807bSAmir Goldstein e4b->bd_bitmap_page = page; 13502de8807bSAmir Goldstein e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1351eee4adc7SEric Sandeen 13522de8807bSAmir Goldstein if (blocks_per_page >= 2) { 13532de8807bSAmir Goldstein /* buddy and bitmap are on the same page */ 13542de8807bSAmir Goldstein return 0; 1355eee4adc7SEric Sandeen } 1356eee4adc7SEric Sandeen 13572de8807bSAmir Goldstein block++; 1358eee4adc7SEric Sandeen pnum = block / blocks_per_page; 1359adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13602de8807bSAmir Goldstein if (!page) 1361c57ab39bSYounger Liu return -ENOMEM; 13622de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13632de8807bSAmir Goldstein e4b->bd_buddy_page = page; 13642de8807bSAmir Goldstein return 0; 1365eee4adc7SEric Sandeen } 1366eee4adc7SEric Sandeen 13672de8807bSAmir Goldstein static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 13682de8807bSAmir Goldstein { 13692de8807bSAmir Goldstein if (e4b->bd_bitmap_page) { 13702de8807bSAmir Goldstein unlock_page(e4b->bd_bitmap_page); 137109cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 13722de8807bSAmir Goldstein } 13732de8807bSAmir Goldstein if (e4b->bd_buddy_page) { 13742de8807bSAmir Goldstein unlock_page(e4b->bd_buddy_page); 137509cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page); 13762de8807bSAmir Goldstein } 1377eee4adc7SEric Sandeen } 1378eee4adc7SEric Sandeen 1379eee4adc7SEric Sandeen /* 13808a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 13818a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 13828a57d9d6SCurt Wohlgemuth * calling this routine! 13838a57d9d6SCurt Wohlgemuth */ 1384b6a758ecSAneesh Kumar K.V static noinline_for_stack 1385adb7ef60SKonstantin Khlebnikov int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1386b6a758ecSAneesh Kumar K.V { 1387b6a758ecSAneesh Kumar K.V 1388b6a758ecSAneesh Kumar K.V struct ext4_group_info *this_grp; 13892de8807bSAmir Goldstein struct ext4_buddy e4b; 13902de8807bSAmir Goldstein struct page *page; 13912de8807bSAmir Goldstein int ret = 0; 1392b6a758ecSAneesh Kumar K.V 1393b10a44c3STheodore Ts'o might_sleep(); 1394d3df1453SRitesh Harjani mb_debug(sb, "init group %u\n", group); 1395b6a758ecSAneesh Kumar K.V this_grp = ext4_get_group_info(sb, group); 13965354b2afSTheodore Ts'o if (!this_grp) 13975354b2afSTheodore Ts'o return -EFSCORRUPTED; 13985354b2afSTheodore Ts'o 1399b6a758ecSAneesh Kumar K.V /* 140008c3a813SAneesh Kumar K.V * This ensures that we don't reinit the buddy cache 140108c3a813SAneesh Kumar K.V * page which map to the group from which we are already 140208c3a813SAneesh Kumar K.V * allocating. If we are looking at the buddy cache we would 140308c3a813SAneesh Kumar K.V * have taken a reference using ext4_mb_load_buddy and that 14042de8807bSAmir Goldstein * would have pinned buddy page to page cache. 
14052457aec6SMel Gorman * The call to ext4_mb_get_buddy_page_lock will mark the 14062457aec6SMel Gorman * page accessed. 1407b6a758ecSAneesh Kumar K.V */ 1408adb7ef60SKonstantin Khlebnikov ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 14092de8807bSAmir Goldstein if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1410b6a758ecSAneesh Kumar K.V /* 1411b6a758ecSAneesh Kumar K.V * somebody initialized the group 1412b6a758ecSAneesh Kumar K.V * return without doing anything 1413b6a758ecSAneesh Kumar K.V */ 1414b6a758ecSAneesh Kumar K.V goto err; 1415b6a758ecSAneesh Kumar K.V } 14162de8807bSAmir Goldstein 14172de8807bSAmir Goldstein page = e4b.bd_bitmap_page; 1418adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 14192de8807bSAmir Goldstein if (ret) 1420b6a758ecSAneesh Kumar K.V goto err; 14212de8807bSAmir Goldstein if (!PageUptodate(page)) { 1422b6a758ecSAneesh Kumar K.V ret = -EIO; 1423b6a758ecSAneesh Kumar K.V goto err; 1424b6a758ecSAneesh Kumar K.V } 1425b6a758ecSAneesh Kumar K.V 14262de8807bSAmir Goldstein if (e4b.bd_buddy_page == NULL) { 1427b6a758ecSAneesh Kumar K.V /* 1428b6a758ecSAneesh Kumar K.V * If both the bitmap and buddy are in 1429b6a758ecSAneesh Kumar K.V * the same page we don't need to force 1430b6a758ecSAneesh Kumar K.V * init the buddy 1431b6a758ecSAneesh Kumar K.V */ 14322de8807bSAmir Goldstein ret = 0; 1433b6a758ecSAneesh Kumar K.V goto err; 1434b6a758ecSAneesh Kumar K.V } 14352de8807bSAmir Goldstein /* init buddy cache */ 14362de8807bSAmir Goldstein page = e4b.bd_buddy_page; 1437adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 14382de8807bSAmir Goldstein if (ret) 14392de8807bSAmir Goldstein goto err; 14402de8807bSAmir Goldstein if (!PageUptodate(page)) { 1441b6a758ecSAneesh Kumar K.V ret = -EIO; 1442b6a758ecSAneesh Kumar K.V goto err; 1443b6a758ecSAneesh Kumar K.V } 1444b6a758ecSAneesh Kumar K.V err: 14452de8807bSAmir Goldstein ext4_mb_put_buddy_page_lock(&e4b); 1446b6a758ecSAneesh Kumar K.V return ret; 1447b6a758ecSAneesh Kumar K.V } 1448b6a758ecSAneesh Kumar K.V 14498a57d9d6SCurt Wohlgemuth /* 14508a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 14518a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 14528a57d9d6SCurt Wohlgemuth * calling this routine! 14538a57d9d6SCurt Wohlgemuth */ 14544ddfef7bSEric Sandeen static noinline_for_stack int 1455adb7ef60SKonstantin Khlebnikov ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1456adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b, gfp_t gfp) 1457c9de560dSAlex Tomas { 1458c9de560dSAlex Tomas int blocks_per_page; 1459c9de560dSAlex Tomas int block; 1460c9de560dSAlex Tomas int pnum; 1461c9de560dSAlex Tomas int poff; 1462c9de560dSAlex Tomas struct page *page; 1463fdf6c7a7SShen Feng int ret; 1464920313a7SAneesh Kumar K.V struct ext4_group_info *grp; 1465920313a7SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 1466920313a7SAneesh Kumar K.V struct inode *inode = sbi->s_buddy_cache; 1467c9de560dSAlex Tomas 1468b10a44c3STheodore Ts'o might_sleep(); 1469d3df1453SRitesh Harjani mb_debug(sb, "load group %u\n", group); 1470c9de560dSAlex Tomas 147109cbfeafSKirill A. 
Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1472920313a7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 14735354b2afSTheodore Ts'o if (!grp) 14745354b2afSTheodore Ts'o return -EFSCORRUPTED; 1475c9de560dSAlex Tomas 1476c9de560dSAlex Tomas e4b->bd_blkbits = sb->s_blocksize_bits; 1477529da704STao Ma e4b->bd_info = grp; 1478c9de560dSAlex Tomas e4b->bd_sb = sb; 1479c9de560dSAlex Tomas e4b->bd_group = group; 1480c9de560dSAlex Tomas e4b->bd_buddy_page = NULL; 1481c9de560dSAlex Tomas e4b->bd_bitmap_page = NULL; 1482c9de560dSAlex Tomas 1483f41c0750SAneesh Kumar K.V if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1484f41c0750SAneesh Kumar K.V /* 1485f41c0750SAneesh Kumar K.V * we need full data about the group 1486f41c0750SAneesh Kumar K.V * to make a good selection 1487f41c0750SAneesh Kumar K.V */ 1488adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, gfp); 1489f41c0750SAneesh Kumar K.V if (ret) 1490f41c0750SAneesh Kumar K.V return ret; 1491f41c0750SAneesh Kumar K.V } 1492f41c0750SAneesh Kumar K.V 1493c9de560dSAlex Tomas /* 1494c9de560dSAlex Tomas * the buddy cache inode stores the block bitmap 1495c9de560dSAlex Tomas * and buddy information in consecutive blocks. 1496c9de560dSAlex Tomas * So for each group we need two blocks. 1497c9de560dSAlex Tomas */ 1498c9de560dSAlex Tomas block = group * 2; 1499c9de560dSAlex Tomas pnum = block / blocks_per_page; 1500c9de560dSAlex Tomas poff = block % blocks_per_page; 1501c9de560dSAlex Tomas 1502c9de560dSAlex Tomas /* we could use find_or_create_page(), but it locks the page, 1503c9de560dSAlex Tomas * which we'd like to avoid in the fast path ... */ 15042457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1505c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1506c9de560dSAlex Tomas if (page) 1507920313a7SAneesh Kumar K.V /* 1508920313a7SAneesh Kumar K.V * drop the page reference and try 1509920313a7SAneesh Kumar K.V * to get the page with lock. If we 1510920313a7SAneesh Kumar K.V * are not uptodate that implies 1511920313a7SAneesh Kumar K.V * somebody just created the page but 1512920313a7SAneesh Kumar K.V * is yet to initialize the same. So 1513920313a7SAneesh Kumar K.V * wait for it to initialize. 1514920313a7SAneesh Kumar K.V */ 151509cbfeafSKirill A.
Shutemov put_page(page); 1516adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1517c9de560dSAlex Tomas if (page) { 151819b8b035STheodore Ts'o if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 151919b8b035STheodore Ts'o "ext4: bitmap's paging->mapping != inode->i_mapping\n")) { 152019b8b035STheodore Ts'o /* should never happen */ 152119b8b035STheodore Ts'o unlock_page(page); 152219b8b035STheodore Ts'o ret = -EINVAL; 152319b8b035STheodore Ts'o goto err; 152419b8b035STheodore Ts'o } 1525c9de560dSAlex Tomas if (!PageUptodate(page)) { 1526adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 1527fdf6c7a7SShen Feng if (ret) { 1528fdf6c7a7SShen Feng unlock_page(page); 1529fdf6c7a7SShen Feng goto err; 1530fdf6c7a7SShen Feng } 1531c9de560dSAlex Tomas mb_cmp_bitmaps(e4b, page_address(page) + 1532c9de560dSAlex Tomas (poff * sb->s_blocksize)); 1533c9de560dSAlex Tomas } 1534c9de560dSAlex Tomas unlock_page(page); 1535c9de560dSAlex Tomas } 1536c9de560dSAlex Tomas } 1537c57ab39bSYounger Liu if (page == NULL) { 1538c57ab39bSYounger Liu ret = -ENOMEM; 1539c57ab39bSYounger Liu goto err; 1540c57ab39bSYounger Liu } 1541c57ab39bSYounger Liu if (!PageUptodate(page)) { 1542fdf6c7a7SShen Feng ret = -EIO; 1543c9de560dSAlex Tomas goto err; 1544fdf6c7a7SShen Feng } 15452457aec6SMel Gorman 15462457aec6SMel Gorman /* Pages marked accessed already */ 1547c9de560dSAlex Tomas e4b->bd_bitmap_page = page; 1548c9de560dSAlex Tomas e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1549c9de560dSAlex Tomas 1550c9de560dSAlex Tomas block++; 1551c9de560dSAlex Tomas pnum = block / blocks_per_page; 1552c9de560dSAlex Tomas poff = block % blocks_per_page; 1553c9de560dSAlex Tomas 15542457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1555c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1556c9de560dSAlex Tomas if (page) 155709cbfeafSKirill A. 
Shutemov put_page(page); 1558adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1559c9de560dSAlex Tomas if (page) { 156019b8b035STheodore Ts'o if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 156119b8b035STheodore Ts'o "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { 156219b8b035STheodore Ts'o /* should never happen */ 156319b8b035STheodore Ts'o unlock_page(page); 156419b8b035STheodore Ts'o ret = -EINVAL; 156519b8b035STheodore Ts'o goto err; 156619b8b035STheodore Ts'o } 1567fdf6c7a7SShen Feng if (!PageUptodate(page)) { 1568adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1569adb7ef60SKonstantin Khlebnikov gfp); 1570fdf6c7a7SShen Feng if (ret) { 1571fdf6c7a7SShen Feng unlock_page(page); 1572fdf6c7a7SShen Feng goto err; 1573fdf6c7a7SShen Feng } 1574fdf6c7a7SShen Feng } 1575c9de560dSAlex Tomas unlock_page(page); 1576c9de560dSAlex Tomas } 1577c9de560dSAlex Tomas } 1578c57ab39bSYounger Liu if (page == NULL) { 1579c57ab39bSYounger Liu ret = -ENOMEM; 1580c57ab39bSYounger Liu goto err; 1581c57ab39bSYounger Liu } 1582c57ab39bSYounger Liu if (!PageUptodate(page)) { 1583fdf6c7a7SShen Feng ret = -EIO; 1584c9de560dSAlex Tomas goto err; 1585fdf6c7a7SShen Feng } 15862457aec6SMel Gorman 15872457aec6SMel Gorman /* Pages marked accessed already */ 1588c9de560dSAlex Tomas e4b->bd_buddy_page = page; 1589c9de560dSAlex Tomas e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1590c9de560dSAlex Tomas 1591c9de560dSAlex Tomas return 0; 1592c9de560dSAlex Tomas 1593c9de560dSAlex Tomas err: 159426626f11SYang Ruirui if (page) 159509cbfeafSKirill A. Shutemov put_page(page); 1596c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 159709cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1598285164b8SKemeng Shi 1599c9de560dSAlex Tomas e4b->bd_buddy = NULL; 1600c9de560dSAlex Tomas e4b->bd_bitmap = NULL; 1601fdf6c7a7SShen Feng return ret; 1602c9de560dSAlex Tomas } 1603c9de560dSAlex Tomas 1604adb7ef60SKonstantin Khlebnikov static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1605adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b) 1606adb7ef60SKonstantin Khlebnikov { 1607adb7ef60SKonstantin Khlebnikov return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1608adb7ef60SKonstantin Khlebnikov } 1609adb7ef60SKonstantin Khlebnikov 1610e39e07fdSJing Zhang static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1611c9de560dSAlex Tomas { 1612c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 161309cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1614c9de560dSAlex Tomas if (e4b->bd_buddy_page) 161509cbfeafSKirill A. 
Shutemov put_page(e4b->bd_buddy_page); 1616c9de560dSAlex Tomas } 1617c9de560dSAlex Tomas 1618c9de560dSAlex Tomas 1619c9de560dSAlex Tomas static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1620c9de560dSAlex Tomas { 1621ce3cca33SChunguang Xu int order = 1, max; 1622c9de560dSAlex Tomas void *bb; 1623c9de560dSAlex Tomas 1624c5e8f3f3STheodore Ts'o BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1625c9de560dSAlex Tomas BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1626c9de560dSAlex Tomas 1627c9de560dSAlex Tomas while (order <= e4b->bd_blkbits + 1) { 1628ce3cca33SChunguang Xu bb = mb_find_buddy(e4b, order, &max); 1629ce3cca33SChunguang Xu if (!mb_test_bit(block >> order, bb)) { 1630c9de560dSAlex Tomas /* this block is part of buddy of order 'order' */ 1631c9de560dSAlex Tomas return order; 1632c9de560dSAlex Tomas } 1633c9de560dSAlex Tomas order++; 1634c9de560dSAlex Tomas } 1635c9de560dSAlex Tomas return 0; 1636c9de560dSAlex Tomas } 1637c9de560dSAlex Tomas 1638955ce5f5SAneesh Kumar K.V static void mb_clear_bits(void *bm, int cur, int len) 1639c9de560dSAlex Tomas { 1640c9de560dSAlex Tomas __u32 *addr; 1641c9de560dSAlex Tomas 1642c9de560dSAlex Tomas len = cur + len; 1643c9de560dSAlex Tomas while (cur < len) { 1644c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1645c9de560dSAlex Tomas /* fast path: clear whole word at once */ 1646c9de560dSAlex Tomas addr = bm + (cur >> 3); 1647c9de560dSAlex Tomas *addr = 0; 1648c9de560dSAlex Tomas cur += 32; 1649c9de560dSAlex Tomas continue; 1650c9de560dSAlex Tomas } 1651e8134b27SAneesh Kumar K.V mb_clear_bit(cur, bm); 1652c9de560dSAlex Tomas cur++; 1653c9de560dSAlex Tomas } 1654c9de560dSAlex Tomas } 1655c9de560dSAlex Tomas 1656eabe0444SAndrey Sidorov /* clear bits in given range 1657eabe0444SAndrey Sidorov * will return first found zero bit if any, -1 otherwise 1658eabe0444SAndrey Sidorov */ 1659eabe0444SAndrey Sidorov static int mb_test_and_clear_bits(void *bm, int cur, int len) 1660eabe0444SAndrey Sidorov { 1661eabe0444SAndrey Sidorov __u32 *addr; 1662eabe0444SAndrey Sidorov int zero_bit = -1; 1663eabe0444SAndrey Sidorov 1664eabe0444SAndrey Sidorov len = cur + len; 1665eabe0444SAndrey Sidorov while (cur < len) { 1666eabe0444SAndrey Sidorov if ((cur & 31) == 0 && (len - cur) >= 32) { 1667eabe0444SAndrey Sidorov /* fast path: clear whole word at once */ 1668eabe0444SAndrey Sidorov addr = bm + (cur >> 3); 1669eabe0444SAndrey Sidorov if (*addr != (__u32)(-1) && zero_bit == -1) 1670eabe0444SAndrey Sidorov zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1671eabe0444SAndrey Sidorov *addr = 0; 1672eabe0444SAndrey Sidorov cur += 32; 1673eabe0444SAndrey Sidorov continue; 1674eabe0444SAndrey Sidorov } 1675eabe0444SAndrey Sidorov if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1676eabe0444SAndrey Sidorov zero_bit = cur; 1677eabe0444SAndrey Sidorov cur++; 1678eabe0444SAndrey Sidorov } 1679eabe0444SAndrey Sidorov 1680eabe0444SAndrey Sidorov return zero_bit; 1681eabe0444SAndrey Sidorov } 1682eabe0444SAndrey Sidorov 1683123e3016SRitesh Harjani void mb_set_bits(void *bm, int cur, int len) 1684c9de560dSAlex Tomas { 1685c9de560dSAlex Tomas __u32 *addr; 1686c9de560dSAlex Tomas 1687c9de560dSAlex Tomas len = cur + len; 1688c9de560dSAlex Tomas while (cur < len) { 1689c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1690c9de560dSAlex Tomas /* fast path: set whole word at once */ 1691c9de560dSAlex Tomas addr = bm + (cur >> 3); 1692c9de560dSAlex Tomas *addr = 0xffffffff; 1693c9de560dSAlex Tomas cur += 32; 1694c9de560dSAlex Tomas 
continue; 1695c9de560dSAlex Tomas } 1696e8134b27SAneesh Kumar K.V mb_set_bit(cur, bm); 1697c9de560dSAlex Tomas cur++; 1698c9de560dSAlex Tomas } 1699c9de560dSAlex Tomas } 1700c9de560dSAlex Tomas 1701eabe0444SAndrey Sidorov static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1702eabe0444SAndrey Sidorov { 1703eabe0444SAndrey Sidorov if (mb_test_bit(*bit + side, bitmap)) { 1704eabe0444SAndrey Sidorov mb_clear_bit(*bit, bitmap); 1705eabe0444SAndrey Sidorov (*bit) -= side; 1706eabe0444SAndrey Sidorov return 1; 1707eabe0444SAndrey Sidorov } 1708eabe0444SAndrey Sidorov else { 1709eabe0444SAndrey Sidorov (*bit) += side; 1710eabe0444SAndrey Sidorov mb_set_bit(*bit, bitmap); 1711eabe0444SAndrey Sidorov return -1; 1712eabe0444SAndrey Sidorov } 1713eabe0444SAndrey Sidorov } 1714eabe0444SAndrey Sidorov 1715eabe0444SAndrey Sidorov static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1716eabe0444SAndrey Sidorov { 1717eabe0444SAndrey Sidorov int max; 1718eabe0444SAndrey Sidorov int order = 1; 1719eabe0444SAndrey Sidorov void *buddy = mb_find_buddy(e4b, order, &max); 1720eabe0444SAndrey Sidorov 1721eabe0444SAndrey Sidorov while (buddy) { 1722eabe0444SAndrey Sidorov void *buddy2; 1723eabe0444SAndrey Sidorov 1724eabe0444SAndrey Sidorov /* Bits in range [first; last] are known to be set since 1725eabe0444SAndrey Sidorov * corresponding blocks were allocated. Bits in range 1726eabe0444SAndrey Sidorov * (first; last) will stay set because they form buddies on 1727eabe0444SAndrey Sidorov * upper layer. We just deal with borders if they don't 1728eabe0444SAndrey Sidorov * align with upper layer and then go up. 1729eabe0444SAndrey Sidorov * Releasing entire group is all about clearing 1730eabe0444SAndrey Sidorov * single bit of highest order buddy. 1731eabe0444SAndrey Sidorov */ 1732eabe0444SAndrey Sidorov 1733eabe0444SAndrey Sidorov /* Example: 1734eabe0444SAndrey Sidorov * --------------------------------- 1735eabe0444SAndrey Sidorov * | 1 | 1 | 1 | 1 | 1736eabe0444SAndrey Sidorov * --------------------------------- 1737eabe0444SAndrey Sidorov * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1738eabe0444SAndrey Sidorov * --------------------------------- 1739eabe0444SAndrey Sidorov * 0 1 2 3 4 5 6 7 1740eabe0444SAndrey Sidorov * \_____________________/ 1741eabe0444SAndrey Sidorov * 1742eabe0444SAndrey Sidorov * Neither [1] nor [6] is aligned to above layer. 1743eabe0444SAndrey Sidorov * Left neighbour [0] is free, so mark it busy, 1744eabe0444SAndrey Sidorov * decrease bb_counters and extend range to 1745eabe0444SAndrey Sidorov * [0; 6] 1746eabe0444SAndrey Sidorov * Right neighbour [7] is busy. It can't be coalesced with [6], so 1747eabe0444SAndrey Sidorov * mark [6] free, increase bb_counters and shrink range to 1748eabe0444SAndrey Sidorov * [0; 5]. 1749eabe0444SAndrey Sidorov * Then shift range to [0; 2], go up and do the same.
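 * (The loop repeats this at each higher order: misaligned borders are
 * adjusted first, and it terminates either when the adjusted range
 * becomes empty (first > last) or when no higher-order buddy exists,
 * in which case the remaining bits are cleared in the current buddy.)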
1750eabe0444SAndrey Sidorov */ 1751eabe0444SAndrey Sidorov 1752eabe0444SAndrey Sidorov 1753eabe0444SAndrey Sidorov if (first & 1) 1754eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1755eabe0444SAndrey Sidorov if (!(last & 1)) 1756eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1757eabe0444SAndrey Sidorov if (first > last) 1758eabe0444SAndrey Sidorov break; 1759eabe0444SAndrey Sidorov order++; 1760eabe0444SAndrey Sidorov 1761976620bdSKemeng Shi buddy2 = mb_find_buddy(e4b, order, &max); 1762976620bdSKemeng Shi if (!buddy2) { 1763eabe0444SAndrey Sidorov mb_clear_bits(buddy, first, last - first + 1); 1764eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1765eabe0444SAndrey Sidorov break; 1766eabe0444SAndrey Sidorov } 1767eabe0444SAndrey Sidorov first >>= 1; 1768eabe0444SAndrey Sidorov last >>= 1; 1769eabe0444SAndrey Sidorov buddy = buddy2; 1770eabe0444SAndrey Sidorov } 1771eabe0444SAndrey Sidorov } 1772eabe0444SAndrey Sidorov 17737e5a8cddSShen Feng static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1774c9de560dSAlex Tomas int first, int count) 1775c9de560dSAlex Tomas { 1776eabe0444SAndrey Sidorov int left_is_free = 0; 1777eabe0444SAndrey Sidorov int right_is_free = 0; 1778eabe0444SAndrey Sidorov int block; 1779eabe0444SAndrey Sidorov int last = first + count - 1; 1780c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 1781c9de560dSAlex Tomas 1782c99d1e6eSTheodore Ts'o if (WARN_ON(count == 0)) 1783c99d1e6eSTheodore Ts'o return; 1784eabe0444SAndrey Sidorov BUG_ON(last >= (sb->s_blocksize << 3)); 1785bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1786163a203dSDarrick J. Wong /* Don't bother if the block group is corrupt. */ 1787163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1788163a203dSDarrick J. Wong return; 1789163a203dSDarrick J. Wong 1790c9de560dSAlex Tomas mb_check_buddy(e4b); 1791c9de560dSAlex Tomas mb_free_blocks_double(inode, e4b, first, count); 1792c9de560dSAlex Tomas 179307b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1794c9de560dSAlex Tomas e4b->bd_info->bb_free += count; 1795c9de560dSAlex Tomas if (first < e4b->bd_info->bb_first_free) 1796c9de560dSAlex Tomas e4b->bd_info->bb_first_free = first; 1797c9de560dSAlex Tomas 1798eabe0444SAndrey Sidorov /* access memory sequentially: check left neighbour, 1799eabe0444SAndrey Sidorov * clear range and then check right neighbour 1800eabe0444SAndrey Sidorov */ 1801c9de560dSAlex Tomas if (first != 0) 1802eabe0444SAndrey Sidorov left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1803eabe0444SAndrey Sidorov block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1804eabe0444SAndrey Sidorov if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1805eabe0444SAndrey Sidorov right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1806c9de560dSAlex Tomas 1807eabe0444SAndrey Sidorov if (unlikely(block != -1)) { 1808e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 1809c9de560dSAlex Tomas ext4_fsblk_t blocknr; 18105661bd68SAkinobu Mita 18115661bd68SAkinobu Mita blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 181249598e04SJun Piao blocknr += EXT4_C2B(sbi, block); 18138016e29fSHarshad Shirwadkar if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 18145d1b1b3fSAneesh Kumar K.V ext4_grp_locked_error(sb, e4b->bd_group, 1815e29136f8STheodore Ts'o inode ? 
inode->i_ino : 0, 1816e29136f8STheodore Ts'o blocknr, 18178016e29fSHarshad Shirwadkar "freeing already freed block (bit %u); block bitmap corrupt.", 1818163a203dSDarrick J. Wong block); 18198016e29fSHarshad Shirwadkar ext4_mark_group_bitmap_corrupted( 18208016e29fSHarshad Shirwadkar sb, e4b->bd_group, 1821db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 18228016e29fSHarshad Shirwadkar } 1823eabe0444SAndrey Sidorov goto done; 1824c9de560dSAlex Tomas } 1825c9de560dSAlex Tomas 1826eabe0444SAndrey Sidorov /* let's maintain fragments counter */ 1827eabe0444SAndrey Sidorov if (left_is_free && right_is_free) 1828eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments--; 1829eabe0444SAndrey Sidorov else if (!left_is_free && !right_is_free) 1830eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments++; 1831c9de560dSAlex Tomas 1832eabe0444SAndrey Sidorov /* buddy[0] == bd_bitmap is a special case, so handle 1833eabe0444SAndrey Sidorov * it right away and let mb_buddy_mark_free stay free of 1834eabe0444SAndrey Sidorov * zero order checks. 1835eabe0444SAndrey Sidorov * Check if neighbours are to be coalesced, 1836eabe0444SAndrey Sidorov * adjust bitmap bb_counters and borders appropriately. 1837eabe0444SAndrey Sidorov */ 1838eabe0444SAndrey Sidorov if (first & 1) { 1839eabe0444SAndrey Sidorov first += !left_is_free; 1840eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1841c9de560dSAlex Tomas } 1842eabe0444SAndrey Sidorov if (!(last & 1)) { 1843eabe0444SAndrey Sidorov last -= !right_is_free; 1844eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; 1845c9de560dSAlex Tomas } 1846eabe0444SAndrey Sidorov 1847eabe0444SAndrey Sidorov if (first <= last) 1848eabe0444SAndrey Sidorov mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1849eabe0444SAndrey Sidorov 1850eabe0444SAndrey Sidorov done: 18518a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, e4b->bd_info); 1852196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(sb, e4b->bd_info); 1853c9de560dSAlex Tomas mb_check_buddy(e4b); 1854c9de560dSAlex Tomas } 1855c9de560dSAlex Tomas 185615c006a2SRobin Dong static int mb_find_extent(struct ext4_buddy *e4b, int block, 1857c9de560dSAlex Tomas int needed, struct ext4_free_extent *ex) 1858c9de560dSAlex Tomas { 1859c9de560dSAlex Tomas int next = block; 186015c006a2SRobin Dong int max, order; 1861c9de560dSAlex Tomas void *buddy; 1862c9de560dSAlex Tomas 1863bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1864c9de560dSAlex Tomas BUG_ON(ex == NULL); 1865c9de560dSAlex Tomas 186615c006a2SRobin Dong buddy = mb_find_buddy(e4b, 0, &max); 1867c9de560dSAlex Tomas BUG_ON(buddy == NULL); 1868c9de560dSAlex Tomas BUG_ON(block >= max); 1869c9de560dSAlex Tomas if (mb_test_bit(block, buddy)) { 1870c9de560dSAlex Tomas ex->fe_len = 0; 1871c9de560dSAlex Tomas ex->fe_start = 0; 1872c9de560dSAlex Tomas ex->fe_group = 0; 1873c9de560dSAlex Tomas return 0; 1874c9de560dSAlex Tomas } 1875c9de560dSAlex Tomas 1876c9de560dSAlex Tomas /* find actual order */ 1877c9de560dSAlex Tomas order = mb_find_order_for_block(e4b, block); 1878c9de560dSAlex Tomas block = block >> order; 1879c9de560dSAlex Tomas 1880c9de560dSAlex Tomas ex->fe_len = 1 << order; 1881c9de560dSAlex Tomas ex->fe_start = block << order; 1882c9de560dSAlex Tomas ex->fe_group = e4b->bd_group; 1883c9de560dSAlex Tomas 1884c9de560dSAlex Tomas /* calc difference from given start */ 1885c9de560dSAlex Tomas next = next - ex->fe_start; 1886c9de560dSAlex Tomas ex->fe_len -= next; 1887c9de560dSAlex
Tomas ex->fe_start += next; 1888c9de560dSAlex Tomas 1889c9de560dSAlex Tomas while (needed > ex->fe_len && 1890d8ec0c39SAlan Cox mb_find_buddy(e4b, order, &max)) { 1891c9de560dSAlex Tomas 1892c9de560dSAlex Tomas if (block + 1 >= max) 1893c9de560dSAlex Tomas break; 1894c9de560dSAlex Tomas 1895c9de560dSAlex Tomas next = (block + 1) * (1 << order); 1896c5e8f3f3STheodore Ts'o if (mb_test_bit(next, e4b->bd_bitmap)) 1897c9de560dSAlex Tomas break; 1898c9de560dSAlex Tomas 1899b051d8dcSRobin Dong order = mb_find_order_for_block(e4b, next); 1900c9de560dSAlex Tomas 1901c9de560dSAlex Tomas block = next >> order; 1902c9de560dSAlex Tomas ex->fe_len += 1 << order; 1903c9de560dSAlex Tomas } 1904c9de560dSAlex Tomas 190531562b95SJan Kara if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 190643c73221STheodore Ts'o /* Should never happen! (but apparently sometimes does?!?) */ 190743c73221STheodore Ts'o WARN_ON(1); 1908cd84bbbaSStephen Brennan ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1909cd84bbbaSStephen Brennan "corruption or bug in mb_find_extent " 191043c73221STheodore Ts'o "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 191143c73221STheodore Ts'o block, order, needed, ex->fe_group, ex->fe_start, 191243c73221STheodore Ts'o ex->fe_len, ex->fe_logical); 191343c73221STheodore Ts'o ex->fe_len = 0; 191443c73221STheodore Ts'o ex->fe_start = 0; 191543c73221STheodore Ts'o ex->fe_group = 0; 191643c73221STheodore Ts'o } 1917c9de560dSAlex Tomas return ex->fe_len; 1918c9de560dSAlex Tomas } 1919c9de560dSAlex Tomas 1920c9de560dSAlex Tomas static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1921c9de560dSAlex Tomas { 1922c9de560dSAlex Tomas int ord; 1923c9de560dSAlex Tomas int mlen = 0; 1924c9de560dSAlex Tomas int max = 0; 1925c9de560dSAlex Tomas int cur; 1926c9de560dSAlex Tomas int start = ex->fe_start; 1927c9de560dSAlex Tomas int len = ex->fe_len; 1928c9de560dSAlex Tomas unsigned ret = 0; 1929c9de560dSAlex Tomas int len0 = len; 1930c9de560dSAlex Tomas void *buddy; 1931218a6944Shanjinke bool split = false; 1932c9de560dSAlex Tomas 1933c9de560dSAlex Tomas BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1934c9de560dSAlex Tomas BUG_ON(e4b->bd_group != ex->fe_group); 1935bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1936c9de560dSAlex Tomas mb_check_buddy(e4b); 1937c9de560dSAlex Tomas mb_mark_used_double(e4b, start, len); 1938c9de560dSAlex Tomas 193907b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1940c9de560dSAlex Tomas e4b->bd_info->bb_free -= len; 1941c9de560dSAlex Tomas if (e4b->bd_info->bb_first_free == start) 1942c9de560dSAlex Tomas e4b->bd_info->bb_first_free += len; 1943c9de560dSAlex Tomas 1944c9de560dSAlex Tomas /* let's maintain fragments counter */ 1945c9de560dSAlex Tomas if (start != 0) 1946c5e8f3f3STheodore Ts'o mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1947c9de560dSAlex Tomas if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1948c5e8f3f3STheodore Ts'o max = !mb_test_bit(start + len, e4b->bd_bitmap); 1949c9de560dSAlex Tomas if (mlen && max) 1950c9de560dSAlex Tomas e4b->bd_info->bb_fragments++; 1951c9de560dSAlex Tomas else if (!mlen && !max) 1952c9de560dSAlex Tomas e4b->bd_info->bb_fragments--; 1953c9de560dSAlex Tomas 1954c9de560dSAlex Tomas /* let's maintain buddy itself */ 1955c9de560dSAlex Tomas while (len) { 1956218a6944Shanjinke if (!split) 1957c9de560dSAlex Tomas ord = mb_find_order_for_block(e4b, start); 1958c9de560dSAlex Tomas 1959c9de560dSAlex Tomas if (((start >> ord) << ord) == start 
&& len >= (1 << ord)) { 1960c9de560dSAlex Tomas /* the whole chunk may be allocated at once! */ 1961c9de560dSAlex Tomas mlen = 1 << ord; 1962218a6944Shanjinke if (!split) 1963c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1964218a6944Shanjinke else 1965218a6944Shanjinke split = false; 1966c9de560dSAlex Tomas BUG_ON((start >> ord) >= max); 1967c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1968c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1969c9de560dSAlex Tomas start += mlen; 1970c9de560dSAlex Tomas len -= mlen; 1971c9de560dSAlex Tomas BUG_ON(len < 0); 1972c9de560dSAlex Tomas continue; 1973c9de560dSAlex Tomas } 1974c9de560dSAlex Tomas 1975c9de560dSAlex Tomas /* store for history */ 1976c9de560dSAlex Tomas if (ret == 0) 1977c9de560dSAlex Tomas ret = len | (ord << 16); 1978c9de560dSAlex Tomas 1979c9de560dSAlex Tomas /* we have to split large buddy */ 1980c9de560dSAlex Tomas BUG_ON(ord <= 0); 1981c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1982c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1983c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1984c9de560dSAlex Tomas 1985c9de560dSAlex Tomas ord--; 1986c9de560dSAlex Tomas cur = (start >> ord) & ~1U; 1987c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1988c9de560dSAlex Tomas mb_clear_bit(cur, buddy); 1989c9de560dSAlex Tomas mb_clear_bit(cur + 1, buddy); 1990c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1991c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1992218a6944Shanjinke split = true; 1993c9de560dSAlex Tomas } 19948a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1995c9de560dSAlex Tomas 1996196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 1997123e3016SRitesh Harjani mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1998c9de560dSAlex Tomas mb_check_buddy(e4b); 1999c9de560dSAlex Tomas 2000c9de560dSAlex Tomas return ret; 2001c9de560dSAlex Tomas } 2002c9de560dSAlex Tomas 2003c9de560dSAlex Tomas /* 2004c9de560dSAlex Tomas * Must be called under group lock! 2005c9de560dSAlex Tomas */ 2006c9de560dSAlex Tomas static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 2007c9de560dSAlex Tomas struct ext4_buddy *e4b) 2008c9de560dSAlex Tomas { 2009c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2010c9de560dSAlex Tomas int ret; 2011c9de560dSAlex Tomas 2012c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 2013c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2014c9de560dSAlex Tomas 2015c9de560dSAlex Tomas ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2016c9de560dSAlex Tomas ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2017c9de560dSAlex Tomas ret = mb_mark_used(e4b, &ac->ac_b_ex); 2018c9de560dSAlex Tomas 2019c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 2020c9de560dSAlex Tomas * allocated blocks for history */ 2021c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 2022c9de560dSAlex Tomas 2023c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 2024c9de560dSAlex Tomas ac->ac_tail = ret & 0xffff; 2025c9de560dSAlex Tomas ac->ac_buddy = ret >> 16; 2026c9de560dSAlex Tomas 2027c3a326a6SAneesh Kumar K.V /* 2028c3a326a6SAneesh Kumar K.V * take the page reference. We want the page to be pinned 2029c3a326a6SAneesh Kumar K.V * so that we don't get a ext4_mb_init_cache_call for this 2030c3a326a6SAneesh Kumar K.V * group until we update the bitmap. That would mean we 2031c3a326a6SAneesh Kumar K.V * double allocate blocks. 
The reference is dropped 2032c3a326a6SAneesh Kumar K.V * in ext4_mb_release_context 2033c3a326a6SAneesh Kumar K.V */ 2034c9de560dSAlex Tomas ac->ac_bitmap_page = e4b->bd_bitmap_page; 2035c9de560dSAlex Tomas get_page(ac->ac_bitmap_page); 2036c9de560dSAlex Tomas ac->ac_buddy_page = e4b->bd_buddy_page; 2037c9de560dSAlex Tomas get_page(ac->ac_buddy_page); 2038c9de560dSAlex Tomas /* store last allocated for subsequent stream allocation */ 20394ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2040c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2041c9de560dSAlex Tomas sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2042c9de560dSAlex Tomas sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2043c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2044c9de560dSAlex Tomas } 204553f86b17SRitesh Harjani /* 204653f86b17SRitesh Harjani * As we've just preallocated more space than 204753f86b17SRitesh Harjani * user requested originally, we store allocated 204853f86b17SRitesh Harjani * space in a special descriptor. 204953f86b17SRitesh Harjani */ 205053f86b17SRitesh Harjani if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 205153f86b17SRitesh Harjani ext4_mb_new_preallocation(ac); 205253f86b17SRitesh Harjani 2053c9de560dSAlex Tomas } 2054c9de560dSAlex Tomas 2055c9de560dSAlex Tomas static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2056c9de560dSAlex Tomas struct ext4_buddy *e4b, 2057c9de560dSAlex Tomas int finish_group) 2058c9de560dSAlex Tomas { 2059c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2060c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2061c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2062c9de560dSAlex Tomas 2063032115fcSAneesh Kumar K.V if (ac->ac_status == AC_STATUS_FOUND) 2064032115fcSAneesh Kumar K.V return; 2065c9de560dSAlex Tomas /* 2066c9de560dSAlex Tomas * We don't want to scan for a whole year 2067c9de560dSAlex Tomas */ 2068c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan && 2069c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2070c9de560dSAlex Tomas ac->ac_status = AC_STATUS_BREAK; 2071c9de560dSAlex Tomas return; 2072c9de560dSAlex Tomas } 2073c9de560dSAlex Tomas 2074c9de560dSAlex Tomas /* 2075c9de560dSAlex Tomas * Haven't found good chunk so far, let's continue 2076c9de560dSAlex Tomas */ 2077c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) 2078c9de560dSAlex Tomas return; 2079c9de560dSAlex Tomas 20803582e745SOjaswin Mujoo if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2081c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2082c9de560dSAlex Tomas } 2083c9de560dSAlex Tomas 2084c9de560dSAlex Tomas /* 2085c9de560dSAlex Tomas * The routine checks whether found extent is good enough. If it is, 2086c9de560dSAlex Tomas * then the extent gets marked used and flag is set to the context 2087c9de560dSAlex Tomas * to stop scanning. Otherwise, the extent is compared with the 2088c9de560dSAlex Tomas * previous found extent and if new one is better, then it's stored 2089c9de560dSAlex Tomas * in the context. Later, the best found extent will be used, if 2090c9de560dSAlex Tomas * mballoc can't find good enough extent. 
2091c9de560dSAlex Tomas * 20923582e745SOjaswin Mujoo * The algorithm used is roughly as follows: 20933582e745SOjaswin Mujoo * 20943582e745SOjaswin Mujoo * * If free extent found is exactly as big as goal, then 20953582e745SOjaswin Mujoo * stop the scan and use it immediately 20963582e745SOjaswin Mujoo * 20973582e745SOjaswin Mujoo * * If free extent found is smaller than goal, then keep retrying 20983582e745SOjaswin Mujoo * up to a max of sbi->s_mb_max_to_scan times (default 200). After 20993582e745SOjaswin Mujoo * that stop scanning and use whatever we have. 21003582e745SOjaswin Mujoo * 21013582e745SOjaswin Mujoo * * If free extent found is bigger than goal, then keep retrying 21023582e745SOjaswin Mujoo * up to a max of sbi->s_mb_min_to_scan times (default 10) before 21033582e745SOjaswin Mujoo * stopping the scan and using the extent. 21043582e745SOjaswin Mujoo * 21053582e745SOjaswin Mujoo * 2106c9de560dSAlex Tomas * FIXME: real allocation policy is to be designed yet! 2107c9de560dSAlex Tomas */ 2108c9de560dSAlex Tomas static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2109c9de560dSAlex Tomas struct ext4_free_extent *ex, 2110c9de560dSAlex Tomas struct ext4_buddy *e4b) 2111c9de560dSAlex Tomas { 2112c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2113c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2114c9de560dSAlex Tomas 2115c9de560dSAlex Tomas BUG_ON(ex->fe_len <= 0); 21167137d7a4STheodore Ts'o BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 21177137d7a4STheodore Ts'o BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2118c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2119c9de560dSAlex Tomas 2120c9de560dSAlex Tomas ac->ac_found++; 2121fdd9a009SOjaswin Mujoo ac->ac_cX_found[ac->ac_criteria]++; 2122c9de560dSAlex Tomas 2123c9de560dSAlex Tomas /* 2124c9de560dSAlex Tomas * The special case - take what you catch first 2125c9de560dSAlex Tomas */ 2126c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2127c9de560dSAlex Tomas *bex = *ex; 2128c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2129c9de560dSAlex Tomas return; 2130c9de560dSAlex Tomas } 2131c9de560dSAlex Tomas 2132c9de560dSAlex Tomas /* 2133c9de560dSAlex Tomas * Let's check whether the chunk is good enough 2134c9de560dSAlex Tomas */ 2135c9de560dSAlex Tomas if (ex->fe_len == gex->fe_len) { 2136c9de560dSAlex Tomas *bex = *ex; 2137c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2138c9de560dSAlex Tomas return; 2139c9de560dSAlex Tomas } 2140c9de560dSAlex Tomas 2141c9de560dSAlex Tomas /* 2142c9de560dSAlex Tomas * If this is first found extent, just store it in the context 2143c9de560dSAlex Tomas */ 2144c9de560dSAlex Tomas if (bex->fe_len == 0) { 2145c9de560dSAlex Tomas *bex = *ex; 2146c9de560dSAlex Tomas return; 2147c9de560dSAlex Tomas } 2148c9de560dSAlex Tomas 2149c9de560dSAlex Tomas /* 2150c9de560dSAlex Tomas * If new found extent is better, store it in the context 2151c9de560dSAlex Tomas */ 2152c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) { 2153c9de560dSAlex Tomas /* if the request isn't satisfied, any found extent 2154c9de560dSAlex Tomas * larger than previous best one is better */ 2155c9de560dSAlex Tomas if (ex->fe_len > bex->fe_len) 2156c9de560dSAlex Tomas *bex = *ex; 2157c9de560dSAlex Tomas } else if (ex->fe_len > gex->fe_len) { 2158c9de560dSAlex Tomas /* if the request is satisfied, then we try to find 2159c9de560dSAlex Tomas * an extent that still satisfies the request, but is 2160c9de560dSAlex Tomas * smaller than
previous one */ 2161c9de560dSAlex Tomas if (ex->fe_len < bex->fe_len) 2162c9de560dSAlex Tomas *bex = *ex; 2163c9de560dSAlex Tomas } 2164c9de560dSAlex Tomas 2165c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 0); 2166c9de560dSAlex Tomas } 2167c9de560dSAlex Tomas 2168089ceeccSEric Sandeen static noinline_for_stack 216985b67ffbSKemeng Shi void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2170c9de560dSAlex Tomas struct ext4_buddy *e4b) 2171c9de560dSAlex Tomas { 2172c9de560dSAlex Tomas struct ext4_free_extent ex = ac->ac_b_ex; 2173c9de560dSAlex Tomas ext4_group_t group = ex.fe_group; 2174c9de560dSAlex Tomas int max; 2175c9de560dSAlex Tomas int err; 2176c9de560dSAlex Tomas 2177c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2178c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2179c9de560dSAlex Tomas if (err) 218085b67ffbSKemeng Shi return; 2181c9de560dSAlex Tomas 2182c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 218315c006a2SRobin Dong max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2184c9de560dSAlex Tomas 2185c9de560dSAlex Tomas if (max > 0) { 2186c9de560dSAlex Tomas ac->ac_b_ex = ex; 2187c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2188c9de560dSAlex Tomas } 2189c9de560dSAlex Tomas 2190c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2191e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2192c9de560dSAlex Tomas } 2193c9de560dSAlex Tomas 2194089ceeccSEric Sandeen static noinline_for_stack 2195089ceeccSEric Sandeen int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2196c9de560dSAlex Tomas struct ext4_buddy *e4b) 2197c9de560dSAlex Tomas { 2198c9de560dSAlex Tomas ext4_group_t group = ac->ac_g_ex.fe_group; 2199c9de560dSAlex Tomas int max; 2200c9de560dSAlex Tomas int err; 2201c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2202838cd0cfSYongqiang Yang struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2203c9de560dSAlex Tomas struct ext4_free_extent ex; 2204c9de560dSAlex Tomas 22055354b2afSTheodore Ts'o if (!grp) 22065354b2afSTheodore Ts'o return -EFSCORRUPTED; 220701e4ca29SKemeng Shi if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2208c9de560dSAlex Tomas return 0; 2209838cd0cfSYongqiang Yang if (grp->bb_free == 0) 2210838cd0cfSYongqiang Yang return 0; 2211c9de560dSAlex Tomas 2212c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2213c9de560dSAlex Tomas if (err) 2214c9de560dSAlex Tomas return err; 2215c9de560dSAlex Tomas 2216163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2217163a203dSDarrick J. Wong ext4_mb_unload_buddy(e4b); 2218163a203dSDarrick J. Wong return 0; 2219163a203dSDarrick J. Wong } 2220163a203dSDarrick J. 
Wong 2221c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 222215c006a2SRobin Dong max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2223c9de560dSAlex Tomas ac->ac_g_ex.fe_len, &ex); 2224ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADFA11; /* debug value */ 2225c9de560dSAlex Tomas 2226c3defd99SKemeng Shi if (max >= ac->ac_g_ex.fe_len && 2227c3defd99SKemeng Shi ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) { 2228c9de560dSAlex Tomas ext4_fsblk_t start; 2229c9de560dSAlex Tomas 223099c515e3SKemeng Shi start = ext4_grp_offs_to_block(ac->ac_sb, &ex); 2231c9de560dSAlex Tomas /* use do_div to get remainder (would be 64-bit modulo) */ 2232c9de560dSAlex Tomas if (do_div(start, sbi->s_stripe) == 0) { 2233c9de560dSAlex Tomas ac->ac_found++; 2234c9de560dSAlex Tomas ac->ac_b_ex = ex; 2235c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2236c9de560dSAlex Tomas } 2237c9de560dSAlex Tomas } else if (max >= ac->ac_g_ex.fe_len) { 2238c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2239c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2240c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2241c9de560dSAlex Tomas ac->ac_found++; 2242c9de560dSAlex Tomas ac->ac_b_ex = ex; 2243c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2244c9de560dSAlex Tomas } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2245c9de560dSAlex Tomas /* Sometimes, caller may want to merge even small 2246c9de560dSAlex Tomas * number of blocks to an existing extent */ 2247c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2248c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2249c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2250c9de560dSAlex Tomas ac->ac_found++; 2251c9de560dSAlex Tomas ac->ac_b_ex = ex; 2252c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2253c9de560dSAlex Tomas } 2254c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2255e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2256c9de560dSAlex Tomas 2257c9de560dSAlex Tomas return 0; 2258c9de560dSAlex Tomas } 2259c9de560dSAlex Tomas 2260c9de560dSAlex Tomas /* 2261c9de560dSAlex Tomas * The routine scans buddy structures (not bitmap!) 
from given order 2262c9de560dSAlex Tomas * to max order and tries to find big enough chunk to satisfy the req 2263c9de560dSAlex Tomas */ 2264089ceeccSEric Sandeen static noinline_for_stack 2265089ceeccSEric Sandeen void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2266c9de560dSAlex Tomas struct ext4_buddy *e4b) 2267c9de560dSAlex Tomas { 2268c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2269c9de560dSAlex Tomas struct ext4_group_info *grp = e4b->bd_info; 2270c9de560dSAlex Tomas void *buddy; 2271c9de560dSAlex Tomas int i; 2272c9de560dSAlex Tomas int k; 2273c9de560dSAlex Tomas int max; 2274c9de560dSAlex Tomas 2275c9de560dSAlex Tomas BUG_ON(ac->ac_2order <= 0); 22764b68f6dfSHarshad Shirwadkar for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2277c9de560dSAlex Tomas if (grp->bb_counters[i] == 0) 2278c9de560dSAlex Tomas continue; 2279c9de560dSAlex Tomas 2280c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, i, &max); 228119b8b035STheodore Ts'o if (WARN_RATELIMIT(buddy == NULL, 228219b8b035STheodore Ts'o "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 228319b8b035STheodore Ts'o continue; 2284c9de560dSAlex Tomas 2285ffad0a44SAneesh Kumar K.V k = mb_find_next_zero_bit(buddy, max, 0); 2286eb576086SDmitry Monakhov if (k >= max) { 2287eb576086SDmitry Monakhov ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2288eb576086SDmitry Monakhov "%d free clusters of order %d. But found 0", 2289eb576086SDmitry Monakhov grp->bb_counters[i], i); 2290eb576086SDmitry Monakhov ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2291eb576086SDmitry Monakhov e4b->bd_group, 2292eb576086SDmitry Monakhov EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2293eb576086SDmitry Monakhov break; 2294eb576086SDmitry Monakhov } 2295c9de560dSAlex Tomas ac->ac_found++; 2296fdd9a009SOjaswin Mujoo ac->ac_cX_found[ac->ac_criteria]++; 2297c9de560dSAlex Tomas 2298c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 1 << i; 2299c9de560dSAlex Tomas ac->ac_b_ex.fe_start = k << i; 2300c9de560dSAlex Tomas ac->ac_b_ex.fe_group = e4b->bd_group; 2301c9de560dSAlex Tomas 2302c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2303c9de560dSAlex Tomas 230453f86b17SRitesh Harjani BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2305c9de560dSAlex Tomas 2306c9de560dSAlex Tomas if (EXT4_SB(sb)->s_mb_stats) 2307c9de560dSAlex Tomas atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2308c9de560dSAlex Tomas 2309c9de560dSAlex Tomas break; 2310c9de560dSAlex Tomas } 2311c9de560dSAlex Tomas } 2312c9de560dSAlex Tomas 2313c9de560dSAlex Tomas /* 2314c9de560dSAlex Tomas * The routine scans the group and measures all found extents. 2315c9de560dSAlex Tomas * In order to optimize scanning, caller must pass number of 2316c9de560dSAlex Tomas * free blocks in the group, so the routine can know upper limit. 
2317c9de560dSAlex Tomas */ 2318089ceeccSEric Sandeen static noinline_for_stack 2319089ceeccSEric Sandeen void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2320c9de560dSAlex Tomas struct ext4_buddy *e4b) 2321c9de560dSAlex Tomas { 2322c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2323c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2324c9de560dSAlex Tomas struct ext4_free_extent ex; 23251b420011SOjaswin Mujoo int i, j, freelen; 2326c9de560dSAlex Tomas int free; 2327c9de560dSAlex Tomas 2328c9de560dSAlex Tomas free = e4b->bd_info->bb_free; 2329907ea529STheodore Ts'o if (WARN_ON(free <= 0)) 2330907ea529STheodore Ts'o return; 2331c9de560dSAlex Tomas 2332c9de560dSAlex Tomas i = e4b->bd_info->bb_first_free; 2333c9de560dSAlex Tomas 2334c9de560dSAlex Tomas while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2335ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, 23367137d7a4STheodore Ts'o EXT4_CLUSTERS_PER_GROUP(sb), i); 23377137d7a4STheodore Ts'o if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 233826346ff6SAneesh Kumar K.V /* 2339e56eb659SAneesh Kumar K.V * IF we have corrupt bitmap, we won't find any 234026346ff6SAneesh Kumar K.V * free blocks even though group info says we 2341b483bb77SRandy Dunlap * have free blocks 234226346ff6SAneesh Kumar K.V */ 2343e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 234453accfa9STheodore Ts'o "%d free clusters as per " 2345fde4d95aSTheodore Ts'o "group info. But bitmap says 0", 234626346ff6SAneesh Kumar K.V free); 2347736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2348736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2349c9de560dSAlex Tomas break; 2350c9de560dSAlex Tomas } 2351c9de560dSAlex Tomas 23521b420011SOjaswin Mujoo if (ac->ac_criteria < CR2) { 23531b420011SOjaswin Mujoo /* 23541b420011SOjaswin Mujoo * In CR1, we are sure that this group will 23551b420011SOjaswin Mujoo * have a large enough continuous free extent, so skip 23561b420011SOjaswin Mujoo * over the smaller free extents 23571b420011SOjaswin Mujoo */ 23581b420011SOjaswin Mujoo j = mb_find_next_bit(bitmap, 23591b420011SOjaswin Mujoo EXT4_CLUSTERS_PER_GROUP(sb), i); 23601b420011SOjaswin Mujoo freelen = j - i; 23611b420011SOjaswin Mujoo 23621b420011SOjaswin Mujoo if (freelen < ac->ac_g_ex.fe_len) { 23631b420011SOjaswin Mujoo i = j; 23641b420011SOjaswin Mujoo free -= freelen; 23651b420011SOjaswin Mujoo continue; 23661b420011SOjaswin Mujoo } 23671b420011SOjaswin Mujoo } 23681b420011SOjaswin Mujoo 236915c006a2SRobin Dong mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2370907ea529STheodore Ts'o if (WARN_ON(ex.fe_len <= 0)) 2371907ea529STheodore Ts'o break; 237226346ff6SAneesh Kumar K.V if (free < ex.fe_len) { 2373e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 237453accfa9STheodore Ts'o "%d free clusters as per " 2375fde4d95aSTheodore Ts'o "group info. But got %d blocks", 237626346ff6SAneesh Kumar K.V free, ex.fe_len); 2377736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2378736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2379e56eb659SAneesh Kumar K.V /* 2380e56eb659SAneesh Kumar K.V * The number of free blocks differs. This mostly 2381e56eb659SAneesh Kumar K.V * indicate that the bitmap is corrupt. So exit 2382e56eb659SAneesh Kumar K.V * without claiming the space. 
2383e56eb659SAneesh Kumar K.V */ 2384e56eb659SAneesh Kumar K.V break; 238526346ff6SAneesh Kumar K.V } 2386ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADC0DE; /* debug value */ 2387c9de560dSAlex Tomas ext4_mb_measure_extent(ac, &ex, e4b); 2388c9de560dSAlex Tomas 2389c9de560dSAlex Tomas i += ex.fe_len; 2390c9de560dSAlex Tomas free -= ex.fe_len; 2391c9de560dSAlex Tomas } 2392c9de560dSAlex Tomas 2393c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 1); 2394c9de560dSAlex Tomas } 2395c9de560dSAlex Tomas 2396c9de560dSAlex Tomas /* 2397c9de560dSAlex Tomas * This is a special case for storages like raid5 2398506bf2d8SEric Sandeen * we try to find stripe-aligned chunks for stripe-size-multiple requests 2399c9de560dSAlex Tomas */ 2400089ceeccSEric Sandeen static noinline_for_stack 2401089ceeccSEric Sandeen void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2402c9de560dSAlex Tomas struct ext4_buddy *e4b) 2403c9de560dSAlex Tomas { 2404c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2405c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2406c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2407c9de560dSAlex Tomas struct ext4_free_extent ex; 2408c9de560dSAlex Tomas ext4_fsblk_t first_group_block; 2409c9de560dSAlex Tomas ext4_fsblk_t a; 2410c3defd99SKemeng Shi ext4_grpblk_t i, stripe; 2411c9de560dSAlex Tomas int max; 2412c9de560dSAlex Tomas 2413c9de560dSAlex Tomas BUG_ON(sbi->s_stripe == 0); 2414c9de560dSAlex Tomas 2415c9de560dSAlex Tomas /* find first stripe-aligned block in group */ 24165661bd68SAkinobu Mita first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 24175661bd68SAkinobu Mita 2418c9de560dSAlex Tomas a = first_group_block + sbi->s_stripe - 1; 2419c9de560dSAlex Tomas do_div(a, sbi->s_stripe); 2420c9de560dSAlex Tomas i = (a * sbi->s_stripe) - first_group_block; 2421c9de560dSAlex Tomas 2422c3defd99SKemeng Shi stripe = EXT4_B2C(sbi, sbi->s_stripe); 2423c3defd99SKemeng Shi i = EXT4_B2C(sbi, i); 24247137d7a4STheodore Ts'o while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2425c9de560dSAlex Tomas if (!mb_test_bit(i, bitmap)) { 2426c3defd99SKemeng Shi max = mb_find_extent(e4b, i, stripe, &ex); 2427c3defd99SKemeng Shi if (max >= stripe) { 2428c9de560dSAlex Tomas ac->ac_found++; 2429fdd9a009SOjaswin Mujoo ac->ac_cX_found[ac->ac_criteria]++; 2430ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADF00D; /* debug value */ 2431c9de560dSAlex Tomas ac->ac_b_ex = ex; 2432c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2433c9de560dSAlex Tomas break; 2434c9de560dSAlex Tomas } 2435c9de560dSAlex Tomas } 2436c3defd99SKemeng Shi i += stripe; 2437c9de560dSAlex Tomas } 2438c9de560dSAlex Tomas } 2439c9de560dSAlex Tomas 244042ac1848SLukas Czerner /* 24418ef123feSRitesh Harjani * This is also called BEFORE we load the buddy bitmap. 244242ac1848SLukas Czerner * Returns either 1 or 0 indicating that the group is either suitable 24438ef123feSRitesh Harjani * for the allocation or not. 
244442ac1848SLukas Czerner */ 24458ef123feSRitesh Harjani static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 24464eb7a4a1SOjaswin Mujoo ext4_group_t group, enum criteria cr) 2447c9de560dSAlex Tomas { 24488ef123feSRitesh Harjani ext4_grpblk_t free, fragments; 2449a4912123STheodore Ts'o int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2450c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2451c9de560dSAlex Tomas 24524eb7a4a1SOjaswin Mujoo BUG_ON(cr < CR0 || cr >= EXT4_MB_NUM_CRS); 24538a57d9d6SCurt Wohlgemuth 24545354b2afSTheodore Ts'o if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp)) 24558ef123feSRitesh Harjani return false; 245601fc48e8STheodore Ts'o 2457dddcd2f9Sbrookxu free = grp->bb_free; 2458dddcd2f9Sbrookxu if (free == 0) 24598ef123feSRitesh Harjani return false; 2460c9de560dSAlex Tomas 2461c9de560dSAlex Tomas fragments = grp->bb_fragments; 2462c9de560dSAlex Tomas if (fragments == 0) 24638ef123feSRitesh Harjani return false; 2464c9de560dSAlex Tomas 2465c9de560dSAlex Tomas switch (cr) { 24664eb7a4a1SOjaswin Mujoo case CR0: 2467c9de560dSAlex Tomas BUG_ON(ac->ac_2order == 0); 2468c9de560dSAlex Tomas 2469a4912123STheodore Ts'o /* Avoid using the first bg of a flexgroup for data files */ 2470a4912123STheodore Ts'o if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2471a4912123STheodore Ts'o (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2472a4912123STheodore Ts'o ((group % flex_size) == 0)) 24738ef123feSRitesh Harjani return false; 2474a4912123STheodore Ts'o 2475dddcd2f9Sbrookxu if (free < ac->ac_g_ex.fe_len) 2476dddcd2f9Sbrookxu return false; 2477dddcd2f9Sbrookxu 24784b68f6dfSHarshad Shirwadkar if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 24798ef123feSRitesh Harjani return true; 248040ae3487STheodore Ts'o 248140ae3487STheodore Ts'o if (grp->bb_largest_free_order < ac->ac_2order) 24828ef123feSRitesh Harjani return false; 248340ae3487STheodore Ts'o 24848ef123feSRitesh Harjani return true; 24854eb7a4a1SOjaswin Mujoo case CR1: 2486c9de560dSAlex Tomas if ((free / fragments) >= ac->ac_g_ex.fe_len) 24878ef123feSRitesh Harjani return true; 2488c9de560dSAlex Tomas break; 24894eb7a4a1SOjaswin Mujoo case CR2: 2490c9de560dSAlex Tomas if (free >= ac->ac_g_ex.fe_len) 24918ef123feSRitesh Harjani return true; 2492c9de560dSAlex Tomas break; 24934eb7a4a1SOjaswin Mujoo case CR3: 24948ef123feSRitesh Harjani return true; 2495c9de560dSAlex Tomas default: 2496c9de560dSAlex Tomas BUG(); 2497c9de560dSAlex Tomas } 2498c9de560dSAlex Tomas 24998ef123feSRitesh Harjani return false; 25008ef123feSRitesh Harjani } 25018ef123feSRitesh Harjani 25028ef123feSRitesh Harjani /* 25038ef123feSRitesh Harjani * This could return negative error code if something goes wrong 25048ef123feSRitesh Harjani * during ext4_mb_init_group(). This should not be called with 25058ef123feSRitesh Harjani * ext4_lock_group() held. 2506a5fda113STheodore Ts'o * 2507a5fda113STheodore Ts'o * Note: because we are conditionally operating with the group lock in 2508a5fda113STheodore Ts'o * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2509a5fda113STheodore Ts'o * function using __acquire and __release. This means we need to be 2510a5fda113STheodore Ts'o * super careful before messing with the error path handling via "goto 2511a5fda113STheodore Ts'o * out"! 
25128ef123feSRitesh Harjani */ 25138ef123feSRitesh Harjani static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 25144eb7a4a1SOjaswin Mujoo ext4_group_t group, enum criteria cr) 25158ef123feSRitesh Harjani { 25168ef123feSRitesh Harjani struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 251799377830SRitesh Harjani struct super_block *sb = ac->ac_sb; 2518c1d2c7d4SAlex Zhuravlev struct ext4_sb_info *sbi = EXT4_SB(sb); 251999377830SRitesh Harjani bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 25208ef123feSRitesh Harjani ext4_grpblk_t free; 25218ef123feSRitesh Harjani int ret = 0; 25228ef123feSRitesh Harjani 25235354b2afSTheodore Ts'o if (!grp) 25245354b2afSTheodore Ts'o return -EFSCORRUPTED; 2525a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats) 2526a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2527a5fda113STheodore Ts'o if (should_lock) { 252899377830SRitesh Harjani ext4_lock_group(sb, group); 2529a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2530a5fda113STheodore Ts'o } 25318ef123feSRitesh Harjani free = grp->bb_free; 25328ef123feSRitesh Harjani if (free == 0) 25338ef123feSRitesh Harjani goto out; 25344eb7a4a1SOjaswin Mujoo if (cr <= CR2 && free < ac->ac_g_ex.fe_len) 25358ef123feSRitesh Harjani goto out; 25368ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 25378ef123feSRitesh Harjani goto out; 2538a5fda113STheodore Ts'o if (should_lock) { 2539a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 254099377830SRitesh Harjani ext4_unlock_group(sb, group); 2541a5fda113STheodore Ts'o } 25428ef123feSRitesh Harjani 25438ef123feSRitesh Harjani /* We only do this if the grp has never been initialized */ 25448ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2545c1d2c7d4SAlex Zhuravlev struct ext4_group_desc *gdp = 2546c1d2c7d4SAlex Zhuravlev ext4_get_group_desc(sb, group, NULL); 2547c1d2c7d4SAlex Zhuravlev int ret; 2548c1d2c7d4SAlex Zhuravlev 25494eb7a4a1SOjaswin Mujoo /* cr=CR0/CR1 is a very optimistic search to find large 2550c1d2c7d4SAlex Zhuravlev * good chunks almost for free. If buddy data is not 2551c1d2c7d4SAlex Zhuravlev * ready, then this optimization makes no sense. But 2552c1d2c7d4SAlex Zhuravlev * we never skip the first block group in a flex_bg, 2553c1d2c7d4SAlex Zhuravlev * since this gets used for metadata block allocation, 2554c1d2c7d4SAlex Zhuravlev * and we want to make sure we locate metadata blocks 2555c1d2c7d4SAlex Zhuravlev * in the first block group in the flex_bg if possible. 
2556c1d2c7d4SAlex Zhuravlev */ 25574eb7a4a1SOjaswin Mujoo if (cr < CR2 && 2558c1d2c7d4SAlex Zhuravlev (!sbi->s_log_groups_per_flex || 2559c1d2c7d4SAlex Zhuravlev ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2560c1d2c7d4SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2561c1d2c7d4SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2562c1d2c7d4SAlex Zhuravlev return 0; 2563c1d2c7d4SAlex Zhuravlev ret = ext4_mb_init_group(sb, group, GFP_NOFS); 25648ef123feSRitesh Harjani if (ret) 25658ef123feSRitesh Harjani return ret; 25668ef123feSRitesh Harjani } 25678ef123feSRitesh Harjani 2568a5fda113STheodore Ts'o if (should_lock) { 256999377830SRitesh Harjani ext4_lock_group(sb, group); 2570a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2571a5fda113STheodore Ts'o } 25728ef123feSRitesh Harjani ret = ext4_mb_good_group(ac, group, cr); 25738ef123feSRitesh Harjani out: 2574a5fda113STheodore Ts'o if (should_lock) { 2575a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 257699377830SRitesh Harjani ext4_unlock_group(sb, group); 2577a5fda113STheodore Ts'o } 25788ef123feSRitesh Harjani return ret; 2579c9de560dSAlex Tomas } 2580c9de560dSAlex Tomas 2581cfd73237SAlex Zhuravlev /* 2582cfd73237SAlex Zhuravlev * Start prefetching @nr block bitmaps starting at @group. 2583cfd73237SAlex Zhuravlev * Return the next group which needs to be prefetched. 2584cfd73237SAlex Zhuravlev */ 25853d392b26STheodore Ts'o ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2586cfd73237SAlex Zhuravlev unsigned int nr, int *cnt) 2587cfd73237SAlex Zhuravlev { 2588cfd73237SAlex Zhuravlev ext4_group_t ngroups = ext4_get_groups_count(sb); 2589cfd73237SAlex Zhuravlev struct buffer_head *bh; 2590cfd73237SAlex Zhuravlev struct blk_plug plug; 2591cfd73237SAlex Zhuravlev 2592cfd73237SAlex Zhuravlev blk_start_plug(&plug); 2593cfd73237SAlex Zhuravlev while (nr-- > 0) { 2594cfd73237SAlex Zhuravlev struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2595cfd73237SAlex Zhuravlev NULL); 2596cfd73237SAlex Zhuravlev struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2597cfd73237SAlex Zhuravlev 2598cfd73237SAlex Zhuravlev /* 2599cfd73237SAlex Zhuravlev * Prefetch block groups with free blocks; but don't 2600cfd73237SAlex Zhuravlev * bother if it is marked uninitialized on disk, since 2601cfd73237SAlex Zhuravlev * it won't require I/O to read. Also only try to 2602cfd73237SAlex Zhuravlev * prefetch once, so we avoid getblk() call, which can 2603cfd73237SAlex Zhuravlev * be expensive. 
2604cfd73237SAlex Zhuravlev */ 26055354b2afSTheodore Ts'o if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2606cfd73237SAlex Zhuravlev EXT4_MB_GRP_NEED_INIT(grp) && 26073c629604SOjaswin Mujoo ext4_free_group_clusters(sb, gdp) > 0 ) { 2608cfd73237SAlex Zhuravlev bh = ext4_read_block_bitmap_nowait(sb, group, true); 2609cfd73237SAlex Zhuravlev if (bh && !IS_ERR(bh)) { 2610cfd73237SAlex Zhuravlev if (!buffer_uptodate(bh) && cnt) 2611cfd73237SAlex Zhuravlev (*cnt)++; 2612cfd73237SAlex Zhuravlev brelse(bh); 2613cfd73237SAlex Zhuravlev } 2614cfd73237SAlex Zhuravlev } 2615cfd73237SAlex Zhuravlev if (++group >= ngroups) 2616cfd73237SAlex Zhuravlev group = 0; 2617cfd73237SAlex Zhuravlev } 2618cfd73237SAlex Zhuravlev blk_finish_plug(&plug); 2619cfd73237SAlex Zhuravlev return group; 2620cfd73237SAlex Zhuravlev } 2621cfd73237SAlex Zhuravlev 2622cfd73237SAlex Zhuravlev /* 2623cfd73237SAlex Zhuravlev * Prefetching reads the block bitmap into the buffer cache; but we 2624cfd73237SAlex Zhuravlev * need to make sure that the buddy bitmap in the page cache has been 2625cfd73237SAlex Zhuravlev * initialized. Note that ext4_mb_init_group() will block if the I/O 2626cfd73237SAlex Zhuravlev * is not yet completed, or indeed if it was not initiated by 2627cfd73237SAlex Zhuravlev * ext4_mb_prefetch did not start the I/O. 2628cfd73237SAlex Zhuravlev * 2629cfd73237SAlex Zhuravlev * TODO: We should actually kick off the buddy bitmap setup in a work 2630cfd73237SAlex Zhuravlev * queue when the buffer I/O is completed, so that we don't block 2631cfd73237SAlex Zhuravlev * waiting for the block allocation bitmap read to finish when 2632cfd73237SAlex Zhuravlev * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 2633cfd73237SAlex Zhuravlev */ 26343d392b26STheodore Ts'o void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2635cfd73237SAlex Zhuravlev unsigned int nr) 2636cfd73237SAlex Zhuravlev { 263722fab984SKemeng Shi struct ext4_group_desc *gdp; 263822fab984SKemeng Shi struct ext4_group_info *grp; 2639cfd73237SAlex Zhuravlev 264022fab984SKemeng Shi while (nr-- > 0) { 2641cfd73237SAlex Zhuravlev if (!group) 2642cfd73237SAlex Zhuravlev group = ext4_get_groups_count(sb); 2643cfd73237SAlex Zhuravlev group--; 264422fab984SKemeng Shi gdp = ext4_get_group_desc(sb, group, NULL); 2645cfd73237SAlex Zhuravlev grp = ext4_get_group_info(sb, group); 2646cfd73237SAlex Zhuravlev 26475354b2afSTheodore Ts'o if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && 26483c629604SOjaswin Mujoo ext4_free_group_clusters(sb, gdp) > 0) { 2649cfd73237SAlex Zhuravlev if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2650cfd73237SAlex Zhuravlev break; 2651cfd73237SAlex Zhuravlev } 2652cfd73237SAlex Zhuravlev } 2653cfd73237SAlex Zhuravlev } 2654cfd73237SAlex Zhuravlev 26554ddfef7bSEric Sandeen static noinline_for_stack int 26564ddfef7bSEric Sandeen ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2657c9de560dSAlex Tomas { 2658cfd73237SAlex Zhuravlev ext4_group_t prefetch_grp = 0, ngroups, group, i; 26594eb7a4a1SOjaswin Mujoo enum criteria cr, new_cr; 266042ac1848SLukas Czerner int err = 0, first_err = 0; 2661cfd73237SAlex Zhuravlev unsigned int nr = 0, prefetch_ios = 0; 2662c9de560dSAlex Tomas struct ext4_sb_info *sbi; 2663c9de560dSAlex Tomas struct super_block *sb; 2664c9de560dSAlex Tomas struct ext4_buddy e4b; 266566d5e027Sbrookxu int lost; 2666c9de560dSAlex Tomas 2667c9de560dSAlex Tomas sb = ac->ac_sb; 2668c9de560dSAlex Tomas sbi = EXT4_SB(sb); 26698df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 
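	/*
	 * Roughly, the group scan below walks the allocation criteria from
	 * strictest to most permissive: CR0 does a power-of-two buddy scan,
	 * CR1 relies on the free/fragments heuristic, CR2 accepts any group
	 * with enough free clusters, and CR3 takes whatever is left.  CR0 is
	 * only attempted when the goal length is a power of two: e.g. a goal
	 * of 64 clusters gives fls(64) == 7 and 64 & ~(1 << 6) == 0, so
	 * ac_2order is set to 6 (assuming mb_order2_req permits order-7
	 * requests) and the CR0 pass looks for a free order-6 buddy.
	 */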
2670fb0a387dSEric Sandeen /* non-extent files are limited to low blocks/groups */
267112e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2672fb0a387dSEric Sandeen ngroups = sbi->s_blockfile_groups;
2673fb0a387dSEric Sandeen
2674c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2675c9de560dSAlex Tomas
2676c9de560dSAlex Tomas /* first, try the goal */
2677c9de560dSAlex Tomas err = ext4_mb_find_by_goal(ac, &e4b);
2678c9de560dSAlex Tomas if (err || ac->ac_status == AC_STATUS_FOUND)
2679c9de560dSAlex Tomas goto out;
2680c9de560dSAlex Tomas
2681c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2682c9de560dSAlex Tomas goto out;
2683c9de560dSAlex Tomas
2684c9de560dSAlex Tomas /*
2685e9a3cd48Sbrookxu * ac->ac_2order is set only if the fe_len is a power of 2;
2686e9a3cd48Sbrookxu * if ac->ac_2order is set we also set the criteria to CR0 so that we
2687c9de560dSAlex Tomas * try exact allocation using buddy.
2688c9de560dSAlex Tomas */
2689c9de560dSAlex Tomas i = fls(ac->ac_g_ex.fe_len);
2690c9de560dSAlex Tomas ac->ac_2order = 0;
2691c9de560dSAlex Tomas /*
2692c9de560dSAlex Tomas * We search using buddy data only if the order of the request
2693c9de560dSAlex Tomas * is greater than or equal to sbi->s_mb_order2_reqs.
2694b713a5ecSTheodore Ts'o * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2695d9b22cf9SJan Kara * We also support searching for power-of-two requests only for
2696d9b22cf9SJan Kara * requests up to the maximum buddy size we have constructed.
2697c9de560dSAlex Tomas */
26984b68f6dfSHarshad Shirwadkar if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2699c9de560dSAlex Tomas /*
2700c9de560dSAlex Tomas * This should tell if fe_len is exactly a power of 2
2701c9de560dSAlex Tomas */
2702c9de560dSAlex Tomas if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
27031a5d5e5dSJeremy Cline ac->ac_2order = array_index_nospec(i - 1,
27044b68f6dfSHarshad Shirwadkar MB_NUM_ORDERS(sb));
2705c9de560dSAlex Tomas }
2706c9de560dSAlex Tomas
27074ba74d00STheodore Ts'o /* if stream allocation is enabled, use global goal */
27084ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2709c9de560dSAlex Tomas /* TBD: may be hot point */
2710c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock);
2711c9de560dSAlex Tomas ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2712c9de560dSAlex Tomas ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2713c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock);
2714c9de560dSAlex Tomas }
27154ba74d00STheodore Ts'o
2716c9de560dSAlex Tomas /* Let's just scan groups to find more or less suitable blocks */
27174eb7a4a1SOjaswin Mujoo cr = ac->ac_2order ?
CR0 : CR1; 2718c9de560dSAlex Tomas /* 27194eb7a4a1SOjaswin Mujoo * cr == CR0 try to get exact allocation, 27204eb7a4a1SOjaswin Mujoo * cr == CR3 try to get anything 2721c9de560dSAlex Tomas */ 2722c9de560dSAlex Tomas repeat: 27234eb7a4a1SOjaswin Mujoo for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2724c9de560dSAlex Tomas ac->ac_criteria = cr; 2725ed8f9c75SAneesh Kumar K.V /* 2726ed8f9c75SAneesh Kumar K.V * searching for the right group start 2727ed8f9c75SAneesh Kumar K.V * from the goal value specified 2728ed8f9c75SAneesh Kumar K.V */ 2729ed8f9c75SAneesh Kumar K.V group = ac->ac_g_ex.fe_group; 2730196e402aSHarshad Shirwadkar ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2731cfd73237SAlex Zhuravlev prefetch_grp = group; 2732ed8f9c75SAneesh Kumar K.V 27334fca50d4SJan Kara for (i = 0, new_cr = cr; i < ngroups; i++, 27344fca50d4SJan Kara ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 27354fca50d4SJan Kara int ret = 0; 2736196e402aSHarshad Shirwadkar 27372ed5724dSTheodore Ts'o cond_resched(); 2738196e402aSHarshad Shirwadkar if (new_cr != cr) { 2739196e402aSHarshad Shirwadkar cr = new_cr; 2740196e402aSHarshad Shirwadkar goto repeat; 2741196e402aSHarshad Shirwadkar } 2742c9de560dSAlex Tomas 2743cfd73237SAlex Zhuravlev /* 2744cfd73237SAlex Zhuravlev * Batch reads of the block allocation bitmaps 2745cfd73237SAlex Zhuravlev * to get multiple READs in flight; limit 2746cfd73237SAlex Zhuravlev * prefetching at cr=0/1, otherwise mballoc can 2747cfd73237SAlex Zhuravlev * spend a lot of time loading imperfect groups 2748cfd73237SAlex Zhuravlev */ 2749cfd73237SAlex Zhuravlev if ((prefetch_grp == group) && 27504eb7a4a1SOjaswin Mujoo (cr > CR1 || 2751cfd73237SAlex Zhuravlev prefetch_ios < sbi->s_mb_prefetch_limit)) { 2752cfd73237SAlex Zhuravlev nr = sbi->s_mb_prefetch; 2753cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 275482ef1370SChunguang Xu nr = 1 << sbi->s_log_groups_per_flex; 275582ef1370SChunguang Xu nr -= group & (nr - 1); 275682ef1370SChunguang Xu nr = min(nr, sbi->s_mb_prefetch); 2757cfd73237SAlex Zhuravlev } 2758cfd73237SAlex Zhuravlev prefetch_grp = ext4_mb_prefetch(sb, group, 2759cfd73237SAlex Zhuravlev nr, &prefetch_ios); 2760cfd73237SAlex Zhuravlev } 2761cfd73237SAlex Zhuravlev 27628a57d9d6SCurt Wohlgemuth /* This now checks without needing the buddy page */ 27638ef123feSRitesh Harjani ret = ext4_mb_good_group_nolock(ac, group, cr); 276442ac1848SLukas Czerner if (ret <= 0) { 276542ac1848SLukas Czerner if (!first_err) 276642ac1848SLukas Czerner first_err = ret; 2767c9de560dSAlex Tomas continue; 276842ac1848SLukas Czerner } 2769c9de560dSAlex Tomas 2770c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2771c9de560dSAlex Tomas if (err) 2772c9de560dSAlex Tomas goto out; 2773c9de560dSAlex Tomas 2774c9de560dSAlex Tomas ext4_lock_group(sb, group); 27758a57d9d6SCurt Wohlgemuth 27768a57d9d6SCurt Wohlgemuth /* 27778a57d9d6SCurt Wohlgemuth * We need to check again after locking the 27788a57d9d6SCurt Wohlgemuth * block group 27798a57d9d6SCurt Wohlgemuth */ 278042ac1848SLukas Czerner ret = ext4_mb_good_group(ac, group, cr); 27818ef123feSRitesh Harjani if (ret == 0) { 2782c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2783e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2784c9de560dSAlex Tomas continue; 2785c9de560dSAlex Tomas } 2786c9de560dSAlex Tomas 2787c9de560dSAlex Tomas ac->ac_groups_scanned++; 27884eb7a4a1SOjaswin Mujoo if (cr == CR0) 2789c9de560dSAlex Tomas ext4_mb_simple_scan_group(ac, &e4b); 27904eb7a4a1SOjaswin 
Mujoo else if (cr == CR1 && sbi->s_stripe && 2791c3defd99SKemeng Shi !(ac->ac_g_ex.fe_len % 2792c3defd99SKemeng Shi EXT4_B2C(sbi, sbi->s_stripe))) 2793c9de560dSAlex Tomas ext4_mb_scan_aligned(ac, &e4b); 2794c9de560dSAlex Tomas else 2795c9de560dSAlex Tomas ext4_mb_complex_scan_group(ac, &e4b); 2796c9de560dSAlex Tomas 2797c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2798e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2799c9de560dSAlex Tomas 2800c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_CONTINUE) 2801c9de560dSAlex Tomas break; 2802c9de560dSAlex Tomas } 2803a6c75eafSHarshad Shirwadkar /* Processed all groups and haven't found blocks */ 2804a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && i == ngroups) 2805a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2806c9de560dSAlex Tomas } 2807c9de560dSAlex Tomas 2808c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2809c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2810c9de560dSAlex Tomas /* 2811c9de560dSAlex Tomas * We've been searching too long. Let's try to allocate 2812c9de560dSAlex Tomas * the best chunk we've found so far 2813c9de560dSAlex Tomas */ 2814c9de560dSAlex Tomas ext4_mb_try_best_found(ac, &e4b); 2815c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_FOUND) { 2816c9de560dSAlex Tomas /* 2817c9de560dSAlex Tomas * Someone more lucky has already allocated it. 2818c9de560dSAlex Tomas * The only thing we can do is just take first 2819c9de560dSAlex Tomas * found block(s) 2820c9de560dSAlex Tomas */ 282166d5e027Sbrookxu lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 282266d5e027Sbrookxu mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2823c55ee7d2Sbrookxu ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2824c55ee7d2Sbrookxu ac->ac_b_ex.fe_len, lost); 2825c55ee7d2Sbrookxu 2826c9de560dSAlex Tomas ac->ac_b_ex.fe_group = 0; 2827c9de560dSAlex Tomas ac->ac_b_ex.fe_start = 0; 2828c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 0; 2829c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 2830c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_FIRST; 28314eb7a4a1SOjaswin Mujoo cr = CR3; 2832c9de560dSAlex Tomas goto repeat; 2833c9de560dSAlex Tomas } 2834c9de560dSAlex Tomas } 2835a6c75eafSHarshad Shirwadkar 2836a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2837a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2838c9de560dSAlex Tomas out: 283942ac1848SLukas Czerner if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 284042ac1848SLukas Czerner err = first_err; 2841bbc4ec77SRitesh Harjani 2842d3df1453SRitesh Harjani mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2843bbc4ec77SRitesh Harjani ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2844bbc4ec77SRitesh Harjani ac->ac_flags, cr, err); 2845cfd73237SAlex Zhuravlev 2846cfd73237SAlex Zhuravlev if (nr) 2847cfd73237SAlex Zhuravlev ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2848cfd73237SAlex Zhuravlev 2849c9de560dSAlex Tomas return err; 2850c9de560dSAlex Tomas } 2851c9de560dSAlex Tomas 2852c9de560dSAlex Tomas static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2853c9de560dSAlex Tomas { 2854359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2855c9de560dSAlex Tomas ext4_group_t group; 2856c9de560dSAlex Tomas 28578df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2858c9de560dSAlex Tomas return NULL; 2859c9de560dSAlex Tomas group 
= *pos + 1; 2860a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2861c9de560dSAlex Tomas } 2862c9de560dSAlex Tomas 2863c9de560dSAlex Tomas static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2864c9de560dSAlex Tomas { 2865359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2866c9de560dSAlex Tomas ext4_group_t group; 2867c9de560dSAlex Tomas 2868c9de560dSAlex Tomas ++*pos; 28698df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2870c9de560dSAlex Tomas return NULL; 2871c9de560dSAlex Tomas group = *pos + 1; 2872a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2873c9de560dSAlex Tomas } 2874c9de560dSAlex Tomas 2875c9de560dSAlex Tomas static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2876c9de560dSAlex Tomas { 2877359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2878a9df9a49STheodore Ts'o ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2879c9de560dSAlex Tomas int i; 28801c8457caSAditya Kali int err, buddy_loaded = 0; 2881c9de560dSAlex Tomas struct ext4_buddy e4b; 28821c8457caSAditya Kali struct ext4_group_info *grinfo; 28832df2c340SArnd Bergmann unsigned char blocksize_bits = min_t(unsigned char, 28842df2c340SArnd Bergmann sb->s_blocksize_bits, 28852df2c340SArnd Bergmann EXT4_MAX_BLOCK_LOG_SIZE); 2886c9de560dSAlex Tomas struct sg { 2887c9de560dSAlex Tomas struct ext4_group_info info; 2888b80b32b6STheodore Ts'o ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2889c9de560dSAlex Tomas } sg; 2890c9de560dSAlex Tomas 2891c9de560dSAlex Tomas group--; 2892c9de560dSAlex Tomas if (group == 0) 289397b4af2fSRasmus Villemoes seq_puts(seq, "#group: free frags first [" 289497b4af2fSRasmus Villemoes " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2895802cf1f9SHuaitong Han " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2896c9de560dSAlex Tomas 2897b80b32b6STheodore Ts'o i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2898b80b32b6STheodore Ts'o sizeof(struct ext4_group_info); 2899b80b32b6STheodore Ts'o 29001c8457caSAditya Kali grinfo = ext4_get_group_info(sb, group); 29015354b2afSTheodore Ts'o if (!grinfo) 29025354b2afSTheodore Ts'o return 0; 29031c8457caSAditya Kali /* Load the group info in memory only if not already loaded. */ 29041c8457caSAditya Kali if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2905c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2906c9de560dSAlex Tomas if (err) { 2907a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: I/O error\n", group); 2908c9de560dSAlex Tomas return 0; 2909c9de560dSAlex Tomas } 29101c8457caSAditya Kali buddy_loaded = 1; 29111c8457caSAditya Kali } 29121c8457caSAditya Kali 29135354b2afSTheodore Ts'o memcpy(&sg, grinfo, i); 29141c8457caSAditya Kali 29151c8457caSAditya Kali if (buddy_loaded) 2916e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2917c9de560dSAlex Tomas 2918a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2919c9de560dSAlex Tomas sg.info.bb_fragments, sg.info.bb_first_free); 2920c9de560dSAlex Tomas for (i = 0; i <= 13; i++) 29212df2c340SArnd Bergmann seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
2922c9de560dSAlex Tomas sg.info.bb_counters[i] : 0); 2923e0d438c7SXu Wang seq_puts(seq, " ]\n"); 2924c9de560dSAlex Tomas 2925c9de560dSAlex Tomas return 0; 2926c9de560dSAlex Tomas } 2927c9de560dSAlex Tomas 2928c9de560dSAlex Tomas static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2929c9de560dSAlex Tomas { 2930c9de560dSAlex Tomas } 2931c9de560dSAlex Tomas 2932247dbed8SChristoph Hellwig const struct seq_operations ext4_mb_seq_groups_ops = { 2933c9de560dSAlex Tomas .start = ext4_mb_seq_groups_start, 2934c9de560dSAlex Tomas .next = ext4_mb_seq_groups_next, 2935c9de560dSAlex Tomas .stop = ext4_mb_seq_groups_stop, 2936c9de560dSAlex Tomas .show = ext4_mb_seq_groups_show, 2937c9de560dSAlex Tomas }; 2938c9de560dSAlex Tomas 2939a6c75eafSHarshad Shirwadkar int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2940a6c75eafSHarshad Shirwadkar { 2941c30365b9SYu Zhe struct super_block *sb = seq->private; 2942a6c75eafSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2943a6c75eafSHarshad Shirwadkar 2944a6c75eafSHarshad Shirwadkar seq_puts(seq, "mballoc:\n"); 2945a6c75eafSHarshad Shirwadkar if (!sbi->s_mb_stats) { 2946a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tmb stats collection turned off.\n"); 2947a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2948a6c75eafSHarshad Shirwadkar return 0; 2949a6c75eafSHarshad Shirwadkar } 2950a6c75eafSHarshad Shirwadkar seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2951a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2952a6c75eafSHarshad Shirwadkar 2953a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2954a6c75eafSHarshad Shirwadkar 2955a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr0_stats:\n"); 29564eb7a4a1SOjaswin Mujoo seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR0])); 2957a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 29584eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_groups_considered[CR0])); 2959fdd9a009SOjaswin Mujoo seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR0])); 2960a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 29614eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_failed[CR0])); 2962196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2963196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2964a6c75eafSHarshad Shirwadkar 2965a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr1_stats:\n"); 29664eb7a4a1SOjaswin Mujoo seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR1])); 2967a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 29684eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_groups_considered[CR1])); 2969fdd9a009SOjaswin Mujoo seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR1])); 2970a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 29714eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_failed[CR1])); 2972196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2973196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2974a6c75eafSHarshad Shirwadkar 2975a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr2_stats:\n"); 29764eb7a4a1SOjaswin Mujoo seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR2])); 
2977a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 29784eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_groups_considered[CR2])); 2979fdd9a009SOjaswin Mujoo seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR2])); 2980a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 29814eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_failed[CR2])); 2982a6c75eafSHarshad Shirwadkar 2983a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr3_stats:\n"); 29844eb7a4a1SOjaswin Mujoo seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR3])); 2985a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 29864eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_groups_considered[CR3])); 2987fdd9a009SOjaswin Mujoo seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR3])); 2988a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 29894eb7a4a1SOjaswin Mujoo atomic64_read(&sbi->s_bal_cX_failed[CR3])); 2990a6c75eafSHarshad Shirwadkar seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2991a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 29923ef5d263SOjaswin Mujoo seq_printf(seq, "\t\tlen_goal_hits: %u\n", atomic_read(&sbi->s_bal_len_goals)); 2993a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2994a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2995a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2996a6c75eafSHarshad Shirwadkar 2997a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2998a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 2999a6c75eafSHarshad Shirwadkar ext4_get_groups_count(sb)); 3000a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_time_used: %llu\n", 3001a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 3002a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tpreallocated: %u\n", 3003a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_preallocated)); 3004a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tdiscarded: %u\n", 3005a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_discarded)); 3006a6c75eafSHarshad Shirwadkar return 0; 3007a6c75eafSHarshad Shirwadkar } 3008a6c75eafSHarshad Shirwadkar 3009f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 3010a5fda113STheodore Ts'o __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 3011f68f4063SHarshad Shirwadkar { 3012359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3013f68f4063SHarshad Shirwadkar unsigned long position; 3014f68f4063SHarshad Shirwadkar 301583e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3016f68f4063SHarshad Shirwadkar return NULL; 3017f68f4063SHarshad Shirwadkar position = *pos + 1; 3018f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 3019f68f4063SHarshad Shirwadkar } 3020f68f4063SHarshad Shirwadkar 3021f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3022f68f4063SHarshad Shirwadkar { 3023359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3024f68f4063SHarshad Shirwadkar unsigned long position; 3025f68f4063SHarshad Shirwadkar 3026f68f4063SHarshad Shirwadkar ++*pos; 
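	/*
	 * Positions 1..MB_NUM_ORDERS(sb) iterate the largest-free-order lists
	 * and the following MB_NUM_ORDERS(sb) positions the
	 * average-fragment-size lists, hence the 2 * MB_NUM_ORDERS(sb) bound
	 * checked below.
	 */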
302783e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3028f68f4063SHarshad Shirwadkar return NULL; 3029f68f4063SHarshad Shirwadkar position = *pos + 1; 3030f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 3031f68f4063SHarshad Shirwadkar } 3032f68f4063SHarshad Shirwadkar 3033f68f4063SHarshad Shirwadkar static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3034f68f4063SHarshad Shirwadkar { 3035359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3036f68f4063SHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 3037f68f4063SHarshad Shirwadkar unsigned long position = ((unsigned long) v); 3038f68f4063SHarshad Shirwadkar struct ext4_group_info *grp; 303983e80a6eSJan Kara unsigned int count; 3040f68f4063SHarshad Shirwadkar 3041f68f4063SHarshad Shirwadkar position--; 3042f68f4063SHarshad Shirwadkar if (position >= MB_NUM_ORDERS(sb)) { 304383e80a6eSJan Kara position -= MB_NUM_ORDERS(sb); 304483e80a6eSJan Kara if (position == 0) 304583e80a6eSJan Kara seq_puts(seq, "avg_fragment_size_lists:\n"); 3046f68f4063SHarshad Shirwadkar 304783e80a6eSJan Kara count = 0; 304883e80a6eSJan Kara read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 304983e80a6eSJan Kara list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 305083e80a6eSJan Kara bb_avg_fragment_size_node) 305183e80a6eSJan Kara count++; 305283e80a6eSJan Kara read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 305383e80a6eSJan Kara seq_printf(seq, "\tlist_order_%u_groups: %u\n", 305483e80a6eSJan Kara (unsigned int)position, count); 3055f68f4063SHarshad Shirwadkar return 0; 3056f68f4063SHarshad Shirwadkar } 3057f68f4063SHarshad Shirwadkar 3058f68f4063SHarshad Shirwadkar if (position == 0) { 3059f68f4063SHarshad Shirwadkar seq_printf(seq, "optimize_scan: %d\n", 3060f68f4063SHarshad Shirwadkar test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); 3061f68f4063SHarshad Shirwadkar seq_puts(seq, "max_free_order_lists:\n"); 3062f68f4063SHarshad Shirwadkar } 3063f68f4063SHarshad Shirwadkar count = 0; 306483e80a6eSJan Kara read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 3065f68f4063SHarshad Shirwadkar list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3066f68f4063SHarshad Shirwadkar bb_largest_free_order_node) 3067f68f4063SHarshad Shirwadkar count++; 306883e80a6eSJan Kara read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3069f68f4063SHarshad Shirwadkar seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3070f68f4063SHarshad Shirwadkar (unsigned int)position, count); 3071f68f4063SHarshad Shirwadkar 3072f68f4063SHarshad Shirwadkar return 0; 3073f68f4063SHarshad Shirwadkar } 3074f68f4063SHarshad Shirwadkar 3075f68f4063SHarshad Shirwadkar static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3076f68f4063SHarshad Shirwadkar { 3077f68f4063SHarshad Shirwadkar } 3078f68f4063SHarshad Shirwadkar 3079f68f4063SHarshad Shirwadkar const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3080f68f4063SHarshad Shirwadkar .start = ext4_mb_seq_structs_summary_start, 3081f68f4063SHarshad Shirwadkar .next = ext4_mb_seq_structs_summary_next, 3082f68f4063SHarshad Shirwadkar .stop = ext4_mb_seq_structs_summary_stop, 3083f68f4063SHarshad Shirwadkar .show = ext4_mb_seq_structs_summary_show, 3084f68f4063SHarshad Shirwadkar }; 3085f68f4063SHarshad Shirwadkar 3086fb1813f4SCurt Wohlgemuth static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3087fb1813f4SCurt Wohlgemuth { 3088fb1813f4SCurt Wohlgemuth int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3089fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3090fb1813f4SCurt Wohlgemuth 3091fb1813f4SCurt Wohlgemuth BUG_ON(!cachep); 3092fb1813f4SCurt Wohlgemuth return cachep; 3093fb1813f4SCurt Wohlgemuth } 30945f21b0e6SFrederic Bohe 309528623c2fSTheodore Ts'o /* 309628623c2fSTheodore Ts'o * Allocate the top-level s_group_info array for the specified number 309728623c2fSTheodore Ts'o * of groups 309828623c2fSTheodore Ts'o */ 309928623c2fSTheodore Ts'o int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 310028623c2fSTheodore Ts'o { 310128623c2fSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 310228623c2fSTheodore Ts'o unsigned size; 3103df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 310428623c2fSTheodore Ts'o 310528623c2fSTheodore Ts'o size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 310628623c2fSTheodore Ts'o EXT4_DESC_PER_BLOCK_BITS(sb); 310728623c2fSTheodore Ts'o if (size <= sbi->s_group_info_size) 310828623c2fSTheodore Ts'o return 0; 310928623c2fSTheodore Ts'o 311028623c2fSTheodore Ts'o size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3111a7c3e901SMichal Hocko new_groupinfo = kvzalloc(size, GFP_KERNEL); 311228623c2fSTheodore Ts'o if (!new_groupinfo) { 311328623c2fSTheodore Ts'o ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 311428623c2fSTheodore Ts'o return -ENOMEM; 311528623c2fSTheodore Ts'o } 3116df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3117df3da4eaSSuraj Jitindar Singh old_groupinfo = rcu_dereference(sbi->s_group_info); 3118df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3119df3da4eaSSuraj Jitindar Singh memcpy(new_groupinfo, old_groupinfo, 312028623c2fSTheodore Ts'o sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3121df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 
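	/*
	 * Publishing the resized table with rcu_assign_pointer() lets lockless
	 * readers keep using either the old or the fully populated new array;
	 * the old one is only reclaimed via ext4_kvfree_array_rcu() below,
	 * i.e. after a grace period has elapsed.
	 */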
3122df3da4eaSSuraj Jitindar Singh rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 312328623c2fSTheodore Ts'o sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3124df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3125df3da4eaSSuraj Jitindar Singh ext4_kvfree_array_rcu(old_groupinfo); 312628623c2fSTheodore Ts'o ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 312728623c2fSTheodore Ts'o sbi->s_group_info_size); 312828623c2fSTheodore Ts'o return 0; 312928623c2fSTheodore Ts'o } 313028623c2fSTheodore Ts'o 31315f21b0e6SFrederic Bohe /* Create and initialize ext4_group_info data for the given group. */ 3132920313a7SAneesh Kumar K.V int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 31335f21b0e6SFrederic Bohe struct ext4_group_desc *desc) 31345f21b0e6SFrederic Bohe { 3135fb1813f4SCurt Wohlgemuth int i; 31365f21b0e6SFrederic Bohe int metalen = 0; 3137df3da4eaSSuraj Jitindar Singh int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 31385f21b0e6SFrederic Bohe struct ext4_sb_info *sbi = EXT4_SB(sb); 31395f21b0e6SFrederic Bohe struct ext4_group_info **meta_group_info; 3140fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 31415f21b0e6SFrederic Bohe 31425f21b0e6SFrederic Bohe /* 31435f21b0e6SFrederic Bohe * First check if this group is the first of a reserved block. 31445f21b0e6SFrederic Bohe * If it's true, we have to allocate a new table of pointers 31455f21b0e6SFrederic Bohe * to ext4_group_info structures 31465f21b0e6SFrederic Bohe */ 31475f21b0e6SFrederic Bohe if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 31485f21b0e6SFrederic Bohe metalen = sizeof(*meta_group_info) << 31495f21b0e6SFrederic Bohe EXT4_DESC_PER_BLOCK_BITS(sb); 31504fdb5543SDmitry Monakhov meta_group_info = kmalloc(metalen, GFP_NOFS); 31515f21b0e6SFrederic Bohe if (meta_group_info == NULL) { 31527f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate mem " 31539d8b9ec4STheodore Ts'o "for a buddy group"); 3154df119095SKemeng Shi return -ENOMEM; 31555f21b0e6SFrederic Bohe } 3156df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3157df3da4eaSSuraj Jitindar Singh rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3158df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 31595f21b0e6SFrederic Bohe } 31605f21b0e6SFrederic Bohe 3161df3da4eaSSuraj Jitindar Singh meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 31625f21b0e6SFrederic Bohe i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 31635f21b0e6SFrederic Bohe 31644fdb5543SDmitry Monakhov meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 31655f21b0e6SFrederic Bohe if (meta_group_info[i] == NULL) { 31667f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 31675f21b0e6SFrederic Bohe goto exit_group_info; 31685f21b0e6SFrederic Bohe } 31695f21b0e6SFrederic Bohe set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 31705f21b0e6SFrederic Bohe &(meta_group_info[i]->bb_state)); 31715f21b0e6SFrederic Bohe 31725f21b0e6SFrederic Bohe /* 31735f21b0e6SFrederic Bohe * initialize bb_free to be able to skip 31745f21b0e6SFrederic Bohe * empty groups without initialization 31755f21b0e6SFrederic Bohe */ 31768844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 31778844618dSTheodore Ts'o (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 31785f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3179cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, group, desc); 31805f21b0e6SFrederic Bohe } else { 31815f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3182021b65bbSTheodore Ts'o 
ext4_free_group_clusters(sb, desc); 31835f21b0e6SFrederic Bohe } 31845f21b0e6SFrederic Bohe 31855f21b0e6SFrederic Bohe INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3186920313a7SAneesh Kumar K.V init_rwsem(&meta_group_info[i]->alloc_sem); 318764e290ecSVenkatesh Pallipadi meta_group_info[i]->bb_free_root = RB_ROOT; 3188196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 318983e80a6eSJan Kara INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 31908a57d9d6SCurt Wohlgemuth meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 319183e80a6eSJan Kara meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3192196e402aSHarshad Shirwadkar meta_group_info[i]->bb_group = group; 31935f21b0e6SFrederic Bohe 3194a3450215SRitesh Harjani mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 31955f21b0e6SFrederic Bohe return 0; 31965f21b0e6SFrederic Bohe 31975f21b0e6SFrederic Bohe exit_group_info: 31985f21b0e6SFrederic Bohe /* If a meta_group_info table has been allocated, release it now */ 3199caaf7a29STao Ma if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3200df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3201df3da4eaSSuraj Jitindar Singh 3202df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3203df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3204df3da4eaSSuraj Jitindar Singh kfree(group_info[idx]); 3205df3da4eaSSuraj Jitindar Singh group_info[idx] = NULL; 3206df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3207caaf7a29STao Ma } 32085f21b0e6SFrederic Bohe return -ENOMEM; 32095f21b0e6SFrederic Bohe } /* ext4_mb_add_groupinfo */ 32105f21b0e6SFrederic Bohe 3211c9de560dSAlex Tomas static int ext4_mb_init_backend(struct super_block *sb) 3212c9de560dSAlex Tomas { 32138df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb); 3214c9de560dSAlex Tomas ext4_group_t i; 3215c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 321628623c2fSTheodore Ts'o int err; 32175f21b0e6SFrederic Bohe struct ext4_group_desc *desc; 3218df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3219fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep; 3220c9de560dSAlex Tomas 322128623c2fSTheodore Ts'o err = ext4_mb_alloc_groupinfo(sb, ngroups); 322228623c2fSTheodore Ts'o if (err) 322328623c2fSTheodore Ts'o return err; 32245f21b0e6SFrederic Bohe 3225c9de560dSAlex Tomas sbi->s_buddy_cache = new_inode(sb); 3226c9de560dSAlex Tomas if (sbi->s_buddy_cache == NULL) { 32279d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't get new inode"); 3228c9de560dSAlex Tomas goto err_freesgi; 3229c9de560dSAlex Tomas } 323048e6061bSYu Jian /* To avoid potentially colliding with an valid on-disk inode number, 323148e6061bSYu Jian * use EXT4_BAD_INO for the buddy cache inode number. This inode is 323248e6061bSYu Jian * not in the inode hash, so it should never be found by iget(), but 323348e6061bSYu Jian * this will avoid confusion if it ever shows up during debugging. 
*/
323448e6061bSYu Jian sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3235c9de560dSAlex Tomas EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
32368df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) {
32374b99faa2SKhazhismel Kumykov cond_resched();
3238c9de560dSAlex Tomas desc = ext4_get_group_desc(sb, i, NULL);
3239c9de560dSAlex Tomas if (desc == NULL) {
32409d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3241c9de560dSAlex Tomas goto err_freebuddy;
3242c9de560dSAlex Tomas }
32435f21b0e6SFrederic Bohe if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
32445f21b0e6SFrederic Bohe goto err_freebuddy;
3245c9de560dSAlex Tomas }
3246c9de560dSAlex Tomas
3247cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) {
3248f91436d5SSabyrzhan Tasbolatov /* a single flex group is supposed to be read by a single IO.
3249f91436d5SSabyrzhan Tasbolatov * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
3250f91436d5SSabyrzhan Tasbolatov * an unsigned integer, so the maximum shift is 32.
3251f91436d5SSabyrzhan Tasbolatov */
3252f91436d5SSabyrzhan Tasbolatov if (sbi->s_es->s_log_groups_per_flex >= 32) {
3253f91436d5SSabyrzhan Tasbolatov ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3254a8867f4eSPhillip Potter goto err_freebuddy;
3255f91436d5SSabyrzhan Tasbolatov }
3256f91436d5SSabyrzhan Tasbolatov sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
325782ef1370SChunguang Xu BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3258cfd73237SAlex Zhuravlev sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3259cfd73237SAlex Zhuravlev } else {
3260cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = 32;
3261cfd73237SAlex Zhuravlev }
3262cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3263cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3264cfd73237SAlex Zhuravlev /* how many real IOs to prefetch within a single allocation at cr=0.
3265cfd73237SAlex Zhuravlev * Given cr=0 is a CPU-related optimization we shouldn't try to
3266cfd73237SAlex Zhuravlev * load too many groups; at some point we should start to use what
3267cfd73237SAlex Zhuravlev * we've got in memory.
3268cfd73237SAlex Zhuravlev * with an average random access time 5ms, it'd take a second to get 3269cfd73237SAlex Zhuravlev * 200 groups (* N with flex_bg), so let's make this limit 4 3270cfd73237SAlex Zhuravlev */ 3271cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3272cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3273cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3274cfd73237SAlex Zhuravlev 3275c9de560dSAlex Tomas return 0; 3276c9de560dSAlex Tomas 3277c9de560dSAlex Tomas err_freebuddy: 3278fb1813f4SCurt Wohlgemuth cachep = get_groupinfo_cache(sb->s_blocksize_bits); 32795354b2afSTheodore Ts'o while (i-- > 0) { 32805354b2afSTheodore Ts'o struct ext4_group_info *grp = ext4_get_group_info(sb, i); 32815354b2afSTheodore Ts'o 32825354b2afSTheodore Ts'o if (grp) 32835354b2afSTheodore Ts'o kmem_cache_free(cachep, grp); 32845354b2afSTheodore Ts'o } 328528623c2fSTheodore Ts'o i = sbi->s_group_info_size; 3286df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3287df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3288f1fa3342SRoel Kluin while (i-- > 0) 3289df3da4eaSSuraj Jitindar Singh kfree(group_info[i]); 3290df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3291c9de560dSAlex Tomas iput(sbi->s_buddy_cache); 3292c9de560dSAlex Tomas err_freesgi: 3293df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3294df3da4eaSSuraj Jitindar Singh kvfree(rcu_dereference(sbi->s_group_info)); 3295df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3296c9de560dSAlex Tomas return -ENOMEM; 3297c9de560dSAlex Tomas } 3298c9de560dSAlex Tomas 32992892c15dSEric Sandeen static void ext4_groupinfo_destroy_slabs(void) 33002892c15dSEric Sandeen { 33012892c15dSEric Sandeen int i; 33022892c15dSEric Sandeen 33032892c15dSEric Sandeen for (i = 0; i < NR_GRPINFO_CACHES; i++) { 33042892c15dSEric Sandeen kmem_cache_destroy(ext4_groupinfo_caches[i]); 33052892c15dSEric Sandeen ext4_groupinfo_caches[i] = NULL; 33062892c15dSEric Sandeen } 33072892c15dSEric Sandeen } 33082892c15dSEric Sandeen 33092892c15dSEric Sandeen static int ext4_groupinfo_create_slab(size_t size) 33102892c15dSEric Sandeen { 33112892c15dSEric Sandeen static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 33122892c15dSEric Sandeen int slab_size; 33132892c15dSEric Sandeen int blocksize_bits = order_base_2(size); 33142892c15dSEric Sandeen int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 33152892c15dSEric Sandeen struct kmem_cache *cachep; 33162892c15dSEric Sandeen 33172892c15dSEric Sandeen if (cache_index >= NR_GRPINFO_CACHES) 33182892c15dSEric Sandeen return -EINVAL; 33192892c15dSEric Sandeen 33202892c15dSEric Sandeen if (unlikely(cache_index < 0)) 33212892c15dSEric Sandeen cache_index = 0; 33222892c15dSEric Sandeen 33232892c15dSEric Sandeen mutex_lock(&ext4_grpinfo_slab_create_mutex); 33242892c15dSEric Sandeen if (ext4_groupinfo_caches[cache_index]) { 33252892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex); 33262892c15dSEric Sandeen return 0; /* Already created */ 33272892c15dSEric Sandeen } 33282892c15dSEric Sandeen 33292892c15dSEric Sandeen slab_size = offsetof(struct ext4_group_info, 33302892c15dSEric Sandeen bb_counters[blocksize_bits + 2]); 33312892c15dSEric Sandeen 33322892c15dSEric Sandeen cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 33332892c15dSEric Sandeen slab_size, 0, SLAB_RECLAIM_ACCOUNT, 33342892c15dSEric Sandeen NULL); 33352892c15dSEric Sandeen 3336823ba01fSTao Ma ext4_groupinfo_caches[cache_index] = 
cachep; 3337823ba01fSTao Ma 33382892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex); 33392892c15dSEric Sandeen if (!cachep) { 33409d8b9ec4STheodore Ts'o printk(KERN_EMERG 33419d8b9ec4STheodore Ts'o "EXT4-fs: no memory for groupinfo slab cache\n"); 33422892c15dSEric Sandeen return -ENOMEM; 33432892c15dSEric Sandeen } 33442892c15dSEric Sandeen 33452892c15dSEric Sandeen return 0; 33462892c15dSEric Sandeen } 33472892c15dSEric Sandeen 334855cdd0afSWang Jianchao static void ext4_discard_work(struct work_struct *work) 334955cdd0afSWang Jianchao { 335055cdd0afSWang Jianchao struct ext4_sb_info *sbi = container_of(work, 335155cdd0afSWang Jianchao struct ext4_sb_info, s_discard_work); 335255cdd0afSWang Jianchao struct super_block *sb = sbi->s_sb; 335355cdd0afSWang Jianchao struct ext4_free_data *fd, *nfd; 335455cdd0afSWang Jianchao struct ext4_buddy e4b; 335555cdd0afSWang Jianchao struct list_head discard_list; 335655cdd0afSWang Jianchao ext4_group_t grp, load_grp; 335755cdd0afSWang Jianchao int err = 0; 335855cdd0afSWang Jianchao 335955cdd0afSWang Jianchao INIT_LIST_HEAD(&discard_list); 336055cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 336155cdd0afSWang Jianchao list_splice_init(&sbi->s_discard_list, &discard_list); 336255cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 336355cdd0afSWang Jianchao 336455cdd0afSWang Jianchao load_grp = UINT_MAX; 336555cdd0afSWang Jianchao list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 336655cdd0afSWang Jianchao /* 33675036ab8dSWang Jianchao * If filesystem is umounting or no memory or suffering 33685036ab8dSWang Jianchao * from no space, give up the discard 336955cdd0afSWang Jianchao */ 33705036ab8dSWang Jianchao if ((sb->s_flags & SB_ACTIVE) && !err && 33715036ab8dSWang Jianchao !atomic_read(&sbi->s_retry_alloc_pending)) { 337255cdd0afSWang Jianchao grp = fd->efd_group; 337355cdd0afSWang Jianchao if (grp != load_grp) { 337455cdd0afSWang Jianchao if (load_grp != UINT_MAX) 337555cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 337655cdd0afSWang Jianchao 337755cdd0afSWang Jianchao err = ext4_mb_load_buddy(sb, grp, &e4b); 337855cdd0afSWang Jianchao if (err) { 337955cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 338055cdd0afSWang Jianchao load_grp = UINT_MAX; 338155cdd0afSWang Jianchao continue; 338255cdd0afSWang Jianchao } else { 338355cdd0afSWang Jianchao load_grp = grp; 338455cdd0afSWang Jianchao } 338555cdd0afSWang Jianchao } 338655cdd0afSWang Jianchao 338755cdd0afSWang Jianchao ext4_lock_group(sb, grp); 338855cdd0afSWang Jianchao ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 338955cdd0afSWang Jianchao fd->efd_start_cluster + fd->efd_count - 1, 1); 339055cdd0afSWang Jianchao ext4_unlock_group(sb, grp); 339155cdd0afSWang Jianchao } 339255cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 339355cdd0afSWang Jianchao } 339455cdd0afSWang Jianchao 339555cdd0afSWang Jianchao if (load_grp != UINT_MAX) 339655cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 339755cdd0afSWang Jianchao } 339855cdd0afSWang Jianchao 33999d99012fSAkira Fujita int ext4_mb_init(struct super_block *sb) 3400c9de560dSAlex Tomas { 3401c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 34026be2ded1SAneesh Kumar K.V unsigned i, j; 3403935244cdSNicolai Stange unsigned offset, offset_incr; 3404c9de560dSAlex Tomas unsigned max; 340574767c5aSShen Feng int ret; 3406c9de560dSAlex Tomas 34074b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3408c9de560dSAlex Tomas 3409c9de560dSAlex Tomas 
sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3410c9de560dSAlex Tomas if (sbi->s_mb_offsets == NULL) { 3411fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3412fb1813f4SCurt Wohlgemuth goto out; 3413c9de560dSAlex Tomas } 3414ff7ef329SYasunori Goto 34154b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3416c9de560dSAlex Tomas sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3417c9de560dSAlex Tomas if (sbi->s_mb_maxs == NULL) { 3418fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3419fb1813f4SCurt Wohlgemuth goto out; 3420fb1813f4SCurt Wohlgemuth } 3421fb1813f4SCurt Wohlgemuth 34222892c15dSEric Sandeen ret = ext4_groupinfo_create_slab(sb->s_blocksize); 34232892c15dSEric Sandeen if (ret < 0) 3424fb1813f4SCurt Wohlgemuth goto out; 3425c9de560dSAlex Tomas 3426c9de560dSAlex Tomas /* order 0 is regular bitmap */ 3427c9de560dSAlex Tomas sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3428c9de560dSAlex Tomas sbi->s_mb_offsets[0] = 0; 3429c9de560dSAlex Tomas 3430c9de560dSAlex Tomas i = 1; 3431c9de560dSAlex Tomas offset = 0; 3432935244cdSNicolai Stange offset_incr = 1 << (sb->s_blocksize_bits - 1); 3433c9de560dSAlex Tomas max = sb->s_blocksize << 2; 3434c9de560dSAlex Tomas do { 3435c9de560dSAlex Tomas sbi->s_mb_offsets[i] = offset; 3436c9de560dSAlex Tomas sbi->s_mb_maxs[i] = max; 3437935244cdSNicolai Stange offset += offset_incr; 3438935244cdSNicolai Stange offset_incr = offset_incr >> 1; 3439c9de560dSAlex Tomas max = max >> 1; 3440c9de560dSAlex Tomas i++; 34414b68f6dfSHarshad Shirwadkar } while (i < MB_NUM_ORDERS(sb)); 34424b68f6dfSHarshad Shirwadkar 344383e80a6eSJan Kara sbi->s_mb_avg_fragment_size = 344483e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 344583e80a6eSJan Kara GFP_KERNEL); 344683e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size) { 344783e80a6eSJan Kara ret = -ENOMEM; 344883e80a6eSJan Kara goto out; 344983e80a6eSJan Kara } 345083e80a6eSJan Kara sbi->s_mb_avg_fragment_size_locks = 345183e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 345283e80a6eSJan Kara GFP_KERNEL); 345383e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size_locks) { 345483e80a6eSJan Kara ret = -ENOMEM; 345583e80a6eSJan Kara goto out; 345683e80a6eSJan Kara } 345783e80a6eSJan Kara for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 345883e80a6eSJan Kara INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 345983e80a6eSJan Kara rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 346083e80a6eSJan Kara } 3461196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders = 3462196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3463196e402aSHarshad Shirwadkar GFP_KERNEL); 3464196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders) { 3465196e402aSHarshad Shirwadkar ret = -ENOMEM; 3466196e402aSHarshad Shirwadkar goto out; 3467196e402aSHarshad Shirwadkar } 3468196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders_locks = 3469196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3470196e402aSHarshad Shirwadkar GFP_KERNEL); 3471196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders_locks) { 3472196e402aSHarshad Shirwadkar ret = -ENOMEM; 3473196e402aSHarshad Shirwadkar goto out; 3474196e402aSHarshad Shirwadkar } 3475196e402aSHarshad Shirwadkar for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3476196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); 3477196e402aSHarshad Shirwadkar rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); 3478196e402aSHarshad Shirwadkar } 3479c9de560dSAlex Tomas 3480c9de560dSAlex Tomas 
spin_lock_init(&sbi->s_md_lock); 3481d08854f5STheodore Ts'o sbi->s_mb_free_pending = 0; 3482a0154344SDaeho Jeong INIT_LIST_HEAD(&sbi->s_freed_data_list); 348355cdd0afSWang Jianchao INIT_LIST_HEAD(&sbi->s_discard_list); 348455cdd0afSWang Jianchao INIT_WORK(&sbi->s_discard_work, ext4_discard_work); 34855036ab8dSWang Jianchao atomic_set(&sbi->s_retry_alloc_pending, 0); 3486c9de560dSAlex Tomas 3487c9de560dSAlex Tomas sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 3488c9de560dSAlex Tomas sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 3489c9de560dSAlex Tomas sbi->s_mb_stats = MB_DEFAULT_STATS; 3490c9de560dSAlex Tomas sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 3491c9de560dSAlex Tomas sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 349227baebb8STheodore Ts'o /* 349327baebb8STheodore Ts'o * The default group preallocation is 512, which for 4k block 349427baebb8STheodore Ts'o * sizes translates to 2 megabytes. However for bigalloc file 349527baebb8STheodore Ts'o * systems, this is probably too big (i.e, if the cluster size 349627baebb8STheodore Ts'o * is 1 megabyte, then group preallocation size becomes half a 349727baebb8STheodore Ts'o * gigabyte!). As a default, we will keep a two megabyte 349827baebb8STheodore Ts'o * group pralloc size for cluster sizes up to 64k, and after 349927baebb8STheodore Ts'o * that, we will force a minimum group preallocation size of 350027baebb8STheodore Ts'o * 32 clusters. This translates to 8 megs when the cluster 350127baebb8STheodore Ts'o * size is 256k, and 32 megs when the cluster size is 1 meg, 350227baebb8STheodore Ts'o * which seems reasonable as a default. 350327baebb8STheodore Ts'o */ 350427baebb8STheodore Ts'o sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 350527baebb8STheodore Ts'o sbi->s_cluster_bits, 32); 3506d7a1fee1SDan Ehrenberg /* 3507d7a1fee1SDan Ehrenberg * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 3508d7a1fee1SDan Ehrenberg * to the lowest multiple of s_stripe which is bigger than 3509d7a1fee1SDan Ehrenberg * the s_mb_group_prealloc as determined above. We want 3510d7a1fee1SDan Ehrenberg * the preallocation size to be an exact multiple of the 3511d7a1fee1SDan Ehrenberg * RAID stripe size so that preallocations don't fragment 3512d7a1fee1SDan Ehrenberg * the stripes. 
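 * As a sketch with assumed values (not taken from this code): if the
 * default above produced s_mb_group_prealloc = 512 clusters and the
 * stripe works out to EXT4_B2C(sbi, sbi->s_stripe) = 24 clusters, the
 * roundup() below raises the preallocation to 528 clusters, i.e.
 * exactly 22 whole stripes.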
3513d7a1fee1SDan Ehrenberg */ 3514d7a1fee1SDan Ehrenberg if (sbi->s_stripe > 1) { 3515d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc = roundup( 3516c3defd99SKemeng Shi sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe)); 3517d7a1fee1SDan Ehrenberg } 3518c9de560dSAlex Tomas 3519730c213cSEric Sandeen sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3520c9de560dSAlex Tomas if (sbi->s_locality_groups == NULL) { 3521fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3522029b10c5SAndrey Tsyvarev goto out; 3523c9de560dSAlex Tomas } 3524730c213cSEric Sandeen for_each_possible_cpu(i) { 3525c9de560dSAlex Tomas struct ext4_locality_group *lg; 3526730c213cSEric Sandeen lg = per_cpu_ptr(sbi->s_locality_groups, i); 3527c9de560dSAlex Tomas mutex_init(&lg->lg_mutex); 35286be2ded1SAneesh Kumar K.V for (j = 0; j < PREALLOC_TB_SIZE; j++) 35296be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3530c9de560dSAlex Tomas spin_lock_init(&lg->lg_prealloc_lock); 3531c9de560dSAlex Tomas } 3532c9de560dSAlex Tomas 353310f0d2a5SChristoph Hellwig if (bdev_nonrot(sb->s_bdev)) 3534196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = 0; 3535196e402aSHarshad Shirwadkar else 3536196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 353779a77c5aSYu Jian /* init file for buddy data */ 353879a77c5aSYu Jian ret = ext4_mb_init_backend(sb); 35397aa0baeaSTao Ma if (ret != 0) 35407aa0baeaSTao Ma goto out_free_locality_groups; 354179a77c5aSYu Jian 35427aa0baeaSTao Ma return 0; 35437aa0baeaSTao Ma 35447aa0baeaSTao Ma out_free_locality_groups: 35457aa0baeaSTao Ma free_percpu(sbi->s_locality_groups); 35467aa0baeaSTao Ma sbi->s_locality_groups = NULL; 3547fb1813f4SCurt Wohlgemuth out: 354883e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size); 354983e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks); 3550196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders); 3551196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks); 3552fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_offsets); 35537aa0baeaSTao Ma sbi->s_mb_offsets = NULL; 3554fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_maxs); 35557aa0baeaSTao Ma sbi->s_mb_maxs = NULL; 3556fb1813f4SCurt Wohlgemuth return ret; 3557c9de560dSAlex Tomas } 3558c9de560dSAlex Tomas 3559955ce5f5SAneesh Kumar K.V /* need to called with the ext4 group lock held */ 3560d3df1453SRitesh Harjani static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3561c9de560dSAlex Tomas { 3562c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 3563c9de560dSAlex Tomas struct list_head *cur, *tmp; 3564c9de560dSAlex Tomas int count = 0; 3565c9de560dSAlex Tomas 3566c9de560dSAlex Tomas list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3567c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3568c9de560dSAlex Tomas list_del(&pa->pa_group_list); 3569c9de560dSAlex Tomas count++; 3570688f05a0SAneesh Kumar K.V kmem_cache_free(ext4_pspace_cachep, pa); 3571c9de560dSAlex Tomas } 3572d3df1453SRitesh Harjani return count; 3573c9de560dSAlex Tomas } 3574c9de560dSAlex Tomas 3575c9de560dSAlex Tomas int ext4_mb_release(struct super_block *sb) 3576c9de560dSAlex Tomas { 35778df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb); 3578c9de560dSAlex Tomas ext4_group_t i; 3579c9de560dSAlex Tomas int num_meta_group_infos; 3580df3da4eaSSuraj Jitindar Singh struct ext4_group_info *grinfo, ***group_info; 3581c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3582fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = 
get_groupinfo_cache(sb->s_blocksize_bits); 3583d3df1453SRitesh Harjani int count; 3584c9de560dSAlex Tomas 358555cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) { 358655cdd0afSWang Jianchao /* 358755cdd0afSWang Jianchao * wait the discard work to drain all of ext4_free_data 358855cdd0afSWang Jianchao */ 358955cdd0afSWang Jianchao flush_work(&sbi->s_discard_work); 359055cdd0afSWang Jianchao WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 359155cdd0afSWang Jianchao } 359255cdd0afSWang Jianchao 3593c9de560dSAlex Tomas if (sbi->s_group_info) { 35948df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 35954b99faa2SKhazhismel Kumykov cond_resched(); 3596c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, i); 35975354b2afSTheodore Ts'o if (!grinfo) 35985354b2afSTheodore Ts'o continue; 3599a3450215SRitesh Harjani mb_group_bb_bitmap_free(grinfo); 3600c9de560dSAlex Tomas ext4_lock_group(sb, i); 3601d3df1453SRitesh Harjani count = ext4_mb_cleanup_pa(grinfo); 3602d3df1453SRitesh Harjani if (count) 3603d3df1453SRitesh Harjani mb_debug(sb, "mballoc: %d PAs left\n", 3604d3df1453SRitesh Harjani count); 3605c9de560dSAlex Tomas ext4_unlock_group(sb, i); 3606fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, grinfo); 3607c9de560dSAlex Tomas } 36088df9675fSTheodore Ts'o num_meta_group_infos = (ngroups + 3609c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK(sb) - 1) >> 3610c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK_BITS(sb); 3611df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3612df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3613c9de560dSAlex Tomas for (i = 0; i < num_meta_group_infos; i++) 3614df3da4eaSSuraj Jitindar Singh kfree(group_info[i]); 3615df3da4eaSSuraj Jitindar Singh kvfree(group_info); 3616df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3617c9de560dSAlex Tomas } 361883e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size); 361983e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks); 3620196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders); 3621196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks); 3622c9de560dSAlex Tomas kfree(sbi->s_mb_offsets); 3623c9de560dSAlex Tomas kfree(sbi->s_mb_maxs); 3624c9de560dSAlex Tomas iput(sbi->s_buddy_cache); 3625c9de560dSAlex Tomas if (sbi->s_mb_stats) { 36269d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 36279d8b9ec4STheodore Ts'o "mballoc: %u blocks %u reqs (%u success)", 3628c9de560dSAlex Tomas atomic_read(&sbi->s_bal_allocated), 3629c9de560dSAlex Tomas atomic_read(&sbi->s_bal_reqs), 3630c9de560dSAlex Tomas atomic_read(&sbi->s_bal_success)); 36319d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 3632a6c75eafSHarshad Shirwadkar "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 36339d8b9ec4STheodore Ts'o "%u 2^N hits, %u breaks, %u lost", 3634c9de560dSAlex Tomas atomic_read(&sbi->s_bal_ex_scanned), 3635a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_bal_groups_scanned), 3636c9de560dSAlex Tomas atomic_read(&sbi->s_bal_goals), 3637c9de560dSAlex Tomas atomic_read(&sbi->s_bal_2orders), 3638c9de560dSAlex Tomas atomic_read(&sbi->s_bal_breaks), 3639c9de560dSAlex Tomas atomic_read(&sbi->s_mb_lost_chunks)); 36409d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 364167d25186SHarshad Shirwadkar "mballoc: %u generated and it took %llu", 364267d25186SHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 364367d25186SHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 36449d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 36459d8b9ec4STheodore Ts'o "mballoc: %u preallocated, %u discarded", 3646c9de560dSAlex 
Tomas atomic_read(&sbi->s_mb_preallocated), 3647c9de560dSAlex Tomas atomic_read(&sbi->s_mb_discarded)); 3648c9de560dSAlex Tomas } 3649c9de560dSAlex Tomas 3650730c213cSEric Sandeen free_percpu(sbi->s_locality_groups); 3651c9de560dSAlex Tomas 3652c9de560dSAlex Tomas return 0; 3653c9de560dSAlex Tomas } 3654c9de560dSAlex Tomas 365577ca6cdfSLukas Czerner static inline int ext4_issue_discard(struct super_block *sb, 3656a0154344SDaeho Jeong ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3657a0154344SDaeho Jeong struct bio **biop) 36585c521830SJiaying Zhang { 36595c521830SJiaying Zhang ext4_fsblk_t discard_block; 36605c521830SJiaying Zhang 366184130193STheodore Ts'o discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 366284130193STheodore Ts'o ext4_group_first_block_no(sb, block_group)); 366384130193STheodore Ts'o count = EXT4_C2B(EXT4_SB(sb), count); 36645c521830SJiaying Zhang trace_ext4_discard_blocks(sb, 36655c521830SJiaying Zhang (unsigned long long) discard_block, count); 3666a0154344SDaeho Jeong if (biop) { 3667a0154344SDaeho Jeong return __blkdev_issue_discard(sb->s_bdev, 3668a0154344SDaeho Jeong (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3669a0154344SDaeho Jeong (sector_t)count << (sb->s_blocksize_bits - 9), 367044abff2cSChristoph Hellwig GFP_NOFS, biop); 3671a0154344SDaeho Jeong } else 367293259636SLukas Czerner return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 36735c521830SJiaying Zhang } 36745c521830SJiaying Zhang 3675a0154344SDaeho Jeong static void ext4_free_data_in_buddy(struct super_block *sb, 3676a0154344SDaeho Jeong struct ext4_free_data *entry) 3677c9de560dSAlex Tomas { 3678c9de560dSAlex Tomas struct ext4_buddy e4b; 3679c894058dSAneesh Kumar K.V struct ext4_group_info *db; 3680c7f2bafaSKemeng Shi int err, count = 0; 3681c9de560dSAlex Tomas 3682d3df1453SRitesh Harjani mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 368318aadd47SBobi Jam entry->efd_count, entry->efd_group, entry); 3684c9de560dSAlex Tomas 368518aadd47SBobi Jam err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3686c9de560dSAlex Tomas /* we expect to find existing buddy because it's pinned */ 3687c9de560dSAlex Tomas BUG_ON(err != 0); 3688c9de560dSAlex Tomas 3689d08854f5STheodore Ts'o spin_lock(&EXT4_SB(sb)->s_md_lock); 3690d08854f5STheodore Ts'o EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3691d08854f5STheodore Ts'o spin_unlock(&EXT4_SB(sb)->s_md_lock); 369218aadd47SBobi Jam 3693c894058dSAneesh Kumar K.V db = e4b.bd_info; 3694c9de560dSAlex Tomas /* there are blocks to put in buddy to make them really free */ 369518aadd47SBobi Jam count += entry->efd_count; 369618aadd47SBobi Jam ext4_lock_group(sb, entry->efd_group); 3697c894058dSAneesh Kumar K.V /* Take it out of per group rb tree */ 369818aadd47SBobi Jam rb_erase(&entry->efd_node, &(db->bb_free_root)); 369918aadd47SBobi Jam mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3700c9de560dSAlex Tomas 37013d56b8d2STao Ma /* 37023d56b8d2STao Ma * Clear the trimmed flag for the group so that the next 37033d56b8d2STao Ma * ext4_trim_fs can trim it. 37043d56b8d2STao Ma * If the volume is mounted with -o discard, online discard 37053d56b8d2STao Ma * is supported and the free blocks will be trimmed online. 
37063d56b8d2STao Ma */ 37073d56b8d2STao Ma if (!test_opt(sb, DISCARD)) 37083d56b8d2STao Ma EXT4_MB_GRP_CLEAR_TRIMMED(db); 37093d56b8d2STao Ma 3710c894058dSAneesh Kumar K.V if (!db->bb_free_root.rb_node) { 3711c894058dSAneesh Kumar K.V /* No more items in the per group rb tree 3712c894058dSAneesh Kumar K.V * balance refcounts from ext4_mb_free_metadata() 3713c894058dSAneesh Kumar K.V */ 371409cbfeafSKirill A. Shutemov put_page(e4b.bd_buddy_page); 371509cbfeafSKirill A. Shutemov put_page(e4b.bd_bitmap_page); 3716c894058dSAneesh Kumar K.V } 371718aadd47SBobi Jam ext4_unlock_group(sb, entry->efd_group); 3718e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 3719c9de560dSAlex Tomas 3720c7f2bafaSKemeng Shi mb_debug(sb, "freed %d blocks in 1 structures\n", count); 3721c9de560dSAlex Tomas } 3722c9de560dSAlex Tomas 3723a0154344SDaeho Jeong /* 3724a0154344SDaeho Jeong * This function is called by the jbd2 layer once the commit has finished, 3725a0154344SDaeho Jeong * so we know we can free the blocks that were released with that commit. 3726a0154344SDaeho Jeong */ 3727a0154344SDaeho Jeong void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3728a0154344SDaeho Jeong { 3729a0154344SDaeho Jeong struct ext4_sb_info *sbi = EXT4_SB(sb); 3730a0154344SDaeho Jeong struct ext4_free_data *entry, *tmp; 3731a0154344SDaeho Jeong struct list_head freed_data_list; 3732a0154344SDaeho Jeong struct list_head *cut_pos = NULL; 373355cdd0afSWang Jianchao bool wake; 3734a0154344SDaeho Jeong 3735a0154344SDaeho Jeong INIT_LIST_HEAD(&freed_data_list); 3736a0154344SDaeho Jeong 3737a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 3738a0154344SDaeho Jeong list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3739a0154344SDaeho Jeong if (entry->efd_tid != commit_tid) 3740a0154344SDaeho Jeong break; 3741a0154344SDaeho Jeong cut_pos = &entry->efd_list; 3742a0154344SDaeho Jeong } 3743a0154344SDaeho Jeong if (cut_pos) 3744a0154344SDaeho Jeong list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3745a0154344SDaeho Jeong cut_pos); 3746a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 3747a0154344SDaeho Jeong 374855cdd0afSWang Jianchao list_for_each_entry(entry, &freed_data_list, efd_list) 3749a0154344SDaeho Jeong ext4_free_data_in_buddy(sb, entry); 375055cdd0afSWang Jianchao 375155cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) { 375255cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 375355cdd0afSWang Jianchao wake = list_empty(&sbi->s_discard_list); 375455cdd0afSWang Jianchao list_splice_tail(&freed_data_list, &sbi->s_discard_list); 375555cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 375655cdd0afSWang Jianchao if (wake) 375755cdd0afSWang Jianchao queue_work(system_unbound_wq, &sbi->s_discard_work); 375855cdd0afSWang Jianchao } else { 375955cdd0afSWang Jianchao list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 376055cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, entry); 376155cdd0afSWang Jianchao } 3762a0154344SDaeho Jeong } 3763a0154344SDaeho Jeong 37645dabfc78STheodore Ts'o int __init ext4_init_mballoc(void) 3765c9de560dSAlex Tomas { 376616828088STheodore Ts'o ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 376716828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3768c9de560dSAlex Tomas if (ext4_pspace_cachep == NULL) 3769f283529aSRitesh Harjani goto out; 3770c9de560dSAlex Tomas 377116828088STheodore Ts'o ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 377216828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3773f283529aSRitesh Harjani if (ext4_ac_cachep == NULL) 
3774f283529aSRitesh Harjani goto out_pa_free; 3775c894058dSAneesh Kumar K.V 377618aadd47SBobi Jam ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 377716828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3778f283529aSRitesh Harjani if (ext4_free_data_cachep == NULL) 3779f283529aSRitesh Harjani goto out_ac_free; 3780f283529aSRitesh Harjani 3781c9de560dSAlex Tomas return 0; 3782f283529aSRitesh Harjani 3783f283529aSRitesh Harjani out_ac_free: 3784f283529aSRitesh Harjani kmem_cache_destroy(ext4_ac_cachep); 3785f283529aSRitesh Harjani out_pa_free: 3786f283529aSRitesh Harjani kmem_cache_destroy(ext4_pspace_cachep); 3787f283529aSRitesh Harjani out: 3788f283529aSRitesh Harjani return -ENOMEM; 3789c9de560dSAlex Tomas } 3790c9de560dSAlex Tomas 37915dabfc78STheodore Ts'o void ext4_exit_mballoc(void) 3792c9de560dSAlex Tomas { 37933e03f9caSJesper Dangaard Brouer /* 37943e03f9caSJesper Dangaard Brouer * Wait for completion of call_rcu()'s on ext4_pspace_cachep 37953e03f9caSJesper Dangaard Brouer * before destroying the slab cache. 37963e03f9caSJesper Dangaard Brouer */ 37973e03f9caSJesper Dangaard Brouer rcu_barrier(); 3798c9de560dSAlex Tomas kmem_cache_destroy(ext4_pspace_cachep); 3799256bdb49SEric Sandeen kmem_cache_destroy(ext4_ac_cachep); 380018aadd47SBobi Jam kmem_cache_destroy(ext4_free_data_cachep); 38012892c15dSEric Sandeen ext4_groupinfo_destroy_slabs(); 3802c9de560dSAlex Tomas } 3803c9de560dSAlex Tomas 3804c9de560dSAlex Tomas 3805c9de560dSAlex Tomas /* 380673b2c716SUwe Kleine-König * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3807c9de560dSAlex Tomas * Returns 0 if success or error code 3808c9de560dSAlex Tomas */ 38094ddfef7bSEric Sandeen static noinline_for_stack int 38104ddfef7bSEric Sandeen ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 381153accfa9STheodore Ts'o handle_t *handle, unsigned int reserv_clstrs) 3812c9de560dSAlex Tomas { 3813c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 3814c9de560dSAlex Tomas struct ext4_group_desc *gdp; 3815c9de560dSAlex Tomas struct buffer_head *gdp_bh; 3816c9de560dSAlex Tomas struct ext4_sb_info *sbi; 3817c9de560dSAlex Tomas struct super_block *sb; 3818c9de560dSAlex Tomas ext4_fsblk_t block; 3819519deca0SAneesh Kumar K.V int err, len; 3820c9de560dSAlex Tomas 3821c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3822c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_len <= 0); 3823c9de560dSAlex Tomas 3824c9de560dSAlex Tomas sb = ac->ac_sb; 3825c9de560dSAlex Tomas sbi = EXT4_SB(sb); 3826c9de560dSAlex Tomas 3827574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 38289008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 3829fb28f9ceSKemeng Shi return PTR_ERR(bitmap_bh); 38309008a58eSDarrick J. 
Wong } 3831c9de560dSAlex Tomas 38325d601255Sliang xie BUFFER_TRACE(bitmap_bh, "getting write access"); 3833188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 3834188c299eSJan Kara EXT4_JTR_NONE); 3835c9de560dSAlex Tomas if (err) 3836c9de560dSAlex Tomas goto out_err; 3837c9de560dSAlex Tomas 3838c9de560dSAlex Tomas err = -EIO; 3839c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3840c9de560dSAlex Tomas if (!gdp) 3841c9de560dSAlex Tomas goto out_err; 3842c9de560dSAlex Tomas 3843a9df9a49STheodore Ts'o ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3844021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, gdp)); 384503cddb80SAneesh Kumar K.V 38465d601255Sliang xie BUFFER_TRACE(gdp_bh, "get_write_access"); 3847188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 3848c9de560dSAlex Tomas if (err) 3849c9de560dSAlex Tomas goto out_err; 3850c9de560dSAlex Tomas 3851bda00de7SAkinobu Mita block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3852c9de560dSAlex Tomas 385353accfa9STheodore Ts'o len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3854ce9f24ccSJan Kara if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 385512062dddSEric Sandeen ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 38561084f252STheodore Ts'o "fs metadata", block, block+len); 3857519deca0SAneesh Kumar K.V /* File system mounted not to panic on error 3858554a5cccSVegard Nossum * Fix the bitmap and return EFSCORRUPTED 3859519deca0SAneesh Kumar K.V * We leak some of the blocks here. 3860519deca0SAneesh Kumar K.V */ 3861955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3862123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3863519deca0SAneesh Kumar K.V ac->ac_b_ex.fe_len); 3864955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 38650390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3866519deca0SAneesh Kumar K.V if (!err) 3867554a5cccSVegard Nossum err = -EFSCORRUPTED; 3868519deca0SAneesh Kumar K.V goto out_err; 3869c9de560dSAlex Tomas } 3870955ce5f5SAneesh Kumar K.V 3871955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3872c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 3873c9de560dSAlex Tomas { 3874c9de560dSAlex Tomas int i; 3875c9de560dSAlex Tomas for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 3876c9de560dSAlex Tomas BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 3877c9de560dSAlex Tomas bitmap_bh->b_data)); 3878c9de560dSAlex Tomas } 3879c9de560dSAlex Tomas } 3880c9de560dSAlex Tomas #endif 3881123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3882c3e94d1dSYongqiang Yang ac->ac_b_ex.fe_len); 38838844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 38848844618dSTheodore Ts'o (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3885c9de560dSAlex Tomas gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3886021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, 3887cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, 3888560671a0SAneesh Kumar K.V ac->ac_b_ex.fe_group, gdp)); 3889c9de560dSAlex Tomas } 3890021b65bbSTheodore Ts'o len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 3891021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, len); 38921df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 3893feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 3894955ce5f5SAneesh Kumar K.V 3895955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 389657042651STheodore Ts'o percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 3897d2a17637SMingming Cao /* 38986bc6e63fSAneesh Kumar K.V * Now reduce the dirty block count also. Should not go negative 3899d2a17637SMingming Cao */ 39006bc6e63fSAneesh Kumar K.V if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 39016bc6e63fSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 390257042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 390357042651STheodore Ts'o reserv_clstrs); 3904c9de560dSAlex Tomas 3905772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 3906772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, 3907772cb7c8SJose R. Santos ac->ac_b_ex.fe_group); 390890ba983fSTheodore Ts'o atomic64_sub(ac->ac_b_ex.fe_len, 39097c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 39107c990728SSuraj Jitindar Singh flex_group)->free_clusters); 3911772cb7c8SJose R. Santos } 3912772cb7c8SJose R. Santos 39130390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3914c9de560dSAlex Tomas if (err) 3915c9de560dSAlex Tomas goto out_err; 39160390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 3917c9de560dSAlex Tomas 3918c9de560dSAlex Tomas out_err: 391942a10addSAneesh Kumar K.V brelse(bitmap_bh); 3920c9de560dSAlex Tomas return err; 3921c9de560dSAlex Tomas } 3922c9de560dSAlex Tomas 3923c9de560dSAlex Tomas /* 39248016e29fSHarshad Shirwadkar * Idempotent helper for Ext4 fast commit replay path to set the state of 39258016e29fSHarshad Shirwadkar * blocks in bitmaps and update counters. 39268016e29fSHarshad Shirwadkar */ 39278016e29fSHarshad Shirwadkar void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 39288016e29fSHarshad Shirwadkar int len, int state) 39298016e29fSHarshad Shirwadkar { 39308016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh = NULL; 39318016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 39328016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 39338016e29fSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 39348016e29fSHarshad Shirwadkar ext4_group_t group; 39358016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 3936a5c0e2fdSRitesh Harjani int i, err; 39378016e29fSHarshad Shirwadkar int already; 3938bfdc502aSRitesh Harjani unsigned int clen, clen_changed, thisgrp_len; 39398016e29fSHarshad Shirwadkar 3940bfdc502aSRitesh Harjani while (len > 0) { 39418016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 3942bfdc502aSRitesh Harjani 3943bfdc502aSRitesh Harjani /* 3944bfdc502aSRitesh Harjani * Check to see if we are freeing blocks across a group 3945bfdc502aSRitesh Harjani * boundary. 3946bfdc502aSRitesh Harjani * In case of flex_bg, this can happen that (block, len) may 3947bfdc502aSRitesh Harjani * span across more than one group. In that case we need to 3948bfdc502aSRitesh Harjani * get the corresponding group metadata to work with. 3949bfdc502aSRitesh Harjani * For this we have goto again loop. 
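 * As an illustration with assumed numbers (not from this file): with
 * 32768 blocks per group and blkoff = 30000, a replay that marks
 * len = 10000 blocks is handled in two passes, thisgrp_len = 2768 in
 * the current group and the remaining 7232 blocks starting at the next
 * group.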
3950bfdc502aSRitesh Harjani */ 3951bfdc502aSRitesh Harjani thisgrp_len = min_t(unsigned int, (unsigned int)len, 3952bfdc502aSRitesh Harjani EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 3953bfdc502aSRitesh Harjani clen = EXT4_NUM_B2C(sbi, thisgrp_len); 3954bfdc502aSRitesh Harjani 39558c91c579SRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 39568c91c579SRitesh Harjani ext4_error(sb, "Marking blocks in system zone - " 39578c91c579SRitesh Harjani "Block = %llu, len = %u", 39588c91c579SRitesh Harjani block, thisgrp_len); 39598c91c579SRitesh Harjani bitmap_bh = NULL; 39608c91c579SRitesh Harjani break; 39618c91c579SRitesh Harjani } 39628c91c579SRitesh Harjani 39638016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 39648016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 39658016e29fSHarshad Shirwadkar err = PTR_ERR(bitmap_bh); 39668016e29fSHarshad Shirwadkar bitmap_bh = NULL; 3967bfdc502aSRitesh Harjani break; 39688016e29fSHarshad Shirwadkar } 39698016e29fSHarshad Shirwadkar 39708016e29fSHarshad Shirwadkar err = -EIO; 39718016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 39728016e29fSHarshad Shirwadkar if (!gdp) 3973bfdc502aSRitesh Harjani break; 39748016e29fSHarshad Shirwadkar 39758016e29fSHarshad Shirwadkar ext4_lock_group(sb, group); 39768016e29fSHarshad Shirwadkar already = 0; 39778016e29fSHarshad Shirwadkar for (i = 0; i < clen; i++) 3978bfdc502aSRitesh Harjani if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 3979bfdc502aSRitesh Harjani !state) 39808016e29fSHarshad Shirwadkar already++; 39818016e29fSHarshad Shirwadkar 3982a5c0e2fdSRitesh Harjani clen_changed = clen - already; 39838016e29fSHarshad Shirwadkar if (state) 3984123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, blkoff, clen); 39858016e29fSHarshad Shirwadkar else 3986bd8247eeSRitesh Harjani mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 39878016e29fSHarshad Shirwadkar if (ext4_has_group_desc_csum(sb) && 39888016e29fSHarshad Shirwadkar (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 39898016e29fSHarshad Shirwadkar gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 39908016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, 3991bfdc502aSRitesh Harjani ext4_free_clusters_after_init(sb, group, gdp)); 39928016e29fSHarshad Shirwadkar } 39938016e29fSHarshad Shirwadkar if (state) 3994a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 39958016e29fSHarshad Shirwadkar else 3996a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 39978016e29fSHarshad Shirwadkar 39988016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, clen); 39991df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 40008016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 40018016e29fSHarshad Shirwadkar 40028016e29fSHarshad Shirwadkar ext4_unlock_group(sb, group); 40038016e29fSHarshad Shirwadkar 40048016e29fSHarshad Shirwadkar if (sbi->s_log_groups_per_flex) { 40058016e29fSHarshad Shirwadkar ext4_group_t flex_group = ext4_flex_group(sbi, group); 4006a5c0e2fdSRitesh Harjani struct flex_groups *fg = sbi_array_rcu_deref(sbi, 4007a5c0e2fdSRitesh Harjani s_flex_groups, flex_group); 40088016e29fSHarshad Shirwadkar 4009a5c0e2fdSRitesh Harjani if (state) 4010a5c0e2fdSRitesh Harjani atomic64_sub(clen_changed, &fg->free_clusters); 4011a5c0e2fdSRitesh Harjani else 4012a5c0e2fdSRitesh Harjani atomic64_add(clen_changed, &fg->free_clusters); 4013bfdc502aSRitesh Harjani 
40148016e29fSHarshad Shirwadkar } 40158016e29fSHarshad Shirwadkar 40168016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 40178016e29fSHarshad Shirwadkar if (err) 4018bfdc502aSRitesh Harjani break; 40198016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 40208016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 40218016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 4022bfdc502aSRitesh Harjani if (err) 4023bfdc502aSRitesh Harjani break; 40248016e29fSHarshad Shirwadkar 4025bfdc502aSRitesh Harjani block += thisgrp_len; 4026bfdc502aSRitesh Harjani len -= thisgrp_len; 4027bfdc502aSRitesh Harjani brelse(bitmap_bh); 4028bfdc502aSRitesh Harjani BUG_ON(len < 0); 4029bfdc502aSRitesh Harjani } 4030bfdc502aSRitesh Harjani 4031bfdc502aSRitesh Harjani if (err) 40328016e29fSHarshad Shirwadkar brelse(bitmap_bh); 40338016e29fSHarshad Shirwadkar } 40348016e29fSHarshad Shirwadkar 40358016e29fSHarshad Shirwadkar /* 4036c9de560dSAlex Tomas * here we normalize the request for a locality group 4037d7a1fee1SDan Ehrenberg * Group requests are normalized to s_mb_group_prealloc, which is rounded 4038d7a1fee1SDan Ehrenberg * up to a multiple of s_stripe if the stripe= mount option is set. 4039d7a1fee1SDan Ehrenberg * s_mb_group_prealloc can be configured via 4040b713a5ecSTheodore Ts'o * /sys/fs/ext4/<partition>/mb_group_prealloc 4041c9de560dSAlex Tomas * 4042c9de560dSAlex Tomas * XXX: should we try to preallocate more than the group has now? 4043c9de560dSAlex Tomas */ 4044c9de560dSAlex Tomas static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 4045c9de560dSAlex Tomas { 4046c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 4047c9de560dSAlex Tomas struct ext4_locality_group *lg = ac->ac_lg; 4048c9de560dSAlex Tomas 4049c9de560dSAlex Tomas BUG_ON(lg == NULL); 4050c9de560dSAlex Tomas ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 4051d3df1453SRitesh Harjani mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 4052c9de560dSAlex Tomas } 4053c9de560dSAlex Tomas 405438727786SOjaswin Mujoo /* 405538727786SOjaswin Mujoo * This function returns the next element to look at during inode 405638727786SOjaswin Mujoo * PA rbtree walk.
We assume that we have held the inode PA rbtree lock 405738727786SOjaswin Mujoo * (ei->i_prealloc_lock) 405838727786SOjaswin Mujoo * 405938727786SOjaswin Mujoo * new_start The start of the range we want to compare 406038727786SOjaswin Mujoo * cur_start The existing start that we are comparing against 406138727786SOjaswin Mujoo * node The node of the rb_tree 406238727786SOjaswin Mujoo */ 406338727786SOjaswin Mujoo static inline struct rb_node* 406438727786SOjaswin Mujoo ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node) 406538727786SOjaswin Mujoo { 406638727786SOjaswin Mujoo if (new_start < cur_start) 406738727786SOjaswin Mujoo return node->rb_left; 406838727786SOjaswin Mujoo else 406938727786SOjaswin Mujoo return node->rb_right; 407038727786SOjaswin Mujoo } 407138727786SOjaswin Mujoo 40727692094aSOjaswin Mujoo static inline void 40737692094aSOjaswin Mujoo ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, 40747692094aSOjaswin Mujoo ext4_lblk_t start, ext4_lblk_t end) 40757692094aSOjaswin Mujoo { 40767692094aSOjaswin Mujoo struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 40777692094aSOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 40787692094aSOjaswin Mujoo struct ext4_prealloc_space *tmp_pa; 40797692094aSOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end; 408038727786SOjaswin Mujoo struct rb_node *iter; 40817692094aSOjaswin Mujoo 408238727786SOjaswin Mujoo read_lock(&ei->i_prealloc_lock); 408338727786SOjaswin Mujoo for (iter = ei->i_prealloc_node.rb_node; iter; 408438727786SOjaswin Mujoo iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) { 408538727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 408638727786SOjaswin Mujoo pa_node.inode_node); 40877692094aSOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart; 40887692094aSOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 40897692094aSOjaswin Mujoo 409038727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 409138727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0) 40927692094aSOjaswin Mujoo BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start)); 40937692094aSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 40947692094aSOjaswin Mujoo } 409538727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock); 40967692094aSOjaswin Mujoo } 40977692094aSOjaswin Mujoo 4098c9de560dSAlex Tomas /* 40990830344cSOjaswin Mujoo * Given an allocation context "ac" and a range "start", "end", check 41000830344cSOjaswin Mujoo * and adjust boundaries if the range overlaps with any of the existing 41010830344cSOjaswin Mujoo * preallocatoins stored in the corresponding inode of the allocation context. 
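 * A hypothetical example (values assumed for illustration): if the
 * normalized range is logical blocks [1000, 2000) and the inode already
 * holds an undeleted PA covering [900, 1100) to its left and another PA
 * starting at block 2500 to its right, step 4 below moves start up to
 * 1100 and leaves end at 2000, so the adjusted range overlaps neither
 * neighbor.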
41020830344cSOjaswin Mujoo * 41030830344cSOjaswin Mujoo * Parameters: 41040830344cSOjaswin Mujoo * ac allocation context 41050830344cSOjaswin Mujoo * start start of the new range 41060830344cSOjaswin Mujoo * end end of the new range 41070830344cSOjaswin Mujoo */ 41080830344cSOjaswin Mujoo static inline void 41090830344cSOjaswin Mujoo ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, 41100830344cSOjaswin Mujoo ext4_lblk_t *start, ext4_lblk_t *end) 41110830344cSOjaswin Mujoo { 41120830344cSOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 41130830344cSOjaswin Mujoo struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 411438727786SOjaswin Mujoo struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL; 411538727786SOjaswin Mujoo struct rb_node *iter; 41160830344cSOjaswin Mujoo ext4_lblk_t new_start, new_end; 411738727786SOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1; 41180830344cSOjaswin Mujoo 41190830344cSOjaswin Mujoo new_start = *start; 41200830344cSOjaswin Mujoo new_end = *end; 41210830344cSOjaswin Mujoo 412238727786SOjaswin Mujoo /* 412338727786SOjaswin Mujoo * Adjust the normalized range so that it doesn't overlap with any 412438727786SOjaswin Mujoo * existing preallocated blocks(PAs). Make sure to hold the rbtree lock 412538727786SOjaswin Mujoo * so it doesn't change underneath us. 412638727786SOjaswin Mujoo */ 412738727786SOjaswin Mujoo read_lock(&ei->i_prealloc_lock); 41280830344cSOjaswin Mujoo 412938727786SOjaswin Mujoo /* Step 1: find any one immediate neighboring PA of the normalized range */ 413038727786SOjaswin Mujoo for (iter = ei->i_prealloc_node.rb_node; iter; 413138727786SOjaswin Mujoo iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 413238727786SOjaswin Mujoo tmp_pa_start, iter)) { 413338727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 413438727786SOjaswin Mujoo pa_node.inode_node); 41350830344cSOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart; 41360830344cSOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 41370830344cSOjaswin Mujoo 41380830344cSOjaswin Mujoo /* PA must not overlap original request */ 413938727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 414038727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0) 41410830344cSOjaswin Mujoo BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 41420830344cSOjaswin Mujoo ac->ac_o_ex.fe_logical < tmp_pa_start)); 41430830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 41440830344cSOjaswin Mujoo } 41450830344cSOjaswin Mujoo 414638727786SOjaswin Mujoo /* 414738727786SOjaswin Mujoo * Step 2: check if the found PA is left or right neighbor and 414838727786SOjaswin Mujoo * get the other neighbor 414938727786SOjaswin Mujoo */ 415038727786SOjaswin Mujoo if (tmp_pa) { 415138727786SOjaswin Mujoo if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 415238727786SOjaswin Mujoo struct rb_node *tmp; 415338727786SOjaswin Mujoo 415438727786SOjaswin Mujoo left_pa = tmp_pa; 415538727786SOjaswin Mujoo tmp = rb_next(&left_pa->pa_node.inode_node); 415638727786SOjaswin Mujoo if (tmp) { 415738727786SOjaswin Mujoo right_pa = rb_entry(tmp, 415838727786SOjaswin Mujoo struct ext4_prealloc_space, 415938727786SOjaswin Mujoo pa_node.inode_node); 416038727786SOjaswin Mujoo } 416138727786SOjaswin Mujoo } else { 416238727786SOjaswin Mujoo struct rb_node *tmp; 416338727786SOjaswin Mujoo 416438727786SOjaswin Mujoo right_pa = tmp_pa; 416538727786SOjaswin Mujoo tmp = rb_prev(&right_pa->pa_node.inode_node); 416638727786SOjaswin Mujoo 
if (tmp) { 416738727786SOjaswin Mujoo left_pa = rb_entry(tmp, 416838727786SOjaswin Mujoo struct ext4_prealloc_space, 416938727786SOjaswin Mujoo pa_node.inode_node); 417038727786SOjaswin Mujoo } 417138727786SOjaswin Mujoo } 417238727786SOjaswin Mujoo } 417338727786SOjaswin Mujoo 417438727786SOjaswin Mujoo /* Step 3: get the non deleted neighbors */ 417538727786SOjaswin Mujoo if (left_pa) { 417638727786SOjaswin Mujoo for (iter = &left_pa->pa_node.inode_node;; 417738727786SOjaswin Mujoo iter = rb_prev(iter)) { 417838727786SOjaswin Mujoo if (!iter) { 417938727786SOjaswin Mujoo left_pa = NULL; 418038727786SOjaswin Mujoo break; 418138727786SOjaswin Mujoo } 418238727786SOjaswin Mujoo 418338727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 418438727786SOjaswin Mujoo pa_node.inode_node); 418538727786SOjaswin Mujoo left_pa = tmp_pa; 418638727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 418738727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0) { 418838727786SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 418938727786SOjaswin Mujoo break; 41900830344cSOjaswin Mujoo } 41910830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 41920830344cSOjaswin Mujoo } 419338727786SOjaswin Mujoo } 419438727786SOjaswin Mujoo 419538727786SOjaswin Mujoo if (right_pa) { 419638727786SOjaswin Mujoo for (iter = &right_pa->pa_node.inode_node;; 419738727786SOjaswin Mujoo iter = rb_next(iter)) { 419838727786SOjaswin Mujoo if (!iter) { 419938727786SOjaswin Mujoo right_pa = NULL; 420038727786SOjaswin Mujoo break; 420138727786SOjaswin Mujoo } 420238727786SOjaswin Mujoo 420338727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 420438727786SOjaswin Mujoo pa_node.inode_node); 420538727786SOjaswin Mujoo right_pa = tmp_pa; 420638727786SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 420738727786SOjaswin Mujoo if (tmp_pa->pa_deleted == 0) { 420838727786SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 420938727786SOjaswin Mujoo break; 421038727786SOjaswin Mujoo } 421138727786SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 421238727786SOjaswin Mujoo } 421338727786SOjaswin Mujoo } 421438727786SOjaswin Mujoo 421538727786SOjaswin Mujoo if (left_pa) { 421638727786SOjaswin Mujoo left_pa_end = 421738727786SOjaswin Mujoo left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len); 421838727786SOjaswin Mujoo BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 421938727786SOjaswin Mujoo } 422038727786SOjaswin Mujoo 422138727786SOjaswin Mujoo if (right_pa) { 422238727786SOjaswin Mujoo right_pa_start = right_pa->pa_lstart; 422338727786SOjaswin Mujoo BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 422438727786SOjaswin Mujoo } 422538727786SOjaswin Mujoo 422638727786SOjaswin Mujoo /* Step 4: trim our normalized range to not overlap with the neighbors */ 422738727786SOjaswin Mujoo if (left_pa) { 422838727786SOjaswin Mujoo if (left_pa_end > new_start) 422938727786SOjaswin Mujoo new_start = left_pa_end; 423038727786SOjaswin Mujoo } 423138727786SOjaswin Mujoo 423238727786SOjaswin Mujoo if (right_pa) { 423338727786SOjaswin Mujoo if (right_pa_start < new_end) 423438727786SOjaswin Mujoo new_end = right_pa_start; 423538727786SOjaswin Mujoo } 423638727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock); 42370830344cSOjaswin Mujoo 42380830344cSOjaswin Mujoo /* XXX: extra loop to check we really don't overlap preallocations */ 42390830344cSOjaswin Mujoo ext4_mb_pa_assert_overlap(ac, new_start, new_end); 42400830344cSOjaswin Mujoo 42410830344cSOjaswin Mujoo *start = new_start; 42420830344cSOjaswin Mujoo *end = new_end; 42430830344cSOjaswin 
Mujoo } 42440830344cSOjaswin Mujoo 42450830344cSOjaswin Mujoo /* 4246c9de560dSAlex Tomas * Normalization means making request better in terms of 4247c9de560dSAlex Tomas * size and alignment 4248c9de560dSAlex Tomas */ 42494ddfef7bSEric Sandeen static noinline_for_stack void 42504ddfef7bSEric Sandeen ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4251c9de560dSAlex Tomas struct ext4_allocation_request *ar) 4252c9de560dSAlex Tomas { 425353accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4254b07ffe69SKemeng Shi struct ext4_super_block *es = sbi->s_es; 4255c9de560dSAlex Tomas int bsbits, max; 4256c9de560dSAlex Tomas ext4_lblk_t end; 42571592d2c5SCurt Wohlgemuth loff_t size, start_off; 42581592d2c5SCurt Wohlgemuth loff_t orig_size __maybe_unused; 42595a0790c2SAndi Kleen ext4_lblk_t start; 4260c9de560dSAlex Tomas 4261c9de560dSAlex Tomas /* do normalize only data requests, metadata requests 4262c9de560dSAlex Tomas do not need preallocation */ 4263c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4264c9de560dSAlex Tomas return; 4265c9de560dSAlex Tomas 4266c9de560dSAlex Tomas /* sometime caller may want exact blocks */ 4267c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4268c9de560dSAlex Tomas return; 4269c9de560dSAlex Tomas 4270c9de560dSAlex Tomas /* caller may indicate that preallocation isn't 4271c9de560dSAlex Tomas * required (it's a tail, for example) */ 4272c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4273c9de560dSAlex Tomas return; 4274c9de560dSAlex Tomas 4275c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4276c9de560dSAlex Tomas ext4_mb_normalize_group_request(ac); 4277c9de560dSAlex Tomas return ; 4278c9de560dSAlex Tomas } 4279c9de560dSAlex Tomas 4280c9de560dSAlex Tomas bsbits = ac->ac_sb->s_blocksize_bits; 4281c9de560dSAlex Tomas 4282c9de560dSAlex Tomas /* first, let's learn actual file size 4283c9de560dSAlex Tomas * given current request is allocated */ 428453accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4285c9de560dSAlex Tomas size = size << bsbits; 4286c9de560dSAlex Tomas if (size < i_size_read(ac->ac_inode)) 4287c9de560dSAlex Tomas size = i_size_read(ac->ac_inode); 42885a0790c2SAndi Kleen orig_size = size; 4289c9de560dSAlex Tomas 42901930479cSValerie Clement /* max size of free chunks */ 42911930479cSValerie Clement max = 2 << bsbits; 4292c9de560dSAlex Tomas 42931930479cSValerie Clement #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 42941930479cSValerie Clement (req <= (size) || max <= (chunk_size)) 4295c9de560dSAlex Tomas 4296c9de560dSAlex Tomas /* first, try to predict filesize */ 4297c9de560dSAlex Tomas /* XXX: should this table be tunable? 
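   (Example with assumed sizes, not from this code: a request whose
    predicted file size is 300k falls into the "size <= 512 * 1024"
    bucket below and is rounded up to 512k before being converted back
    into blocks.)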
*/ 4298c9de560dSAlex Tomas start_off = 0; 4299c9de560dSAlex Tomas if (size <= 16 * 1024) { 4300c9de560dSAlex Tomas size = 16 * 1024; 4301c9de560dSAlex Tomas } else if (size <= 32 * 1024) { 4302c9de560dSAlex Tomas size = 32 * 1024; 4303c9de560dSAlex Tomas } else if (size <= 64 * 1024) { 4304c9de560dSAlex Tomas size = 64 * 1024; 4305c9de560dSAlex Tomas } else if (size <= 128 * 1024) { 4306c9de560dSAlex Tomas size = 128 * 1024; 4307c9de560dSAlex Tomas } else if (size <= 256 * 1024) { 4308c9de560dSAlex Tomas size = 256 * 1024; 4309c9de560dSAlex Tomas } else if (size <= 512 * 1024) { 4310c9de560dSAlex Tomas size = 512 * 1024; 4311c9de560dSAlex Tomas } else if (size <= 1024 * 1024) { 4312c9de560dSAlex Tomas size = 1024 * 1024; 43131930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4314c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 43151930479cSValerie Clement (21 - bsbits)) << 21; 43161930479cSValerie Clement size = 2 * 1024 * 1024; 43171930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4318c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4319c9de560dSAlex Tomas (22 - bsbits)) << 22; 4320c9de560dSAlex Tomas size = 4 * 1024 * 1024; 4321b3916da0SKemeng Shi } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), 43221930479cSValerie Clement (8<<20)>>bsbits, max, 8 * 1024)) { 4323c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4324c9de560dSAlex Tomas (23 - bsbits)) << 23; 4325c9de560dSAlex Tomas size = 8 * 1024 * 1024; 4326c9de560dSAlex Tomas } else { 4327c9de560dSAlex Tomas start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 432891a48aafSKemeng Shi size = (loff_t) EXT4_C2B(sbi, 4329b27b1535SXiaoguang Wang ac->ac_o_ex.fe_len) << bsbits; 4330c9de560dSAlex Tomas } 43315a0790c2SAndi Kleen size = size >> bsbits; 43325a0790c2SAndi Kleen start = start_off >> bsbits; 4333c9de560dSAlex Tomas 4334a08f789dSBaokun Li /* 4335a08f789dSBaokun Li * For tiny groups (smaller than 8MB) the chosen allocation 4336a08f789dSBaokun Li * alignment may be larger than group size. Make sure the 4337a08f789dSBaokun Li * alignment does not move allocation to a different group which 4338a08f789dSBaokun Li * makes mballoc fail assertions later. 4339a08f789dSBaokun Li */ 4340a08f789dSBaokun Li start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4341a08f789dSBaokun Li (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4342a08f789dSBaokun Li 4343c9de560dSAlex Tomas /* don't cover already allocated blocks in selected range */ 4344c9de560dSAlex Tomas if (ar->pleft && start <= ar->lleft) { 4345c9de560dSAlex Tomas size -= ar->lleft + 1 - start; 4346c9de560dSAlex Tomas start = ar->lleft + 1; 4347c9de560dSAlex Tomas } 4348c9de560dSAlex Tomas if (ar->pright && start + size - 1 >= ar->lright) 4349c9de560dSAlex Tomas size -= start + size - ar->lright; 4350c9de560dSAlex Tomas 4351cd648b8aSJan Kara /* 4352cd648b8aSJan Kara * Trim allocation request for filesystems with artificially small 4353cd648b8aSJan Kara * groups. 
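 * For instance (assumed geometry, not from this code): with 1k blocks a
 * group spans 8192 blocks, so a normalized request of 16384 blocks would
 * be clamped to 8192 by the check below.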
4354cd648b8aSJan Kara */ 4355cd648b8aSJan Kara if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4356cd648b8aSJan Kara size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4357cd648b8aSJan Kara 4358c9de560dSAlex Tomas end = start + size; 4359c9de560dSAlex Tomas 43600830344cSOjaswin Mujoo ext4_mb_pa_adjust_overlap(ac, &start, &end); 4361c9de560dSAlex Tomas 4362c9de560dSAlex Tomas size = end - start; 4363c9de560dSAlex Tomas 4364cf4ff938SBaokun Li /* 4365cf4ff938SBaokun Li * In this function "start" and "size" are normalized for better 4366cf4ff938SBaokun Li * alignment and length such that we could preallocate more blocks. 4367cf4ff938SBaokun Li * This normalization is done such that original request of 4368cf4ff938SBaokun Li * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and 4369cf4ff938SBaokun Li * "size" boundaries. 4370cf4ff938SBaokun Li * (Note fe_len can be relaxed since FS block allocation API does not 4371cf4ff938SBaokun Li * provide gurantee on number of contiguous blocks allocation since that 4372cf4ff938SBaokun Li * depends upon free space left, etc). 4373cf4ff938SBaokun Li * In case of inode pa, later we use the allocated blocks 43741221b235SKemeng Shi * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated 4375cf4ff938SBaokun Li * range of goal/best blocks [start, size] to put it at the 4376cf4ff938SBaokun Li * ac_o_ex.fe_logical extent of this inode. 4377cf4ff938SBaokun Li * (See ext4_mb_use_inode_pa() for more details) 4378cf4ff938SBaokun Li */ 4379cf4ff938SBaokun Li if (start + size <= ac->ac_o_ex.fe_logical || 4380c9de560dSAlex Tomas start > ac->ac_o_ex.fe_logical) { 43819d8b9ec4STheodore Ts'o ext4_msg(ac->ac_sb, KERN_ERR, 43829d8b9ec4STheodore Ts'o "start %lu, size %lu, fe_logical %lu", 4383c9de560dSAlex Tomas (unsigned long) start, (unsigned long) size, 4384c9de560dSAlex Tomas (unsigned long) ac->ac_o_ex.fe_logical); 4385dfe076c1SDmitry Monakhov BUG(); 4386c9de560dSAlex Tomas } 4387b5b60778SMaurizio Lombardi BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4388c9de560dSAlex Tomas 4389c9de560dSAlex Tomas /* now prepare goal request */ 4390c9de560dSAlex Tomas 4391c9de560dSAlex Tomas /* XXX: is it better to align blocks WRT to logical 4392c9de560dSAlex Tomas * placement or satisfy big request as is */ 4393c9de560dSAlex Tomas ac->ac_g_ex.fe_logical = start; 439453accfa9STheodore Ts'o ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4395c9de560dSAlex Tomas 4396c9de560dSAlex Tomas /* define goal start in order to merge */ 4397b07ffe69SKemeng Shi if (ar->pright && (ar->lright == (start + size)) && 4398b07ffe69SKemeng Shi ar->pright >= size && 4399b07ffe69SKemeng Shi ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { 4400c9de560dSAlex Tomas /* merge to the right */ 4401c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4402b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group, 4403b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start); 4404c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4405c9de560dSAlex Tomas } 4406b07ffe69SKemeng Shi if (ar->pleft && (ar->lleft + 1 == start) && 4407b07ffe69SKemeng Shi ar->pleft + 1 < ext4_blocks_count(es)) { 4408c9de560dSAlex Tomas /* merge to the left */ 4409c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4410b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group, 4411b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start); 4412c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4413c9de560dSAlex Tomas } 4414c9de560dSAlex Tomas 4415d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "goal: 
%lld(was %lld) blocks at %u\n", size, 4416d3df1453SRitesh Harjani orig_size, start); 4417c9de560dSAlex Tomas } 4418c9de560dSAlex Tomas 4419c9de560dSAlex Tomas static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4420c9de560dSAlex Tomas { 4421c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4422c9de560dSAlex Tomas 4423a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4424c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_reqs); 4425c9de560dSAlex Tomas atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4426291dae47SCurt Wohlgemuth if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4427c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_success); 4428fdd9a009SOjaswin Mujoo 4429c9de560dSAlex Tomas atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4430fdd9a009SOjaswin Mujoo for (int i=0; i<EXT4_MB_NUM_CRS; i++) { 4431fdd9a009SOjaswin Mujoo atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); 4432fdd9a009SOjaswin Mujoo } 4433fdd9a009SOjaswin Mujoo 4434a6c75eafSHarshad Shirwadkar atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4435c9de560dSAlex Tomas if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4436c9de560dSAlex Tomas ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4437c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_goals); 44383ef5d263SOjaswin Mujoo if (ac->ac_f_ex.fe_len == ac->ac_g_ex.fe_len) 44393ef5d263SOjaswin Mujoo atomic_inc(&sbi->s_bal_len_goals); 4440c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan) 4441c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_breaks); 4442c9de560dSAlex Tomas } 4443c9de560dSAlex Tomas 4444296c355cSTheodore Ts'o if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4445296c355cSTheodore Ts'o trace_ext4_mballoc_alloc(ac); 4446296c355cSTheodore Ts'o else 4447296c355cSTheodore Ts'o trace_ext4_mballoc_prealloc(ac); 4448c9de560dSAlex Tomas } 4449c9de560dSAlex Tomas 4450c9de560dSAlex Tomas /* 4451b844167eSCurt Wohlgemuth * Called on failure; free up any blocks from the inode PA for this 4452b844167eSCurt Wohlgemuth * context. We don't need this for MB_GROUP_PA because we only change 4453b844167eSCurt Wohlgemuth * pa_free in ext4_mb_release_context(), but on failure, we've already 4454b844167eSCurt Wohlgemuth * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4455b844167eSCurt Wohlgemuth */ 4456b844167eSCurt Wohlgemuth static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4457b844167eSCurt Wohlgemuth { 4458b844167eSCurt Wohlgemuth struct ext4_prealloc_space *pa = ac->ac_pa; 445986f0afd4STheodore Ts'o struct ext4_buddy e4b; 446086f0afd4STheodore Ts'o int err; 4461b844167eSCurt Wohlgemuth 446286f0afd4STheodore Ts'o if (pa == NULL) { 4463c99d1e6eSTheodore Ts'o if (ac->ac_f_ex.fe_len == 0) 4464c99d1e6eSTheodore Ts'o return; 446586f0afd4STheodore Ts'o err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 446619b8b035STheodore Ts'o if (WARN_RATELIMIT(err, 446719b8b035STheodore Ts'o "ext4: mb_load_buddy failed (%d)", err)) 446886f0afd4STheodore Ts'o /* 446986f0afd4STheodore Ts'o * This should never happen since we pin the 447086f0afd4STheodore Ts'o * pages in the ext4_allocation_context so 447186f0afd4STheodore Ts'o * ext4_mb_load_buddy() should never fail. 
447286f0afd4STheodore Ts'o */ 447386f0afd4STheodore Ts'o return; 447486f0afd4STheodore Ts'o ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 447586f0afd4STheodore Ts'o mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 447686f0afd4STheodore Ts'o ac->ac_f_ex.fe_len); 447786f0afd4STheodore Ts'o ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4478c99d1e6eSTheodore Ts'o ext4_mb_unload_buddy(&e4b); 447986f0afd4STheodore Ts'o return; 448086f0afd4STheodore Ts'o } 448136cb0f52SKemeng Shi if (pa->pa_type == MB_INODE_PA) { 448236cb0f52SKemeng Shi spin_lock(&pa->pa_lock); 4483400db9d3SZheng Liu pa->pa_free += ac->ac_b_ex.fe_len; 448436cb0f52SKemeng Shi spin_unlock(&pa->pa_lock); 448536cb0f52SKemeng Shi } 4486b844167eSCurt Wohlgemuth } 4487b844167eSCurt Wohlgemuth 4488b844167eSCurt Wohlgemuth /* 4489c9de560dSAlex Tomas * use blocks preallocated to inode 4490c9de560dSAlex Tomas */ 4491c9de560dSAlex Tomas static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4492c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4493c9de560dSAlex Tomas { 449453accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4495c9de560dSAlex Tomas ext4_fsblk_t start; 4496c9de560dSAlex Tomas ext4_fsblk_t end; 4497c9de560dSAlex Tomas int len; 4498c9de560dSAlex Tomas 4499c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4500c9de560dSAlex Tomas start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 450153accfa9STheodore Ts'o end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 450253accfa9STheodore Ts'o start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 450353accfa9STheodore Ts'o len = EXT4_NUM_B2C(sbi, end - start); 4504c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4505c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4506c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4507c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4508c9de560dSAlex Tomas ac->ac_pa = pa; 4509c9de560dSAlex Tomas 4510c9de560dSAlex Tomas BUG_ON(start < pa->pa_pstart); 451153accfa9STheodore Ts'o BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4512c9de560dSAlex Tomas BUG_ON(pa->pa_free < len); 451393cdf49fSOjaswin Mujoo BUG_ON(ac->ac_b_ex.fe_len <= 0); 4514c9de560dSAlex Tomas pa->pa_free -= len; 4515c9de560dSAlex Tomas 4516d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4517c9de560dSAlex Tomas } 4518c9de560dSAlex Tomas 4519c9de560dSAlex Tomas /* 4520c9de560dSAlex Tomas * use blocks preallocated to locality group 4521c9de560dSAlex Tomas */ 4522c9de560dSAlex Tomas static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4523c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4524c9de560dSAlex Tomas { 452503cddb80SAneesh Kumar K.V unsigned int len = ac->ac_o_ex.fe_len; 45266be2ded1SAneesh Kumar K.V 4527c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4528c9de560dSAlex Tomas &ac->ac_b_ex.fe_group, 4529c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4530c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4531c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4532c9de560dSAlex Tomas ac->ac_pa = pa; 4533c9de560dSAlex Tomas 45341221b235SKemeng Shi /* we don't correct pa_pstart or pa_len here to avoid 453526346ff6SAneesh Kumar K.V * possible race when the group is being loaded concurrently 4536c9de560dSAlex Tomas * instead we correct pa later, after blocks are marked 453726346ff6SAneesh Kumar K.V * in on-disk bitmap -- see ext4_mb_release_context() 453826346ff6SAneesh Kumar K.V * Other CPUs are 
prevented from allocating from this pa by lg_mutex 4539c9de560dSAlex Tomas */ 4540d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 45411afdc588SKemeng Shi pa->pa_lstart, len, pa); 4542c9de560dSAlex Tomas } 4543c9de560dSAlex Tomas 4544c9de560dSAlex Tomas /* 45455e745b04SAneesh Kumar K.V * Return the prealloc space that have minimal distance 45465e745b04SAneesh Kumar K.V * from the goal block. @cpa is the prealloc 45475e745b04SAneesh Kumar K.V * space that is having currently known minimal distance 45485e745b04SAneesh Kumar K.V * from the goal block. 45495e745b04SAneesh Kumar K.V */ 45505e745b04SAneesh Kumar K.V static struct ext4_prealloc_space * 45515e745b04SAneesh Kumar K.V ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 45525e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa, 45535e745b04SAneesh Kumar K.V struct ext4_prealloc_space *cpa) 45545e745b04SAneesh Kumar K.V { 45555e745b04SAneesh Kumar K.V ext4_fsblk_t cur_distance, new_distance; 45565e745b04SAneesh Kumar K.V 45575e745b04SAneesh Kumar K.V if (cpa == NULL) { 45585e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 45595e745b04SAneesh Kumar K.V return pa; 45605e745b04SAneesh Kumar K.V } 456179211c8eSAndrew Morton cur_distance = abs(goal_block - cpa->pa_pstart); 456279211c8eSAndrew Morton new_distance = abs(goal_block - pa->pa_pstart); 45635e745b04SAneesh Kumar K.V 45645a54b2f1SColy Li if (cur_distance <= new_distance) 45655e745b04SAneesh Kumar K.V return cpa; 45665e745b04SAneesh Kumar K.V 45675e745b04SAneesh Kumar K.V /* drop the previous reference */ 45685e745b04SAneesh Kumar K.V atomic_dec(&cpa->pa_count); 45695e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 45705e745b04SAneesh Kumar K.V return pa; 45715e745b04SAneesh Kumar K.V } 45725e745b04SAneesh Kumar K.V 45735e745b04SAneesh Kumar K.V /* 45741eff5904SKemeng Shi * check if found pa meets EXT4_MB_HINT_GOAL_ONLY 45751eff5904SKemeng Shi */ 45761eff5904SKemeng Shi static bool 45771eff5904SKemeng Shi ext4_mb_pa_goal_check(struct ext4_allocation_context *ac, 45781eff5904SKemeng Shi struct ext4_prealloc_space *pa) 45791eff5904SKemeng Shi { 45801eff5904SKemeng Shi struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 45811eff5904SKemeng Shi ext4_fsblk_t start; 45821eff5904SKemeng Shi 45831eff5904SKemeng Shi if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))) 45841eff5904SKemeng Shi return true; 45851eff5904SKemeng Shi 45861eff5904SKemeng Shi /* 45871eff5904SKemeng Shi * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted 45881eff5904SKemeng Shi * in ext4_mb_normalize_request and will keep same with ac_o_ex 45891eff5904SKemeng Shi * from ext4_mb_initialize_context. Choose ac_g_ex here to keep 45901eff5904SKemeng Shi * consistent with ext4_mb_find_by_goal. 
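 *
 * Worked example (illustrative numbers, assuming one block per
 * cluster): a pa mapping logical blocks [100, 132) to physical blocks
 * starting at 5000 can only satisfy a GOAL_ONLY request whose goal
 * logical block is 110 if the goal physical block is exactly
 * 5000 + (110 - 100) = 5010 and the requested length fits within the
 * 32 - 10 = 22 blocks left between that offset and the end of the pa.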
45911eff5904SKemeng Shi */ 45921eff5904SKemeng Shi start = pa->pa_pstart + 45931eff5904SKemeng Shi (ac->ac_g_ex.fe_logical - pa->pa_lstart); 45941eff5904SKemeng Shi if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) 45951eff5904SKemeng Shi return false; 45961eff5904SKemeng Shi 45971eff5904SKemeng Shi if (ac->ac_g_ex.fe_len > pa->pa_len - 45981eff5904SKemeng Shi EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) 45991eff5904SKemeng Shi return false; 46001eff5904SKemeng Shi 46011eff5904SKemeng Shi return true; 46021eff5904SKemeng Shi } 46031eff5904SKemeng Shi 46041eff5904SKemeng Shi /* 4605c9de560dSAlex Tomas * search goal blocks in preallocated space 4606c9de560dSAlex Tomas */ 46074fca8f07SRitesh Harjani static noinline_for_stack bool 46084ddfef7bSEric Sandeen ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4609c9de560dSAlex Tomas { 461053accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 46116be2ded1SAneesh Kumar K.V int order, i; 4612c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4613c9de560dSAlex Tomas struct ext4_locality_group *lg; 4614bcf43499SOjaswin Mujoo struct ext4_prealloc_space *tmp_pa, *cpa = NULL; 4615bcf43499SOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end; 461638727786SOjaswin Mujoo struct rb_node *iter; 46175e745b04SAneesh Kumar K.V ext4_fsblk_t goal_block; 4618c9de560dSAlex Tomas 4619c9de560dSAlex Tomas /* only data can be preallocated */ 4620c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 46214fca8f07SRitesh Harjani return false; 4622c9de560dSAlex Tomas 4623c9de560dSAlex Tomas /* first, try per-file preallocation */ 462438727786SOjaswin Mujoo read_lock(&ei->i_prealloc_lock); 462538727786SOjaswin Mujoo for (iter = ei->i_prealloc_node.rb_node; iter; 462638727786SOjaswin Mujoo iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 462738727786SOjaswin Mujoo tmp_pa_start, iter)) { 462838727786SOjaswin Mujoo tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 462938727786SOjaswin Mujoo pa_node.inode_node); 4630c9de560dSAlex Tomas 4631c9de560dSAlex Tomas /* all fields in this condition don't change, 4632c9de560dSAlex Tomas * so we can skip locking for them */ 4633bcf43499SOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart; 4634bcf43499SOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 4635bcf43499SOjaswin Mujoo 463638727786SOjaswin Mujoo /* original request start doesn't lie in this PA */ 4637bcf43499SOjaswin Mujoo if (ac->ac_o_ex.fe_logical < tmp_pa_start || 4638bcf43499SOjaswin Mujoo ac->ac_o_ex.fe_logical >= tmp_pa_end) 4639c9de560dSAlex Tomas continue; 4640c9de560dSAlex Tomas 4641fb0a387dSEric Sandeen /* non-extent files can't have physical blocks past 2^32 */ 464212e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4643bcf43499SOjaswin Mujoo (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4644e86a7182SOjaswin Mujoo EXT4_MAX_BLOCK_FILE_PHYS)) { 4645e86a7182SOjaswin Mujoo /* 4646e86a7182SOjaswin Mujoo * Since PAs don't overlap, we won't find any 4647e86a7182SOjaswin Mujoo * other PA to satisfy this. 
4648e86a7182SOjaswin Mujoo */ 4649e86a7182SOjaswin Mujoo break; 4650e86a7182SOjaswin Mujoo } 4651fb0a387dSEric Sandeen 4652c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4653bcf43499SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 46541eff5904SKemeng Shi if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free && 46551eff5904SKemeng Shi likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { 4656bcf43499SOjaswin Mujoo atomic_inc(&tmp_pa->pa_count); 4657bcf43499SOjaswin Mujoo ext4_mb_use_inode_pa(ac, tmp_pa); 4658bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 465938727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock); 46604fca8f07SRitesh Harjani return true; 4661c9de560dSAlex Tomas } 4662bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 4663c9de560dSAlex Tomas } 466438727786SOjaswin Mujoo read_unlock(&ei->i_prealloc_lock); 4665c9de560dSAlex Tomas 4666c9de560dSAlex Tomas /* can we use group allocation? */ 4667c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 46684fca8f07SRitesh Harjani return false; 4669c9de560dSAlex Tomas 4670c9de560dSAlex Tomas /* inode may have no locality group for some reason */ 4671c9de560dSAlex Tomas lg = ac->ac_lg; 4672c9de560dSAlex Tomas if (lg == NULL) 46734fca8f07SRitesh Harjani return false; 46746be2ded1SAneesh Kumar K.V order = fls(ac->ac_o_ex.fe_len) - 1; 46756be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 46766be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 46776be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 4678c9de560dSAlex Tomas 4679bda00de7SAkinobu Mita goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 46805e745b04SAneesh Kumar K.V /* 46815e745b04SAneesh Kumar K.V * search for the prealloc space that is having 46825e745b04SAneesh Kumar K.V * minimal distance from the goal block. 46835e745b04SAneesh Kumar K.V */ 46846be2ded1SAneesh Kumar K.V for (i = order; i < PREALLOC_TB_SIZE; i++) { 4685c9de560dSAlex Tomas rcu_read_lock(); 4686bcf43499SOjaswin Mujoo list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], 4687a8e38fd3SOjaswin Mujoo pa_node.lg_list) { 4688bcf43499SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 4689bcf43499SOjaswin Mujoo if (tmp_pa->pa_deleted == 0 && 4690bcf43499SOjaswin Mujoo tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { 46915e745b04SAneesh Kumar K.V 46925e745b04SAneesh Kumar K.V cpa = ext4_mb_check_group_pa(goal_block, 4693bcf43499SOjaswin Mujoo tmp_pa, cpa); 46945e745b04SAneesh Kumar K.V } 4695bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 46965e745b04SAneesh Kumar K.V } 46975e745b04SAneesh Kumar K.V rcu_read_unlock(); 46985e745b04SAneesh Kumar K.V } 46995e745b04SAneesh Kumar K.V if (cpa) { 47005e745b04SAneesh Kumar K.V ext4_mb_use_group_pa(ac, cpa); 47014fca8f07SRitesh Harjani return true; 4702c9de560dSAlex Tomas } 47034fca8f07SRitesh Harjani return false; 4704c9de560dSAlex Tomas } 4705c9de560dSAlex Tomas 4706c9de560dSAlex Tomas /* 47077a2fcbf7SAneesh Kumar K.V * the function goes through all block freed in the group 47087a2fcbf7SAneesh Kumar K.V * but not yet committed and marks them used in in-core bitmap. 
47097a2fcbf7SAneesh Kumar K.V * buddy must be generated from this bitmap 4710955ce5f5SAneesh Kumar K.V * Need to be called with the ext4 group lock held 47117a2fcbf7SAneesh Kumar K.V */ 47127a2fcbf7SAneesh Kumar K.V static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 47137a2fcbf7SAneesh Kumar K.V ext4_group_t group) 47147a2fcbf7SAneesh Kumar K.V { 47157a2fcbf7SAneesh Kumar K.V struct rb_node *n; 47167a2fcbf7SAneesh Kumar K.V struct ext4_group_info *grp; 47177a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 47187a2fcbf7SAneesh Kumar K.V 47197a2fcbf7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 47205354b2afSTheodore Ts'o if (!grp) 47215354b2afSTheodore Ts'o return; 47227a2fcbf7SAneesh Kumar K.V n = rb_first(&(grp->bb_free_root)); 47237a2fcbf7SAneesh Kumar K.V 47247a2fcbf7SAneesh Kumar K.V while (n) { 472518aadd47SBobi Jam entry = rb_entry(n, struct ext4_free_data, efd_node); 4726123e3016SRitesh Harjani mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 47277a2fcbf7SAneesh Kumar K.V n = rb_next(n); 47287a2fcbf7SAneesh Kumar K.V } 47297a2fcbf7SAneesh Kumar K.V return; 47307a2fcbf7SAneesh Kumar K.V } 47317a2fcbf7SAneesh Kumar K.V 47327a2fcbf7SAneesh Kumar K.V /* 4733c9de560dSAlex Tomas * the function goes through all preallocation in this group and marks them 4734c9de560dSAlex Tomas * used in in-core bitmap. buddy must be generated from this bitmap 4735955ce5f5SAneesh Kumar K.V * Need to be called with ext4 group lock held 4736c9de560dSAlex Tomas */ 4737089ceeccSEric Sandeen static noinline_for_stack 4738089ceeccSEric Sandeen void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4739c9de560dSAlex Tomas ext4_group_t group) 4740c9de560dSAlex Tomas { 4741c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4742c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4743c9de560dSAlex Tomas struct list_head *cur; 4744c9de560dSAlex Tomas ext4_group_t groupnr; 4745c9de560dSAlex Tomas ext4_grpblk_t start; 4746c9de560dSAlex Tomas int preallocated = 0; 4747c9de560dSAlex Tomas int len; 4748c9de560dSAlex Tomas 47495354b2afSTheodore Ts'o if (!grp) 47505354b2afSTheodore Ts'o return; 47515354b2afSTheodore Ts'o 4752c9de560dSAlex Tomas /* all form of preallocation discards first load group, 4753c9de560dSAlex Tomas * so the only competing code is preallocation use. 
4754c9de560dSAlex Tomas * we don't need any locking here 4755c9de560dSAlex Tomas * notice we do NOT ignore preallocations with pa_deleted 4756c9de560dSAlex Tomas * otherwise we could leave used blocks available for 4757c9de560dSAlex Tomas * allocation in buddy when concurrent ext4_mb_put_pa() 4758c9de560dSAlex Tomas * is dropping preallocation 4759c9de560dSAlex Tomas */ 4760c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 4761c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4762c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4763c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4764c9de560dSAlex Tomas &groupnr, &start); 4765c9de560dSAlex Tomas len = pa->pa_len; 4766c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4767c9de560dSAlex Tomas if (unlikely(len == 0)) 4768c9de560dSAlex Tomas continue; 4769c9de560dSAlex Tomas BUG_ON(groupnr != group); 4770123e3016SRitesh Harjani mb_set_bits(bitmap, start, len); 4771c9de560dSAlex Tomas preallocated += len; 4772c9de560dSAlex Tomas } 4773d3df1453SRitesh Harjani mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4774c9de560dSAlex Tomas } 4775c9de560dSAlex Tomas 477627bc446eSbrookxu static void ext4_mb_mark_pa_deleted(struct super_block *sb, 477727bc446eSbrookxu struct ext4_prealloc_space *pa) 477827bc446eSbrookxu { 477927bc446eSbrookxu struct ext4_inode_info *ei; 478027bc446eSbrookxu 478127bc446eSbrookxu if (pa->pa_deleted) { 478227bc446eSbrookxu ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 478327bc446eSbrookxu pa->pa_type, pa->pa_pstart, pa->pa_lstart, 478427bc446eSbrookxu pa->pa_len); 478527bc446eSbrookxu return; 478627bc446eSbrookxu } 478727bc446eSbrookxu 478827bc446eSbrookxu pa->pa_deleted = 1; 478927bc446eSbrookxu 479027bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 479127bc446eSbrookxu ei = EXT4_I(pa->pa_inode); 479227bc446eSbrookxu atomic_dec(&ei->i_prealloc_active); 479327bc446eSbrookxu } 479427bc446eSbrookxu } 479527bc446eSbrookxu 479682089725SOjaswin Mujoo static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 4797c9de560dSAlex Tomas { 479882089725SOjaswin Mujoo BUG_ON(!pa); 47994e8d2139SJunho Ryu BUG_ON(atomic_read(&pa->pa_count)); 48004e8d2139SJunho Ryu BUG_ON(pa->pa_deleted == 0); 4801c9de560dSAlex Tomas kmem_cache_free(ext4_pspace_cachep, pa); 4802c9de560dSAlex Tomas } 4803c9de560dSAlex Tomas 480482089725SOjaswin Mujoo static void ext4_mb_pa_callback(struct rcu_head *head) 480582089725SOjaswin Mujoo { 480682089725SOjaswin Mujoo struct ext4_prealloc_space *pa; 480782089725SOjaswin Mujoo 480882089725SOjaswin Mujoo pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 480982089725SOjaswin Mujoo ext4_mb_pa_free(pa); 481082089725SOjaswin Mujoo } 481182089725SOjaswin Mujoo 4812c9de560dSAlex Tomas /* 4813c9de560dSAlex Tomas * drops a reference to preallocated space descriptor 4814c9de560dSAlex Tomas * if this was the last reference and the space is consumed 4815c9de560dSAlex Tomas */ 4816c9de560dSAlex Tomas static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4817c9de560dSAlex Tomas struct super_block *sb, struct ext4_prealloc_space *pa) 4818c9de560dSAlex Tomas { 4819a9df9a49STheodore Ts'o ext4_group_t grp; 4820d33a1976SEric Sandeen ext4_fsblk_t grp_blk; 482138727786SOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4822c9de560dSAlex Tomas 4823c9de560dSAlex Tomas /* in this short window concurrent discard can set pa_deleted */ 4824c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 
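	/*
	 * Drop our reference; if someone else still holds a reference, or
	 * the pa still has free clusters left to hand out, it must survive.
	 */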
48254e8d2139SJunho Ryu if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 48264e8d2139SJunho Ryu spin_unlock(&pa->pa_lock); 48274e8d2139SJunho Ryu return; 48284e8d2139SJunho Ryu } 48294e8d2139SJunho Ryu 4830c9de560dSAlex Tomas if (pa->pa_deleted == 1) { 4831c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4832c9de560dSAlex Tomas return; 4833c9de560dSAlex Tomas } 4834c9de560dSAlex Tomas 483527bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4836c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4837c9de560dSAlex Tomas 4838d33a1976SEric Sandeen grp_blk = pa->pa_pstart; 4839cc0fb9adSAneesh Kumar K.V /* 4840cc0fb9adSAneesh Kumar K.V * If doing group-based preallocation, pa_pstart may be in the 4841cc0fb9adSAneesh Kumar K.V * next group when pa is used up 4842cc0fb9adSAneesh Kumar K.V */ 4843cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 4844d33a1976SEric Sandeen grp_blk--; 4845d33a1976SEric Sandeen 4846bd86298eSLukas Czerner grp = ext4_get_group_number(sb, grp_blk); 4847c9de560dSAlex Tomas 4848c9de560dSAlex Tomas /* 4849c9de560dSAlex Tomas * possible race: 4850c9de560dSAlex Tomas * 4851c9de560dSAlex Tomas * P1 (buddy init) P2 (regular allocation) 4852c9de560dSAlex Tomas * find block B in PA 4853c9de560dSAlex Tomas * copy on-disk bitmap to buddy 4854c9de560dSAlex Tomas * mark B in on-disk bitmap 4855c9de560dSAlex Tomas * drop PA from group 4856c9de560dSAlex Tomas * mark all PAs in buddy 4857c9de560dSAlex Tomas * 4858c9de560dSAlex Tomas * thus, P1 initializes buddy with B available. to prevent this 4859c9de560dSAlex Tomas * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 4860c9de560dSAlex Tomas * against that pair 4861c9de560dSAlex Tomas */ 4862c9de560dSAlex Tomas ext4_lock_group(sb, grp); 4863c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4864c9de560dSAlex Tomas ext4_unlock_group(sb, grp); 4865c9de560dSAlex Tomas 4866a8e38fd3SOjaswin Mujoo if (pa->pa_type == MB_INODE_PA) { 486738727786SOjaswin Mujoo write_lock(pa->pa_node_lock.inode_lock); 486838727786SOjaswin Mujoo rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 486938727786SOjaswin Mujoo write_unlock(pa->pa_node_lock.inode_lock); 487038727786SOjaswin Mujoo ext4_mb_pa_free(pa); 4871a8e38fd3SOjaswin Mujoo } else { 4872a8e38fd3SOjaswin Mujoo spin_lock(pa->pa_node_lock.lg_lock); 4873a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 4874a8e38fd3SOjaswin Mujoo spin_unlock(pa->pa_node_lock.lg_lock); 487538727786SOjaswin Mujoo call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 487638727786SOjaswin Mujoo } 4877a8e38fd3SOjaswin Mujoo } 4878c9de560dSAlex Tomas 487938727786SOjaswin Mujoo static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new) 488038727786SOjaswin Mujoo { 488138727786SOjaswin Mujoo struct rb_node **iter = &root->rb_node, *parent = NULL; 488238727786SOjaswin Mujoo struct ext4_prealloc_space *iter_pa, *new_pa; 488338727786SOjaswin Mujoo ext4_lblk_t iter_start, new_start; 488438727786SOjaswin Mujoo 488538727786SOjaswin Mujoo while (*iter) { 488638727786SOjaswin Mujoo iter_pa = rb_entry(*iter, struct ext4_prealloc_space, 488738727786SOjaswin Mujoo pa_node.inode_node); 488838727786SOjaswin Mujoo new_pa = rb_entry(new, struct ext4_prealloc_space, 488938727786SOjaswin Mujoo pa_node.inode_node); 489038727786SOjaswin Mujoo iter_start = iter_pa->pa_lstart; 489138727786SOjaswin Mujoo new_start = new_pa->pa_lstart; 489238727786SOjaswin Mujoo 489338727786SOjaswin Mujoo parent = *iter; 489438727786SOjaswin Mujoo if (new_start < iter_start) 489538727786SOjaswin Mujoo iter = 
&((*iter)->rb_left); 489638727786SOjaswin Mujoo else 489738727786SOjaswin Mujoo iter = &((*iter)->rb_right); 489838727786SOjaswin Mujoo } 489938727786SOjaswin Mujoo 490038727786SOjaswin Mujoo rb_link_node(new, parent, iter); 490138727786SOjaswin Mujoo rb_insert_color(new, root); 4902c9de560dSAlex Tomas } 4903c9de560dSAlex Tomas 4904c9de560dSAlex Tomas /* 4905c9de560dSAlex Tomas * creates new preallocated space for given inode 4906c9de560dSAlex Tomas */ 490753f86b17SRitesh Harjani static noinline_for_stack void 49084ddfef7bSEric Sandeen ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 4909c9de560dSAlex Tomas { 4910c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 491153accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 4912c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4913c9de560dSAlex Tomas struct ext4_group_info *grp; 4914c9de560dSAlex Tomas struct ext4_inode_info *ei; 4915c9de560dSAlex Tomas 4916c9de560dSAlex Tomas /* preallocate only when found space is larger than requested */ 4917c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4918c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4919c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 492053f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4921c9de560dSAlex Tomas 492253f86b17SRitesh Harjani pa = ac->ac_pa; 4923c9de560dSAlex Tomas 4924c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 492593cdf49fSOjaswin Mujoo int new_bex_start; 492693cdf49fSOjaswin Mujoo int new_bex_end; 4927c9de560dSAlex Tomas 4928c9de560dSAlex Tomas /* we can't allocate as much as normalizer wants. 4929c9de560dSAlex Tomas * so, found space must get proper lstart 4930c9de560dSAlex Tomas * to cover original request */ 4931c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 4932c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4933c9de560dSAlex Tomas 493493cdf49fSOjaswin Mujoo /* 493593cdf49fSOjaswin Mujoo * Use the below logic for adjusting best extent as it keeps 493693cdf49fSOjaswin Mujoo * fragmentation in check while ensuring logical range of best 493793cdf49fSOjaswin Mujoo * extent doesn't overflow out of goal extent: 493893cdf49fSOjaswin Mujoo * 493993cdf49fSOjaswin Mujoo * 1. Check if best ex can be kept at end of goal and still 494093cdf49fSOjaswin Mujoo * cover original start 494193cdf49fSOjaswin Mujoo * 2. Else, check if best ex can be kept at start of goal and 494293cdf49fSOjaswin Mujoo * still cover original start 494393cdf49fSOjaswin Mujoo * 3. Else, keep the best ex at start of original request.
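 *
 * Worked example (illustrative numbers, one block per cluster): the
 * goal extent covers logical blocks [0, 256), the original request
 * starts at block 200, and only 64 blocks were found. Case 1 places
 * the best extent at [192, 256), which still covers block 200, so
 * that placement is used.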
494493cdf49fSOjaswin Mujoo */ 494593cdf49fSOjaswin Mujoo new_bex_end = ac->ac_g_ex.fe_logical + 494693cdf49fSOjaswin Mujoo EXT4_C2B(sbi, ac->ac_g_ex.fe_len); 494793cdf49fSOjaswin Mujoo new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 494893cdf49fSOjaswin Mujoo if (ac->ac_o_ex.fe_logical >= new_bex_start) 494993cdf49fSOjaswin Mujoo goto adjust_bex; 4950c9de560dSAlex Tomas 495193cdf49fSOjaswin Mujoo new_bex_start = ac->ac_g_ex.fe_logical; 495293cdf49fSOjaswin Mujoo new_bex_end = 495393cdf49fSOjaswin Mujoo new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 495493cdf49fSOjaswin Mujoo if (ac->ac_o_ex.fe_logical < new_bex_end) 495593cdf49fSOjaswin Mujoo goto adjust_bex; 4956c9de560dSAlex Tomas 495793cdf49fSOjaswin Mujoo new_bex_start = ac->ac_o_ex.fe_logical; 495893cdf49fSOjaswin Mujoo new_bex_end = 495993cdf49fSOjaswin Mujoo new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4960c9de560dSAlex Tomas 496193cdf49fSOjaswin Mujoo adjust_bex: 496293cdf49fSOjaswin Mujoo ac->ac_b_ex.fe_logical = new_bex_start; 4963c9de560dSAlex Tomas 4964c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4965c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 496693cdf49fSOjaswin Mujoo BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical + 496793cdf49fSOjaswin Mujoo EXT4_C2B(sbi, ac->ac_g_ex.fe_len))); 4968c9de560dSAlex Tomas } 4969c9de560dSAlex Tomas 4970c9de560dSAlex Tomas pa->pa_lstart = ac->ac_b_ex.fe_logical; 4971c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4972c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4973c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4974c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 4975d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4976c9de560dSAlex Tomas pa->pa_deleted = 0; 4977cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_INODE_PA; 4978c9de560dSAlex Tomas 4979d3df1453SRitesh Harjani mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4980d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 49819bffad1eSTheodore Ts'o trace_ext4_mb_new_inode_pa(ac, pa); 4982c9de560dSAlex Tomas 498353accfa9STheodore Ts'o atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4984abc075d4SKemeng Shi ext4_mb_use_inode_pa(ac, pa); 4985c9de560dSAlex Tomas 4986c9de560dSAlex Tomas ei = EXT4_I(ac->ac_inode); 4987c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 49885354b2afSTheodore Ts'o if (!grp) 49895354b2afSTheodore Ts'o return; 4990c9de560dSAlex Tomas 4991a8e38fd3SOjaswin Mujoo pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; 4992c9de560dSAlex Tomas pa->pa_inode = ac->ac_inode; 4993c9de560dSAlex Tomas 4994c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4995c9de560dSAlex Tomas 499638727786SOjaswin Mujoo write_lock(pa->pa_node_lock.inode_lock); 499738727786SOjaswin Mujoo ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); 499838727786SOjaswin Mujoo write_unlock(pa->pa_node_lock.inode_lock); 499927bc446eSbrookxu atomic_inc(&ei->i_prealloc_active); 5000c9de560dSAlex Tomas } 5001c9de560dSAlex Tomas 5002c9de560dSAlex Tomas /* 5003c9de560dSAlex Tomas * creates new preallocated space for locality group inodes belongs to 5004c9de560dSAlex Tomas */ 500553f86b17SRitesh Harjani static noinline_for_stack void 50064ddfef7bSEric Sandeen ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 5007c9de560dSAlex Tomas { 5008c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 5009c9de560dSAlex Tomas struct ext4_locality_group *lg; 
5010c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 5011c9de560dSAlex Tomas struct ext4_group_info *grp; 5012c9de560dSAlex Tomas 5013c9de560dSAlex Tomas /* preallocate only when found space is larger then requested */ 5014c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5015c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5016c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 501753f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 5018c9de560dSAlex Tomas 501953f86b17SRitesh Harjani pa = ac->ac_pa; 5020c9de560dSAlex Tomas 5021c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5022c9de560dSAlex Tomas pa->pa_lstart = pa->pa_pstart; 5023c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 5024c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 5025c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 5026a8e38fd3SOjaswin Mujoo INIT_LIST_HEAD(&pa->pa_node.lg_list); 5027d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 5028c9de560dSAlex Tomas pa->pa_deleted = 0; 5029cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_GROUP_PA; 5030c9de560dSAlex Tomas 5031d3df1453SRitesh Harjani mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5032d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 50339bffad1eSTheodore Ts'o trace_ext4_mb_new_group_pa(ac, pa); 5034c9de560dSAlex Tomas 5035c9de560dSAlex Tomas ext4_mb_use_group_pa(ac, pa); 5036c9de560dSAlex Tomas atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 5037c9de560dSAlex Tomas 5038c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 50395354b2afSTheodore Ts'o if (!grp) 50405354b2afSTheodore Ts'o return; 5041c9de560dSAlex Tomas lg = ac->ac_lg; 5042c9de560dSAlex Tomas BUG_ON(lg == NULL); 5043c9de560dSAlex Tomas 5044a8e38fd3SOjaswin Mujoo pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; 5045c9de560dSAlex Tomas pa->pa_inode = NULL; 5046c9de560dSAlex Tomas 5047c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5048c9de560dSAlex Tomas 50496be2ded1SAneesh Kumar K.V /* 50506be2ded1SAneesh Kumar K.V * We will later add the new pa to the right bucket 50516be2ded1SAneesh Kumar K.V * after updating the pa_free in ext4_mb_release_context 50526be2ded1SAneesh Kumar K.V */ 5053c9de560dSAlex Tomas } 5054c9de560dSAlex Tomas 505553f86b17SRitesh Harjani static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 5056c9de560dSAlex Tomas { 5057c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 505853f86b17SRitesh Harjani ext4_mb_new_group_pa(ac); 5059c9de560dSAlex Tomas else 506053f86b17SRitesh Harjani ext4_mb_new_inode_pa(ac); 5061c9de560dSAlex Tomas } 5062c9de560dSAlex Tomas 5063c9de560dSAlex Tomas /* 5064c9de560dSAlex Tomas * finds all unused blocks in on-disk bitmap, frees them in 5065c9de560dSAlex Tomas * in-core bitmap and buddy. 5066c9de560dSAlex Tomas * @pa must be unlinked from inode and group lists, so that 5067c9de560dSAlex Tomas * nobody else can find/use it. 5068c9de560dSAlex Tomas * the caller MUST hold group/inode locks. 
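 * For example (illustrative numbers): if only 10 clusters of a
 * 64-cluster inode pa were ever written, the 54 bits still clear in
 * the on-disk bitmap inside the pa's range are what gets returned to
 * the buddy here, and that count is cross-checked against pa_free
 * below.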
5069c9de560dSAlex Tomas * TODO: optimize the case when there are no in-core structures yet 5070c9de560dSAlex Tomas */ 50714ddfef7bSEric Sandeen static noinline_for_stack int 50724ddfef7bSEric Sandeen ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 50733e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 5074c9de560dSAlex Tomas { 5075c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5076c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5077498e5f24STheodore Ts'o unsigned int end; 5078498e5f24STheodore Ts'o unsigned int next; 5079c9de560dSAlex Tomas ext4_group_t group; 5080c9de560dSAlex Tomas ext4_grpblk_t bit; 5081ba80b101STheodore Ts'o unsigned long long grp_blk_start; 5082c9de560dSAlex Tomas int free = 0; 5083c9de560dSAlex Tomas 5084c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 5085c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 508653accfa9STheodore Ts'o grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5087c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5088c9de560dSAlex Tomas end = bit + pa->pa_len; 5089c9de560dSAlex Tomas 5090c9de560dSAlex Tomas while (bit < end) { 5091ffad0a44SAneesh Kumar K.V bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5092c9de560dSAlex Tomas if (bit >= end) 5093c9de560dSAlex Tomas break; 5094ffad0a44SAneesh Kumar K.V next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5095d3df1453SRitesh Harjani mb_debug(sb, "free preallocated %u/%u in group %u\n", 50965a0790c2SAndi Kleen (unsigned) ext4_group_first_block_no(sb, group) + bit, 50975a0790c2SAndi Kleen (unsigned) next - bit, (unsigned) group); 5098c9de560dSAlex Tomas free += next - bit; 5099c9de560dSAlex Tomas 51003e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 510153accfa9STheodore Ts'o trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 510253accfa9STheodore Ts'o EXT4_C2B(sbi, bit)), 5103a9c667f8SLukas Czerner next - bit); 5104c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5105c9de560dSAlex Tomas bit = next + 1; 5106c9de560dSAlex Tomas } 5107c9de560dSAlex Tomas if (free != pa->pa_free) { 51089d8b9ec4STheodore Ts'o ext4_msg(e4b->bd_sb, KERN_CRIT, 510936bad423SRitesh Harjani "pa %p: logic %lu, phys. %lu, len %d", 5110c9de560dSAlex Tomas pa, (unsigned long) pa->pa_lstart, 5111c9de560dSAlex Tomas (unsigned long) pa->pa_pstart, 511236bad423SRitesh Harjani pa->pa_len); 5113e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 511426346ff6SAneesh Kumar K.V free, pa->pa_free); 5115e56eb659SAneesh Kumar K.V /* 5116e56eb659SAneesh Kumar K.V * pa is already deleted so we use the value obtained 5117e56eb659SAneesh Kumar K.V * from the bitmap and continue. 
5118e56eb659SAneesh Kumar K.V */ 5119c9de560dSAlex Tomas } 5120c9de560dSAlex Tomas atomic_add(free, &sbi->s_mb_discarded); 5121c9de560dSAlex Tomas 5122863c37fcSzhong jiang return 0; 5123c9de560dSAlex Tomas } 5124c9de560dSAlex Tomas 51254ddfef7bSEric Sandeen static noinline_for_stack int 51264ddfef7bSEric Sandeen ext4_mb_release_group_pa(struct ext4_buddy *e4b, 51273e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 5128c9de560dSAlex Tomas { 5129c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5130c9de560dSAlex Tomas ext4_group_t group; 5131c9de560dSAlex Tomas ext4_grpblk_t bit; 5132c9de560dSAlex Tomas 513360e07cf5SYongqiang Yang trace_ext4_mb_release_group_pa(sb, pa); 5134c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 5135c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5136463808f2STheodore Ts'o if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5137463808f2STheodore Ts'o ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5138463808f2STheodore Ts'o e4b->bd_group, group, pa->pa_pstart); 5139463808f2STheodore Ts'o return 0; 5140463808f2STheodore Ts'o } 5141c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5142c9de560dSAlex Tomas atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 51433e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5144c9de560dSAlex Tomas 5145c9de560dSAlex Tomas return 0; 5146c9de560dSAlex Tomas } 5147c9de560dSAlex Tomas 5148c9de560dSAlex Tomas /* 5149c9de560dSAlex Tomas * releases all preallocations in given group 5150c9de560dSAlex Tomas * 5151c9de560dSAlex Tomas * first, we need to decide discard policy: 5152c9de560dSAlex Tomas * - when do we discard 5153c9de560dSAlex Tomas * 1) ENOSPC 5154c9de560dSAlex Tomas * - how many do we discard 5155c9de560dSAlex Tomas * 1) how many requested 5156c9de560dSAlex Tomas */ 51574ddfef7bSEric Sandeen static noinline_for_stack int 51584ddfef7bSEric Sandeen ext4_mb_discard_group_preallocations(struct super_block *sb, 51598c80fb31SChunguang Xu ext4_group_t group, int *busy) 5160c9de560dSAlex Tomas { 5161c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5162c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 5163c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 5164c9de560dSAlex Tomas struct list_head list; 5165c9de560dSAlex Tomas struct ext4_buddy e4b; 516638727786SOjaswin Mujoo struct ext4_inode_info *ei; 5167c9de560dSAlex Tomas int err; 51688c80fb31SChunguang Xu int free = 0; 5169c9de560dSAlex Tomas 51705354b2afSTheodore Ts'o if (!grp) 51715354b2afSTheodore Ts'o return 0; 5172d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for group %u\n", group); 5173c9de560dSAlex Tomas if (list_empty(&grp->bb_prealloc_list)) 5174bbc4ec77SRitesh Harjani goto out_dbg; 5175c9de560dSAlex Tomas 5176574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 51779008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 51789008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 517954d3adbcSTheodore Ts'o ext4_error_err(sb, -err, 518054d3adbcSTheodore Ts'o "Error %d reading block bitmap for %u", 51819008a58eSDarrick J. 
Wong err, group); 5182bbc4ec77SRitesh Harjani goto out_dbg; 5183c9de560dSAlex Tomas } 5184c9de560dSAlex Tomas 5185c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 5186ce89f46cSAneesh Kumar K.V if (err) { 51879651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 51889651e6b2SKonstantin Khlebnikov err, group); 5189ce89f46cSAneesh Kumar K.V put_bh(bitmap_bh); 5190bbc4ec77SRitesh Harjani goto out_dbg; 5191ce89f46cSAneesh Kumar K.V } 5192c9de560dSAlex Tomas 5193c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 5194c9de560dSAlex Tomas ext4_lock_group(sb, group); 5195c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, 5196c9de560dSAlex Tomas &grp->bb_prealloc_list, pa_group_list) { 5197c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5198c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 5199c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 52008c80fb31SChunguang Xu *busy = 1; 5201c9de560dSAlex Tomas continue; 5202c9de560dSAlex Tomas } 5203c9de560dSAlex Tomas if (pa->pa_deleted) { 5204c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5205c9de560dSAlex Tomas continue; 5206c9de560dSAlex Tomas } 5207c9de560dSAlex Tomas 5208c9de560dSAlex Tomas /* seems this one can be freed ... */ 520927bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 5210c9de560dSAlex Tomas 521170022da8SYe Bin if (!free) 521270022da8SYe Bin this_cpu_inc(discard_pa_seq); 521370022da8SYe Bin 5214c9de560dSAlex Tomas /* we can trust pa_free ... */ 5215c9de560dSAlex Tomas free += pa->pa_free; 5216c9de560dSAlex Tomas 5217c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5218c9de560dSAlex Tomas 5219c9de560dSAlex Tomas list_del(&pa->pa_group_list); 5220c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 5221c9de560dSAlex Tomas } 5222c9de560dSAlex Tomas 5223c9de560dSAlex Tomas /* now free all selected PAs */ 5224c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5225c9de560dSAlex Tomas 5226c9de560dSAlex Tomas /* remove from object (inode or locality group) */ 5227a8e38fd3SOjaswin Mujoo if (pa->pa_type == MB_GROUP_PA) { 5228a8e38fd3SOjaswin Mujoo spin_lock(pa->pa_node_lock.lg_lock); 5229a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 5230a8e38fd3SOjaswin Mujoo spin_unlock(pa->pa_node_lock.lg_lock); 5231a8e38fd3SOjaswin Mujoo } else { 523238727786SOjaswin Mujoo write_lock(pa->pa_node_lock.inode_lock); 523338727786SOjaswin Mujoo ei = EXT4_I(pa->pa_inode); 523438727786SOjaswin Mujoo rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 523538727786SOjaswin Mujoo write_unlock(pa->pa_node_lock.inode_lock); 5236a8e38fd3SOjaswin Mujoo } 5237c9de560dSAlex Tomas 5238c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 523938727786SOjaswin Mujoo 524038727786SOjaswin Mujoo if (pa->pa_type == MB_GROUP_PA) { 524138727786SOjaswin Mujoo ext4_mb_release_group_pa(&e4b, pa); 5242c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 524338727786SOjaswin Mujoo } else { 524438727786SOjaswin Mujoo ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 524538727786SOjaswin Mujoo ext4_mb_pa_free(pa); 524638727786SOjaswin Mujoo } 5247c9de560dSAlex Tomas } 5248c9de560dSAlex Tomas 5249c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5250e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5251c9de560dSAlex Tomas put_bh(bitmap_bh); 5252bbc4ec77SRitesh Harjani out_dbg: 5253d3df1453SRitesh Harjani mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 52548c80fb31SChunguang Xu free, group, grp->bb_free); 52558c80fb31SChunguang Xu return free; 5256c9de560dSAlex Tomas } 
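/*
 * ext4_discard_preallocations() below first gathers the inode's deletable
 * PAs onto a private list under i_prealloc_lock and only then, with that
 * lock dropped, does the per-group buddy and bitmap work.  The following
 * is a minimal, self-contained userspace sketch of that collect-then-release
 * shape in plain C (pthreads + libc); every name in it is illustrative and
 * none of it is kernel or ext4 API.
 */
#if 0	/* illustrative sketch only, not built with this file */
#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int refcount;			/* 0 means no current users */
};

struct group {
	pthread_mutex_t lock;		/* protects ->head */
	struct entry *head;
};

/* Phase 1: unlink every unreferenced entry onto a private list under the lock. */
static struct entry *collect_unused(struct group *g)
{
	struct entry **pp, *e, *victims = NULL;

	pthread_mutex_lock(&g->lock);
	for (pp = &g->head; (e = *pp) != NULL; ) {
		if (e->refcount == 0) {
			*pp = e->next;		/* unlink from the shared list */
			e->next = victims;	/* park on the private list */
			victims = e;
		} else {
			pp = &e->next;		/* busy: leave it for later */
		}
	}
	pthread_mutex_unlock(&g->lock);
	return victims;
}

/* Phase 2: do the expensive release work with no lock held. */
static void release_collected(struct entry *victims)
{
	while (victims) {
		struct entry *next = victims->next;

		free(victims);
		victims = next;
	}
}

/* Typical use: release_collected(collect_unused(&some_group)); */
#endif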
5257c9de560dSAlex Tomas 5258c9de560dSAlex Tomas /* 5259c9de560dSAlex Tomas * releases all non-used preallocated blocks for given inode 5260c9de560dSAlex Tomas * 5261c9de560dSAlex Tomas * It's important to discard preallocations under i_data_sem 5262c9de560dSAlex Tomas * We don't want another block to be served from the prealloc 5263c9de560dSAlex Tomas * space when we are discarding the inode prealloc space. 5264c9de560dSAlex Tomas * 5265c9de560dSAlex Tomas * FIXME!! Make sure it is valid at all the call sites 5266c9de560dSAlex Tomas */ 526727bc446eSbrookxu void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 5268c9de560dSAlex Tomas { 5269c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(inode); 5270c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 5271c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 5272c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 5273c9de560dSAlex Tomas ext4_group_t group = 0; 5274c9de560dSAlex Tomas struct list_head list; 5275c9de560dSAlex Tomas struct ext4_buddy e4b; 527638727786SOjaswin Mujoo struct rb_node *iter; 5277c9de560dSAlex Tomas int err; 5278c9de560dSAlex Tomas 5279c2ea3fdeSTheodore Ts'o if (!S_ISREG(inode->i_mode)) { 5280c9de560dSAlex Tomas return; 5281c9de560dSAlex Tomas } 5282c9de560dSAlex Tomas 52838016e29fSHarshad Shirwadkar if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 52848016e29fSHarshad Shirwadkar return; 52858016e29fSHarshad Shirwadkar 5286d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for inode %lu\n", 5287d3df1453SRitesh Harjani inode->i_ino); 528827bc446eSbrookxu trace_ext4_discard_preallocations(inode, 528927bc446eSbrookxu atomic_read(&ei->i_prealloc_active), needed); 5290c9de560dSAlex Tomas 5291c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 5292c9de560dSAlex Tomas 529327bc446eSbrookxu if (needed == 0) 529427bc446eSbrookxu needed = UINT_MAX; 529527bc446eSbrookxu 5296c9de560dSAlex Tomas repeat: 5297c9de560dSAlex Tomas /* first, collect all pa's in the inode */ 529838727786SOjaswin Mujoo write_lock(&ei->i_prealloc_lock); 529938727786SOjaswin Mujoo for (iter = rb_first(&ei->i_prealloc_node); iter && needed; 530038727786SOjaswin Mujoo iter = rb_next(iter)) { 530138727786SOjaswin Mujoo pa = rb_entry(iter, struct ext4_prealloc_space, 530238727786SOjaswin Mujoo pa_node.inode_node); 5303a8e38fd3SOjaswin Mujoo BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); 530438727786SOjaswin Mujoo 5305c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5306c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 5307c9de560dSAlex Tomas /* this shouldn't happen often - nobody should 5308c9de560dSAlex Tomas * use preallocation while we're discarding it */ 5309c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 531038727786SOjaswin Mujoo write_unlock(&ei->i_prealloc_lock); 53119d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, 53129d8b9ec4STheodore Ts'o "uh-oh! 
used pa while discarding"); 5313c9de560dSAlex Tomas WARN_ON(1); 5314c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5315c9de560dSAlex Tomas goto repeat; 5316c9de560dSAlex Tomas 5317c9de560dSAlex Tomas } 5318c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 531927bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 5320c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 532138727786SOjaswin Mujoo rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5322c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 532327bc446eSbrookxu needed--; 5324c9de560dSAlex Tomas continue; 5325c9de560dSAlex Tomas } 5326c9de560dSAlex Tomas 5327c9de560dSAlex Tomas /* someone is deleting pa right now */ 5328c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 532938727786SOjaswin Mujoo write_unlock(&ei->i_prealloc_lock); 5330c9de560dSAlex Tomas 5331c9de560dSAlex Tomas /* we have to wait here because pa_deleted 5332c9de560dSAlex Tomas * doesn't mean pa is already unlinked from 5333c9de560dSAlex Tomas * the list. as we might be called from 5334c9de560dSAlex Tomas * ->clear_inode() the inode will get freed 5335c9de560dSAlex Tomas * and concurrent thread which is unlinking 5336c9de560dSAlex Tomas * pa from inode's list may access already 5337c9de560dSAlex Tomas * freed memory, bad-bad-bad */ 5338c9de560dSAlex Tomas 5339c9de560dSAlex Tomas /* XXX: if this happens too often, we can 5340c9de560dSAlex Tomas * add a flag to force wait only in case 5341c9de560dSAlex Tomas * of ->clear_inode(), but not in case of 5342c9de560dSAlex Tomas * regular truncate */ 5343c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5344c9de560dSAlex Tomas goto repeat; 5345c9de560dSAlex Tomas } 534638727786SOjaswin Mujoo write_unlock(&ei->i_prealloc_lock); 5347c9de560dSAlex Tomas 5348c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5349cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_INODE_PA); 5350bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 5351c9de560dSAlex Tomas 53529651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 53539651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5354ce89f46cSAneesh Kumar K.V if (err) { 535554d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 53569651e6b2SKonstantin Khlebnikov err, group); 5357ce89f46cSAneesh Kumar K.V continue; 5358ce89f46cSAneesh Kumar K.V } 5359c9de560dSAlex Tomas 5360574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 53619008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 53629008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 536354d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 53649008a58eSDarrick J. 
Wong err, group); 5365e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5366ce89f46cSAneesh Kumar K.V continue; 5367c9de560dSAlex Tomas } 5368c9de560dSAlex Tomas 5369c9de560dSAlex Tomas ext4_lock_group(sb, group); 5370c9de560dSAlex Tomas list_del(&pa->pa_group_list); 53713e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5372c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5373c9de560dSAlex Tomas 5374e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5375c9de560dSAlex Tomas put_bh(bitmap_bh); 5376c9de560dSAlex Tomas 5377c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 537838727786SOjaswin Mujoo ext4_mb_pa_free(pa); 5379c9de560dSAlex Tomas } 5380c9de560dSAlex Tomas } 5381c9de560dSAlex Tomas 538253f86b17SRitesh Harjani static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 538353f86b17SRitesh Harjani { 538453f86b17SRitesh Harjani struct ext4_prealloc_space *pa; 538553f86b17SRitesh Harjani 538653f86b17SRitesh Harjani BUG_ON(ext4_pspace_cachep == NULL); 538753f86b17SRitesh Harjani pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 538853f86b17SRitesh Harjani if (!pa) 538953f86b17SRitesh Harjani return -ENOMEM; 539053f86b17SRitesh Harjani atomic_set(&pa->pa_count, 1); 539153f86b17SRitesh Harjani ac->ac_pa = pa; 539253f86b17SRitesh Harjani return 0; 539353f86b17SRitesh Harjani } 539453f86b17SRitesh Harjani 539582089725SOjaswin Mujoo static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 539653f86b17SRitesh Harjani { 539753f86b17SRitesh Harjani struct ext4_prealloc_space *pa = ac->ac_pa; 539853f86b17SRitesh Harjani 539953f86b17SRitesh Harjani BUG_ON(!pa); 540053f86b17SRitesh Harjani ac->ac_pa = NULL; 540153f86b17SRitesh Harjani WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 540282089725SOjaswin Mujoo /* 540382089725SOjaswin Mujoo * current function is only called due to an error or due to 540482089725SOjaswin Mujoo * len of found blocks < len of requested blocks hence the PA has not 540582089725SOjaswin Mujoo * been added to grp->bb_prealloc_list. 
So we don't need to lock it 540682089725SOjaswin Mujoo */ 540782089725SOjaswin Mujoo pa->pa_deleted = 1; 540882089725SOjaswin Mujoo ext4_mb_pa_free(pa); 540953f86b17SRitesh Harjani } 541053f86b17SRitesh Harjani 54116ba495e9STheodore Ts'o #ifdef CONFIG_EXT4_DEBUG 5412e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb) 5413c9de560dSAlex Tomas { 5414e68cf40cSRitesh Harjani ext4_group_t i, ngroups; 5415c9de560dSAlex Tomas 54169b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5417e3570639SEric Sandeen return; 5418e3570639SEric Sandeen 54198df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 5420d3df1453SRitesh Harjani mb_debug(sb, "groups: "); 54218df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 5422c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5423c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 5424c9de560dSAlex Tomas ext4_grpblk_t start; 5425c9de560dSAlex Tomas struct list_head *cur; 54265354b2afSTheodore Ts'o 54275354b2afSTheodore Ts'o if (!grp) 54285354b2afSTheodore Ts'o continue; 5429c9de560dSAlex Tomas ext4_lock_group(sb, i); 5430c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 5431c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, 5432c9de560dSAlex Tomas pa_group_list); 5433c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5434c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5435c9de560dSAlex Tomas NULL, &start); 5436c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5437d3df1453SRitesh Harjani mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5438d3df1453SRitesh Harjani pa->pa_len); 5439c9de560dSAlex Tomas } 544060bd63d1SSolofo Ramangalahy ext4_unlock_group(sb, i); 5441d3df1453SRitesh Harjani mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5442d3df1453SRitesh Harjani grp->bb_fragments); 5443c9de560dSAlex Tomas } 5444c9de560dSAlex Tomas } 5445e68cf40cSRitesh Harjani 5446e68cf40cSRitesh Harjani static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5447e68cf40cSRitesh Harjani { 5448e68cf40cSRitesh Harjani struct super_block *sb = ac->ac_sb; 5449e68cf40cSRitesh Harjani 54509b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5451e68cf40cSRitesh Harjani return; 5452e68cf40cSRitesh Harjani 5453d3df1453SRitesh Harjani mb_debug(sb, "Can't allocate:" 5454e68cf40cSRitesh Harjani " Allocation context details:"); 5455d3df1453SRitesh Harjani mb_debug(sb, "status %u flags 0x%x", 5456e68cf40cSRitesh Harjani ac->ac_status, ac->ac_flags); 5457d3df1453SRitesh Harjani mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5458e68cf40cSRitesh Harjani "goal %lu/%lu/%lu@%lu, " 5459e68cf40cSRitesh Harjani "best %lu/%lu/%lu@%lu cr %d", 5460e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_group, 5461e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_start, 5462e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_len, 5463e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_logical, 5464e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_group, 5465e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_start, 5466e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_len, 5467e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_logical, 5468e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_group, 5469e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_start, 5470e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_len, 5471e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_logical, 5472e68cf40cSRitesh Harjani (int)ac->ac_criteria); 
5473d3df1453SRitesh Harjani mb_debug(sb, "%u found", ac->ac_found);
5474569f196fSRitesh Harjani mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5475569f196fSRitesh Harjani if (ac->ac_pa)
5476569f196fSRitesh Harjani mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5477569f196fSRitesh Harjani "group pa" : "inode pa");
5478e68cf40cSRitesh Harjani ext4_mb_show_pa(sb);
5479e68cf40cSRitesh Harjani }
5480c9de560dSAlex Tomas #else
5481e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb)
5482e68cf40cSRitesh Harjani {
5483e68cf40cSRitesh Harjani return;
5484e68cf40cSRitesh Harjani }
5485c9de560dSAlex Tomas static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5486c9de560dSAlex Tomas {
5487e68cf40cSRitesh Harjani ext4_mb_show_pa(ac->ac_sb);
5488c9de560dSAlex Tomas return;
5489c9de560dSAlex Tomas }
5490c9de560dSAlex Tomas #endif
5491c9de560dSAlex Tomas 
5492c9de560dSAlex Tomas /*
5493c9de560dSAlex Tomas * We use locality group preallocation for small files. The size of the
5494c9de560dSAlex Tomas * file is determined by the current size or the resulting size after
5495c9de560dSAlex Tomas * allocation, whichever is larger.
5496c9de560dSAlex Tomas *
5497b713a5ecSTheodore Ts'o * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5498c9de560dSAlex Tomas */
5499c9de560dSAlex Tomas static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5500c9de560dSAlex Tomas {
5501c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5502c9de560dSAlex Tomas int bsbits = ac->ac_sb->s_blocksize_bits;
5503c9de560dSAlex Tomas loff_t size, isize;
5504a9f2a293SJan Kara bool inode_pa_eligible, group_pa_eligible;
5505c9de560dSAlex Tomas 
5506c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5507c9de560dSAlex Tomas return;
5508c9de560dSAlex Tomas 
55094ba74d00STheodore Ts'o if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
55104ba74d00STheodore Ts'o return;
55114ba74d00STheodore Ts'o 
5512a9f2a293SJan Kara group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5513a9f2a293SJan Kara inode_pa_eligible = true;
551453accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
551550797481STheodore Ts'o isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
551650797481STheodore Ts'o >> bsbits;
5517c9de560dSAlex Tomas 
5518a9f2a293SJan Kara /* No point in using inode preallocation for closed files */
551982dd124cSNikolay Borisov if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5520a9f2a293SJan Kara !inode_is_open_for_write(ac->ac_inode))
5521a9f2a293SJan Kara inode_pa_eligible = false;
552250797481STheodore Ts'o 
552371780577STheodore Ts'o size = max(size, isize);
5524a9f2a293SJan Kara /* Don't use group allocation for large files */
5525a9f2a293SJan Kara if (size > sbi->s_mb_stream_request)
5526a9f2a293SJan Kara group_pa_eligible = false;
5527a9f2a293SJan Kara 
5528a9f2a293SJan Kara if (!group_pa_eligible) {
5529a9f2a293SJan Kara if (inode_pa_eligible)
55304ba74d00STheodore Ts'o ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5531a9f2a293SJan Kara else
5532a9f2a293SJan Kara ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5533c9de560dSAlex Tomas return;
55344ba74d00STheodore Ts'o }
5535c9de560dSAlex Tomas 
5536c9de560dSAlex Tomas BUG_ON(ac->ac_lg != NULL);
5537c9de560dSAlex Tomas /*
5538c9de560dSAlex Tomas * locality group prealloc space is per cpu.
The reason for having 5539c9de560dSAlex Tomas * per cpu locality group is to reduce the contention between block 5540c9de560dSAlex Tomas * request from multiple CPUs. 5541c9de560dSAlex Tomas */ 5542a0b6bc63SChristoph Lameter ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5543c9de560dSAlex Tomas 5544c9de560dSAlex Tomas /* we're going to use group allocation */ 5545c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5546c9de560dSAlex Tomas 5547c9de560dSAlex Tomas /* serialize all allocations in the group */ 5548c9de560dSAlex Tomas mutex_lock(&ac->ac_lg->lg_mutex); 5549c9de560dSAlex Tomas } 5550c9de560dSAlex Tomas 5551d73eff68SGuoqing Jiang static noinline_for_stack void 55524ddfef7bSEric Sandeen ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5553c9de560dSAlex Tomas struct ext4_allocation_request *ar) 5554c9de560dSAlex Tomas { 5555c9de560dSAlex Tomas struct super_block *sb = ar->inode->i_sb; 5556c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5557c9de560dSAlex Tomas struct ext4_super_block *es = sbi->s_es; 5558c9de560dSAlex Tomas ext4_group_t group; 5559498e5f24STheodore Ts'o unsigned int len; 5560498e5f24STheodore Ts'o ext4_fsblk_t goal; 5561c9de560dSAlex Tomas ext4_grpblk_t block; 5562c9de560dSAlex Tomas 5563c9de560dSAlex Tomas /* we can't allocate > group size */ 5564c9de560dSAlex Tomas len = ar->len; 5565c9de560dSAlex Tomas 5566c9de560dSAlex Tomas /* just a dirty hack to filter too big requests */ 556740ae3487STheodore Ts'o if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 556840ae3487STheodore Ts'o len = EXT4_CLUSTERS_PER_GROUP(sb); 5569c9de560dSAlex Tomas 5570c9de560dSAlex Tomas /* start searching from the goal */ 5571c9de560dSAlex Tomas goal = ar->goal; 5572c9de560dSAlex Tomas if (goal < le32_to_cpu(es->s_first_data_block) || 5573c9de560dSAlex Tomas goal >= ext4_blocks_count(es)) 5574c9de560dSAlex Tomas goal = le32_to_cpu(es->s_first_data_block); 5575c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, goal, &group, &block); 5576c9de560dSAlex Tomas 5577c9de560dSAlex Tomas /* set up allocation goals */ 5578f5a44db5STheodore Ts'o ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5579c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 5580c9de560dSAlex Tomas ac->ac_sb = sb; 5581c9de560dSAlex Tomas ac->ac_inode = ar->inode; 558253accfa9STheodore Ts'o ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5583c9de560dSAlex Tomas ac->ac_o_ex.fe_group = group; 5584c9de560dSAlex Tomas ac->ac_o_ex.fe_start = block; 5585c9de560dSAlex Tomas ac->ac_o_ex.fe_len = len; 558653accfa9STheodore Ts'o ac->ac_g_ex = ac->ac_o_ex; 5587c9de560dSAlex Tomas ac->ac_flags = ar->flags; 5588c9de560dSAlex Tomas 55893cb77bd2Sbrookxu /* we have to define context: we'll work with a file or 5590c9de560dSAlex Tomas * locality group. this is a policy, actually */ 5591c9de560dSAlex Tomas ext4_mb_group_or_file(ac); 5592c9de560dSAlex Tomas 5593d3df1453SRitesh Harjani mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5594c9de560dSAlex Tomas "left: %u/%u, right %u/%u to %swritable\n", 5595c9de560dSAlex Tomas (unsigned) ar->len, (unsigned) ar->logical, 5596c9de560dSAlex Tomas (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5597c9de560dSAlex Tomas (unsigned) ar->lleft, (unsigned) ar->pleft, 5598c9de560dSAlex Tomas (unsigned) ar->lright, (unsigned) ar->pright, 559982dd124cSNikolay Borisov inode_is_open_for_write(ar->inode) ? 
"" : "non-"); 5600c9de560dSAlex Tomas } 5601c9de560dSAlex Tomas 56026be2ded1SAneesh Kumar K.V static noinline_for_stack void 56036be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(struct super_block *sb, 56046be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg, 56056be2ded1SAneesh Kumar K.V int order, int total_entries) 56066be2ded1SAneesh Kumar K.V { 56076be2ded1SAneesh Kumar K.V ext4_group_t group = 0; 56086be2ded1SAneesh Kumar K.V struct ext4_buddy e4b; 56096be2ded1SAneesh Kumar K.V struct list_head discard_list; 56106be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa, *tmp; 56116be2ded1SAneesh Kumar K.V 5612d3df1453SRitesh Harjani mb_debug(sb, "discard locality group preallocation\n"); 56136be2ded1SAneesh Kumar K.V 56146be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&discard_list); 56156be2ded1SAneesh Kumar K.V 56166be2ded1SAneesh Kumar K.V spin_lock(&lg->lg_prealloc_lock); 56176be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 5618a8e38fd3SOjaswin Mujoo pa_node.lg_list, 561992e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 56206be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 56216be2ded1SAneesh Kumar K.V if (atomic_read(&pa->pa_count)) { 56226be2ded1SAneesh Kumar K.V /* 56236be2ded1SAneesh Kumar K.V * This is the pa that we just used 56246be2ded1SAneesh Kumar K.V * for block allocation. So don't 56256be2ded1SAneesh Kumar K.V * free that 56266be2ded1SAneesh Kumar K.V */ 56276be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 56286be2ded1SAneesh Kumar K.V continue; 56296be2ded1SAneesh Kumar K.V } 56306be2ded1SAneesh Kumar K.V if (pa->pa_deleted) { 56316be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 56326be2ded1SAneesh Kumar K.V continue; 56336be2ded1SAneesh Kumar K.V } 56346be2ded1SAneesh Kumar K.V /* only lg prealloc space */ 5635cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_GROUP_PA); 56366be2ded1SAneesh Kumar K.V 56376be2ded1SAneesh Kumar K.V /* seems this one can be freed ... */ 563827bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 56396be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 56406be2ded1SAneesh Kumar K.V 5641a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 56426be2ded1SAneesh Kumar K.V list_add(&pa->u.pa_tmp_list, &discard_list); 56436be2ded1SAneesh Kumar K.V 56446be2ded1SAneesh Kumar K.V total_entries--; 56456be2ded1SAneesh Kumar K.V if (total_entries <= 5) { 56466be2ded1SAneesh Kumar K.V /* 56476be2ded1SAneesh Kumar K.V * we want to keep only 5 entries 56486be2ded1SAneesh Kumar K.V * allowing it to grow to 8. This 56496be2ded1SAneesh Kumar K.V * mak sure we don't call discard 56506be2ded1SAneesh Kumar K.V * soon for this list. 
56516be2ded1SAneesh Kumar K.V */
56526be2ded1SAneesh Kumar K.V break;
56536be2ded1SAneesh Kumar K.V }
56546be2ded1SAneesh Kumar K.V }
56556be2ded1SAneesh Kumar K.V spin_unlock(&lg->lg_prealloc_lock);
56566be2ded1SAneesh Kumar K.V 
56576be2ded1SAneesh Kumar K.V list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
56589651e6b2SKonstantin Khlebnikov int err;
56596be2ded1SAneesh Kumar K.V 
5660bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart);
56619651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
56629651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL);
56639651e6b2SKonstantin Khlebnikov if (err) {
566454d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
56659651e6b2SKonstantin Khlebnikov err, group);
56666be2ded1SAneesh Kumar K.V continue;
56676be2ded1SAneesh Kumar K.V }
56686be2ded1SAneesh Kumar K.V ext4_lock_group(sb, group);
56696be2ded1SAneesh Kumar K.V list_del(&pa->pa_group_list);
56703e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa);
56716be2ded1SAneesh Kumar K.V ext4_unlock_group(sb, group);
56726be2ded1SAneesh Kumar K.V 
5673e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b);
56746be2ded1SAneesh Kumar K.V list_del(&pa->u.pa_tmp_list);
56756be2ded1SAneesh Kumar K.V call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
56766be2ded1SAneesh Kumar K.V }
56776be2ded1SAneesh Kumar K.V }
56786be2ded1SAneesh Kumar K.V 
56796be2ded1SAneesh Kumar K.V /*
56806be2ded1SAneesh Kumar K.V * We have incremented pa_count. So it cannot be freed at this
56816be2ded1SAneesh Kumar K.V * point. Also we hold lg_mutex. So no parallel allocation is
56826be2ded1SAneesh Kumar K.V * possible from this lg. That means pa_free cannot be updated.
56836be2ded1SAneesh Kumar K.V *
56846be2ded1SAneesh Kumar K.V * A parallel ext4_mb_discard_group_preallocations is possible,
56856be2ded1SAneesh Kumar K.V * which can cause the lg_prealloc_list to be updated.
56866be2ded1SAneesh Kumar K.V */ 56876be2ded1SAneesh Kumar K.V 56886be2ded1SAneesh Kumar K.V static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 56896be2ded1SAneesh Kumar K.V { 56906be2ded1SAneesh Kumar K.V int order, added = 0, lg_prealloc_count = 1; 56916be2ded1SAneesh Kumar K.V struct super_block *sb = ac->ac_sb; 56926be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg = ac->ac_lg; 56936be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 56946be2ded1SAneesh Kumar K.V 56956be2ded1SAneesh Kumar K.V order = fls(pa->pa_free) - 1; 56966be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 56976be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 56986be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 56996be2ded1SAneesh Kumar K.V /* Add the prealloc space to lg */ 5700f1167009SNiu Yawei spin_lock(&lg->lg_prealloc_lock); 57016be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 5702a8e38fd3SOjaswin Mujoo pa_node.lg_list, 570392e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 57046be2ded1SAneesh Kumar K.V spin_lock(&tmp_pa->pa_lock); 57056be2ded1SAneesh Kumar K.V if (tmp_pa->pa_deleted) { 5706e7c9e3e9STheodore Ts'o spin_unlock(&tmp_pa->pa_lock); 57076be2ded1SAneesh Kumar K.V continue; 57086be2ded1SAneesh Kumar K.V } 57096be2ded1SAneesh Kumar K.V if (!added && pa->pa_free < tmp_pa->pa_free) { 57106be2ded1SAneesh Kumar K.V /* Add to the tail of the previous entry */ 5711a8e38fd3SOjaswin Mujoo list_add_tail_rcu(&pa->pa_node.lg_list, 5712a8e38fd3SOjaswin Mujoo &tmp_pa->pa_node.lg_list); 57136be2ded1SAneesh Kumar K.V added = 1; 57146be2ded1SAneesh Kumar K.V /* 57156be2ded1SAneesh Kumar K.V * we want to count the total 57166be2ded1SAneesh Kumar K.V * number of entries in the list 57176be2ded1SAneesh Kumar K.V */ 57186be2ded1SAneesh Kumar K.V } 57196be2ded1SAneesh Kumar K.V spin_unlock(&tmp_pa->pa_lock); 57206be2ded1SAneesh Kumar K.V lg_prealloc_count++; 57216be2ded1SAneesh Kumar K.V } 57226be2ded1SAneesh Kumar K.V if (!added) 5723a8e38fd3SOjaswin Mujoo list_add_tail_rcu(&pa->pa_node.lg_list, 57246be2ded1SAneesh Kumar K.V &lg->lg_prealloc_list[order]); 5725f1167009SNiu Yawei spin_unlock(&lg->lg_prealloc_lock); 57266be2ded1SAneesh Kumar K.V 57276be2ded1SAneesh Kumar K.V /* Now trim the list to be not more than 8 elements */ 57286be2ded1SAneesh Kumar K.V if (lg_prealloc_count > 8) { 57296be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(sb, lg, 57306be2ded1SAneesh Kumar K.V order, lg_prealloc_count); 57316be2ded1SAneesh Kumar K.V return; 57326be2ded1SAneesh Kumar K.V } 57336be2ded1SAneesh Kumar K.V return ; 57346be2ded1SAneesh Kumar K.V } 57356be2ded1SAneesh Kumar K.V 5736c9de560dSAlex Tomas /* 5737c9de560dSAlex Tomas * release all resource we used in allocation 5738c9de560dSAlex Tomas */ 5739c9de560dSAlex Tomas static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5740c9de560dSAlex Tomas { 574153accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 57426be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa = ac->ac_pa; 57436be2ded1SAneesh Kumar K.V if (pa) { 5744cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) { 5745c9de560dSAlex Tomas /* see comment in ext4_mb_use_group_pa() */ 57466be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 574753accfa9STheodore Ts'o pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 574853accfa9STheodore Ts'o pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 57496be2ded1SAneesh Kumar K.V 
pa->pa_free -= ac->ac_b_ex.fe_len; 57506be2ded1SAneesh Kumar K.V pa->pa_len -= ac->ac_b_ex.fe_len; 57516be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 575266d5e027Sbrookxu 57536be2ded1SAneesh Kumar K.V /* 57546be2ded1SAneesh Kumar K.V * We want to add the pa to the right bucket. 57556be2ded1SAneesh Kumar K.V * Remove it from the list and while adding 57566be2ded1SAneesh Kumar K.V * make sure the list to which we are adding 575744183d42SAmir Goldstein * doesn't grow big. 57586be2ded1SAneesh Kumar K.V */ 575966d5e027Sbrookxu if (likely(pa->pa_free)) { 5760a8e38fd3SOjaswin Mujoo spin_lock(pa->pa_node_lock.lg_lock); 5761a8e38fd3SOjaswin Mujoo list_del_rcu(&pa->pa_node.lg_list); 5762a8e38fd3SOjaswin Mujoo spin_unlock(pa->pa_node_lock.lg_lock); 57636be2ded1SAneesh Kumar K.V ext4_mb_add_n_trim(ac); 5764c9de560dSAlex Tomas } 576566d5e027Sbrookxu } 576627bc446eSbrookxu 57676be2ded1SAneesh Kumar K.V ext4_mb_put_pa(ac, ac->ac_sb, pa); 5768c9de560dSAlex Tomas } 5769c9de560dSAlex Tomas if (ac->ac_bitmap_page) 577009cbfeafSKirill A. Shutemov put_page(ac->ac_bitmap_page); 5771c9de560dSAlex Tomas if (ac->ac_buddy_page) 577209cbfeafSKirill A. Shutemov put_page(ac->ac_buddy_page); 5773c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5774c9de560dSAlex Tomas mutex_unlock(&ac->ac_lg->lg_mutex); 5775c9de560dSAlex Tomas ext4_mb_collect_stats(ac); 5776c9de560dSAlex Tomas return 0; 5777c9de560dSAlex Tomas } 5778c9de560dSAlex Tomas 5779c9de560dSAlex Tomas static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5780c9de560dSAlex Tomas { 57818df9675fSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5782c9de560dSAlex Tomas int ret; 57838c80fb31SChunguang Xu int freed = 0, busy = 0; 57848c80fb31SChunguang Xu int retry = 0; 5785c9de560dSAlex Tomas 57869bffad1eSTheodore Ts'o trace_ext4_mb_discard_preallocations(sb, needed); 57878c80fb31SChunguang Xu 57888c80fb31SChunguang Xu if (needed == 0) 57898c80fb31SChunguang Xu needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 57908c80fb31SChunguang Xu repeat: 57918df9675fSTheodore Ts'o for (i = 0; i < ngroups && needed > 0; i++) { 57928c80fb31SChunguang Xu ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5793c9de560dSAlex Tomas freed += ret; 5794c9de560dSAlex Tomas needed -= ret; 57958c80fb31SChunguang Xu cond_resched(); 57968c80fb31SChunguang Xu } 57978c80fb31SChunguang Xu 57988c80fb31SChunguang Xu if (needed > 0 && busy && ++retry < 3) { 57998c80fb31SChunguang Xu busy = 0; 58008c80fb31SChunguang Xu goto repeat; 5801c9de560dSAlex Tomas } 5802c9de560dSAlex Tomas 5803c9de560dSAlex Tomas return freed; 5804c9de560dSAlex Tomas } 5805c9de560dSAlex Tomas 5806cf5e2ca6SRitesh Harjani static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 580707b5b8e1SRitesh Harjani struct ext4_allocation_context *ac, u64 *seq) 5808cf5e2ca6SRitesh Harjani { 5809cf5e2ca6SRitesh Harjani int freed; 581007b5b8e1SRitesh Harjani u64 seq_retry = 0; 581107b5b8e1SRitesh Harjani bool ret = false; 5812cf5e2ca6SRitesh Harjani 5813cf5e2ca6SRitesh Harjani freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 581407b5b8e1SRitesh Harjani if (freed) { 581507b5b8e1SRitesh Harjani ret = true; 581607b5b8e1SRitesh Harjani goto out_dbg; 581707b5b8e1SRitesh Harjani } 581807b5b8e1SRitesh Harjani seq_retry = ext4_get_discard_pa_seq_sum(); 581999377830SRitesh Harjani if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 582099377830SRitesh Harjani ac->ac_flags |= EXT4_MB_STRICT_CHECK; 582107b5b8e1SRitesh Harjani *seq = 
seq_retry; 582207b5b8e1SRitesh Harjani ret = true; 582307b5b8e1SRitesh Harjani } 582407b5b8e1SRitesh Harjani 582507b5b8e1SRitesh Harjani out_dbg: 582607b5b8e1SRitesh Harjani mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no"); 582707b5b8e1SRitesh Harjani return ret; 5828cf5e2ca6SRitesh Harjani } 5829cf5e2ca6SRitesh Harjani 5830ad78b5efSKemeng Shi /* 5831ad78b5efSKemeng Shi * Simple allocator for Ext4 fast commit replay path. It searches for blocks 5832ad78b5efSKemeng Shi * linearly starting at the goal block and also excludes the blocks which 5833ad78b5efSKemeng Shi * are going to be in use after fast commit replay. 5834ad78b5efSKemeng Shi */ 5835ad78b5efSKemeng Shi static ext4_fsblk_t 5836ad78b5efSKemeng Shi ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp) 5837ad78b5efSKemeng Shi { 5838ad78b5efSKemeng Shi struct buffer_head *bitmap_bh; 5839ad78b5efSKemeng Shi struct super_block *sb = ar->inode->i_sb; 5840ad78b5efSKemeng Shi struct ext4_sb_info *sbi = EXT4_SB(sb); 5841ad78b5efSKemeng Shi ext4_group_t group, nr; 5842ad78b5efSKemeng Shi ext4_grpblk_t blkoff; 5843ad78b5efSKemeng Shi ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 5844ad78b5efSKemeng Shi ext4_grpblk_t i = 0; 5845ad78b5efSKemeng Shi ext4_fsblk_t goal, block; 5846ad78b5efSKemeng Shi struct ext4_super_block *es = EXT4_SB(sb)->s_es; 5847ad78b5efSKemeng Shi 5848ad78b5efSKemeng Shi goal = ar->goal; 5849ad78b5efSKemeng Shi if (goal < le32_to_cpu(es->s_first_data_block) || 5850ad78b5efSKemeng Shi goal >= ext4_blocks_count(es)) 5851ad78b5efSKemeng Shi goal = le32_to_cpu(es->s_first_data_block); 5852ad78b5efSKemeng Shi 5853ad78b5efSKemeng Shi ar->len = 0; 5854ad78b5efSKemeng Shi ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 5855ad78b5efSKemeng Shi for (nr = ext4_get_groups_count(sb); nr > 0; nr--) { 5856ad78b5efSKemeng Shi bitmap_bh = ext4_read_block_bitmap(sb, group); 5857ad78b5efSKemeng Shi if (IS_ERR(bitmap_bh)) { 5858ad78b5efSKemeng Shi *errp = PTR_ERR(bitmap_bh); 5859ad78b5efSKemeng Shi pr_warn("Failed to read block bitmap\n"); 5860ad78b5efSKemeng Shi return 0; 5861ad78b5efSKemeng Shi } 5862ad78b5efSKemeng Shi 5863ad78b5efSKemeng Shi while (1) { 5864ad78b5efSKemeng Shi i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 5865ad78b5efSKemeng Shi blkoff); 5866ad78b5efSKemeng Shi if (i >= max) 5867ad78b5efSKemeng Shi break; 5868ad78b5efSKemeng Shi if (ext4_fc_replay_check_excluded(sb, 5869ad78b5efSKemeng Shi ext4_group_first_block_no(sb, group) + 5870ad78b5efSKemeng Shi EXT4_C2B(sbi, i))) { 5871ad78b5efSKemeng Shi blkoff = i + 1; 5872ad78b5efSKemeng Shi } else 5873ad78b5efSKemeng Shi break; 5874ad78b5efSKemeng Shi } 5875ad78b5efSKemeng Shi brelse(bitmap_bh); 5876ad78b5efSKemeng Shi if (i < max) 5877ad78b5efSKemeng Shi break; 5878ad78b5efSKemeng Shi 5879ad78b5efSKemeng Shi if (++group >= ext4_get_groups_count(sb)) 5880ad78b5efSKemeng Shi group = 0; 5881ad78b5efSKemeng Shi 5882ad78b5efSKemeng Shi blkoff = 0; 5883ad78b5efSKemeng Shi } 5884ad78b5efSKemeng Shi 5885ad78b5efSKemeng Shi if (i >= max) { 5886ad78b5efSKemeng Shi *errp = -ENOSPC; 5887ad78b5efSKemeng Shi return 0; 5888ad78b5efSKemeng Shi } 5889ad78b5efSKemeng Shi 5890ad78b5efSKemeng Shi block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i); 5891ad78b5efSKemeng Shi ext4_mb_mark_bb(sb, block, 1, 1); 5892ad78b5efSKemeng Shi ar->len = 1; 5893ad78b5efSKemeng Shi 5894ad78b5efSKemeng Shi return block; 5895ad78b5efSKemeng Shi } 58968016e29fSHarshad Shirwadkar 5897c9de560dSAlex Tomas /* 5898c9de560dSAlex Tomas * Main entry point into 
mballoc to allocate blocks 5899c9de560dSAlex Tomas * it tries to use preallocation first, then falls back 5900c9de560dSAlex Tomas * to usual allocation 5901c9de560dSAlex Tomas */ 5902c9de560dSAlex Tomas ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 5903c9de560dSAlex Tomas struct ext4_allocation_request *ar, int *errp) 5904c9de560dSAlex Tomas { 5905256bdb49SEric Sandeen struct ext4_allocation_context *ac = NULL; 5906c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5907c9de560dSAlex Tomas struct super_block *sb; 5908c9de560dSAlex Tomas ext4_fsblk_t block = 0; 590960e58e0fSMingming Cao unsigned int inquota = 0; 591053accfa9STheodore Ts'o unsigned int reserv_clstrs = 0; 591180fa46d6STheodore Ts'o int retries = 0; 591207b5b8e1SRitesh Harjani u64 seq; 5913c9de560dSAlex Tomas 5914b10a44c3STheodore Ts'o might_sleep(); 5915c9de560dSAlex Tomas sb = ar->inode->i_sb; 5916c9de560dSAlex Tomas sbi = EXT4_SB(sb); 5917c9de560dSAlex Tomas 59189bffad1eSTheodore Ts'o trace_ext4_request_blocks(ar); 59198016e29fSHarshad Shirwadkar if (sbi->s_mount_state & EXT4_FC_REPLAY) 5920ad78b5efSKemeng Shi return ext4_mb_new_blocks_simple(ar, errp); 5921ba80b101STheodore Ts'o 592245dc63e7SDmitry Monakhov /* Allow to use superuser reservation for quota file */ 592302749a4cSTahsin Erdogan if (ext4_is_quota_file(ar->inode)) 592445dc63e7SDmitry Monakhov ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 592545dc63e7SDmitry Monakhov 5926e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 592760e58e0fSMingming Cao /* Without delayed allocation we need to verify 592860e58e0fSMingming Cao * there is enough free blocks to do block allocation 592960e58e0fSMingming Cao * and verify allocation doesn't exceed the quota limits. 5930d2a17637SMingming Cao */ 593155f020dbSAllison Henderson while (ar->len && 5932e7d5f315STheodore Ts'o ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 593355f020dbSAllison Henderson 5934030ba6bcSAneesh Kumar K.V /* let others to free the space */ 5935bb8b20edSLukas Czerner cond_resched(); 5936030ba6bcSAneesh Kumar K.V ar->len = ar->len >> 1; 5937030ba6bcSAneesh Kumar K.V } 5938030ba6bcSAneesh Kumar K.V if (!ar->len) { 5939bbc4ec77SRitesh Harjani ext4_mb_show_pa(sb); 594007031431SMingming Cao *errp = -ENOSPC; 594107031431SMingming Cao return 0; 594207031431SMingming Cao } 594353accfa9STheodore Ts'o reserv_clstrs = ar->len; 594455f020dbSAllison Henderson if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 594553accfa9STheodore Ts'o dquot_alloc_block_nofail(ar->inode, 594653accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len)); 594755f020dbSAllison Henderson } else { 594855f020dbSAllison Henderson while (ar->len && 594953accfa9STheodore Ts'o dquot_alloc_block(ar->inode, 595053accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len))) { 595155f020dbSAllison Henderson 5952c9de560dSAlex Tomas ar->flags |= EXT4_MB_HINT_NOPREALLOC; 5953c9de560dSAlex Tomas ar->len--; 5954c9de560dSAlex Tomas } 595555f020dbSAllison Henderson } 595660e58e0fSMingming Cao inquota = ar->len; 5957c9de560dSAlex Tomas if (ar->len == 0) { 5958c9de560dSAlex Tomas *errp = -EDQUOT; 59596c7a120aSAditya Kali goto out; 5960c9de560dSAlex Tomas } 596160e58e0fSMingming Cao } 5962d2a17637SMingming Cao 596385556c9aSWei Yongjun ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 5964833576b3STheodore Ts'o if (!ac) { 5965363d4251SShen Feng ar->len = 0; 5966256bdb49SEric Sandeen *errp = -ENOMEM; 59676c7a120aSAditya Kali goto out; 5968256bdb49SEric Sandeen } 5969256bdb49SEric Sandeen 5970d73eff68SGuoqing Jiang ext4_mb_initialize_context(ac, ar); 5971c9de560dSAlex Tomas 
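/*
 * Allocation proper: try to satisfy the request from an existing
 * preallocation first. If that fails, normalize the request, set up a
 * fresh preallocation space and fall through to the regular allocator.
 * The discard_pa_seq snapshot taken below is what
 * ext4_mb_discard_preallocations_should_retry() compares against when
 * deciding whether another pass is worth trying.
 */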
5972256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
597381198536SRitesh Harjani seq = this_cpu_read(discard_pa_seq);
5974256bdb49SEric Sandeen if (!ext4_mb_use_preallocated(ac)) {
5975256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5976256bdb49SEric Sandeen ext4_mb_normalize_request(ac, ar);
597753f86b17SRitesh Harjani 
597853f86b17SRitesh Harjani *errp = ext4_mb_pa_alloc(ac);
597953f86b17SRitesh Harjani if (*errp)
598053f86b17SRitesh Harjani goto errout;
5981c9de560dSAlex Tomas repeat:
5982c9de560dSAlex Tomas /* allocate space in core */
59836c7a120aSAditya Kali *errp = ext4_mb_regular_allocator(ac);
598453f86b17SRitesh Harjani /*
598553f86b17SRitesh Harjani * The pa allocated above is added to grp->bb_prealloc_list only
598653f86b17SRitesh Harjani * when we were able to allocate some blocks, i.e. when
598753f86b17SRitesh Harjani * ac->ac_status == AC_STATUS_FOUND.
598853f86b17SRitesh Harjani * An error from above means ac->ac_status != AC_STATUS_FOUND,
598953f86b17SRitesh Harjani * so we have to free this pa here ourselves.
599053f86b17SRitesh Harjani */
59912c00ef3eSAlexey Khoroshilov if (*errp) {
599282089725SOjaswin Mujoo ext4_mb_pa_put_free(ac);
59932c00ef3eSAlexey Khoroshilov ext4_discard_allocated_blocks(ac);
59942c00ef3eSAlexey Khoroshilov goto errout;
59952c00ef3eSAlexey Khoroshilov }
599653f86b17SRitesh Harjani if (ac->ac_status == AC_STATUS_FOUND &&
599753f86b17SRitesh Harjani ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
599882089725SOjaswin Mujoo ext4_mb_pa_put_free(ac);
5999c9de560dSAlex Tomas }
6000256bdb49SEric Sandeen if (likely(ac->ac_status == AC_STATUS_FOUND)) {
600153accfa9STheodore Ts'o *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6002554a5cccSVegard Nossum if (*errp) {
6003b844167eSCurt Wohlgemuth ext4_discard_allocated_blocks(ac);
60046d138cedSEric Sandeen goto errout;
60056d138cedSEric Sandeen } else {
6006256bdb49SEric Sandeen block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6007256bdb49SEric Sandeen ar->len = ac->ac_b_ex.fe_len;
6008519deca0SAneesh Kumar K.V }
6009c9de560dSAlex Tomas } else {
601080fa46d6STheodore Ts'o if (++retries < 3 &&
601180fa46d6STheodore Ts'o ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6012c9de560dSAlex Tomas goto repeat;
601353f86b17SRitesh Harjani /*
601453f86b17SRitesh Harjani * If block allocation fails then the pa allocated above
601553f86b17SRitesh Harjani * needs to be freed here as well.
601653f86b17SRitesh Harjani */ 601782089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 6018c9de560dSAlex Tomas *errp = -ENOSPC; 60196c7a120aSAditya Kali } 60206c7a120aSAditya Kali 60216c7a120aSAditya Kali if (*errp) { 6022aaae558dSKemeng Shi errout: 6023256bdb49SEric Sandeen ac->ac_b_ex.fe_len = 0; 6024c9de560dSAlex Tomas ar->len = 0; 6025256bdb49SEric Sandeen ext4_mb_show_ac(ac); 6026c9de560dSAlex Tomas } 6027256bdb49SEric Sandeen ext4_mb_release_context(ac); 6028363d4251SShen Feng kmem_cache_free(ext4_ac_cachep, ac); 6029aaae558dSKemeng Shi out: 603060e58e0fSMingming Cao if (inquota && ar->len < inquota) 603153accfa9STheodore Ts'o dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 60320087d9fbSAneesh Kumar K.V if (!ar->len) { 6033e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 60340087d9fbSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 603557042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 603653accfa9STheodore Ts'o reserv_clstrs); 60370087d9fbSAneesh Kumar K.V } 6038c9de560dSAlex Tomas 60399bffad1eSTheodore Ts'o trace_ext4_allocate_blocks(ar, (unsigned long long)block); 6040ba80b101STheodore Ts'o 6041c9de560dSAlex Tomas return block; 6042c9de560dSAlex Tomas } 6043c9de560dSAlex Tomas 6044c894058dSAneesh Kumar K.V /* 6045c894058dSAneesh Kumar K.V * We can merge two free data extents only if the physical blocks 6046c894058dSAneesh Kumar K.V * are contiguous, AND the extents were freed by the same transaction, 6047c894058dSAneesh Kumar K.V * AND the blocks are associated with the same group. 6048c894058dSAneesh Kumar K.V */ 6049a0154344SDaeho Jeong static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 6050a0154344SDaeho Jeong struct ext4_free_data *entry, 6051a0154344SDaeho Jeong struct ext4_free_data *new_entry, 6052a0154344SDaeho Jeong struct rb_root *entry_rb_root) 6053c894058dSAneesh Kumar K.V { 6054a0154344SDaeho Jeong if ((entry->efd_tid != new_entry->efd_tid) || 6055a0154344SDaeho Jeong (entry->efd_group != new_entry->efd_group)) 6056a0154344SDaeho Jeong return; 6057a0154344SDaeho Jeong if (entry->efd_start_cluster + entry->efd_count == 6058a0154344SDaeho Jeong new_entry->efd_start_cluster) { 6059a0154344SDaeho Jeong new_entry->efd_start_cluster = entry->efd_start_cluster; 6060a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 6061a0154344SDaeho Jeong } else if (new_entry->efd_start_cluster + new_entry->efd_count == 6062a0154344SDaeho Jeong entry->efd_start_cluster) { 6063a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 6064a0154344SDaeho Jeong } else 6065a0154344SDaeho Jeong return; 6066a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 6067a0154344SDaeho Jeong list_del(&entry->efd_list); 6068a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 6069a0154344SDaeho Jeong rb_erase(&entry->efd_node, entry_rb_root); 6070a0154344SDaeho Jeong kmem_cache_free(ext4_free_data_cachep, entry); 6071c894058dSAneesh Kumar K.V } 6072c894058dSAneesh Kumar K.V 607385b67ffbSKemeng Shi static noinline_for_stack void 60744ddfef7bSEric Sandeen ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 60757a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry) 6076c9de560dSAlex Tomas { 6077e29136f8STheodore Ts'o ext4_group_t group = e4b->bd_group; 607884130193STheodore Ts'o ext4_grpblk_t cluster; 6079d08854f5STheodore Ts'o ext4_grpblk_t clusters = new_entry->efd_count; 60807a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 6081c9de560dSAlex Tomas struct ext4_group_info *db = 
e4b->bd_info; 6082c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 6083c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 6084c894058dSAneesh Kumar K.V struct rb_node **n = &db->bb_free_root.rb_node, *node; 6085c894058dSAneesh Kumar K.V struct rb_node *parent = NULL, *new_node; 6086c894058dSAneesh Kumar K.V 60870390131bSFrank Mayhar BUG_ON(!ext4_handle_valid(handle)); 6088c9de560dSAlex Tomas BUG_ON(e4b->bd_bitmap_page == NULL); 6089c9de560dSAlex Tomas BUG_ON(e4b->bd_buddy_page == NULL); 6090c9de560dSAlex Tomas 609118aadd47SBobi Jam new_node = &new_entry->efd_node; 609218aadd47SBobi Jam cluster = new_entry->efd_start_cluster; 6093c9de560dSAlex Tomas 6094c894058dSAneesh Kumar K.V if (!*n) { 6095c894058dSAneesh Kumar K.V /* first free block exent. We need to 6096c894058dSAneesh Kumar K.V protect buddy cache from being freed, 6097c9de560dSAlex Tomas * otherwise we'll refresh it from 6098c9de560dSAlex Tomas * on-disk bitmap and lose not-yet-available 6099c9de560dSAlex Tomas * blocks */ 610009cbfeafSKirill A. Shutemov get_page(e4b->bd_buddy_page); 610109cbfeafSKirill A. Shutemov get_page(e4b->bd_bitmap_page); 6102c894058dSAneesh Kumar K.V } 6103c894058dSAneesh Kumar K.V while (*n) { 6104c894058dSAneesh Kumar K.V parent = *n; 610518aadd47SBobi Jam entry = rb_entry(parent, struct ext4_free_data, efd_node); 610618aadd47SBobi Jam if (cluster < entry->efd_start_cluster) 6107c894058dSAneesh Kumar K.V n = &(*n)->rb_left; 610818aadd47SBobi Jam else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 6109c894058dSAneesh Kumar K.V n = &(*n)->rb_right; 6110c894058dSAneesh Kumar K.V else { 6111e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 611284130193STheodore Ts'o ext4_group_first_block_no(sb, group) + 611384130193STheodore Ts'o EXT4_C2B(sbi, cluster), 6114e29136f8STheodore Ts'o "Block already on to-be-freed list"); 6115cca41553SChunguang Xu kmem_cache_free(ext4_free_data_cachep, new_entry); 611685b67ffbSKemeng Shi return; 6117c9de560dSAlex Tomas } 6118c9de560dSAlex Tomas } 6119c9de560dSAlex Tomas 6120c894058dSAneesh Kumar K.V rb_link_node(new_node, parent, n); 6121c894058dSAneesh Kumar K.V rb_insert_color(new_node, &db->bb_free_root); 6122c894058dSAneesh Kumar K.V 6123c894058dSAneesh Kumar K.V /* Now try to see the extent can be merged to left and right */ 6124c894058dSAneesh Kumar K.V node = rb_prev(new_node); 6125c894058dSAneesh Kumar K.V if (node) { 612618aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 6127a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 6128a0154344SDaeho Jeong &(db->bb_free_root)); 6129c9de560dSAlex Tomas } 6130c894058dSAneesh Kumar K.V 6131c894058dSAneesh Kumar K.V node = rb_next(new_node); 6132c894058dSAneesh Kumar K.V if (node) { 613318aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 6134a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 6135a0154344SDaeho Jeong &(db->bb_free_root)); 6136c894058dSAneesh Kumar K.V } 6137a0154344SDaeho Jeong 6138d08854f5STheodore Ts'o spin_lock(&sbi->s_md_lock); 6139a0154344SDaeho Jeong list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 6140d08854f5STheodore Ts'o sbi->s_mb_free_pending += clusters; 6141d08854f5STheodore Ts'o spin_unlock(&sbi->s_md_lock); 6142c9de560dSAlex Tomas } 6143c9de560dSAlex Tomas 61448016e29fSHarshad Shirwadkar static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 61458016e29fSHarshad Shirwadkar unsigned long count) 61468016e29fSHarshad Shirwadkar { 
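/*
 * Fast-commit replay path: clear the bits directly in the on-disk
 * block bitmap and adjust the group descriptor's free cluster count,
 * bypassing the journal (NULL handle) and the buddy cache, then sync
 * both buffers out.
 */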
61478016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 61488016e29fSHarshad Shirwadkar struct super_block *sb = inode->i_sb; 61498016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 61508016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 61518016e29fSHarshad Shirwadkar ext4_group_t group; 61528016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 61538016e29fSHarshad Shirwadkar int already_freed = 0, err, i; 61548016e29fSHarshad Shirwadkar 61558016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 61568016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 61578016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 61588016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 61598016e29fSHarshad Shirwadkar return; 61608016e29fSHarshad Shirwadkar } 61618016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 61628016e29fSHarshad Shirwadkar if (!gdp) 61631b5c9d34SKemeng Shi goto err_out; 61648016e29fSHarshad Shirwadkar 61658016e29fSHarshad Shirwadkar for (i = 0; i < count; i++) { 61668016e29fSHarshad Shirwadkar if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 61678016e29fSHarshad Shirwadkar already_freed++; 61688016e29fSHarshad Shirwadkar } 61698016e29fSHarshad Shirwadkar mb_clear_bits(bitmap_bh->b_data, blkoff, count); 61708016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 61718016e29fSHarshad Shirwadkar if (err) 61721b5c9d34SKemeng Shi goto err_out; 61738016e29fSHarshad Shirwadkar ext4_free_group_clusters_set( 61748016e29fSHarshad Shirwadkar sb, gdp, ext4_free_group_clusters(sb, gdp) + 61758016e29fSHarshad Shirwadkar count - already_freed); 61761df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 61778016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 61788016e29fSHarshad Shirwadkar ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 61798016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 61808016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 61811b5c9d34SKemeng Shi 61821b5c9d34SKemeng Shi err_out: 61838016e29fSHarshad Shirwadkar brelse(bitmap_bh); 61848016e29fSHarshad Shirwadkar } 61858016e29fSHarshad Shirwadkar 618644338711STheodore Ts'o /** 61878ac3939dSRitesh Harjani * ext4_mb_clear_bb() -- helper function for freeing blocks. 
61888ac3939dSRitesh Harjani * Used by ext4_free_blocks() 618944338711STheodore Ts'o * @handle: handle for this transaction 619044338711STheodore Ts'o * @inode: inode 6191c60990b3STheodore Ts'o * @block: starting physical block to be freed 6192c60990b3STheodore Ts'o * @count: number of blocks to be freed 61935def1360SYongqiang Yang * @flags: flags used by ext4_free_blocks 6194c9de560dSAlex Tomas */ 61958ac3939dSRitesh Harjani static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 61968ac3939dSRitesh Harjani ext4_fsblk_t block, unsigned long count, 61978ac3939dSRitesh Harjani int flags) 6198c9de560dSAlex Tomas { 619926346ff6SAneesh Kumar K.V struct buffer_head *bitmap_bh = NULL; 6200c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 6201c9de560dSAlex Tomas struct ext4_group_desc *gdp; 62025354b2afSTheodore Ts'o struct ext4_group_info *grp; 6203498e5f24STheodore Ts'o unsigned int overflow; 6204c9de560dSAlex Tomas ext4_grpblk_t bit; 6205c9de560dSAlex Tomas struct buffer_head *gd_bh; 6206c9de560dSAlex Tomas ext4_group_t block_group; 6207c9de560dSAlex Tomas struct ext4_sb_info *sbi; 6208c9de560dSAlex Tomas struct ext4_buddy e4b; 620984130193STheodore Ts'o unsigned int count_clusters; 6210c9de560dSAlex Tomas int err = 0; 6211c9de560dSAlex Tomas int ret; 6212c9de560dSAlex Tomas 62138016e29fSHarshad Shirwadkar sbi = EXT4_SB(sb); 62148016e29fSHarshad Shirwadkar 62151e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 62161e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 62171e1c2b86SLukas Czerner ext4_error(sb, "Freeing blocks in system zone - " 62181e1c2b86SLukas Czerner "Block = %llu, count = %lu", block, count); 62191e1c2b86SLukas Czerner /* err = 0. ext4_std_error should be a no op */ 62201e1c2b86SLukas Czerner goto error_return; 62211e1c2b86SLukas Czerner } 62221e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 62231e1c2b86SLukas Czerner 6224c9de560dSAlex Tomas do_more: 6225c9de560dSAlex Tomas overflow = 0; 6226c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6227c9de560dSAlex Tomas 62285354b2afSTheodore Ts'o grp = ext4_get_group_info(sb, block_group); 62295354b2afSTheodore Ts'o if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 6230163a203dSDarrick J. Wong return; 6231163a203dSDarrick J. Wong 6232c9de560dSAlex Tomas /* 6233c9de560dSAlex Tomas * Check to see if we are freeing blocks across a group 6234c9de560dSAlex Tomas * boundary. 6235c9de560dSAlex Tomas */ 623684130193STheodore Ts'o if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 623784130193STheodore Ts'o overflow = EXT4_C2B(sbi, bit) + count - 623884130193STheodore Ts'o EXT4_BLOCKS_PER_GROUP(sb); 6239c9de560dSAlex Tomas count -= overflow; 62401e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 62411e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6242c9de560dSAlex Tomas } 6243810da240SLukas Czerner count_clusters = EXT4_NUM_B2C(sbi, count); 6244574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, block_group); 62459008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 62469008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 62479008a58eSDarrick J. 
Wong bitmap_bh = NULL; 6248c9de560dSAlex Tomas goto error_return; 6249ce89f46cSAneesh Kumar K.V } 6250c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 6251ce89f46cSAneesh Kumar K.V if (!gdp) { 6252ce89f46cSAneesh Kumar K.V err = -EIO; 6253c9de560dSAlex Tomas goto error_return; 6254ce89f46cSAneesh Kumar K.V } 6255c9de560dSAlex Tomas 62561e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 62571e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 625812062dddSEric Sandeen ext4_error(sb, "Freeing blocks in system zone - " 62590610b6e9STheodore Ts'o "Block = %llu, count = %lu", block, count); 6260519deca0SAneesh Kumar K.V /* err = 0. ext4_std_error should be a no op */ 6261519deca0SAneesh Kumar K.V goto error_return; 6262c9de560dSAlex Tomas } 6263c9de560dSAlex Tomas 6264c9de560dSAlex Tomas BUFFER_TRACE(bitmap_bh, "getting write access"); 6265188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6266188c299eSJan Kara EXT4_JTR_NONE); 6267c9de560dSAlex Tomas if (err) 6268c9de560dSAlex Tomas goto error_return; 6269c9de560dSAlex Tomas 6270c9de560dSAlex Tomas /* 6271c9de560dSAlex Tomas * We are about to modify some metadata. Call the journal APIs 6272c9de560dSAlex Tomas * to unshare ->b_data if a currently-committing transaction is 6273c9de560dSAlex Tomas * using it 6274c9de560dSAlex Tomas */ 6275c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "get_write_access"); 6276188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6277c9de560dSAlex Tomas if (err) 6278c9de560dSAlex Tomas goto error_return; 6279c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 6280c9de560dSAlex Tomas { 6281c9de560dSAlex Tomas int i; 628284130193STheodore Ts'o for (i = 0; i < count_clusters; i++) 6283c9de560dSAlex Tomas BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 6284c9de560dSAlex Tomas } 6285c9de560dSAlex Tomas #endif 628684130193STheodore Ts'o trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6287c9de560dSAlex Tomas 6288adb7ef60SKonstantin Khlebnikov /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6289adb7ef60SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6290adb7ef60SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 6291920313a7SAneesh Kumar K.V if (err) 6292920313a7SAneesh Kumar K.V goto error_return; 6293e6362609STheodore Ts'o 6294f96c450dSDaeho Jeong /* 6295f96c450dSDaeho Jeong * We need to make sure we don't reuse the freed block until after the 6296f96c450dSDaeho Jeong * transaction is committed. We make an exception if the inode is to be 6297f96c450dSDaeho Jeong * written in writeback mode since writeback mode has weak data 6298f96c450dSDaeho Jeong * consistency guarantees. 6299f96c450dSDaeho Jeong */ 6300f96c450dSDaeho Jeong if (ext4_handle_valid(handle) && 6301f96c450dSDaeho Jeong ((flags & EXT4_FREE_BLOCKS_METADATA) || 6302f96c450dSDaeho Jeong !ext4_should_writeback_data(inode))) { 63037a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry; 63047a2fcbf7SAneesh Kumar K.V /* 63057444a072SMichal Hocko * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 63067444a072SMichal Hocko * to fail. 
63077a2fcbf7SAneesh Kumar K.V */ 63087444a072SMichal Hocko new_entry = kmem_cache_alloc(ext4_free_data_cachep, 63097444a072SMichal Hocko GFP_NOFS|__GFP_NOFAIL); 631018aadd47SBobi Jam new_entry->efd_start_cluster = bit; 631118aadd47SBobi Jam new_entry->efd_group = block_group; 631218aadd47SBobi Jam new_entry->efd_count = count_clusters; 631318aadd47SBobi Jam new_entry->efd_tid = handle->h_transaction->t_tid; 6314955ce5f5SAneesh Kumar K.V 63157a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, block_group); 631684130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 63177a2fcbf7SAneesh Kumar K.V ext4_mb_free_metadata(handle, &e4b, new_entry); 6318c9de560dSAlex Tomas } else { 63197a2fcbf7SAneesh Kumar K.V /* need to update group_info->bb_free and bitmap 63207a2fcbf7SAneesh Kumar K.V * with group lock held. generate_buddy look at 63217a2fcbf7SAneesh Kumar K.V * them with group lock_held 63227a2fcbf7SAneesh Kumar K.V */ 6323d71c1ae2SLukas Czerner if (test_opt(sb, DISCARD)) { 6324247c3d21SKemeng Shi err = ext4_issue_discard(sb, block_group, bit, 6325247c3d21SKemeng Shi count_clusters, NULL); 6326d71c1ae2SLukas Czerner if (err && err != -EOPNOTSUPP) 6327d71c1ae2SLukas Czerner ext4_msg(sb, KERN_WARNING, "discard request in" 6328a00b482bSRitesh Harjani " group:%u block:%d count:%lu failed" 6329d71c1ae2SLukas Czerner " with %d", block_group, bit, count, 6330d71c1ae2SLukas Czerner err); 63318f9ff189SLukas Czerner } else 63328f9ff189SLukas Czerner EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6333d71c1ae2SLukas Czerner 6334955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, block_group); 633584130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 633684130193STheodore Ts'o mb_free_blocks(inode, &e4b, bit, count_clusters); 6337c9de560dSAlex Tomas } 6338c9de560dSAlex Tomas 6339021b65bbSTheodore Ts'o ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6340021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, ret); 63411df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6342feb0ab32SDarrick J. Wong ext4_group_desc_csum_set(sb, block_group, gdp); 6343955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, block_group); 6344c9de560dSAlex Tomas 6345772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 6346772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 634790ba983fSTheodore Ts'o atomic64_add(count_clusters, 63487c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 63497c990728SSuraj Jitindar Singh flex_group)->free_clusters); 6350772cb7c8SJose R. Santos } 6351772cb7c8SJose R. 
Santos 63529fe67149SEric Whitney /* 63539fe67149SEric Whitney * on a bigalloc file system, defer the s_freeclusters_counter 63549fe67149SEric Whitney * update to the caller (ext4_remove_space and friends) so they 63559fe67149SEric Whitney * can determine if a cluster freed here should be rereserved 63569fe67149SEric Whitney */ 63579fe67149SEric Whitney if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 63587b415bf6SAditya Kali if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 63597b415bf6SAditya Kali dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 63609fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 63619fe67149SEric Whitney count_clusters); 63629fe67149SEric Whitney } 63637d734532SJan Kara 63647d734532SJan Kara ext4_mb_unload_buddy(&e4b); 63657b415bf6SAditya Kali 63667a2fcbf7SAneesh Kumar K.V /* We dirtied the bitmap block */ 63677a2fcbf7SAneesh Kumar K.V BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 63687a2fcbf7SAneesh Kumar K.V err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 63697a2fcbf7SAneesh Kumar K.V 6370c9de560dSAlex Tomas /* And the group descriptor block */ 6371c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 63720390131bSFrank Mayhar ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6373c9de560dSAlex Tomas if (!err) 6374c9de560dSAlex Tomas err = ret; 6375c9de560dSAlex Tomas 6376c9de560dSAlex Tomas if (overflow && !err) { 6377c9de560dSAlex Tomas block += count; 6378c9de560dSAlex Tomas count = overflow; 6379c9de560dSAlex Tomas put_bh(bitmap_bh); 63801e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 63811e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6382c9de560dSAlex Tomas goto do_more; 6383c9de560dSAlex Tomas } 6384c9de560dSAlex Tomas error_return: 6385c9de560dSAlex Tomas brelse(bitmap_bh); 6386c9de560dSAlex Tomas ext4_std_error(sb, err); 6387c9de560dSAlex Tomas return; 6388c9de560dSAlex Tomas } 63897360d173SLukas Czerner 63907360d173SLukas Czerner /** 63918ac3939dSRitesh Harjani * ext4_free_blocks() -- Free given blocks and update quota 63928ac3939dSRitesh Harjani * @handle: handle for this transaction 63938ac3939dSRitesh Harjani * @inode: inode 63948ac3939dSRitesh Harjani * @bh: optional buffer of the block to be freed 63958ac3939dSRitesh Harjani * @block: starting physical block to be freed 63968ac3939dSRitesh Harjani * @count: number of blocks to be freed 63978ac3939dSRitesh Harjani * @flags: flags used by ext4_free_blocks 63988ac3939dSRitesh Harjani */ 63998ac3939dSRitesh Harjani void ext4_free_blocks(handle_t *handle, struct inode *inode, 64008ac3939dSRitesh Harjani struct buffer_head *bh, ext4_fsblk_t block, 64018ac3939dSRitesh Harjani unsigned long count, int flags) 64028ac3939dSRitesh Harjani { 64038ac3939dSRitesh Harjani struct super_block *sb = inode->i_sb; 64048ac3939dSRitesh Harjani unsigned int overflow; 64058ac3939dSRitesh Harjani struct ext4_sb_info *sbi; 64068ac3939dSRitesh Harjani 64078ac3939dSRitesh Harjani sbi = EXT4_SB(sb); 64088ac3939dSRitesh Harjani 64098ac3939dSRitesh Harjani if (bh) { 64108ac3939dSRitesh Harjani if (block) 64118ac3939dSRitesh Harjani BUG_ON(block != bh->b_blocknr); 64128ac3939dSRitesh Harjani else 64138ac3939dSRitesh Harjani block = bh->b_blocknr; 64148ac3939dSRitesh Harjani } 64158ac3939dSRitesh Harjani 641611b6890bSKemeng Shi if (sbi->s_mount_state & EXT4_FC_REPLAY) { 64172ec6d0a5SKemeng Shi ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count)); 641811b6890bSKemeng Shi return; 641911b6890bSKemeng Shi } 
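/*
 * From here on EXT4_FREE_BLOCKS_VALIDATED tracks whether the current
 * (block, count) range has been checked against the system zones; it
 * is cleared again further down whenever the range gets adjusted to
 * cluster boundaries.
 */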
642011b6890bSKemeng Shi 642111b6890bSKemeng Shi might_sleep(); 642211b6890bSKemeng Shi 64238ac3939dSRitesh Harjani if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 64248ac3939dSRitesh Harjani !ext4_inode_block_valid(inode, block, count)) { 64258ac3939dSRitesh Harjani ext4_error(sb, "Freeing blocks not in datazone - " 64268ac3939dSRitesh Harjani "block = %llu, count = %lu", block, count); 64278ac3939dSRitesh Harjani return; 64288ac3939dSRitesh Harjani } 64291e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 64308ac3939dSRitesh Harjani 64318ac3939dSRitesh Harjani ext4_debug("freeing block %llu\n", block); 64328ac3939dSRitesh Harjani trace_ext4_free_blocks(inode, block, count, flags); 64338ac3939dSRitesh Harjani 64348ac3939dSRitesh Harjani if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 64358ac3939dSRitesh Harjani BUG_ON(count > 1); 64368ac3939dSRitesh Harjani 64378ac3939dSRitesh Harjani ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 64388ac3939dSRitesh Harjani inode, bh, block); 64398ac3939dSRitesh Harjani } 64408ac3939dSRitesh Harjani 64418ac3939dSRitesh Harjani /* 64428ac3939dSRitesh Harjani * If the extent to be freed does not begin on a cluster 64438ac3939dSRitesh Harjani * boundary, we need to deal with partial clusters at the 64448ac3939dSRitesh Harjani * beginning and end of the extent. Normally we will free 64458ac3939dSRitesh Harjani * blocks at the beginning or the end unless we are explicitly 64468ac3939dSRitesh Harjani * requested to avoid doing so. 64478ac3939dSRitesh Harjani */ 64488ac3939dSRitesh Harjani overflow = EXT4_PBLK_COFF(sbi, block); 64498ac3939dSRitesh Harjani if (overflow) { 64508ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 64518ac3939dSRitesh Harjani overflow = sbi->s_cluster_ratio - overflow; 64528ac3939dSRitesh Harjani block += overflow; 64538ac3939dSRitesh Harjani if (count > overflow) 64548ac3939dSRitesh Harjani count -= overflow; 64558ac3939dSRitesh Harjani else 64568ac3939dSRitesh Harjani return; 64578ac3939dSRitesh Harjani } else { 64588ac3939dSRitesh Harjani block -= overflow; 64598ac3939dSRitesh Harjani count += overflow; 64608ac3939dSRitesh Harjani } 64611e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 64621e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 64638ac3939dSRitesh Harjani } 64648ac3939dSRitesh Harjani overflow = EXT4_LBLK_COFF(sbi, count); 64658ac3939dSRitesh Harjani if (overflow) { 64668ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 64678ac3939dSRitesh Harjani if (count > overflow) 64688ac3939dSRitesh Harjani count -= overflow; 64698ac3939dSRitesh Harjani else 64708ac3939dSRitesh Harjani return; 64718ac3939dSRitesh Harjani } else 64728ac3939dSRitesh Harjani count += sbi->s_cluster_ratio - overflow; 64731e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 64741e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 64758ac3939dSRitesh Harjani } 64768ac3939dSRitesh Harjani 64778ac3939dSRitesh Harjani if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 64788ac3939dSRitesh Harjani int i; 64798ac3939dSRitesh Harjani int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 64808ac3939dSRitesh Harjani 64818ac3939dSRitesh Harjani for (i = 0; i < count; i++) { 64828ac3939dSRitesh Harjani cond_resched(); 64838ac3939dSRitesh Harjani if (is_metadata) 64848ac3939dSRitesh Harjani bh = sb_find_get_block(inode->i_sb, block + i); 64858ac3939dSRitesh Harjani ext4_forget(handle, is_metadata, inode, bh, block + i); 64868ac3939dSRitesh 
Harjani } 64878ac3939dSRitesh Harjani } 64888ac3939dSRitesh Harjani 64898ac3939dSRitesh Harjani ext4_mb_clear_bb(handle, inode, block, count, flags); 64908ac3939dSRitesh Harjani return; 64918ac3939dSRitesh Harjani } 64928ac3939dSRitesh Harjani 64938ac3939dSRitesh Harjani /** 64940529155eSYongqiang Yang * ext4_group_add_blocks() -- Add given blocks to an existing group 64952846e820SAmir Goldstein * @handle: handle to this transaction 64962846e820SAmir Goldstein * @sb: super block 64974907cb7bSAnatol Pomozov * @block: start physical block to add to the block group 64982846e820SAmir Goldstein * @count: number of blocks to free 64992846e820SAmir Goldstein * 6500e73a347bSAmir Goldstein * This marks the blocks as free in the bitmap and buddy. 65012846e820SAmir Goldstein */ 6502cc7365dfSYongqiang Yang int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 65032846e820SAmir Goldstein ext4_fsblk_t block, unsigned long count) 65042846e820SAmir Goldstein { 65052846e820SAmir Goldstein struct buffer_head *bitmap_bh = NULL; 65062846e820SAmir Goldstein struct buffer_head *gd_bh; 65072846e820SAmir Goldstein ext4_group_t block_group; 65082846e820SAmir Goldstein ext4_grpblk_t bit; 65092846e820SAmir Goldstein unsigned int i; 65102846e820SAmir Goldstein struct ext4_group_desc *desc; 65112846e820SAmir Goldstein struct ext4_sb_info *sbi = EXT4_SB(sb); 6512e73a347bSAmir Goldstein struct ext4_buddy e4b; 6513d77147ffSharshads int err = 0, ret, free_clusters_count; 6514d77147ffSharshads ext4_grpblk_t clusters_freed; 6515d77147ffSharshads ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6516d77147ffSharshads ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6517d77147ffSharshads unsigned long cluster_count = last_cluster - first_cluster + 1; 65182846e820SAmir Goldstein 65192846e820SAmir Goldstein ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 65202846e820SAmir Goldstein 65214740b830SYongqiang Yang if (count == 0) 65224740b830SYongqiang Yang return 0; 65234740b830SYongqiang Yang 65242846e820SAmir Goldstein ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 65252846e820SAmir Goldstein /* 65262846e820SAmir Goldstein * Check to see if we are freeing blocks across a group 65272846e820SAmir Goldstein * boundary. 65282846e820SAmir Goldstein */ 6529d77147ffSharshads if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6530d77147ffSharshads ext4_warning(sb, "too many blocks added to group %u", 6531cc7365dfSYongqiang Yang block_group); 6532cc7365dfSYongqiang Yang err = -EINVAL; 65332846e820SAmir Goldstein goto error_return; 6534cc7365dfSYongqiang Yang } 65352cd05cc3STheodore Ts'o 65362846e820SAmir Goldstein bitmap_bh = ext4_read_block_bitmap(sb, block_group); 65379008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 65389008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 65399008a58eSDarrick J. 
Wong bitmap_bh = NULL; 65402846e820SAmir Goldstein goto error_return; 6541cc7365dfSYongqiang Yang } 6542cc7365dfSYongqiang Yang 65432846e820SAmir Goldstein desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6544cc7365dfSYongqiang Yang if (!desc) { 6545cc7365dfSYongqiang Yang err = -EIO; 65462846e820SAmir Goldstein goto error_return; 6547cc7365dfSYongqiang Yang } 65482846e820SAmir Goldstein 6549a00b482bSRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, count)) { 65502846e820SAmir Goldstein ext4_error(sb, "Adding blocks in system zones - " 65512846e820SAmir Goldstein "Block = %llu, count = %lu", 65522846e820SAmir Goldstein block, count); 6553cc7365dfSYongqiang Yang err = -EINVAL; 65542846e820SAmir Goldstein goto error_return; 65552846e820SAmir Goldstein } 65562846e820SAmir Goldstein 65572cd05cc3STheodore Ts'o BUFFER_TRACE(bitmap_bh, "getting write access"); 6558188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6559188c299eSJan Kara EXT4_JTR_NONE); 65602846e820SAmir Goldstein if (err) 65612846e820SAmir Goldstein goto error_return; 65622846e820SAmir Goldstein 65632846e820SAmir Goldstein /* 65642846e820SAmir Goldstein * We are about to modify some metadata. Call the journal APIs 65652846e820SAmir Goldstein * to unshare ->b_data if a currently-committing transaction is 65662846e820SAmir Goldstein * using it 65672846e820SAmir Goldstein */ 65682846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "get_write_access"); 6569188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 65702846e820SAmir Goldstein if (err) 65712846e820SAmir Goldstein goto error_return; 6572e73a347bSAmir Goldstein 6573d77147ffSharshads for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 65742846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "clear bit"); 6575e73a347bSAmir Goldstein if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 65762846e820SAmir Goldstein ext4_error(sb, "bit already cleared for block %llu", 65772846e820SAmir Goldstein (ext4_fsblk_t)(block + i)); 65782846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "bit already cleared"); 65792846e820SAmir Goldstein } else { 6580d77147ffSharshads clusters_freed++; 65812846e820SAmir Goldstein } 65822846e820SAmir Goldstein } 6583e73a347bSAmir Goldstein 6584e73a347bSAmir Goldstein err = ext4_mb_load_buddy(sb, block_group, &e4b); 6585e73a347bSAmir Goldstein if (err) 6586e73a347bSAmir Goldstein goto error_return; 6587e73a347bSAmir Goldstein 6588e73a347bSAmir Goldstein /* 6589e73a347bSAmir Goldstein * need to update group_info->bb_free and bitmap 6590e73a347bSAmir Goldstein * with group lock held. generate_buddy look at 6591e73a347bSAmir Goldstein * them with group lock_held 6592e73a347bSAmir Goldstein */ 65932846e820SAmir Goldstein ext4_lock_group(sb, block_group); 6594d77147ffSharshads mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6595d77147ffSharshads mb_free_blocks(NULL, &e4b, bit, cluster_count); 6596d77147ffSharshads free_clusters_count = clusters_freed + 6597d77147ffSharshads ext4_free_group_clusters(sb, desc); 6598d77147ffSharshads ext4_free_group_clusters_set(sb, desc, free_clusters_count); 65991df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, desc, bitmap_bh); 6600feb0ab32SDarrick J. 
/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb: super block for the file system
 * @start: starting block of the free extent in the alloc. group
 * @count: number of blocks to TRIM
 * @e4b: ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static int ext4_trim_extent(struct super_block *sb,
		int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	ext4_group_t group = e4b->bd_group;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count;
	void *bitmap;

	bitmap = e4b->bd_bitmap;
	start = (e4b->bd_info->bb_first_free > start) ?
		e4b->bd_info->bb_first_free : start;
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				break;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	return count;
}

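/*
 * Illustrative sketch, not part of mballoc: the bitmap walk used by
 * ext4_try_to_trim_range() above (and again by ext4_mballoc_query_range()
 * below) reduced to its core.  Each iteration finds one free run
 * [start, next): mb_find_next_zero_bit() locates the first clear bit and
 * mb_find_next_bit() locates the set bit that ends the run.  The visit()
 * callback and the helper name are hypothetical.
 */
#if 0
static void example_walk_free_runs(void *bitmap, ext4_grpblk_t start,
				   ext4_grpblk_t max,
				   void (*visit)(ext4_grpblk_t run_start,
						 ext4_grpblk_t run_len))
{
	ext4_grpblk_t next;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);
		visit(start, next - start);
		start = next + 1;
	}
}
#endif
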
/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb: super block for file system
 * @group: group to be trimmed
 * @start: first group block to examine
 * @max: last group block to examine
 * @minblocks: minimum extent block count
 * @set_trimmed: set the trimmed flag if at least one block is trimmed
 *
 * ext4_trim_all_free walks through the group's block bitmap searching for
 * free extents. When a free extent is found, it is marked as used in the
 * group buddy bitmap. Then a TRIM command is issued on the extent and the
 * extent is freed again in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks, bool set_trimmed)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
		if (ret >= 0 && set_trimmed)
			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
	} else {
		ret = 0;
	}

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}

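/*
 * Illustrative sketch, not part of mballoc: the "skip already-trimmed groups"
 * test from ext4_trim_all_free() in isolation.  A group whose TRIMMED flag is
 * set is only rescanned when the current request asks for a smaller minimum
 * extent than the one used for the last successful trim.  The helper name is
 * hypothetical.
 */
#if 0
static bool example_group_needs_trim(struct super_block *sb,
				     struct ext4_group_info *grp,
				     ext4_grpblk_t minblocks)
{
	return !EXT4_MB_GRP_WAS_TRIMMED(grp) ||
	       minblocks < EXT4_SB(sb)->s_last_trim_minblks;
}
#endif
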
/**
 * ext4_trim_fs() -- trim ioctl handler function
 * @sb: superblock for filesystem
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 * ext4_trim_fs goes through all allocation groups containing bytes from
 * start to start+len. For each such group, ext4_trim_all_free() is invoked
 * to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	bool whole_group, eof = false;
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point in trying to trim less than the discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				      discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
	if (end >= max_blks - 1) {
		end = max_blks - 1;
		eof = true;
	}
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	whole_group = true;

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		if (!grp)
			continue;
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all groups except the last one, the last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
		 * change it for the last group; note that last_cluster was
		 * already computed earlier by ext4_get_group_no_and_offset().
		 */
		if (group == last_group) {
			end = last_cluster;
			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
		}
		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen, whole_group);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}

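/*
 * Illustrative sketch, not part of mballoc: roughly how an ioctl-level caller
 * (the real FITRIM handler lives in ext4's ioctl code, not in this file) could
 * fill in a struct fstrim_range and call ext4_trim_fs().  All fields are in
 * bytes; the helper name and the 1 MiB minimum are made up for illustration.
 * On success, range.len is rewritten to the number of bytes actually trimmed.
 */
#if 0
static int example_trim_whole_fs(struct super_block *sb)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ULLONG_MAX,		/* consider the whole device */
		.minlen = 1024 * 1024,		/* skip runs shorter than 1 MiB */
	};
	int ret = ext4_trim_fs(sb, &range);

	if (!ret)
		pr_info("ext4 example: trimmed %llu bytes\n", range.len);
	return ret;
}
#endif
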
/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
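
/*
 * Illustrative sketch, not part of mballoc: a minimal formatter callback for
 * ext4_mballoc_query_range(), in the spirit of the code that consumes it for
 * GETFSMAP.  It simply accumulates the number of free clusters into a
 * caller-owned counter passed through @priv.  Both helper names are
 * hypothetical.  Note the iterator drops the group lock around each callback,
 * so the formatter may sleep.
 */
#if 0
static int example_count_free(struct super_block *sb, ext4_group_t group,
			      ext4_grpblk_t start, ext4_grpblk_t len,
			      void *priv)
{
	*(ext4_grpblk_t *)priv += len;	/* accumulate free clusters */
	return 0;			/* non-zero stops the iteration */
}

static int example_sum_group_free(struct super_block *sb, ext4_group_t group,
				  ext4_grpblk_t *out)
{
	*out = 0;
	return ext4_mballoc_query_range(sb, group, 0,
					EXT4_CLUSTERS_PER_GROUP(sb) - 1,
					example_count_free, out);
}
#endif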