// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near the
 * specified goal block.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * group preallocation. The default value of s_mb_stream_request is 16
 * blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to ensure that small files are placed closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used by looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used by checking whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy and involves the block
 * bitmap and the buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for 'count' blocks in the buddy cache. If we are able to locate
 * that many free blocks we return with additional information regarding
 * the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we actually need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain the in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) lists. Group infos are
 *    placed in the appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty
 *    groups, so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the
 * above data structures to decide the order in which groups are to be
 * traversed for fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the
 * data structure (1) above where largest_free_order = order of the request.
 * If that list is empty, we look at the remaining lists in increasing order
 * of largest_free_order. This allows us to perform the CR = 0 lookup in
 * O(1) time.
 *
 * At CR = 1, we only consider groups where the average fragment size is
 * larger than the request size. So, we look up a group which has an average
 * fragment size just above or equal to the request size using our average
 * fragment size group lists (data structure 2) in O(1) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses
 * groups in linear order, which requires O(N) search time for each CR 0
 * and CR 1 phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses a buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe size units. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for
 * best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here. min_to_scan indicates how long mballoc __must__ look
 * for a best extent and max_to_scan indicates how long mballoc __can__
 * look for a best extent among the found extents. Searching for blocks
 * starts with the group specified as the goal value in the allocation
 * context via ac_g_ex. Each group is first checked based on the criteria
 * whether it can be used for allocation. ext4_mb_good_group explains how
 * the groups are checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations
 * not being close to each other. And so, the underlying device may get
 * filled up in a non-linear fashion. While that may not matter on
 * non-rotational devices, for rotational devices it may result in higher
 * seek times. "mb_linear_limit" tells mballoc how many groups it should
 * search linearly before consulting the above data structures for more
 * efficient lookups. For non-rotational devices, this value defaults to 0
 * and for rotational devices it is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as described above. So for
 * the first request we will hit the buddy cache, which results in this
 * prealloc space getting filled. The prealloc space is then later used
 * for subsequent requests.
 */
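/*
 * A worked example of the buddy cache layout above (illustrative only,
 * assuming 4K pages and 1K blocks): logical block 2*g of the buddy cache
 * inode holds group g's bitmap and block 2*g + 1 holds its buddy.  With
 * blocks_per_page = 4 we get groups_per_page = 2, so group g's bitmap
 * lives in page (2 * g) / blocks_per_page at byte offset
 * ((2 * g) % blocks_per_page) << sb->s_blocksize_bits:
 *
 *	page 0: [ g0 bitmap ][ g0 buddy ][ g1 bitmap ][ g1 buddy ]
 *	page 1: [ g2 bitmap ][ g2 buddy ][ g3 bitmap ][ g3 buddy ]
 *	...
 */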
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes a part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is
 * either free or used in ALL structures. notice: "any time" should not
 * be read literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *       is used in the real operation, because we can't know the actual
 *       used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be
 * atomic. given that some of them can block, we'd have to use something
 * like semaphores, killing performance on high-end SMP hardware. let's
 * try to relax it using the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the
 *     on-disk bitmap has a bit set and a PA claims the same block, it's
 *     OK. IOW, one can set a bit in the on-disk bitmap if the buddy has
 *     the same bit set and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to
 *      avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data. given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different
 *      blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA is referenced and, while it is, no discard is possible
 *  - a PA is referenced until its block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 */
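/*
 * A small numeric walk through the accounting rules above (illustrative
 * only): suppose a group has 100 clusters marked used on disk and an
 * inode PA of N = 16 clusters is carved out of the buddy:
 *
 *	init buddy:		buddy used = 100 + 0	(on-disk + PAs)
 *	new PA (N = 16):	buddy used = 116; pa_free = 16
 *	use inode PA (N = 4):	on-disk used = 104; pa_free = 12
 *	discard inode PA:	buddy used -= 12 -> 104; pa_free = 0
 *
 * after every step each cluster counted used in the buddy is used either
 * on disk or by a PA -- exactly the "buddy = on-disk + PAs" invariant.
 */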
/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr0 lists lock		(cr0)
 *  - cr1 tree lock		(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus, using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}
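/*
 * A simplified sketch of the retry pattern this counter enables (the
 * real logic lives in ext4_mb_new_blocks() and the discard path; the
 * shape below is illustrative, not the exact upstream code):
 *
 *	seq = this_cpu_read(discard_pa_seq);	// cheap fast-path sample
 *	... attempt allocation ...
 *	// allocation failed and discarding freed 0 blocks:
 *	cur = ext4_get_discard_pa_seq_sum();	// all-cpu sample
 *	if (cur != seq) {	// PAs were allocated/freed meanwhile
 *		seq = cur;
 *		goto repeat;	// worth retrying the allocation
 *	}
 */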
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}
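/*
 * Worked example for mb_correct_addr_and_bit() on a 64-bit box
 * (illustrative only): for addr = 0x1003 and *bit = 5, addr is rounded
 * down to 0x1000 and the bit number is compensated by
 * (0x1003 & 7) << 3 = 24, giving bit 29.  Bit 29 of the long at 0x1000
 * names the same bit of memory as bit 5 of the byte at 0x1003, so the
 * aligned ext4_*_bit() primitives operate on the intended bit.
 */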
static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
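/*
 * Layout sketch for mb_find_buddy() (assuming 4K blocks, i.e. 32768
 * order-0 bits per group): order 0 is answered straight from the block
 * bitmap, while the higher-order bitmaps are packed back to back inside
 * bd_buddy, with s_mb_offsets[] giving each order's byte offset and
 * s_mb_maxs[] the number of valid bits at that order:
 *
 *	order 0: bd_bitmap,                  max = 32768 bits
 *	order 1: bd_buddy + s_mb_offsets[1], max = 16384 bits
 *	order 2: bd_buddy + s_mb_offsets[2], max =  8192 bits
 *	...
 *
 * with max halving at each order up to MB_NUM_ORDERS(sb) - 1.
 */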
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
				  int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}
#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only a single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks with power-of-2 sizes.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
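/*
 * Worked example (illustrative): a free range starting at block 5 with
 * len 7 is split into maximal aligned power-of-2 chunks:
 *
 *	block 5		-> order 0 (bb_counters[0]++)
 *	blocks 6-7	-> order 1 (clear bit 6 >> 1 = 3 in the order-1 map)
 *	blocks 8-11	-> order 2 (clear bit 8 >> 2 = 2 in the order-2 map)
 *
 * Each iteration picks the largest order permitted by both the alignment
 * of 'first' (the ffs() term) and the remaining length (the fls() term).
 */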
static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups with only 1-block
	 * free extents or for completely empty groups.
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new_order;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
		return;

	new_order = mb_avg_fragment_size_order(sb,
					grp->bb_free / grp->bb_fragments);
	if (new_order == grp->bb_avg_fragment_size_order)
		return;

	if (grp->bb_avg_fragment_size_order != -1) {
		write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
		list_del(&grp->bb_avg_fragment_size_node);
		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	}
	grp->bb_avg_fragment_size_order = new_order;
	write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	list_add_tail(&grp->bb_avg_fragment_size_node,
		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
}
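/*
 * Example of the resulting mapping (mirrors fls(len) - 2 as computed
 * above, clamped to [0, MB_NUM_ORDERS(sb) - 1]):
 *
 *	len = 1..3   -> list 0
 *	len = 4..7   -> list 1
 *	len = 8..15  -> list 2
 *	...
 *
 * so 1-block extents share list 0 instead of getting a list of their own.
 */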
/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter, *grp;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

	grp = NULL;
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		grp = NULL;
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (grp)
			break;
	}

	if (!grp) {
		/* Increment cr and search again */
		*new_cr = 1;
	} else {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
	}
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL, *iter;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
	}

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
			continue;
		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
			continue;
		}
		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
				    bb_avg_fragment_size_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (grp)
			break;
	}

	if (grp) {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
	} else {
		*new_cr = 2;
	}
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= 2)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
	if (!should_optimize_scan(ac))
		goto inc_and_return;

	if (ac->ac_groups_linear_remaining) {
		ac->ac_groups_linear_remaining--;
		goto inc_and_return;
	}

	return group;
inc_and_return:
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates
 *            the next group that the allocator intends to use for
 *            allocation. As output, this field indicates the next group
 *            that should be used as determined by the optimization
 *            functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
		*group = next_linear_group(ac, *group, ngroups);
		return;
	}

	if (*new_cr == 0) {
		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
	} else if (*new_cr == 1) {
		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
	} else {
		/*
		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
		 * bb_free. But until that happens, we should never come here.
		 */
		WARN_ON(1);
	}
}
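/*
 * Usage sketch, simplified from the allocation path in
 * ext4_mb_regular_allocator() (illustrative only, not the exact upstream
 * loop): the allocator asks for the next group to scan at each step and
 * either gets an optimized suggestion back or a bumped cr level:
 *
 *	for (cr = 0; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
 *		ac->ac_criteria = cr;
 *		for (i = 0; i < ngroups; i++) {
 *			ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
 *			if (new_cr != cr)
 *				break;		// restart at the new cr level
 *			... scan 'group' ...
 *		}
 *	}
 */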
1018196e402aSHarshad Shirwadkar */ 1019196e402aSHarshad Shirwadkar WARN_ON(1); 1020196e402aSHarshad Shirwadkar } 1021196e402aSHarshad Shirwadkar } 1022196e402aSHarshad Shirwadkar 10238a57d9d6SCurt Wohlgemuth /* 10248a57d9d6SCurt Wohlgemuth * Cache the order of the largest free extent we have available in this block 10258a57d9d6SCurt Wohlgemuth * group. 10268a57d9d6SCurt Wohlgemuth */ 10278a57d9d6SCurt Wohlgemuth static void 10288a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 10298a57d9d6SCurt Wohlgemuth { 1030196e402aSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 10318a57d9d6SCurt Wohlgemuth int i; 10328a57d9d6SCurt Wohlgemuth 10331940265eSJan Kara for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) 10341940265eSJan Kara if (grp->bb_counters[i] > 0) 10351940265eSJan Kara break; 10361940265eSJan Kara /* No need to move between order lists? */ 10371940265eSJan Kara if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || 10381940265eSJan Kara i == grp->bb_largest_free_order) { 10391940265eSJan Kara grp->bb_largest_free_order = i; 10401940265eSJan Kara return; 10411940265eSJan Kara } 10421940265eSJan Kara 10431940265eSJan Kara if (grp->bb_largest_free_order >= 0) { 1044196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1045196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1046196e402aSHarshad Shirwadkar list_del_init(&grp->bb_largest_free_order_node); 1047196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1048196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1049196e402aSHarshad Shirwadkar } 10508a57d9d6SCurt Wohlgemuth grp->bb_largest_free_order = i; 10511940265eSJan Kara if (grp->bb_largest_free_order >= 0 && grp->bb_free) { 1052196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1053196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1054196e402aSHarshad Shirwadkar list_add_tail(&grp->bb_largest_free_order_node, 1055196e402aSHarshad Shirwadkar &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1056196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1057196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1058196e402aSHarshad Shirwadkar } 10598a57d9d6SCurt Wohlgemuth } 10608a57d9d6SCurt Wohlgemuth 1061089ceeccSEric Sandeen static noinline_for_stack 1062089ceeccSEric Sandeen void ext4_mb_generate_buddy(struct super_block *sb, 1063c9de560dSAlex Tomas void *buddy, void *bitmap, ext4_group_t group) 1064c9de560dSAlex Tomas { 1065c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1066e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 10677137d7a4STheodore Ts'o ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1068a36b4498SEric Sandeen ext4_grpblk_t i = 0; 1069a36b4498SEric Sandeen ext4_grpblk_t first; 1070a36b4498SEric Sandeen ext4_grpblk_t len; 1071c9de560dSAlex Tomas unsigned free = 0; 1072c9de560dSAlex Tomas unsigned fragments = 0; 1073c9de560dSAlex Tomas unsigned long long period = get_cycles(); 1074c9de560dSAlex Tomas 1075c9de560dSAlex Tomas /* initialize buddy from bitmap which is aggregation 1076c9de560dSAlex Tomas * of on-disk bitmap and preallocations */ 1077ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, 0); 1078c9de560dSAlex Tomas grp->bb_first_free = i; 1079c9de560dSAlex Tomas while (i < max) { 1080c9de560dSAlex Tomas fragments++; 1081c9de560dSAlex Tomas first = i; 1082ffad0a44SAneesh Kumar K.V i = mb_find_next_bit(bitmap, max, i); 
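/* bits [first, i) were zero in the bitmap, i.e. one free extent of i - first clusters */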
1083c9de560dSAlex Tomas len = i - first; 1084c9de560dSAlex Tomas free += len; 1085c9de560dSAlex Tomas if (len > 1) 1086c9de560dSAlex Tomas ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1087c9de560dSAlex Tomas else 1088c9de560dSAlex Tomas grp->bb_counters[0]++; 1089c9de560dSAlex Tomas if (i < max) 1090ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, i); 1091c9de560dSAlex Tomas } 1092c9de560dSAlex Tomas grp->bb_fragments = fragments; 1093c9de560dSAlex Tomas 1094c9de560dSAlex Tomas if (free != grp->bb_free) { 1095e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, 109694d4c066STheodore Ts'o "block bitmap and bg descriptor " 109794d4c066STheodore Ts'o "inconsistent: %u vs %u free clusters", 1098e29136f8STheodore Ts'o free, grp->bb_free); 1099e56eb659SAneesh Kumar K.V /* 1100163a203dSDarrick J. Wong * If we intend to continue, we consider the group descriptor 1101e56eb659SAneesh Kumar K.V * corrupt and update bb_free using the bitmap value 1102e56eb659SAneesh Kumar K.V */ 1103c9de560dSAlex Tomas grp->bb_free = free; 1104db79e6d1SWang Shilong ext4_mark_group_bitmap_corrupted(sb, group, 1105db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1106c9de560dSAlex Tomas } 11078a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, grp); 110883e80a6eSJan Kara mb_update_avg_fragment_size(sb, grp); 1109c9de560dSAlex Tomas 1110c9de560dSAlex Tomas clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1111c9de560dSAlex Tomas 1112c9de560dSAlex Tomas period = get_cycles() - period; 111367d25186SHarshad Shirwadkar atomic_inc(&sbi->s_mb_buddies_generated); 111467d25186SHarshad Shirwadkar atomic64_add(period, &sbi->s_mb_generation_time); 1115c9de560dSAlex Tomas } 1116c9de560dSAlex Tomas 1117c9de560dSAlex Tomas /* The buddy information is attached to the buddy cache inode 1118c9de560dSAlex Tomas * for convenience. The information regarding each group 1119c9de560dSAlex Tomas * is loaded via ext4_mb_load_buddy. The information involves 1120c9de560dSAlex Tomas * the block bitmap and buddy information. The information is 1121c9de560dSAlex Tomas * stored in the inode as 1122c9de560dSAlex Tomas * 1123c9de560dSAlex Tomas * { page } 1124c3a326a6SAneesh Kumar K.V * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1125c9de560dSAlex Tomas * 1126c9de560dSAlex Tomas * 1127c9de560dSAlex Tomas * one block each for bitmap and buddy information. 1128c9de560dSAlex Tomas * So for each group we take up 2 blocks. A page can 1129ea1754a0SKirill A. Shutemov * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 1130c9de560dSAlex Tomas * So it can have information regarding groups_per_page which 1131c9de560dSAlex Tomas * is blocks_per_page/2 11328a57d9d6SCurt Wohlgemuth * 11338a57d9d6SCurt Wohlgemuth * Locking note: This routine takes the block group lock of all groups 11348a57d9d6SCurt Wohlgemuth * for this page; do not hold this lock when calling this routine!
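 *
 * For example, assuming a 4k page and a 1k blocksize: blocks_per_page = 4
 * and groups_per_page = 2, so page 0 holds [group 0 bitmap][group 0 buddy]
 * [group 1 bitmap][group 1 buddy], page 1 holds groups 2 and 3, and so on.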
1135c9de560dSAlex Tomas */ 1136c9de560dSAlex Tomas 1137adb7ef60SKonstantin Khlebnikov static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1138c9de560dSAlex Tomas { 11398df9675fSTheodore Ts'o ext4_group_t ngroups; 1140c9de560dSAlex Tomas int blocksize; 1141c9de560dSAlex Tomas int blocks_per_page; 1142c9de560dSAlex Tomas int groups_per_page; 1143c9de560dSAlex Tomas int err = 0; 1144c9de560dSAlex Tomas int i; 1145813e5727STheodore Ts'o ext4_group_t first_group, group; 1146c9de560dSAlex Tomas int first_block; 1147c9de560dSAlex Tomas struct super_block *sb; 1148c9de560dSAlex Tomas struct buffer_head *bhs; 1149fa77dcfaSDarrick J. Wong struct buffer_head **bh = NULL; 1150c9de560dSAlex Tomas struct inode *inode; 1151c9de560dSAlex Tomas char *data; 1152c9de560dSAlex Tomas char *bitmap; 11539b8b7d35SAmir Goldstein struct ext4_group_info *grinfo; 1154c9de560dSAlex Tomas 1155c9de560dSAlex Tomas inode = page->mapping->host; 1156c9de560dSAlex Tomas sb = inode->i_sb; 11578df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 115893407472SFabian Frederick blocksize = i_blocksize(inode); 115909cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / blocksize; 1160c9de560dSAlex Tomas 1161d3df1453SRitesh Harjani mb_debug(sb, "init page %lu\n", page->index); 1162d3df1453SRitesh Harjani 1163c9de560dSAlex Tomas groups_per_page = blocks_per_page >> 1; 1164c9de560dSAlex Tomas if (groups_per_page == 0) 1165c9de560dSAlex Tomas groups_per_page = 1; 1166c9de560dSAlex Tomas 1167c9de560dSAlex Tomas /* allocate buffer_heads to read bitmaps */ 1168c9de560dSAlex Tomas if (groups_per_page > 1) { 1169c9de560dSAlex Tomas i = sizeof(struct buffer_head *) * groups_per_page; 1170adb7ef60SKonstantin Khlebnikov bh = kzalloc(i, gfp); 1171139f46d3SKemeng Shi if (bh == NULL) 1172139f46d3SKemeng Shi return -ENOMEM; 1173c9de560dSAlex Tomas } else 1174c9de560dSAlex Tomas bh = &bhs; 1175c9de560dSAlex Tomas 1176c9de560dSAlex Tomas first_group = page->index * blocks_per_page / 2; 1177c9de560dSAlex Tomas 1178c9de560dSAlex Tomas /* read all groups the page covers into the cache */ 1179813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1180813e5727STheodore Ts'o if (group >= ngroups) 1181c9de560dSAlex Tomas break; 1182c9de560dSAlex Tomas 1183813e5727STheodore Ts'o grinfo = ext4_get_group_info(sb, group); 11849b8b7d35SAmir Goldstein /* 11859b8b7d35SAmir Goldstein * If page is uptodate then we came here after online resize 11869b8b7d35SAmir Goldstein * which added some new uninitialized group info structs, so 11879b8b7d35SAmir Goldstein * we must skip all initialized uptodate buddies on the page, 11889b8b7d35SAmir Goldstein * which may be currently in use by an allocating task. 11899b8b7d35SAmir Goldstein */ 11909b8b7d35SAmir Goldstein if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 11919b8b7d35SAmir Goldstein bh[i] = NULL; 11929b8b7d35SAmir Goldstein continue; 11939b8b7d35SAmir Goldstein } 1194cfd73237SAlex Zhuravlev bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 11959008a58eSDarrick J. Wong if (IS_ERR(bh[i])) { 11969008a58eSDarrick J. Wong err = PTR_ERR(bh[i]); 11979008a58eSDarrick J. 
Wong bh[i] = NULL; 1198c9de560dSAlex Tomas goto out; 11992ccb5fb9SAneesh Kumar K.V } 1200d3df1453SRitesh Harjani mb_debug(sb, "read bitmap for group %u\n", group); 1201c9de560dSAlex Tomas } 1202c9de560dSAlex Tomas 1203c9de560dSAlex Tomas /* wait for I/O completion */ 1204813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 12059008a58eSDarrick J. Wong int err2; 12069008a58eSDarrick J. Wong 12079008a58eSDarrick J. Wong if (!bh[i]) 12089008a58eSDarrick J. Wong continue; 12099008a58eSDarrick J. Wong err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 12109008a58eSDarrick J. Wong if (!err) 12119008a58eSDarrick J. Wong err = err2; 1212813e5727STheodore Ts'o } 1213c9de560dSAlex Tomas 1214c9de560dSAlex Tomas first_block = page->index * blocks_per_page; 1215c9de560dSAlex Tomas for (i = 0; i < blocks_per_page; i++) { 1216c9de560dSAlex Tomas group = (first_block + i) >> 1; 12178df9675fSTheodore Ts'o if (group >= ngroups) 1218c9de560dSAlex Tomas break; 1219c9de560dSAlex Tomas 12209b8b7d35SAmir Goldstein if (!bh[group - first_group]) 12219b8b7d35SAmir Goldstein /* skip initialized uptodate buddy */ 12229b8b7d35SAmir Goldstein continue; 12239b8b7d35SAmir Goldstein 1224bbdc322fSLukas Czerner if (!buffer_verified(bh[group - first_group])) 1225bbdc322fSLukas Czerner /* Skip faulty bitmaps */ 1226bbdc322fSLukas Czerner continue; 1227bbdc322fSLukas Czerner err = 0; 1228bbdc322fSLukas Czerner 1229c9de560dSAlex Tomas /* 1230c9de560dSAlex Tomas * data carries information regarding this 1231c9de560dSAlex Tomas * particular group in the format specified 1232c9de560dSAlex Tomas * above 1233c9de560dSAlex Tomas * 1234c9de560dSAlex Tomas */ 1235c9de560dSAlex Tomas data = page_address(page) + (i * blocksize); 1236c9de560dSAlex Tomas bitmap = bh[group - first_group]->b_data; 1237c9de560dSAlex Tomas 1238c9de560dSAlex Tomas /* 1239c9de560dSAlex Tomas * We place the buddy block and bitmap block 1240c9de560dSAlex Tomas * close together 1241c9de560dSAlex Tomas */ 1242c9de560dSAlex Tomas if ((first_block + i) & 1) { 1243c9de560dSAlex Tomas /* this is block of buddy */ 1244c9de560dSAlex Tomas BUG_ON(incore == NULL); 1245d3df1453SRitesh Harjani mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1246c9de560dSAlex Tomas group, page->index, i * blocksize); 1247f307333eSTheodore Ts'o trace_ext4_mb_buddy_bitmap_load(sb, group); 1248c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, group); 1249c9de560dSAlex Tomas grinfo->bb_fragments = 0; 1250c9de560dSAlex Tomas memset(grinfo->bb_counters, 0, 12511927805eSEric Sandeen sizeof(*grinfo->bb_counters) * 12524b68f6dfSHarshad Shirwadkar (MB_NUM_ORDERS(sb))); 1253c9de560dSAlex Tomas /* 1254c9de560dSAlex Tomas * incore got set to the group block bitmap below 1255c9de560dSAlex Tomas */ 12567a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, group); 12579b8b7d35SAmir Goldstein /* init the buddy */ 12589b8b7d35SAmir Goldstein memset(data, 0xff, blocksize); 1259c9de560dSAlex Tomas ext4_mb_generate_buddy(sb, data, incore, group); 12607a2fcbf7SAneesh Kumar K.V ext4_unlock_group(sb, group); 1261c9de560dSAlex Tomas incore = NULL; 1262c9de560dSAlex Tomas } else { 1263c9de560dSAlex Tomas /* this is block of bitmap */ 1264c9de560dSAlex Tomas BUG_ON(incore != NULL); 1265d3df1453SRitesh Harjani mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1266c9de560dSAlex Tomas group, page->index, i * blocksize); 1267f307333eSTheodore Ts'o trace_ext4_mb_bitmap_load(sb, group); 1268c9de560dSAlex Tomas 1269c9de560dSAlex Tomas /* see comments in ext4_mb_put_pa() */
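/* the group lock keeps the bitmap copy and the preallocation lists stable while they are merged into the in-core bitmap below */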
1270c9de560dSAlex Tomas ext4_lock_group(sb, group); 1271c9de560dSAlex Tomas memcpy(data, bitmap, blocksize); 1272c9de560dSAlex Tomas 1273c9de560dSAlex Tomas /* mark all preallocated blks used in in-core bitmap */ 1274c9de560dSAlex Tomas ext4_mb_generate_from_pa(sb, data, group); 12757a2fcbf7SAneesh Kumar K.V ext4_mb_generate_from_freelist(sb, data, group); 1276c9de560dSAlex Tomas ext4_unlock_group(sb, group); 1277c9de560dSAlex Tomas 1278c9de560dSAlex Tomas /* set incore so that the buddy information can be 1279c9de560dSAlex Tomas * generated using this 1280c9de560dSAlex Tomas */ 1281c9de560dSAlex Tomas incore = data; 1282c9de560dSAlex Tomas } 1283c9de560dSAlex Tomas } 1284c9de560dSAlex Tomas SetPageUptodate(page); 1285c9de560dSAlex Tomas 1286c9de560dSAlex Tomas out: 1287c9de560dSAlex Tomas if (bh) { 12889b8b7d35SAmir Goldstein for (i = 0; i < groups_per_page; i++) 1289c9de560dSAlex Tomas brelse(bh[i]); 1290c9de560dSAlex Tomas if (bh != &bhs) 1291c9de560dSAlex Tomas kfree(bh); 1292c9de560dSAlex Tomas } 1293c9de560dSAlex Tomas return err; 1294c9de560dSAlex Tomas } 1295c9de560dSAlex Tomas 12968a57d9d6SCurt Wohlgemuth /* 12972de8807bSAmir Goldstein * Lock the buddy and bitmap pages. This makes sure other parallel init_group 12982de8807bSAmir Goldstein * on the same buddy page doesn't happen while holding the buddy page lock. 12992de8807bSAmir Goldstein * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 13002de8807bSAmir Goldstein * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 1301eee4adc7SEric Sandeen */ 13022de8807bSAmir Goldstein static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1303adb7ef60SKonstantin Khlebnikov ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1304eee4adc7SEric Sandeen { 13052de8807bSAmir Goldstein struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 13062de8807bSAmir Goldstein int block, pnum, poff; 1307eee4adc7SEric Sandeen int blocks_per_page; 13082de8807bSAmir Goldstein struct page *page; 13092de8807bSAmir Goldstein 13102de8807bSAmir Goldstein e4b->bd_buddy_page = NULL; 13112de8807bSAmir Goldstein e4b->bd_bitmap_page = NULL; 1312eee4adc7SEric Sandeen 131309cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1314eee4adc7SEric Sandeen /* 1315eee4adc7SEric Sandeen * the buddy cache inode stores the block bitmap 1316eee4adc7SEric Sandeen * and buddy information in consecutive blocks. 1317eee4adc7SEric Sandeen * So for each group we need two blocks.
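 * For example, with 4 blocks per page, group 5 gives block = 10, so its
 * bitmap lives in page pnum = 2 at offset poff = 2, with the buddy block
 * right after it on the same page.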
1318eee4adc7SEric Sandeen */ 1319eee4adc7SEric Sandeen block = group * 2; 1320eee4adc7SEric Sandeen pnum = block / blocks_per_page; 13212de8807bSAmir Goldstein poff = block % blocks_per_page; 1322adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13232de8807bSAmir Goldstein if (!page) 1324c57ab39bSYounger Liu return -ENOMEM; 13252de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13262de8807bSAmir Goldstein e4b->bd_bitmap_page = page; 13272de8807bSAmir Goldstein e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1328eee4adc7SEric Sandeen 13292de8807bSAmir Goldstein if (blocks_per_page >= 2) { 13302de8807bSAmir Goldstein /* buddy and bitmap are on the same page */ 13312de8807bSAmir Goldstein return 0; 1332eee4adc7SEric Sandeen } 1333eee4adc7SEric Sandeen 13342de8807bSAmir Goldstein block++; 1335eee4adc7SEric Sandeen pnum = block / blocks_per_page; 1336adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13372de8807bSAmir Goldstein if (!page) 1338c57ab39bSYounger Liu return -ENOMEM; 13392de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13402de8807bSAmir Goldstein e4b->bd_buddy_page = page; 13412de8807bSAmir Goldstein return 0; 1342eee4adc7SEric Sandeen } 1343eee4adc7SEric Sandeen 13442de8807bSAmir Goldstein static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 13452de8807bSAmir Goldstein { 13462de8807bSAmir Goldstein if (e4b->bd_bitmap_page) { 13472de8807bSAmir Goldstein unlock_page(e4b->bd_bitmap_page); 134809cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 13492de8807bSAmir Goldstein } 13502de8807bSAmir Goldstein if (e4b->bd_buddy_page) { 13512de8807bSAmir Goldstein unlock_page(e4b->bd_buddy_page); 135209cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page); 13532de8807bSAmir Goldstein } 1354eee4adc7SEric Sandeen } 1355eee4adc7SEric Sandeen 1356eee4adc7SEric Sandeen /* 13578a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 13588a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 13598a57d9d6SCurt Wohlgemuth * calling this routine! 13608a57d9d6SCurt Wohlgemuth */ 1361b6a758ecSAneesh Kumar K.V static noinline_for_stack 1362adb7ef60SKonstantin Khlebnikov int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1363b6a758ecSAneesh Kumar K.V { 1364b6a758ecSAneesh Kumar K.V 1365b6a758ecSAneesh Kumar K.V struct ext4_group_info *this_grp; 13662de8807bSAmir Goldstein struct ext4_buddy e4b; 13672de8807bSAmir Goldstein struct page *page; 13682de8807bSAmir Goldstein int ret = 0; 1369b6a758ecSAneesh Kumar K.V 1370b10a44c3STheodore Ts'o might_sleep(); 1371d3df1453SRitesh Harjani mb_debug(sb, "init group %u\n", group); 1372b6a758ecSAneesh Kumar K.V this_grp = ext4_get_group_info(sb, group); 1373b6a758ecSAneesh Kumar K.V /* 137408c3a813SAneesh Kumar K.V * This ensures that we don't reinit the buddy cache 137508c3a813SAneesh Kumar K.V * page which maps to the group from which we are already 137608c3a813SAneesh Kumar K.V * allocating. If we are looking at the buddy cache we would 137708c3a813SAneesh Kumar K.V * have taken a reference using ext4_mb_load_buddy and that 13782de8807bSAmir Goldstein * would have pinned the buddy page to the page cache. 13792457aec6SMel Gorman * The call to ext4_mb_get_buddy_page_lock will mark the 13802457aec6SMel Gorman * page accessed.
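 * Serialization is thus provided by the buddy cache page locks: whoever
 * takes them first initializes the page(s), and later callers observe
 * PageUptodate and skip the work.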
1381b6a758ecSAneesh Kumar K.V */ 1382adb7ef60SKonstantin Khlebnikov ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 13832de8807bSAmir Goldstein if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1384b6a758ecSAneesh Kumar K.V /* 1385b6a758ecSAneesh Kumar K.V * somebody initialized the group 1386b6a758ecSAneesh Kumar K.V * return without doing anything 1387b6a758ecSAneesh Kumar K.V */ 1388b6a758ecSAneesh Kumar K.V goto err; 1389b6a758ecSAneesh Kumar K.V } 13902de8807bSAmir Goldstein 13912de8807bSAmir Goldstein page = e4b.bd_bitmap_page; 1392adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 13932de8807bSAmir Goldstein if (ret) 1394b6a758ecSAneesh Kumar K.V goto err; 13952de8807bSAmir Goldstein if (!PageUptodate(page)) { 1396b6a758ecSAneesh Kumar K.V ret = -EIO; 1397b6a758ecSAneesh Kumar K.V goto err; 1398b6a758ecSAneesh Kumar K.V } 1399b6a758ecSAneesh Kumar K.V 14002de8807bSAmir Goldstein if (e4b.bd_buddy_page == NULL) { 1401b6a758ecSAneesh Kumar K.V /* 1402b6a758ecSAneesh Kumar K.V * If both the bitmap and buddy are in 1403b6a758ecSAneesh Kumar K.V * the same page we don't need to force 1404b6a758ecSAneesh Kumar K.V * init the buddy 1405b6a758ecSAneesh Kumar K.V */ 14062de8807bSAmir Goldstein ret = 0; 1407b6a758ecSAneesh Kumar K.V goto err; 1408b6a758ecSAneesh Kumar K.V } 14092de8807bSAmir Goldstein /* init buddy cache */ 14102de8807bSAmir Goldstein page = e4b.bd_buddy_page; 1411adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 14122de8807bSAmir Goldstein if (ret) 14132de8807bSAmir Goldstein goto err; 14142de8807bSAmir Goldstein if (!PageUptodate(page)) { 1415b6a758ecSAneesh Kumar K.V ret = -EIO; 1416b6a758ecSAneesh Kumar K.V goto err; 1417b6a758ecSAneesh Kumar K.V } 1418b6a758ecSAneesh Kumar K.V err: 14192de8807bSAmir Goldstein ext4_mb_put_buddy_page_lock(&e4b); 1420b6a758ecSAneesh Kumar K.V return ret; 1421b6a758ecSAneesh Kumar K.V } 1422b6a758ecSAneesh Kumar K.V 14238a57d9d6SCurt Wohlgemuth /* 14248a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 14258a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 14268a57d9d6SCurt Wohlgemuth * calling this routine! 14278a57d9d6SCurt Wohlgemuth */ 14284ddfef7bSEric Sandeen static noinline_for_stack int 1429adb7ef60SKonstantin Khlebnikov ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1430adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b, gfp_t gfp) 1431c9de560dSAlex Tomas { 1432c9de560dSAlex Tomas int blocks_per_page; 1433c9de560dSAlex Tomas int block; 1434c9de560dSAlex Tomas int pnum; 1435c9de560dSAlex Tomas int poff; 1436c9de560dSAlex Tomas struct page *page; 1437fdf6c7a7SShen Feng int ret; 1438920313a7SAneesh Kumar K.V struct ext4_group_info *grp; 1439920313a7SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 1440920313a7SAneesh Kumar K.V struct inode *inode = sbi->s_buddy_cache; 1441c9de560dSAlex Tomas 1442b10a44c3STheodore Ts'o might_sleep(); 1443d3df1453SRitesh Harjani mb_debug(sb, "load group %u\n", group); 1444c9de560dSAlex Tomas 144509cbfeafSKirill A. 
Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1446920313a7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 1447c9de560dSAlex Tomas 1448c9de560dSAlex Tomas e4b->bd_blkbits = sb->s_blocksize_bits; 1449529da704STao Ma e4b->bd_info = grp; 1450c9de560dSAlex Tomas e4b->bd_sb = sb; 1451c9de560dSAlex Tomas e4b->bd_group = group; 1452c9de560dSAlex Tomas e4b->bd_buddy_page = NULL; 1453c9de560dSAlex Tomas e4b->bd_bitmap_page = NULL; 1454c9de560dSAlex Tomas 1455f41c0750SAneesh Kumar K.V if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1456f41c0750SAneesh Kumar K.V /* 1457f41c0750SAneesh Kumar K.V * we need full data about the group 1458f41c0750SAneesh Kumar K.V * to make a good selection 1459f41c0750SAneesh Kumar K.V */ 1460adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, gfp); 1461f41c0750SAneesh Kumar K.V if (ret) 1462f41c0750SAneesh Kumar K.V return ret; 1463f41c0750SAneesh Kumar K.V } 1464f41c0750SAneesh Kumar K.V 1465c9de560dSAlex Tomas /* 1466c9de560dSAlex Tomas * the buddy cache inode stores the block bitmap 1467c9de560dSAlex Tomas * and buddy information in consecutive blocks. 1468c9de560dSAlex Tomas * So for each group we need two blocks. 1469c9de560dSAlex Tomas */ 1470c9de560dSAlex Tomas block = group * 2; 1471c9de560dSAlex Tomas pnum = block / blocks_per_page; 1472c9de560dSAlex Tomas poff = block % blocks_per_page; 1473c9de560dSAlex Tomas 1474c9de560dSAlex Tomas /* we could use find_or_create_page(), but it locks the page, 1475c9de560dSAlex Tomas * which we'd like to avoid in the fast path ... */ 14762457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1477c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1478c9de560dSAlex Tomas if (page) 1479920313a7SAneesh Kumar K.V /* 1480920313a7SAneesh Kumar K.V * drop the page reference and try 1481920313a7SAneesh Kumar K.V * to get the page with lock. If it 1482920313a7SAneesh Kumar K.V * is not uptodate, that implies 1483920313a7SAneesh Kumar K.V * somebody just created the page but 1484920313a7SAneesh Kumar K.V * is yet to initialize it. So 1485920313a7SAneesh Kumar K.V * wait for it to initialize. 1486920313a7SAneesh Kumar K.V */ 148709cbfeafSKirill A.
Shutemov put_page(page); 1488adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1489c9de560dSAlex Tomas if (page) { 149019b8b035STheodore Ts'o if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 149119b8b035STheodore Ts'o "ext4: bitmap's page->mapping != inode->i_mapping\n")) { 149219b8b035STheodore Ts'o /* should never happen */ 149319b8b035STheodore Ts'o unlock_page(page); 149419b8b035STheodore Ts'o ret = -EINVAL; 149519b8b035STheodore Ts'o goto err; 149619b8b035STheodore Ts'o } 1497c9de560dSAlex Tomas if (!PageUptodate(page)) { 1498adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 1499fdf6c7a7SShen Feng if (ret) { 1500fdf6c7a7SShen Feng unlock_page(page); 1501fdf6c7a7SShen Feng goto err; 1502fdf6c7a7SShen Feng } 1503c9de560dSAlex Tomas mb_cmp_bitmaps(e4b, page_address(page) + 1504c9de560dSAlex Tomas (poff * sb->s_blocksize)); 1505c9de560dSAlex Tomas } 1506c9de560dSAlex Tomas unlock_page(page); 1507c9de560dSAlex Tomas } 1508c9de560dSAlex Tomas } 1509c57ab39bSYounger Liu if (page == NULL) { 1510c57ab39bSYounger Liu ret = -ENOMEM; 1511c57ab39bSYounger Liu goto err; 1512c57ab39bSYounger Liu } 1513c57ab39bSYounger Liu if (!PageUptodate(page)) { 1514fdf6c7a7SShen Feng ret = -EIO; 1515c9de560dSAlex Tomas goto err; 1516fdf6c7a7SShen Feng } 15172457aec6SMel Gorman 15182457aec6SMel Gorman /* Pages marked accessed already */ 1519c9de560dSAlex Tomas e4b->bd_bitmap_page = page; 1520c9de560dSAlex Tomas e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1521c9de560dSAlex Tomas 1522c9de560dSAlex Tomas block++; 1523c9de560dSAlex Tomas pnum = block / blocks_per_page; 1524c9de560dSAlex Tomas poff = block % blocks_per_page; 1525c9de560dSAlex Tomas 15262457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1527c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1528c9de560dSAlex Tomas if (page) 152909cbfeafSKirill A.
Shutemov put_page(page); 1530adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1531c9de560dSAlex Tomas if (page) { 153219b8b035STheodore Ts'o if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 153319b8b035STheodore Ts'o "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { 153419b8b035STheodore Ts'o /* should never happen */ 153519b8b035STheodore Ts'o unlock_page(page); 153619b8b035STheodore Ts'o ret = -EINVAL; 153719b8b035STheodore Ts'o goto err; 153819b8b035STheodore Ts'o } 1539fdf6c7a7SShen Feng if (!PageUptodate(page)) { 1540adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1541adb7ef60SKonstantin Khlebnikov gfp); 1542fdf6c7a7SShen Feng if (ret) { 1543fdf6c7a7SShen Feng unlock_page(page); 1544fdf6c7a7SShen Feng goto err; 1545fdf6c7a7SShen Feng } 1546fdf6c7a7SShen Feng } 1547c9de560dSAlex Tomas unlock_page(page); 1548c9de560dSAlex Tomas } 1549c9de560dSAlex Tomas } 1550c57ab39bSYounger Liu if (page == NULL) { 1551c57ab39bSYounger Liu ret = -ENOMEM; 1552c57ab39bSYounger Liu goto err; 1553c57ab39bSYounger Liu } 1554c57ab39bSYounger Liu if (!PageUptodate(page)) { 1555fdf6c7a7SShen Feng ret = -EIO; 1556c9de560dSAlex Tomas goto err; 1557fdf6c7a7SShen Feng } 15582457aec6SMel Gorman 15592457aec6SMel Gorman /* Pages marked accessed already */ 1560c9de560dSAlex Tomas e4b->bd_buddy_page = page; 1561c9de560dSAlex Tomas e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1562c9de560dSAlex Tomas 1563c9de560dSAlex Tomas return 0; 1564c9de560dSAlex Tomas 1565c9de560dSAlex Tomas err: 156626626f11SYang Ruirui if (page) 156709cbfeafSKirill A. Shutemov put_page(page); 1568c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 156909cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1570285164b8SKemeng Shi 1571c9de560dSAlex Tomas e4b->bd_buddy = NULL; 1572c9de560dSAlex Tomas e4b->bd_bitmap = NULL; 1573fdf6c7a7SShen Feng return ret; 1574c9de560dSAlex Tomas } 1575c9de560dSAlex Tomas 1576adb7ef60SKonstantin Khlebnikov static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1577adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b) 1578adb7ef60SKonstantin Khlebnikov { 1579adb7ef60SKonstantin Khlebnikov return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1580adb7ef60SKonstantin Khlebnikov } 1581adb7ef60SKonstantin Khlebnikov 1582e39e07fdSJing Zhang static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1583c9de560dSAlex Tomas { 1584c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 158509cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1586c9de560dSAlex Tomas if (e4b->bd_buddy_page) 158709cbfeafSKirill A. 
Shutemov put_page(e4b->bd_buddy_page); 1588c9de560dSAlex Tomas } 1589c9de560dSAlex Tomas 1590c9de560dSAlex Tomas 1591c9de560dSAlex Tomas static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1592c9de560dSAlex Tomas { 1593ce3cca33SChunguang Xu int order = 1, max; 1594c9de560dSAlex Tomas void *bb; 1595c9de560dSAlex Tomas 1596c5e8f3f3STheodore Ts'o BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1597c9de560dSAlex Tomas BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1598c9de560dSAlex Tomas 1599c9de560dSAlex Tomas while (order <= e4b->bd_blkbits + 1) { 1600ce3cca33SChunguang Xu bb = mb_find_buddy(e4b, order, &max); 1601ce3cca33SChunguang Xu if (!mb_test_bit(block >> order, bb)) { 1602c9de560dSAlex Tomas /* this block is part of buddy of order 'order' */ 1603c9de560dSAlex Tomas return order; 1604c9de560dSAlex Tomas } 1605c9de560dSAlex Tomas order++; 1606c9de560dSAlex Tomas } 1607c9de560dSAlex Tomas return 0; 1608c9de560dSAlex Tomas } 1609c9de560dSAlex Tomas 1610955ce5f5SAneesh Kumar K.V static void mb_clear_bits(void *bm, int cur, int len) 1611c9de560dSAlex Tomas { 1612c9de560dSAlex Tomas __u32 *addr; 1613c9de560dSAlex Tomas 1614c9de560dSAlex Tomas len = cur + len; 1615c9de560dSAlex Tomas while (cur < len) { 1616c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1617c9de560dSAlex Tomas /* fast path: clear whole word at once */ 1618c9de560dSAlex Tomas addr = bm + (cur >> 3); 1619c9de560dSAlex Tomas *addr = 0; 1620c9de560dSAlex Tomas cur += 32; 1621c9de560dSAlex Tomas continue; 1622c9de560dSAlex Tomas } 1623e8134b27SAneesh Kumar K.V mb_clear_bit(cur, bm); 1624c9de560dSAlex Tomas cur++; 1625c9de560dSAlex Tomas } 1626c9de560dSAlex Tomas } 1627c9de560dSAlex Tomas 1628eabe0444SAndrey Sidorov /* clear bits in given range 1629eabe0444SAndrey Sidorov * will return first found zero bit if any, -1 otherwise 1630eabe0444SAndrey Sidorov */ 1631eabe0444SAndrey Sidorov static int mb_test_and_clear_bits(void *bm, int cur, int len) 1632eabe0444SAndrey Sidorov { 1633eabe0444SAndrey Sidorov __u32 *addr; 1634eabe0444SAndrey Sidorov int zero_bit = -1; 1635eabe0444SAndrey Sidorov 1636eabe0444SAndrey Sidorov len = cur + len; 1637eabe0444SAndrey Sidorov while (cur < len) { 1638eabe0444SAndrey Sidorov if ((cur & 31) == 0 && (len - cur) >= 32) { 1639eabe0444SAndrey Sidorov /* fast path: clear whole word at once */ 1640eabe0444SAndrey Sidorov addr = bm + (cur >> 3); 1641eabe0444SAndrey Sidorov if (*addr != (__u32)(-1) && zero_bit == -1) 1642eabe0444SAndrey Sidorov zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1643eabe0444SAndrey Sidorov *addr = 0; 1644eabe0444SAndrey Sidorov cur += 32; 1645eabe0444SAndrey Sidorov continue; 1646eabe0444SAndrey Sidorov } 1647eabe0444SAndrey Sidorov if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1648eabe0444SAndrey Sidorov zero_bit = cur; 1649eabe0444SAndrey Sidorov cur++; 1650eabe0444SAndrey Sidorov } 1651eabe0444SAndrey Sidorov 1652eabe0444SAndrey Sidorov return zero_bit; 1653eabe0444SAndrey Sidorov } 1654eabe0444SAndrey Sidorov 1655123e3016SRitesh Harjani void mb_set_bits(void *bm, int cur, int len) 1656c9de560dSAlex Tomas { 1657c9de560dSAlex Tomas __u32 *addr; 1658c9de560dSAlex Tomas 1659c9de560dSAlex Tomas len = cur + len; 1660c9de560dSAlex Tomas while (cur < len) { 1661c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1662c9de560dSAlex Tomas /* fast path: set whole word at once */ 1663c9de560dSAlex Tomas addr = bm + (cur >> 3); 1664c9de560dSAlex Tomas *addr = 0xffffffff; 1665c9de560dSAlex Tomas cur += 32; 1666c9de560dSAlex Tomas 
continue; 1667c9de560dSAlex Tomas } 1668e8134b27SAneesh Kumar K.V mb_set_bit(cur, bm); 1669c9de560dSAlex Tomas cur++; 1670c9de560dSAlex Tomas } 1671c9de560dSAlex Tomas } 1672c9de560dSAlex Tomas 1673eabe0444SAndrey Sidorov static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1674eabe0444SAndrey Sidorov { 1675eabe0444SAndrey Sidorov if (mb_test_bit(*bit + side, bitmap)) { 1676eabe0444SAndrey Sidorov mb_clear_bit(*bit, bitmap); 1677eabe0444SAndrey Sidorov (*bit) -= side; 1678eabe0444SAndrey Sidorov return 1; 1679eabe0444SAndrey Sidorov } 1680eabe0444SAndrey Sidorov else { 1681eabe0444SAndrey Sidorov (*bit) += side; 1682eabe0444SAndrey Sidorov mb_set_bit(*bit, bitmap); 1683eabe0444SAndrey Sidorov return -1; 1684eabe0444SAndrey Sidorov } 1685eabe0444SAndrey Sidorov } 1686eabe0444SAndrey Sidorov 1687eabe0444SAndrey Sidorov static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1688eabe0444SAndrey Sidorov { 1689eabe0444SAndrey Sidorov int max; 1690eabe0444SAndrey Sidorov int order = 1; 1691eabe0444SAndrey Sidorov void *buddy = mb_find_buddy(e4b, order, &max); 1692eabe0444SAndrey Sidorov 1693eabe0444SAndrey Sidorov while (buddy) { 1694eabe0444SAndrey Sidorov void *buddy2; 1695eabe0444SAndrey Sidorov 1696eabe0444SAndrey Sidorov /* Bits in range [first; last] are known to be set since 1697eabe0444SAndrey Sidorov * corresponding blocks were allocated. Bits in range 1698eabe0444SAndrey Sidorov * (first; last) will stay set because they form buddies on 1699eabe0444SAndrey Sidorov * upper layer. We just deal with borders if they don't 1700eabe0444SAndrey Sidorov * align with upper layer and then go up. 1701eabe0444SAndrey Sidorov * Releasing entire group is all about clearing 1702eabe0444SAndrey Sidorov * single bit of highest order buddy. 1703eabe0444SAndrey Sidorov */ 1704eabe0444SAndrey Sidorov 1705eabe0444SAndrey Sidorov /* Example: 1706eabe0444SAndrey Sidorov * --------------------------------- 1707eabe0444SAndrey Sidorov * | 1 | 1 | 1 | 1 | 1708eabe0444SAndrey Sidorov * --------------------------------- 1709eabe0444SAndrey Sidorov * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1710eabe0444SAndrey Sidorov * --------------------------------- 1711eabe0444SAndrey Sidorov * 0 1 2 3 4 5 6 7 1712eabe0444SAndrey Sidorov * \_____________________/ 1713eabe0444SAndrey Sidorov * 1714eabe0444SAndrey Sidorov * Neither [1] nor [6] is aligned to above layer. 1715eabe0444SAndrey Sidorov * Left neighbour [0] is free, so mark it busy, 1716eabe0444SAndrey Sidorov * decrease bb_counters and extend range to 1717eabe0444SAndrey Sidorov * [0; 6] 1718eabe0444SAndrey Sidorov * Right neighbour [7] is busy. It can't be coalesced with [6], so 1719eabe0444SAndrey Sidorov * mark [6] free, increase bb_counters and shrink range to 1720eabe0444SAndrey Sidorov * [0; 5]. 1721eabe0444SAndrey Sidorov * Then shift range to [0; 2], go up and do the same.
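 *
 * mb_buddy_adjust_border() above implements exactly this: it returns +1
 * when a chunk at this order is marked free (the range shrinks) and -1
 * when a free neighbour is absorbed into the range (the range extends);
 * the callers below simply add its result to bb_counters[order].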
1722eabe0444SAndrey Sidorov */ 1723eabe0444SAndrey Sidorov 1724eabe0444SAndrey Sidorov 1725eabe0444SAndrey Sidorov if (first & 1) 1726eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1727eabe0444SAndrey Sidorov if (!(last & 1)) 1728eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1729eabe0444SAndrey Sidorov if (first > last) 1730eabe0444SAndrey Sidorov break; 1731eabe0444SAndrey Sidorov order++; 1732eabe0444SAndrey Sidorov 1733976620bdSKemeng Shi buddy2 = mb_find_buddy(e4b, order, &max); 1734976620bdSKemeng Shi if (!buddy2) { 1735eabe0444SAndrey Sidorov mb_clear_bits(buddy, first, last - first + 1); 1736eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1737eabe0444SAndrey Sidorov break; 1738eabe0444SAndrey Sidorov } 1739eabe0444SAndrey Sidorov first >>= 1; 1740eabe0444SAndrey Sidorov last >>= 1; 1741eabe0444SAndrey Sidorov buddy = buddy2; 1742eabe0444SAndrey Sidorov } 1743eabe0444SAndrey Sidorov } 1744eabe0444SAndrey Sidorov 17457e5a8cddSShen Feng static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1746c9de560dSAlex Tomas int first, int count) 1747c9de560dSAlex Tomas { 1748eabe0444SAndrey Sidorov int left_is_free = 0; 1749eabe0444SAndrey Sidorov int right_is_free = 0; 1750eabe0444SAndrey Sidorov int block; 1751eabe0444SAndrey Sidorov int last = first + count - 1; 1752c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 1753c9de560dSAlex Tomas 1754c99d1e6eSTheodore Ts'o if (WARN_ON(count == 0)) 1755c99d1e6eSTheodore Ts'o return; 1756eabe0444SAndrey Sidorov BUG_ON(last >= (sb->s_blocksize << 3)); 1757bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1758163a203dSDarrick J. Wong /* Don't bother if the block group is corrupt. */ 1759163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1760163a203dSDarrick J. Wong return; 1761163a203dSDarrick J. Wong 1762c9de560dSAlex Tomas mb_check_buddy(e4b); 1763c9de560dSAlex Tomas mb_free_blocks_double(inode, e4b, first, count); 1764c9de560dSAlex Tomas 176507b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1766c9de560dSAlex Tomas e4b->bd_info->bb_free += count; 1767c9de560dSAlex Tomas if (first < e4b->bd_info->bb_first_free) 1768c9de560dSAlex Tomas e4b->bd_info->bb_first_free = first; 1769c9de560dSAlex Tomas 1770eabe0444SAndrey Sidorov /* access memory sequentially: check left neighbour, 1771eabe0444SAndrey Sidorov * clear range and then check right neighbour 1772eabe0444SAndrey Sidorov */ 1773c9de560dSAlex Tomas if (first != 0) 1774eabe0444SAndrey Sidorov left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1775eabe0444SAndrey Sidorov block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1776eabe0444SAndrey Sidorov if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1777eabe0444SAndrey Sidorov right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1778c9de560dSAlex Tomas 1779eabe0444SAndrey Sidorov if (unlikely(block != -1)) { 1780e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 1781c9de560dSAlex Tomas ext4_fsblk_t blocknr; 17825661bd68SAkinobu Mita 17835661bd68SAkinobu Mita blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 178449598e04SJun Piao blocknr += EXT4_C2B(sbi, block); 17858016e29fSHarshad Shirwadkar if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 17865d1b1b3fSAneesh Kumar K.V ext4_grp_locked_error(sb, e4b->bd_group, 1787e29136f8STheodore Ts'o inode ? 
inode->i_ino : 0, 1788e29136f8STheodore Ts'o blocknr, 17898016e29fSHarshad Shirwadkar "freeing already freed block (bit %u); block bitmap corrupt.", 1790163a203dSDarrick J. Wong block); 17918016e29fSHarshad Shirwadkar ext4_mark_group_bitmap_corrupted( 17928016e29fSHarshad Shirwadkar sb, e4b->bd_group, 1793db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 17948016e29fSHarshad Shirwadkar } 1795eabe0444SAndrey Sidorov goto done; 1796c9de560dSAlex Tomas } 1797c9de560dSAlex Tomas 1798eabe0444SAndrey Sidorov /* let's maintain fragments counter */ 1799eabe0444SAndrey Sidorov if (left_is_free && right_is_free) 1800eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments--; 1801eabe0444SAndrey Sidorov else if (!left_is_free && !right_is_free) 1802eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments++; 1803c9de560dSAlex Tomas 1804eabe0444SAndrey Sidorov /* buddy[0] == bd_bitmap is a special case, so handle 1805eabe0444SAndrey Sidorov * it right away and let mb_buddy_mark_free stay free of 1806eabe0444SAndrey Sidorov * zero order checks. 1807eabe0444SAndrey Sidorov * Check if neighbours are to be coalesced, 1808eabe0444SAndrey Sidorov * adjust bitmap bb_counters and borders appropriately. 1809eabe0444SAndrey Sidorov */ 1810eabe0444SAndrey Sidorov if (first & 1) { 1811eabe0444SAndrey Sidorov first += !left_is_free; 1812eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1813c9de560dSAlex Tomas } 1814eabe0444SAndrey Sidorov if (!(last & 1)) { 1815eabe0444SAndrey Sidorov last -= !right_is_free; 1816eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; 1817c9de560dSAlex Tomas } 1818eabe0444SAndrey Sidorov 1819eabe0444SAndrey Sidorov if (first <= last) 1820eabe0444SAndrey Sidorov mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1821eabe0444SAndrey Sidorov 1822eabe0444SAndrey Sidorov done: 18238a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, e4b->bd_info); 1824196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(sb, e4b->bd_info); 1825c9de560dSAlex Tomas mb_check_buddy(e4b); 1826c9de560dSAlex Tomas } 1827c9de560dSAlex Tomas 182815c006a2SRobin Dong static int mb_find_extent(struct ext4_buddy *e4b, int block, 1829c9de560dSAlex Tomas int needed, struct ext4_free_extent *ex) 1830c9de560dSAlex Tomas { 1831c9de560dSAlex Tomas int next = block; 183215c006a2SRobin Dong int max, order; 1833c9de560dSAlex Tomas void *buddy; 1834c9de560dSAlex Tomas 1835bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1836c9de560dSAlex Tomas BUG_ON(ex == NULL); 1837c9de560dSAlex Tomas 183815c006a2SRobin Dong buddy = mb_find_buddy(e4b, 0, &max); 1839c9de560dSAlex Tomas BUG_ON(buddy == NULL); 1840c9de560dSAlex Tomas BUG_ON(block >= max); 1841c9de560dSAlex Tomas if (mb_test_bit(block, buddy)) { 1842c9de560dSAlex Tomas ex->fe_len = 0; 1843c9de560dSAlex Tomas ex->fe_start = 0; 1844c9de560dSAlex Tomas ex->fe_group = 0; 1845c9de560dSAlex Tomas return 0; 1846c9de560dSAlex Tomas } 1847c9de560dSAlex Tomas 1848c9de560dSAlex Tomas /* find actual order */ 1849c9de560dSAlex Tomas order = mb_find_order_for_block(e4b, block); 1850c9de560dSAlex Tomas block = block >> order; 1851c9de560dSAlex Tomas 1852c9de560dSAlex Tomas ex->fe_len = 1 << order; 1853c9de560dSAlex Tomas ex->fe_start = block << order; 1854c9de560dSAlex Tomas ex->fe_group = e4b->bd_group; 1855c9de560dSAlex Tomas 1856c9de560dSAlex Tomas /* calc difference from given start */ 1857c9de560dSAlex Tomas next = next - ex->fe_start; 1858c9de560dSAlex Tomas ex->fe_len -= next; 1859c9de560dSAlex
Tomas ex->fe_start += next; 1860c9de560dSAlex Tomas 1861c9de560dSAlex Tomas while (needed > ex->fe_len && 1862d8ec0c39SAlan Cox mb_find_buddy(e4b, order, &max)) { 1863c9de560dSAlex Tomas 1864c9de560dSAlex Tomas if (block + 1 >= max) 1865c9de560dSAlex Tomas break; 1866c9de560dSAlex Tomas 1867c9de560dSAlex Tomas next = (block + 1) * (1 << order); 1868c5e8f3f3STheodore Ts'o if (mb_test_bit(next, e4b->bd_bitmap)) 1869c9de560dSAlex Tomas break; 1870c9de560dSAlex Tomas 1871b051d8dcSRobin Dong order = mb_find_order_for_block(e4b, next); 1872c9de560dSAlex Tomas 1873c9de560dSAlex Tomas block = next >> order; 1874c9de560dSAlex Tomas ex->fe_len += 1 << order; 1875c9de560dSAlex Tomas } 1876c9de560dSAlex Tomas 187731562b95SJan Kara if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 187843c73221STheodore Ts'o /* Should never happen! (but apparently sometimes does?!?) */ 187943c73221STheodore Ts'o WARN_ON(1); 1880cd84bbbaSStephen Brennan ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1881cd84bbbaSStephen Brennan "corruption or bug in mb_find_extent " 188243c73221STheodore Ts'o "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 188343c73221STheodore Ts'o block, order, needed, ex->fe_group, ex->fe_start, 188443c73221STheodore Ts'o ex->fe_len, ex->fe_logical); 188543c73221STheodore Ts'o ex->fe_len = 0; 188643c73221STheodore Ts'o ex->fe_start = 0; 188743c73221STheodore Ts'o ex->fe_group = 0; 188843c73221STheodore Ts'o } 1889c9de560dSAlex Tomas return ex->fe_len; 1890c9de560dSAlex Tomas } 1891c9de560dSAlex Tomas 1892c9de560dSAlex Tomas static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1893c9de560dSAlex Tomas { 1894c9de560dSAlex Tomas int ord; 1895c9de560dSAlex Tomas int mlen = 0; 1896c9de560dSAlex Tomas int max = 0; 1897c9de560dSAlex Tomas int cur; 1898c9de560dSAlex Tomas int start = ex->fe_start; 1899c9de560dSAlex Tomas int len = ex->fe_len; 1900c9de560dSAlex Tomas unsigned ret = 0; 1901c9de560dSAlex Tomas int len0 = len; 1902c9de560dSAlex Tomas void *buddy; 1903218a6944Shanjinke bool split = false; 1904c9de560dSAlex Tomas 1905c9de560dSAlex Tomas BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1906c9de560dSAlex Tomas BUG_ON(e4b->bd_group != ex->fe_group); 1907bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1908c9de560dSAlex Tomas mb_check_buddy(e4b); 1909c9de560dSAlex Tomas mb_mark_used_double(e4b, start, len); 1910c9de560dSAlex Tomas 191107b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1912c9de560dSAlex Tomas e4b->bd_info->bb_free -= len; 1913c9de560dSAlex Tomas if (e4b->bd_info->bb_first_free == start) 1914c9de560dSAlex Tomas e4b->bd_info->bb_first_free += len; 1915c9de560dSAlex Tomas 1916c9de560dSAlex Tomas /* let's maintain fragments counter */ 1917c9de560dSAlex Tomas if (start != 0) 1918c5e8f3f3STheodore Ts'o mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1919c9de560dSAlex Tomas if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1920c5e8f3f3STheodore Ts'o max = !mb_test_bit(start + len, e4b->bd_bitmap); 1921c9de560dSAlex Tomas if (mlen && max) 1922c9de560dSAlex Tomas e4b->bd_info->bb_fragments++; 1923c9de560dSAlex Tomas else if (!mlen && !max) 1924c9de560dSAlex Tomas e4b->bd_info->bb_fragments--; 1925c9de560dSAlex Tomas 1926c9de560dSAlex Tomas /* let's maintain buddy itself */ 1927c9de560dSAlex Tomas while (len) { 1928218a6944Shanjinke if (!split) 1929c9de560dSAlex Tomas ord = mb_find_order_for_block(e4b, start); 1930c9de560dSAlex Tomas 1931c9de560dSAlex Tomas if (((start >> ord) << ord) == start 
&& len >= (1 << ord)) { 1932c9de560dSAlex Tomas /* the whole chunk may be allocated at once! */ 1933c9de560dSAlex Tomas mlen = 1 << ord; 1934218a6944Shanjinke if (!split) 1935c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1936218a6944Shanjinke else 1937218a6944Shanjinke split = false; 1938c9de560dSAlex Tomas BUG_ON((start >> ord) >= max); 1939c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1940c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1941c9de560dSAlex Tomas start += mlen; 1942c9de560dSAlex Tomas len -= mlen; 1943c9de560dSAlex Tomas BUG_ON(len < 0); 1944c9de560dSAlex Tomas continue; 1945c9de560dSAlex Tomas } 1946c9de560dSAlex Tomas 1947c9de560dSAlex Tomas /* store for history */ 1948c9de560dSAlex Tomas if (ret == 0) 1949c9de560dSAlex Tomas ret = len | (ord << 16); 1950c9de560dSAlex Tomas 1951c9de560dSAlex Tomas /* we have to split large buddy */ 1952c9de560dSAlex Tomas BUG_ON(ord <= 0); 1953c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1954c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1955c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1956c9de560dSAlex Tomas 1957c9de560dSAlex Tomas ord--; 1958c9de560dSAlex Tomas cur = (start >> ord) & ~1U; 1959c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1960c9de560dSAlex Tomas mb_clear_bit(cur, buddy); 1961c9de560dSAlex Tomas mb_clear_bit(cur + 1, buddy); 1962c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1963c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1964218a6944Shanjinke split = true; 1965c9de560dSAlex Tomas } 19668a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1967c9de560dSAlex Tomas 1968196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 1969123e3016SRitesh Harjani mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1970c9de560dSAlex Tomas mb_check_buddy(e4b); 1971c9de560dSAlex Tomas 1972c9de560dSAlex Tomas return ret; 1973c9de560dSAlex Tomas } 1974c9de560dSAlex Tomas 1975c9de560dSAlex Tomas /* 1976c9de560dSAlex Tomas * Must be called under group lock! 1977c9de560dSAlex Tomas */ 1978c9de560dSAlex Tomas static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1979c9de560dSAlex Tomas struct ext4_buddy *e4b) 1980c9de560dSAlex Tomas { 1981c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1982c9de560dSAlex Tomas int ret; 1983c9de560dSAlex Tomas 1984c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1985c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1986c9de560dSAlex Tomas 1987c9de560dSAlex Tomas ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 1988c9de560dSAlex Tomas ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 1989c9de560dSAlex Tomas ret = mb_mark_used(e4b, &ac->ac_b_ex); 1990c9de560dSAlex Tomas 1991c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 1992c9de560dSAlex Tomas * allocated blocks for history */ 1993c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 1994c9de560dSAlex Tomas 1995c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 1996c9de560dSAlex Tomas ac->ac_tail = ret & 0xffff; 1997c9de560dSAlex Tomas ac->ac_buddy = ret >> 16; 1998c9de560dSAlex Tomas 1999c3a326a6SAneesh Kumar K.V /* 2000c3a326a6SAneesh Kumar K.V * take the page reference. We want the page to be pinned 2001c3a326a6SAneesh Kumar K.V * so that we don't get an ext4_mb_init_cache call for this 2002c3a326a6SAneesh Kumar K.V * group until we update the bitmap. That would mean we 2003c3a326a6SAneesh Kumar K.V * double allocate blocks.
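(ext4_mb_init_cache() would regenerate the buddy from the not-yet-updated bitmap, re-exposing the just-allocated blocks as free.)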
The reference is dropped 2004c3a326a6SAneesh Kumar K.V * in ext4_mb_release_context 2005c3a326a6SAneesh Kumar K.V */ 2006c9de560dSAlex Tomas ac->ac_bitmap_page = e4b->bd_bitmap_page; 2007c9de560dSAlex Tomas get_page(ac->ac_bitmap_page); 2008c9de560dSAlex Tomas ac->ac_buddy_page = e4b->bd_buddy_page; 2009c9de560dSAlex Tomas get_page(ac->ac_buddy_page); 2010c9de560dSAlex Tomas /* store last allocated for subsequent stream allocation */ 20114ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2012c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2013c9de560dSAlex Tomas sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2014c9de560dSAlex Tomas sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2015c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2016c9de560dSAlex Tomas } 201753f86b17SRitesh Harjani /* 201853f86b17SRitesh Harjani * As we've just preallocated more space than 201953f86b17SRitesh Harjani * user requested originally, we store allocated 202053f86b17SRitesh Harjani * space in a special descriptor. 202153f86b17SRitesh Harjani */ 202253f86b17SRitesh Harjani if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 202353f86b17SRitesh Harjani ext4_mb_new_preallocation(ac); 202453f86b17SRitesh Harjani 2025c9de560dSAlex Tomas } 2026c9de560dSAlex Tomas 2027c9de560dSAlex Tomas static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2028c9de560dSAlex Tomas struct ext4_buddy *e4b, 2029c9de560dSAlex Tomas int finish_group) 2030c9de560dSAlex Tomas { 2031c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2032c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2033c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2034c9de560dSAlex Tomas 2035032115fcSAneesh Kumar K.V if (ac->ac_status == AC_STATUS_FOUND) 2036032115fcSAneesh Kumar K.V return; 2037c9de560dSAlex Tomas /* 2038c9de560dSAlex Tomas * We don't want to scan for a whole year 2039c9de560dSAlex Tomas */ 2040c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan && 2041c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2042c9de560dSAlex Tomas ac->ac_status = AC_STATUS_BREAK; 2043c9de560dSAlex Tomas return; 2044c9de560dSAlex Tomas } 2045c9de560dSAlex Tomas 2046c9de560dSAlex Tomas /* 2047c9de560dSAlex Tomas * Haven't found good chunk so far, let's continue 2048c9de560dSAlex Tomas */ 2049c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) 2050c9de560dSAlex Tomas return; 2051c9de560dSAlex Tomas 205278dc9f84SKemeng Shi if (finish_group) 2053c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2054c9de560dSAlex Tomas } 2055c9de560dSAlex Tomas 2056c9de560dSAlex Tomas /* 2057c9de560dSAlex Tomas * The routine checks whether found extent is good enough. If it is, 2058c9de560dSAlex Tomas * then the extent gets marked used and a flag is set in the context 2059c9de560dSAlex Tomas * to stop scanning. Otherwise, the extent is compared with the 2060c9de560dSAlex Tomas * previously found extent and if the new one is better, then it is stored 2061c9de560dSAlex Tomas * in the context. Later, the best found extent will be used, if 2062c9de560dSAlex Tomas * mballoc can't find good enough extent. 2063c9de560dSAlex Tomas * 2064c9de560dSAlex Tomas * FIXME: real allocation policy is to be designed yet!
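 *
 * In short, the policy implemented below is: with EXT4_MB_HINT_FIRST take
 * the first extent found; an extent exactly matching the goal length is
 * taken immediately; while the best-so-far is still shorter than the goal,
 * bigger is better; once the goal is met, the smallest extent that still
 * covers it wins.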
2065c9de560dSAlex Tomas */ 2066c9de560dSAlex Tomas static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2067c9de560dSAlex Tomas struct ext4_free_extent *ex, 2068c9de560dSAlex Tomas struct ext4_buddy *e4b) 2069c9de560dSAlex Tomas { 2070c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2071c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2072c9de560dSAlex Tomas 2073c9de560dSAlex Tomas BUG_ON(ex->fe_len <= 0); 20747137d7a4STheodore Ts'o BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 20757137d7a4STheodore Ts'o BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2076c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2077c9de560dSAlex Tomas 2078c9de560dSAlex Tomas ac->ac_found++; 2079c9de560dSAlex Tomas 2080c9de560dSAlex Tomas /* 2081c9de560dSAlex Tomas * The special case - take what you catch first 2082c9de560dSAlex Tomas */ 2083c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2084c9de560dSAlex Tomas *bex = *ex; 2085c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2086c9de560dSAlex Tomas return; 2087c9de560dSAlex Tomas } 2088c9de560dSAlex Tomas 2089c9de560dSAlex Tomas /* 2090c9de560dSAlex Tomas * Let's check whether the chunk is good enough 2091c9de560dSAlex Tomas */ 2092c9de560dSAlex Tomas if (ex->fe_len == gex->fe_len) { 2093c9de560dSAlex Tomas *bex = *ex; 2094c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2095c9de560dSAlex Tomas return; 2096c9de560dSAlex Tomas } 2097c9de560dSAlex Tomas 2098c9de560dSAlex Tomas /* 2099c9de560dSAlex Tomas * If this is the first found extent, just store it in the context 2100c9de560dSAlex Tomas */ 2101c9de560dSAlex Tomas if (bex->fe_len == 0) { 2102c9de560dSAlex Tomas *bex = *ex; 2103c9de560dSAlex Tomas return; 2104c9de560dSAlex Tomas } 2105c9de560dSAlex Tomas 2106c9de560dSAlex Tomas /* 2107c9de560dSAlex Tomas * If the newly found extent is better, store it in the context 2108c9de560dSAlex Tomas */ 2109c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) { 2110c9de560dSAlex Tomas /* if the request isn't satisfied, any found extent 2111c9de560dSAlex Tomas * larger than previous best one is better */ 2112c9de560dSAlex Tomas if (ex->fe_len > bex->fe_len) 2113c9de560dSAlex Tomas *bex = *ex; 2114c9de560dSAlex Tomas } else if (ex->fe_len > gex->fe_len) { 2115c9de560dSAlex Tomas /* if the request is satisfied, then we try to find 2116c9de560dSAlex Tomas * an extent that still satisfies the request, but is 2117c9de560dSAlex Tomas * smaller than previous one */ 2118c9de560dSAlex Tomas if (ex->fe_len < bex->fe_len) 2119c9de560dSAlex Tomas *bex = *ex; 2120c9de560dSAlex Tomas } 2121c9de560dSAlex Tomas 2122c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 0); 2123c9de560dSAlex Tomas } 2124c9de560dSAlex Tomas 2125089ceeccSEric Sandeen static noinline_for_stack 212685b67ffbSKemeng Shi void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2127c9de560dSAlex Tomas struct ext4_buddy *e4b) 2128c9de560dSAlex Tomas { 2129c9de560dSAlex Tomas struct ext4_free_extent ex = ac->ac_b_ex; 2130c9de560dSAlex Tomas ext4_group_t group = ex.fe_group; 2131c9de560dSAlex Tomas int max; 2132c9de560dSAlex Tomas int err; 2133c9de560dSAlex Tomas 2134c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2135c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2136c9de560dSAlex Tomas if (err) 213785b67ffbSKemeng Shi return; 2138c9de560dSAlex Tomas 2139c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 214015c006a2SRobin Dong max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
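/* max > 0 means the previously chosen extent is still (at least partly) free at that position, so re-take it now that the group lock is held */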
2141c9de560dSAlex Tomas 2142c9de560dSAlex Tomas if (max > 0) { 2143c9de560dSAlex Tomas ac->ac_b_ex = ex; 2144c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2145c9de560dSAlex Tomas } 2146c9de560dSAlex Tomas 2147c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2148e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2149c9de560dSAlex Tomas } 2150c9de560dSAlex Tomas 2151089ceeccSEric Sandeen static noinline_for_stack 2152089ceeccSEric Sandeen int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2153c9de560dSAlex Tomas struct ext4_buddy *e4b) 2154c9de560dSAlex Tomas { 2155c9de560dSAlex Tomas ext4_group_t group = ac->ac_g_ex.fe_group; 2156c9de560dSAlex Tomas int max; 2157c9de560dSAlex Tomas int err; 2158c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2159838cd0cfSYongqiang Yang struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2160c9de560dSAlex Tomas struct ext4_free_extent ex; 2161c9de560dSAlex Tomas 216201e4ca29SKemeng Shi if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2163c9de560dSAlex Tomas return 0; 2164838cd0cfSYongqiang Yang if (grp->bb_free == 0) 2165838cd0cfSYongqiang Yang return 0; 2166c9de560dSAlex Tomas 2167c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2168c9de560dSAlex Tomas if (err) 2169c9de560dSAlex Tomas return err; 2170c9de560dSAlex Tomas 2171163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2172163a203dSDarrick J. Wong ext4_mb_unload_buddy(e4b); 2173163a203dSDarrick J. Wong return 0; 2174163a203dSDarrick J. Wong } 2175163a203dSDarrick J. Wong 2176c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 217715c006a2SRobin Dong max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2178c9de560dSAlex Tomas ac->ac_g_ex.fe_len, &ex); 2179ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADFA11; /* debug value */ 2180c9de560dSAlex Tomas 2181c9de560dSAlex Tomas if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 2182c9de560dSAlex Tomas ext4_fsblk_t start; 2183c9de560dSAlex Tomas 21845661bd68SAkinobu Mita start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 21855661bd68SAkinobu Mita ex.fe_start; 2186c9de560dSAlex Tomas /* use do_div to get remainder (would be 64-bit modulo) */ 2187c9de560dSAlex Tomas if (do_div(start, sbi->s_stripe) == 0) { 2188c9de560dSAlex Tomas ac->ac_found++; 2189c9de560dSAlex Tomas ac->ac_b_ex = ex; 2190c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2191c9de560dSAlex Tomas } 2192c9de560dSAlex Tomas } else if (max >= ac->ac_g_ex.fe_len) { 2193c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2194c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2195c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2196c9de560dSAlex Tomas ac->ac_found++; 2197c9de560dSAlex Tomas ac->ac_b_ex = ex; 2198c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2199c9de560dSAlex Tomas } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2200c9de560dSAlex Tomas /* Sometimes, caller may want to merge even small 2201c9de560dSAlex Tomas * number of blocks to an existing extent */ 2202c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2203c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2204c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2205c9de560dSAlex Tomas ac->ac_found++; 2206c9de560dSAlex Tomas ac->ac_b_ex = ex; 2207c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2208c9de560dSAlex Tomas } 2209c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2210e39e07fdSJing Zhang 
ext4_mb_unload_buddy(e4b);
2211c9de560dSAlex Tomas
2212c9de560dSAlex Tomas return 0;
2213c9de560dSAlex Tomas }
2214c9de560dSAlex Tomas
2215c9de560dSAlex Tomas /*
2216c9de560dSAlex Tomas * The routine scans buddy structures (not bitmap!) from given order
2217c9de560dSAlex Tomas * to max order and tries to find a big enough chunk to satisfy the request
2218c9de560dSAlex Tomas */
2219089ceeccSEric Sandeen static noinline_for_stack
2220089ceeccSEric Sandeen void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2221c9de560dSAlex Tomas struct ext4_buddy *e4b)
2222c9de560dSAlex Tomas {
2223c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
2224c9de560dSAlex Tomas struct ext4_group_info *grp = e4b->bd_info;
2225c9de560dSAlex Tomas void *buddy;
2226c9de560dSAlex Tomas int i;
2227c9de560dSAlex Tomas int k;
2228c9de560dSAlex Tomas int max;
2229c9de560dSAlex Tomas
2230c9de560dSAlex Tomas BUG_ON(ac->ac_2order <= 0);
22314b68f6dfSHarshad Shirwadkar for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2232c9de560dSAlex Tomas if (grp->bb_counters[i] == 0)
2233c9de560dSAlex Tomas continue;
2234c9de560dSAlex Tomas
2235c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, i, &max);
223619b8b035STheodore Ts'o if (WARN_RATELIMIT(buddy == NULL,
223719b8b035STheodore Ts'o "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
223819b8b035STheodore Ts'o continue;
2239c9de560dSAlex Tomas
2240ffad0a44SAneesh Kumar K.V k = mb_find_next_zero_bit(buddy, max, 0);
2241eb576086SDmitry Monakhov if (k >= max) {
2242eb576086SDmitry Monakhov ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2243eb576086SDmitry Monakhov "%d free clusters of order %d. But found 0",
2244eb576086SDmitry Monakhov grp->bb_counters[i], i);
2245eb576086SDmitry Monakhov ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2246eb576086SDmitry Monakhov e4b->bd_group,
2247eb576086SDmitry Monakhov EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2248eb576086SDmitry Monakhov break;
2249eb576086SDmitry Monakhov }
2250c9de560dSAlex Tomas ac->ac_found++;
2251c9de560dSAlex Tomas
2252c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 1 << i;
2253c9de560dSAlex Tomas ac->ac_b_ex.fe_start = k << i;
2254c9de560dSAlex Tomas ac->ac_b_ex.fe_group = e4b->bd_group;
2255c9de560dSAlex Tomas
2256c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2257c9de560dSAlex Tomas
225853f86b17SRitesh Harjani BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2259c9de560dSAlex Tomas
2260c9de560dSAlex Tomas if (EXT4_SB(sb)->s_mb_stats)
2261c9de560dSAlex Tomas atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2262c9de560dSAlex Tomas
2263c9de560dSAlex Tomas break;
2264c9de560dSAlex Tomas }
2265c9de560dSAlex Tomas }
2266c9de560dSAlex Tomas
2267c9de560dSAlex Tomas /*
2268c9de560dSAlex Tomas * The routine scans the group and measures all found extents.
2269c9de560dSAlex Tomas * In order to optimize scanning, the caller must pass the number of
2270c9de560dSAlex Tomas * free blocks in the group, so the routine knows the upper limit.
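 * (Once the running `free' counter reaches zero, no further free extent
 * can start, so the scan below terminates without walking the rest of
 * the bitmap.)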
2271c9de560dSAlex Tomas */
2272089ceeccSEric Sandeen static noinline_for_stack
2273089ceeccSEric Sandeen void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2274c9de560dSAlex Tomas struct ext4_buddy *e4b)
2275c9de560dSAlex Tomas {
2276c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
2277c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap;
2278c9de560dSAlex Tomas struct ext4_free_extent ex;
2279c9de560dSAlex Tomas int i;
2280c9de560dSAlex Tomas int free;
2281c9de560dSAlex Tomas
2282c9de560dSAlex Tomas free = e4b->bd_info->bb_free;
2283907ea529STheodore Ts'o if (WARN_ON(free <= 0))
2284907ea529STheodore Ts'o return;
2285c9de560dSAlex Tomas
2286c9de560dSAlex Tomas i = e4b->bd_info->bb_first_free;
2287c9de560dSAlex Tomas
2288c9de560dSAlex Tomas while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2289ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap,
22907137d7a4STheodore Ts'o EXT4_CLUSTERS_PER_GROUP(sb), i);
22917137d7a4STheodore Ts'o if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
229226346ff6SAneesh Kumar K.V /*
2293e56eb659SAneesh Kumar K.V * If we have a corrupt bitmap, we won't find any
229426346ff6SAneesh Kumar K.V * free blocks even though group info says we
2295b483bb77SRandy Dunlap * have free blocks
229626346ff6SAneesh Kumar K.V */
2297e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
229853accfa9STheodore Ts'o "%d free clusters as per "
2299fde4d95aSTheodore Ts'o "group info. But bitmap says 0",
230026346ff6SAneesh Kumar K.V free);
2301736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2302736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2303c9de560dSAlex Tomas break;
2304c9de560dSAlex Tomas }
2305c9de560dSAlex Tomas
230615c006a2SRobin Dong mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2307907ea529STheodore Ts'o if (WARN_ON(ex.fe_len <= 0))
2308907ea529STheodore Ts'o break;
230926346ff6SAneesh Kumar K.V if (free < ex.fe_len) {
2310e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
231153accfa9STheodore Ts'o "%d free clusters as per "
2312fde4d95aSTheodore Ts'o "group info. But got %d blocks",
231326346ff6SAneesh Kumar K.V free, ex.fe_len);
2314736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2315736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2316e56eb659SAneesh Kumar K.V /*
2317e56eb659SAneesh Kumar K.V * The number of free blocks differs. This mostly
2318e56eb659SAneesh Kumar K.V * indicates that the bitmap is corrupt. So exit
2319e56eb659SAneesh Kumar K.V * without claiming the space.
2320e56eb659SAneesh Kumar K.V */
2321e56eb659SAneesh Kumar K.V break;
232226346ff6SAneesh Kumar K.V }
2323ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADC0DE; /* debug value */
2324c9de560dSAlex Tomas ext4_mb_measure_extent(ac, &ex, e4b);
2325c9de560dSAlex Tomas
2326c9de560dSAlex Tomas i += ex.fe_len;
2327c9de560dSAlex Tomas free -= ex.fe_len;
2328c9de560dSAlex Tomas }
2329c9de560dSAlex Tomas
2330c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 1);
2331c9de560dSAlex Tomas }
2332c9de560dSAlex Tomas
2333c9de560dSAlex Tomas /*
2334c9de560dSAlex Tomas * This is a special case for storage devices like raid5
2335506bf2d8SEric Sandeen * we try to find stripe-aligned chunks for stripe-size-multiple requests
2336c9de560dSAlex Tomas */
2337089ceeccSEric Sandeen static noinline_for_stack
2338089ceeccSEric Sandeen void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2339c9de560dSAlex Tomas struct ext4_buddy *e4b)
2340c9de560dSAlex Tomas {
2341c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
2342c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
2343c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap;
2344c9de560dSAlex Tomas struct ext4_free_extent ex;
2345c9de560dSAlex Tomas ext4_fsblk_t first_group_block;
2346c9de560dSAlex Tomas ext4_fsblk_t a;
2347c9de560dSAlex Tomas ext4_grpblk_t i;
2348c9de560dSAlex Tomas int max;
2349c9de560dSAlex Tomas
2350c9de560dSAlex Tomas BUG_ON(sbi->s_stripe == 0);
2351c9de560dSAlex Tomas
2352c9de560dSAlex Tomas /* find first stripe-aligned block in group */
23535661bd68SAkinobu Mita first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
23545661bd68SAkinobu Mita
2355c9de560dSAlex Tomas a = first_group_block + sbi->s_stripe - 1;
2356c9de560dSAlex Tomas do_div(a, sbi->s_stripe);
2357c9de560dSAlex Tomas i = (a * sbi->s_stripe) - first_group_block;
2358c9de560dSAlex Tomas
23597137d7a4STheodore Ts'o while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2360c9de560dSAlex Tomas if (!mb_test_bit(i, bitmap)) {
236115c006a2SRobin Dong max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2362c9de560dSAlex Tomas if (max >= sbi->s_stripe) {
2363c9de560dSAlex Tomas ac->ac_found++;
2364ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADF00D; /* debug value */
2365c9de560dSAlex Tomas ac->ac_b_ex = ex;
2366c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2367c9de560dSAlex Tomas break;
2368c9de560dSAlex Tomas }
2369c9de560dSAlex Tomas }
2370c9de560dSAlex Tomas i += sbi->s_stripe;
2371c9de560dSAlex Tomas }
2372c9de560dSAlex Tomas }
2373c9de560dSAlex Tomas
237442ac1848SLukas Czerner /*
23758ef123feSRitesh Harjani * This is also called BEFORE we load the buddy bitmap.
237642ac1848SLukas Czerner * Returns 1 or 0, indicating whether the group is suitable
23778ef123feSRitesh Harjani * for the allocation or not.
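 * When called before the buddy is loaded (via ext4_mb_good_group_nolock())
 * the statistics read here may be stale; callers therefore recheck with
 * this function again after taking ext4_lock_group().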
237842ac1848SLukas Czerner */ 23798ef123feSRitesh Harjani static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2380c9de560dSAlex Tomas ext4_group_t group, int cr) 2381c9de560dSAlex Tomas { 23828ef123feSRitesh Harjani ext4_grpblk_t free, fragments; 2383a4912123STheodore Ts'o int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2384c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2385c9de560dSAlex Tomas 2386c9de560dSAlex Tomas BUG_ON(cr < 0 || cr >= 4); 23878a57d9d6SCurt Wohlgemuth 2388dddcd2f9Sbrookxu if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 23898ef123feSRitesh Harjani return false; 239001fc48e8STheodore Ts'o 2391dddcd2f9Sbrookxu free = grp->bb_free; 2392dddcd2f9Sbrookxu if (free == 0) 23938ef123feSRitesh Harjani return false; 2394c9de560dSAlex Tomas 2395c9de560dSAlex Tomas fragments = grp->bb_fragments; 2396c9de560dSAlex Tomas if (fragments == 0) 23978ef123feSRitesh Harjani return false; 2398c9de560dSAlex Tomas 2399c9de560dSAlex Tomas switch (cr) { 2400c9de560dSAlex Tomas case 0: 2401c9de560dSAlex Tomas BUG_ON(ac->ac_2order == 0); 2402c9de560dSAlex Tomas 2403a4912123STheodore Ts'o /* Avoid using the first bg of a flexgroup for data files */ 2404a4912123STheodore Ts'o if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2405a4912123STheodore Ts'o (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2406a4912123STheodore Ts'o ((group % flex_size) == 0)) 24078ef123feSRitesh Harjani return false; 2408a4912123STheodore Ts'o 2409dddcd2f9Sbrookxu if (free < ac->ac_g_ex.fe_len) 2410dddcd2f9Sbrookxu return false; 2411dddcd2f9Sbrookxu 24124b68f6dfSHarshad Shirwadkar if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 24138ef123feSRitesh Harjani return true; 241440ae3487STheodore Ts'o 241540ae3487STheodore Ts'o if (grp->bb_largest_free_order < ac->ac_2order) 24168ef123feSRitesh Harjani return false; 241740ae3487STheodore Ts'o 24188ef123feSRitesh Harjani return true; 2419c9de560dSAlex Tomas case 1: 2420c9de560dSAlex Tomas if ((free / fragments) >= ac->ac_g_ex.fe_len) 24218ef123feSRitesh Harjani return true; 2422c9de560dSAlex Tomas break; 2423c9de560dSAlex Tomas case 2: 2424c9de560dSAlex Tomas if (free >= ac->ac_g_ex.fe_len) 24258ef123feSRitesh Harjani return true; 2426c9de560dSAlex Tomas break; 2427c9de560dSAlex Tomas case 3: 24288ef123feSRitesh Harjani return true; 2429c9de560dSAlex Tomas default: 2430c9de560dSAlex Tomas BUG(); 2431c9de560dSAlex Tomas } 2432c9de560dSAlex Tomas 24338ef123feSRitesh Harjani return false; 24348ef123feSRitesh Harjani } 24358ef123feSRitesh Harjani 24368ef123feSRitesh Harjani /* 24378ef123feSRitesh Harjani * This could return negative error code if something goes wrong 24388ef123feSRitesh Harjani * during ext4_mb_init_group(). This should not be called with 24398ef123feSRitesh Harjani * ext4_lock_group() held. 2440a5fda113STheodore Ts'o * 2441a5fda113STheodore Ts'o * Note: because we are conditionally operating with the group lock in 2442a5fda113STheodore Ts'o * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2443a5fda113STheodore Ts'o * function using __acquire and __release. This means we need to be 2444a5fda113STheodore Ts'o * super careful before messing with the error path handling via "goto 2445a5fda113STheodore Ts'o * out"! 
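 * (Background: sparse tracks lock context statically, so a lock taken
 * only when should_lock is true looks unbalanced to it. The dummy
 * __release() after the conditional ext4_lock_group() and the dummy
 * __acquire() before the conditional ext4_unlock_group() rebalance the
 * context in sparse's eyes without changing the generated code.)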
24468ef123feSRitesh Harjani */ 24478ef123feSRitesh Harjani static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 24488ef123feSRitesh Harjani ext4_group_t group, int cr) 24498ef123feSRitesh Harjani { 24508ef123feSRitesh Harjani struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 245199377830SRitesh Harjani struct super_block *sb = ac->ac_sb; 2452c1d2c7d4SAlex Zhuravlev struct ext4_sb_info *sbi = EXT4_SB(sb); 245399377830SRitesh Harjani bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 24548ef123feSRitesh Harjani ext4_grpblk_t free; 24558ef123feSRitesh Harjani int ret = 0; 24568ef123feSRitesh Harjani 2457a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats) 2458a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2459a5fda113STheodore Ts'o if (should_lock) { 246099377830SRitesh Harjani ext4_lock_group(sb, group); 2461a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2462a5fda113STheodore Ts'o } 24638ef123feSRitesh Harjani free = grp->bb_free; 24648ef123feSRitesh Harjani if (free == 0) 24658ef123feSRitesh Harjani goto out; 24668ef123feSRitesh Harjani if (cr <= 2 && free < ac->ac_g_ex.fe_len) 24678ef123feSRitesh Harjani goto out; 24688ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 24698ef123feSRitesh Harjani goto out; 2470a5fda113STheodore Ts'o if (should_lock) { 2471a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 247299377830SRitesh Harjani ext4_unlock_group(sb, group); 2473a5fda113STheodore Ts'o } 24748ef123feSRitesh Harjani 24758ef123feSRitesh Harjani /* We only do this if the grp has never been initialized */ 24768ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2477c1d2c7d4SAlex Zhuravlev struct ext4_group_desc *gdp = 2478c1d2c7d4SAlex Zhuravlev ext4_get_group_desc(sb, group, NULL); 2479c1d2c7d4SAlex Zhuravlev int ret; 2480c1d2c7d4SAlex Zhuravlev 2481c1d2c7d4SAlex Zhuravlev /* cr=0/1 is a very optimistic search to find large 2482c1d2c7d4SAlex Zhuravlev * good chunks almost for free. If buddy data is not 2483c1d2c7d4SAlex Zhuravlev * ready, then this optimization makes no sense. But 2484c1d2c7d4SAlex Zhuravlev * we never skip the first block group in a flex_bg, 2485c1d2c7d4SAlex Zhuravlev * since this gets used for metadata block allocation, 2486c1d2c7d4SAlex Zhuravlev * and we want to make sure we locate metadata blocks 2487c1d2c7d4SAlex Zhuravlev * in the first block group in the flex_bg if possible. 
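 * (Concretely: with s_log_groups_per_flex == 4, i.e. 16 groups per
 * flex_bg, groups 0, 16, 32, ... are never skipped by this cr=0/1
 * shortcut, while the other groups in each flex_bg may be.)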
2488c1d2c7d4SAlex Zhuravlev */ 2489c1d2c7d4SAlex Zhuravlev if (cr < 2 && 2490c1d2c7d4SAlex Zhuravlev (!sbi->s_log_groups_per_flex || 2491c1d2c7d4SAlex Zhuravlev ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2492c1d2c7d4SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2493c1d2c7d4SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2494c1d2c7d4SAlex Zhuravlev return 0; 2495c1d2c7d4SAlex Zhuravlev ret = ext4_mb_init_group(sb, group, GFP_NOFS); 24968ef123feSRitesh Harjani if (ret) 24978ef123feSRitesh Harjani return ret; 24988ef123feSRitesh Harjani } 24998ef123feSRitesh Harjani 2500a5fda113STheodore Ts'o if (should_lock) { 250199377830SRitesh Harjani ext4_lock_group(sb, group); 2502a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2503a5fda113STheodore Ts'o } 25048ef123feSRitesh Harjani ret = ext4_mb_good_group(ac, group, cr); 25058ef123feSRitesh Harjani out: 2506a5fda113STheodore Ts'o if (should_lock) { 2507a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 250899377830SRitesh Harjani ext4_unlock_group(sb, group); 2509a5fda113STheodore Ts'o } 25108ef123feSRitesh Harjani return ret; 2511c9de560dSAlex Tomas } 2512c9de560dSAlex Tomas 2513cfd73237SAlex Zhuravlev /* 2514cfd73237SAlex Zhuravlev * Start prefetching @nr block bitmaps starting at @group. 2515cfd73237SAlex Zhuravlev * Return the next group which needs to be prefetched. 2516cfd73237SAlex Zhuravlev */ 25173d392b26STheodore Ts'o ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2518cfd73237SAlex Zhuravlev unsigned int nr, int *cnt) 2519cfd73237SAlex Zhuravlev { 2520cfd73237SAlex Zhuravlev ext4_group_t ngroups = ext4_get_groups_count(sb); 2521cfd73237SAlex Zhuravlev struct buffer_head *bh; 2522cfd73237SAlex Zhuravlev struct blk_plug plug; 2523cfd73237SAlex Zhuravlev 2524cfd73237SAlex Zhuravlev blk_start_plug(&plug); 2525cfd73237SAlex Zhuravlev while (nr-- > 0) { 2526cfd73237SAlex Zhuravlev struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2527cfd73237SAlex Zhuravlev NULL); 2528cfd73237SAlex Zhuravlev struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2529cfd73237SAlex Zhuravlev 2530cfd73237SAlex Zhuravlev /* 2531cfd73237SAlex Zhuravlev * Prefetch block groups with free blocks; but don't 2532cfd73237SAlex Zhuravlev * bother if it is marked uninitialized on disk, since 2533cfd73237SAlex Zhuravlev * it won't require I/O to read. Also only try to 2534cfd73237SAlex Zhuravlev * prefetch once, so we avoid getblk() call, which can 2535cfd73237SAlex Zhuravlev * be expensive. 
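 * (EXT4_MB_GRP_TEST_AND_SET_READ() atomically sets a per-group
 * "bitmap was read" flag, so each group's bitmap is submitted for
 * read-ahead at most once.)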
2536cfd73237SAlex Zhuravlev */
2537cfd73237SAlex Zhuravlev if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2538cfd73237SAlex Zhuravlev EXT4_MB_GRP_NEED_INIT(grp) &&
2539cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 &&
2540cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) &&
2541cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2542cfd73237SAlex Zhuravlev bh = ext4_read_block_bitmap_nowait(sb, group, true);
2543cfd73237SAlex Zhuravlev if (bh && !IS_ERR(bh)) {
2544cfd73237SAlex Zhuravlev if (!buffer_uptodate(bh) && cnt)
2545cfd73237SAlex Zhuravlev (*cnt)++;
2546cfd73237SAlex Zhuravlev brelse(bh);
2547cfd73237SAlex Zhuravlev }
2548cfd73237SAlex Zhuravlev }
2549cfd73237SAlex Zhuravlev if (++group >= ngroups)
2550cfd73237SAlex Zhuravlev group = 0;
2551cfd73237SAlex Zhuravlev }
2552cfd73237SAlex Zhuravlev blk_finish_plug(&plug);
2553cfd73237SAlex Zhuravlev return group;
2554cfd73237SAlex Zhuravlev }
2555cfd73237SAlex Zhuravlev
2556cfd73237SAlex Zhuravlev /*
2557cfd73237SAlex Zhuravlev * Prefetching reads the block bitmap into the buffer cache; but we
2558cfd73237SAlex Zhuravlev * need to make sure that the buddy bitmap in the page cache has been
2559cfd73237SAlex Zhuravlev * initialized. Note that ext4_mb_init_group() will block if the I/O
2560cfd73237SAlex Zhuravlev * is not yet completed, or indeed if the I/O was not initiated by
2561cfd73237SAlex Zhuravlev * ext4_mb_prefetch.
2562cfd73237SAlex Zhuravlev *
2563cfd73237SAlex Zhuravlev * TODO: We should actually kick off the buddy bitmap setup in a work
2564cfd73237SAlex Zhuravlev * queue when the buffer I/O is completed, so that we don't block
2565cfd73237SAlex Zhuravlev * waiting for the block allocation bitmap read to finish when
2566cfd73237SAlex Zhuravlev * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2567cfd73237SAlex Zhuravlev */
25683d392b26STheodore Ts'o void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2569cfd73237SAlex Zhuravlev unsigned int nr)
2570cfd73237SAlex Zhuravlev {
257122fab984SKemeng Shi struct ext4_group_desc *gdp;
257222fab984SKemeng Shi struct ext4_group_info *grp;
2573cfd73237SAlex Zhuravlev
257422fab984SKemeng Shi while (nr-- > 0) {
2575cfd73237SAlex Zhuravlev if (!group)
2576cfd73237SAlex Zhuravlev group = ext4_get_groups_count(sb);
2577cfd73237SAlex Zhuravlev group--;
257822fab984SKemeng Shi gdp = ext4_get_group_desc(sb, group, NULL);
2579cfd73237SAlex Zhuravlev grp = ext4_get_group_info(sb, group);
2580cfd73237SAlex Zhuravlev
2581cfd73237SAlex Zhuravlev if (EXT4_MB_GRP_NEED_INIT(grp) &&
2582cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 &&
2583cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) &&
2584cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2585cfd73237SAlex Zhuravlev if (ext4_mb_init_group(sb, group, GFP_NOFS))
2586cfd73237SAlex Zhuravlev break;
2587cfd73237SAlex Zhuravlev }
2588cfd73237SAlex Zhuravlev }
2589cfd73237SAlex Zhuravlev }
2590cfd73237SAlex Zhuravlev
25914ddfef7bSEric Sandeen static noinline_for_stack int
25924ddfef7bSEric Sandeen ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2593c9de560dSAlex Tomas {
2594cfd73237SAlex Zhuravlev ext4_group_t prefetch_grp = 0, ngroups, group, i;
25954fca50d4SJan Kara int cr = -1, new_cr;
259642ac1848SLukas Czerner int err = 0, first_err = 0;
2597cfd73237SAlex Zhuravlev unsigned int nr = 0, prefetch_ios = 0;
2598c9de560dSAlex Tomas struct ext4_sb_info *sbi;
2599c9de560dSAlex Tomas struct super_block *sb;
2600c9de560dSAlex Tomas struct ext4_buddy e4b;
260166d5e027Sbrookxu int lost;
2602c9de560dSAlex Tomas
2603c9de560dSAlex Tomas sb = ac->ac_sb;
2604c9de560dSAlex Tomas sbi = EXT4_SB(sb);
26058df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb);
2606fb0a387dSEric Sandeen /* non-extent files are limited to low blocks/groups */
260712e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2608fb0a387dSEric Sandeen ngroups = sbi->s_blockfile_groups;
2609fb0a387dSEric Sandeen
2610c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2611c9de560dSAlex Tomas
2612c9de560dSAlex Tomas /* first, try the goal */
2613c9de560dSAlex Tomas err = ext4_mb_find_by_goal(ac, &e4b);
2614c9de560dSAlex Tomas if (err || ac->ac_status == AC_STATUS_FOUND)
2615c9de560dSAlex Tomas goto out;
2616c9de560dSAlex Tomas
2617c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2618c9de560dSAlex Tomas goto out;
2619c9de560dSAlex Tomas
2620c9de560dSAlex Tomas /*
2621e9a3cd48Sbrookxu * ac->ac_2order is set only if the fe_len is a power of 2;
2622e9a3cd48Sbrookxu * if ac->ac_2order is set we also set the criteria to 0 so that we
2623c9de560dSAlex Tomas * try exact allocation using buddy.
2624c9de560dSAlex Tomas */
2625c9de560dSAlex Tomas i = fls(ac->ac_g_ex.fe_len);
2626c9de560dSAlex Tomas ac->ac_2order = 0;
2627c9de560dSAlex Tomas /*
2628c9de560dSAlex Tomas * We search using buddy data only if the order of the request
2629c9de560dSAlex Tomas * is greater than or equal to sbi->s_mb_order2_reqs
2630b713a5ecSTheodore Ts'o * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2631d9b22cf9SJan Kara * We also support searching for power-of-two requests only for
2632d9b22cf9SJan Kara * requests up to the maximum buddy size we have constructed.
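 * (For example, with a 4KiB block size, MB_NUM_ORDERS(sb) is
 * s_blocksize_bits + 2 = 14, so only power-of-two requests of up to
 * 2^13 clusters can take the exact buddy-based path.)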
2633c9de560dSAlex Tomas */
26344b68f6dfSHarshad Shirwadkar if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2635c9de560dSAlex Tomas /*
2636c9de560dSAlex Tomas * This should tell if fe_len is exactly power of 2
2637c9de560dSAlex Tomas */
2638c9de560dSAlex Tomas if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
26391a5d5e5dSJeremy Cline ac->ac_2order = array_index_nospec(i - 1,
26404b68f6dfSHarshad Shirwadkar MB_NUM_ORDERS(sb));
2641c9de560dSAlex Tomas }
2642c9de560dSAlex Tomas
26434ba74d00STheodore Ts'o /* if stream allocation is enabled, use global goal */
26444ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2645c9de560dSAlex Tomas /* TBD: may be a hot point */
2646c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock);
2647c9de560dSAlex Tomas ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2648c9de560dSAlex Tomas ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2649c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock);
2650c9de560dSAlex Tomas }
26514ba74d00STheodore Ts'o
2652c9de560dSAlex Tomas /* Let's just scan groups to find more or less suitable blocks */
2653c9de560dSAlex Tomas cr = ac->ac_2order ? 0 : 1;
2654c9de560dSAlex Tomas /*
2655c9de560dSAlex Tomas * cr == 0 try to get exact allocation,
2656c9de560dSAlex Tomas * cr == 3 try to get anything
2657c9de560dSAlex Tomas */
2658c9de560dSAlex Tomas repeat:
2659c9de560dSAlex Tomas for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2660c9de560dSAlex Tomas ac->ac_criteria = cr;
2661ed8f9c75SAneesh Kumar K.V /*
2662ed8f9c75SAneesh Kumar K.V * searching for the right group start
2663ed8f9c75SAneesh Kumar K.V * from the goal value specified
2664ed8f9c75SAneesh Kumar K.V */
2665ed8f9c75SAneesh Kumar K.V group = ac->ac_g_ex.fe_group;
2666196e402aSHarshad Shirwadkar ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2667cfd73237SAlex Zhuravlev prefetch_grp = group;
2668ed8f9c75SAneesh Kumar K.V
26694fca50d4SJan Kara for (i = 0, new_cr = cr; i < ngroups; i++,
26704fca50d4SJan Kara ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
26714fca50d4SJan Kara int ret = 0;
2672196e402aSHarshad Shirwadkar
26732ed5724dSTheodore Ts'o cond_resched();
2674196e402aSHarshad Shirwadkar if (new_cr != cr) {
2675196e402aSHarshad Shirwadkar cr = new_cr;
2676196e402aSHarshad Shirwadkar goto repeat;
2677196e402aSHarshad Shirwadkar }
2678c9de560dSAlex Tomas
2679cfd73237SAlex Zhuravlev /*
2680cfd73237SAlex Zhuravlev * Batch reads of the block allocation bitmaps
2681cfd73237SAlex Zhuravlev * to get multiple READs in flight; limit
2682cfd73237SAlex Zhuravlev * prefetching at cr=0/1, otherwise mballoc can
2683cfd73237SAlex Zhuravlev * spend a lot of time loading imperfect groups
2684cfd73237SAlex Zhuravlev */
2685cfd73237SAlex Zhuravlev if ((prefetch_grp == group) &&
2686cfd73237SAlex Zhuravlev (cr > 1 ||
2687cfd73237SAlex Zhuravlev prefetch_ios < sbi->s_mb_prefetch_limit)) {
2688cfd73237SAlex Zhuravlev unsigned int curr_ios = prefetch_ios;
2689cfd73237SAlex Zhuravlev
2690cfd73237SAlex Zhuravlev nr = sbi->s_mb_prefetch;
2691cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) {
269282ef1370SChunguang Xu nr = 1 << sbi->s_log_groups_per_flex;
269382ef1370SChunguang Xu nr -= group & (nr - 1);
269482ef1370SChunguang Xu nr = min(nr, sbi->s_mb_prefetch);
2695cfd73237SAlex Zhuravlev }
2696cfd73237SAlex Zhuravlev prefetch_grp = ext4_mb_prefetch(sb, group,
2697cfd73237SAlex Zhuravlev nr, &prefetch_ios);
2698cfd73237SAlex Zhuravlev if (prefetch_ios == curr_ios)
2699cfd73237SAlex Zhuravlev nr = 0;
2700cfd73237SAlex Zhuravlev }
2701cfd73237SAlex
Zhuravlev 27028a57d9d6SCurt Wohlgemuth /* This now checks without needing the buddy page */ 27038ef123feSRitesh Harjani ret = ext4_mb_good_group_nolock(ac, group, cr); 270442ac1848SLukas Czerner if (ret <= 0) { 270542ac1848SLukas Czerner if (!first_err) 270642ac1848SLukas Czerner first_err = ret; 2707c9de560dSAlex Tomas continue; 270842ac1848SLukas Czerner } 2709c9de560dSAlex Tomas 2710c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2711c9de560dSAlex Tomas if (err) 2712c9de560dSAlex Tomas goto out; 2713c9de560dSAlex Tomas 2714c9de560dSAlex Tomas ext4_lock_group(sb, group); 27158a57d9d6SCurt Wohlgemuth 27168a57d9d6SCurt Wohlgemuth /* 27178a57d9d6SCurt Wohlgemuth * We need to check again after locking the 27188a57d9d6SCurt Wohlgemuth * block group 27198a57d9d6SCurt Wohlgemuth */ 272042ac1848SLukas Czerner ret = ext4_mb_good_group(ac, group, cr); 27218ef123feSRitesh Harjani if (ret == 0) { 2722c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2723e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2724c9de560dSAlex Tomas continue; 2725c9de560dSAlex Tomas } 2726c9de560dSAlex Tomas 2727c9de560dSAlex Tomas ac->ac_groups_scanned++; 2728d9b22cf9SJan Kara if (cr == 0) 2729c9de560dSAlex Tomas ext4_mb_simple_scan_group(ac, &e4b); 2730506bf2d8SEric Sandeen else if (cr == 1 && sbi->s_stripe && 2731506bf2d8SEric Sandeen !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2732c9de560dSAlex Tomas ext4_mb_scan_aligned(ac, &e4b); 2733c9de560dSAlex Tomas else 2734c9de560dSAlex Tomas ext4_mb_complex_scan_group(ac, &e4b); 2735c9de560dSAlex Tomas 2736c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2737e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2738c9de560dSAlex Tomas 2739c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_CONTINUE) 2740c9de560dSAlex Tomas break; 2741c9de560dSAlex Tomas } 2742a6c75eafSHarshad Shirwadkar /* Processed all groups and haven't found blocks */ 2743a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && i == ngroups) 2744a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2745c9de560dSAlex Tomas } 2746c9de560dSAlex Tomas 2747c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2748c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2749c9de560dSAlex Tomas /* 2750c9de560dSAlex Tomas * We've been searching too long. Let's try to allocate 2751c9de560dSAlex Tomas * the best chunk we've found so far 2752c9de560dSAlex Tomas */ 2753c9de560dSAlex Tomas ext4_mb_try_best_found(ac, &e4b); 2754c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_FOUND) { 2755c9de560dSAlex Tomas /* 2756c9de560dSAlex Tomas * Someone more lucky has already allocated it. 
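 * (That is, the extent we remembered in ac->ac_b_ex was grabbed by a
 * concurrent allocation between the scan and this retry.)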
2757c9de560dSAlex Tomas * The only thing we can do is just take first 2758c9de560dSAlex Tomas * found block(s) 2759c9de560dSAlex Tomas */ 276066d5e027Sbrookxu lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 276166d5e027Sbrookxu mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2762c55ee7d2Sbrookxu ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2763c55ee7d2Sbrookxu ac->ac_b_ex.fe_len, lost); 2764c55ee7d2Sbrookxu 2765c9de560dSAlex Tomas ac->ac_b_ex.fe_group = 0; 2766c9de560dSAlex Tomas ac->ac_b_ex.fe_start = 0; 2767c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 0; 2768c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 2769c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_FIRST; 2770c9de560dSAlex Tomas cr = 3; 2771c9de560dSAlex Tomas goto repeat; 2772c9de560dSAlex Tomas } 2773c9de560dSAlex Tomas } 2774a6c75eafSHarshad Shirwadkar 2775a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2776a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2777c9de560dSAlex Tomas out: 277842ac1848SLukas Czerner if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 277942ac1848SLukas Czerner err = first_err; 2780bbc4ec77SRitesh Harjani 2781d3df1453SRitesh Harjani mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2782bbc4ec77SRitesh Harjani ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2783bbc4ec77SRitesh Harjani ac->ac_flags, cr, err); 2784cfd73237SAlex Zhuravlev 2785cfd73237SAlex Zhuravlev if (nr) 2786cfd73237SAlex Zhuravlev ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2787cfd73237SAlex Zhuravlev 2788c9de560dSAlex Tomas return err; 2789c9de560dSAlex Tomas } 2790c9de560dSAlex Tomas 2791c9de560dSAlex Tomas static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2792c9de560dSAlex Tomas { 2793359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2794c9de560dSAlex Tomas ext4_group_t group; 2795c9de560dSAlex Tomas 27968df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2797c9de560dSAlex Tomas return NULL; 2798c9de560dSAlex Tomas group = *pos + 1; 2799a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2800c9de560dSAlex Tomas } 2801c9de560dSAlex Tomas 2802c9de560dSAlex Tomas static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2803c9de560dSAlex Tomas { 2804359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2805c9de560dSAlex Tomas ext4_group_t group; 2806c9de560dSAlex Tomas 2807c9de560dSAlex Tomas ++*pos; 28088df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2809c9de560dSAlex Tomas return NULL; 2810c9de560dSAlex Tomas group = *pos + 1; 2811a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2812c9de560dSAlex Tomas } 2813c9de560dSAlex Tomas 2814c9de560dSAlex Tomas static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2815c9de560dSAlex Tomas { 2816359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2817a9df9a49STheodore Ts'o ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2818c9de560dSAlex Tomas int i; 28191c8457caSAditya Kali int err, buddy_loaded = 0; 2820c9de560dSAlex Tomas struct ext4_buddy e4b; 28211c8457caSAditya Kali struct ext4_group_info *grinfo; 28222df2c340SArnd Bergmann unsigned char blocksize_bits = min_t(unsigned char, 28232df2c340SArnd Bergmann sb->s_blocksize_bits, 28242df2c340SArnd Bergmann EXT4_MAX_BLOCK_LOG_SIZE); 2825c9de560dSAlex Tomas struct sg { 
2826c9de560dSAlex Tomas struct ext4_group_info info; 2827b80b32b6STheodore Ts'o ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2828c9de560dSAlex Tomas } sg; 2829c9de560dSAlex Tomas 2830c9de560dSAlex Tomas group--; 2831c9de560dSAlex Tomas if (group == 0) 283297b4af2fSRasmus Villemoes seq_puts(seq, "#group: free frags first [" 283397b4af2fSRasmus Villemoes " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2834802cf1f9SHuaitong Han " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2835c9de560dSAlex Tomas 2836b80b32b6STheodore Ts'o i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2837b80b32b6STheodore Ts'o sizeof(struct ext4_group_info); 2838b80b32b6STheodore Ts'o 28391c8457caSAditya Kali grinfo = ext4_get_group_info(sb, group); 28401c8457caSAditya Kali /* Load the group info in memory only if not already loaded. */ 28411c8457caSAditya Kali if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2842c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2843c9de560dSAlex Tomas if (err) { 2844a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: I/O error\n", group); 2845c9de560dSAlex Tomas return 0; 2846c9de560dSAlex Tomas } 28471c8457caSAditya Kali buddy_loaded = 1; 28481c8457caSAditya Kali } 28491c8457caSAditya Kali 2850b80b32b6STheodore Ts'o memcpy(&sg, ext4_get_group_info(sb, group), i); 28511c8457caSAditya Kali 28521c8457caSAditya Kali if (buddy_loaded) 2853e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2854c9de560dSAlex Tomas 2855a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2856c9de560dSAlex Tomas sg.info.bb_fragments, sg.info.bb_first_free); 2857c9de560dSAlex Tomas for (i = 0; i <= 13; i++) 28582df2c340SArnd Bergmann seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 2859c9de560dSAlex Tomas sg.info.bb_counters[i] : 0); 2860e0d438c7SXu Wang seq_puts(seq, " ]\n"); 2861c9de560dSAlex Tomas 2862c9de560dSAlex Tomas return 0; 2863c9de560dSAlex Tomas } 2864c9de560dSAlex Tomas 2865c9de560dSAlex Tomas static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2866c9de560dSAlex Tomas { 2867c9de560dSAlex Tomas } 2868c9de560dSAlex Tomas 2869247dbed8SChristoph Hellwig const struct seq_operations ext4_mb_seq_groups_ops = { 2870c9de560dSAlex Tomas .start = ext4_mb_seq_groups_start, 2871c9de560dSAlex Tomas .next = ext4_mb_seq_groups_next, 2872c9de560dSAlex Tomas .stop = ext4_mb_seq_groups_stop, 2873c9de560dSAlex Tomas .show = ext4_mb_seq_groups_show, 2874c9de560dSAlex Tomas }; 2875c9de560dSAlex Tomas 2876a6c75eafSHarshad Shirwadkar int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2877a6c75eafSHarshad Shirwadkar { 2878c30365b9SYu Zhe struct super_block *sb = seq->private; 2879a6c75eafSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2880a6c75eafSHarshad Shirwadkar 2881a6c75eafSHarshad Shirwadkar seq_puts(seq, "mballoc:\n"); 2882a6c75eafSHarshad Shirwadkar if (!sbi->s_mb_stats) { 2883a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tmb stats collection turned off.\n"); 2884a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2885a6c75eafSHarshad Shirwadkar return 0; 2886a6c75eafSHarshad Shirwadkar } 2887a6c75eafSHarshad Shirwadkar seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2888a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2889a6c75eafSHarshad Shirwadkar 2890a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2891a6c75eafSHarshad Shirwadkar 2892a6c75eafSHarshad Shirwadkar 
seq_puts(seq, "\tcr0_stats:\n"); 2893a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); 2894a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2895a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[0])); 2896a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2897a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[0])); 2898196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2899196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2900a6c75eafSHarshad Shirwadkar 2901a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr1_stats:\n"); 2902a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); 2903a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2904a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[1])); 2905a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2906a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[1])); 2907196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2908196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2909a6c75eafSHarshad Shirwadkar 2910a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr2_stats:\n"); 2911a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); 2912a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2913a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[2])); 2914a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2915a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[2])); 2916a6c75eafSHarshad Shirwadkar 2917a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr3_stats:\n"); 2918a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); 2919a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2920a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[3])); 2921a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2922a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[3])); 2923a6c75eafSHarshad Shirwadkar seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2924a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 2925a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2926a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2927a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2928a6c75eafSHarshad Shirwadkar 2929a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2930a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 2931a6c75eafSHarshad Shirwadkar ext4_get_groups_count(sb)); 2932a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_time_used: %llu\n", 2933a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 2934a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tpreallocated: %u\n", 2935a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_preallocated)); 2936a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tdiscarded: %u\n", 2937a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_discarded)); 
2938a6c75eafSHarshad Shirwadkar return 0; 2939a6c75eafSHarshad Shirwadkar } 2940a6c75eafSHarshad Shirwadkar 2941f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 2942a5fda113STheodore Ts'o __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 2943f68f4063SHarshad Shirwadkar { 2944359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2945f68f4063SHarshad Shirwadkar unsigned long position; 2946f68f4063SHarshad Shirwadkar 294783e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2948f68f4063SHarshad Shirwadkar return NULL; 2949f68f4063SHarshad Shirwadkar position = *pos + 1; 2950f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2951f68f4063SHarshad Shirwadkar } 2952f68f4063SHarshad Shirwadkar 2953f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 2954f68f4063SHarshad Shirwadkar { 2955359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2956f68f4063SHarshad Shirwadkar unsigned long position; 2957f68f4063SHarshad Shirwadkar 2958f68f4063SHarshad Shirwadkar ++*pos; 295983e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2960f68f4063SHarshad Shirwadkar return NULL; 2961f68f4063SHarshad Shirwadkar position = *pos + 1; 2962f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2963f68f4063SHarshad Shirwadkar } 2964f68f4063SHarshad Shirwadkar 2965f68f4063SHarshad Shirwadkar static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 2966f68f4063SHarshad Shirwadkar { 2967359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2968f68f4063SHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2969f68f4063SHarshad Shirwadkar unsigned long position = ((unsigned long) v); 2970f68f4063SHarshad Shirwadkar struct ext4_group_info *grp; 297183e80a6eSJan Kara unsigned int count; 2972f68f4063SHarshad Shirwadkar 2973f68f4063SHarshad Shirwadkar position--; 2974f68f4063SHarshad Shirwadkar if (position >= MB_NUM_ORDERS(sb)) { 297583e80a6eSJan Kara position -= MB_NUM_ORDERS(sb); 297683e80a6eSJan Kara if (position == 0) 297783e80a6eSJan Kara seq_puts(seq, "avg_fragment_size_lists:\n"); 2978f68f4063SHarshad Shirwadkar 297983e80a6eSJan Kara count = 0; 298083e80a6eSJan Kara read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 298183e80a6eSJan Kara list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 298283e80a6eSJan Kara bb_avg_fragment_size_node) 298383e80a6eSJan Kara count++; 298483e80a6eSJan Kara read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 298583e80a6eSJan Kara seq_printf(seq, "\tlist_order_%u_groups: %u\n", 298683e80a6eSJan Kara (unsigned int)position, count); 2987f68f4063SHarshad Shirwadkar return 0; 2988f68f4063SHarshad Shirwadkar } 2989f68f4063SHarshad Shirwadkar 2990f68f4063SHarshad Shirwadkar if (position == 0) { 2991f68f4063SHarshad Shirwadkar seq_printf(seq, "optimize_scan: %d\n", 2992f68f4063SHarshad Shirwadkar test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); 2993f68f4063SHarshad Shirwadkar seq_puts(seq, "max_free_order_lists:\n"); 2994f68f4063SHarshad Shirwadkar } 2995f68f4063SHarshad Shirwadkar count = 0; 299683e80a6eSJan Kara read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 2997f68f4063SHarshad Shirwadkar list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 2998f68f4063SHarshad Shirwadkar bb_largest_free_order_node) 2999f68f4063SHarshad Shirwadkar count++; 300083e80a6eSJan Kara read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3001f68f4063SHarshad Shirwadkar seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3002f68f4063SHarshad Shirwadkar (unsigned int)position, count); 3003f68f4063SHarshad Shirwadkar 3004f68f4063SHarshad Shirwadkar return 0; 3005f68f4063SHarshad Shirwadkar } 3006f68f4063SHarshad Shirwadkar 3007f68f4063SHarshad Shirwadkar static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3008f68f4063SHarshad Shirwadkar { 3009f68f4063SHarshad Shirwadkar } 3010f68f4063SHarshad Shirwadkar 3011f68f4063SHarshad Shirwadkar const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3012f68f4063SHarshad Shirwadkar .start = ext4_mb_seq_structs_summary_start, 3013f68f4063SHarshad Shirwadkar .next = ext4_mb_seq_structs_summary_next, 3014f68f4063SHarshad Shirwadkar .stop = ext4_mb_seq_structs_summary_stop, 3015f68f4063SHarshad Shirwadkar .show = ext4_mb_seq_structs_summary_show, 3016f68f4063SHarshad Shirwadkar }; 3017f68f4063SHarshad Shirwadkar 3018fb1813f4SCurt Wohlgemuth static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3019fb1813f4SCurt Wohlgemuth { 3020fb1813f4SCurt Wohlgemuth int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3021fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3022fb1813f4SCurt Wohlgemuth 3023fb1813f4SCurt Wohlgemuth BUG_ON(!cachep); 3024fb1813f4SCurt Wohlgemuth return cachep; 3025fb1813f4SCurt Wohlgemuth } 30265f21b0e6SFrederic Bohe 302728623c2fSTheodore Ts'o /* 302828623c2fSTheodore Ts'o * Allocate the top-level s_group_info array for the specified number 302928623c2fSTheodore Ts'o * of groups 303028623c2fSTheodore Ts'o */ 303128623c2fSTheodore Ts'o int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 303228623c2fSTheodore Ts'o { 303328623c2fSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 303428623c2fSTheodore Ts'o unsigned size; 3035df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 303628623c2fSTheodore Ts'o 303728623c2fSTheodore Ts'o size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 303828623c2fSTheodore Ts'o EXT4_DESC_PER_BLOCK_BITS(sb); 303928623c2fSTheodore Ts'o if (size <= sbi->s_group_info_size) 304028623c2fSTheodore Ts'o return 0; 304128623c2fSTheodore Ts'o 304228623c2fSTheodore Ts'o size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3043a7c3e901SMichal Hocko new_groupinfo = kvzalloc(size, GFP_KERNEL); 304428623c2fSTheodore Ts'o if (!new_groupinfo) { 304528623c2fSTheodore Ts'o ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 304628623c2fSTheodore Ts'o return -ENOMEM; 304728623c2fSTheodore Ts'o } 3048df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3049df3da4eaSSuraj Jitindar Singh old_groupinfo = rcu_dereference(sbi->s_group_info); 3050df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3051df3da4eaSSuraj Jitindar Singh memcpy(new_groupinfo, old_groupinfo, 305228623c2fSTheodore Ts'o sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3053df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 
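/*
 * Publish the enlarged array: rcu_assign_pointer() orders the memcpy()
 * above against the pointer update, so lockless readers going through
 * rcu_dereference()/sbi_array_rcu_deref() see either the old array or
 * the fully populated new one. The old array is freed with
 * ext4_kvfree_array_rcu() only after a grace period has elapsed.
 */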
3054df3da4eaSSuraj Jitindar Singh rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 305528623c2fSTheodore Ts'o sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3056df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3057df3da4eaSSuraj Jitindar Singh ext4_kvfree_array_rcu(old_groupinfo); 305828623c2fSTheodore Ts'o ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 305928623c2fSTheodore Ts'o sbi->s_group_info_size); 306028623c2fSTheodore Ts'o return 0; 306128623c2fSTheodore Ts'o } 306228623c2fSTheodore Ts'o 30635f21b0e6SFrederic Bohe /* Create and initialize ext4_group_info data for the given group. */ 3064920313a7SAneesh Kumar K.V int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 30655f21b0e6SFrederic Bohe struct ext4_group_desc *desc) 30665f21b0e6SFrederic Bohe { 3067fb1813f4SCurt Wohlgemuth int i; 30685f21b0e6SFrederic Bohe int metalen = 0; 3069df3da4eaSSuraj Jitindar Singh int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 30705f21b0e6SFrederic Bohe struct ext4_sb_info *sbi = EXT4_SB(sb); 30715f21b0e6SFrederic Bohe struct ext4_group_info **meta_group_info; 3072fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 30735f21b0e6SFrederic Bohe 30745f21b0e6SFrederic Bohe /* 30755f21b0e6SFrederic Bohe * First check if this group is the first of a reserved block. 30765f21b0e6SFrederic Bohe * If it's true, we have to allocate a new table of pointers 30775f21b0e6SFrederic Bohe * to ext4_group_info structures 30785f21b0e6SFrederic Bohe */ 30795f21b0e6SFrederic Bohe if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 30805f21b0e6SFrederic Bohe metalen = sizeof(*meta_group_info) << 30815f21b0e6SFrederic Bohe EXT4_DESC_PER_BLOCK_BITS(sb); 30824fdb5543SDmitry Monakhov meta_group_info = kmalloc(metalen, GFP_NOFS); 30835f21b0e6SFrederic Bohe if (meta_group_info == NULL) { 30847f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate mem " 30859d8b9ec4STheodore Ts'o "for a buddy group"); 3086df119095SKemeng Shi return -ENOMEM; 30875f21b0e6SFrederic Bohe } 3088df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3089df3da4eaSSuraj Jitindar Singh rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3090df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 30915f21b0e6SFrederic Bohe } 30925f21b0e6SFrederic Bohe 3093df3da4eaSSuraj Jitindar Singh meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 30945f21b0e6SFrederic Bohe i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 30955f21b0e6SFrederic Bohe 30964fdb5543SDmitry Monakhov meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 30975f21b0e6SFrederic Bohe if (meta_group_info[i] == NULL) { 30987f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 30995f21b0e6SFrederic Bohe goto exit_group_info; 31005f21b0e6SFrederic Bohe } 31015f21b0e6SFrederic Bohe set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 31025f21b0e6SFrederic Bohe &(meta_group_info[i]->bb_state)); 31035f21b0e6SFrederic Bohe 31045f21b0e6SFrederic Bohe /* 31055f21b0e6SFrederic Bohe * initialize bb_free to be able to skip 31065f21b0e6SFrederic Bohe * empty groups without initialization 31075f21b0e6SFrederic Bohe */ 31088844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 31098844618dSTheodore Ts'o (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 31105f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3111cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, group, desc); 31125f21b0e6SFrederic Bohe } else { 31135f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3114021b65bbSTheodore Ts'o 
ext4_free_group_clusters(sb, desc);
31155f21b0e6SFrederic Bohe }
31165f21b0e6SFrederic Bohe
31175f21b0e6SFrederic Bohe INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3118920313a7SAneesh Kumar K.V init_rwsem(&meta_group_info[i]->alloc_sem);
311964e290ecSVenkatesh Pallipadi meta_group_info[i]->bb_free_root = RB_ROOT;
3120196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
312183e80a6eSJan Kara INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
31228a57d9d6SCurt Wohlgemuth meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
312383e80a6eSJan Kara meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
3124196e402aSHarshad Shirwadkar meta_group_info[i]->bb_group = group;
31255f21b0e6SFrederic Bohe
3126a3450215SRitesh Harjani mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
31275f21b0e6SFrederic Bohe return 0;
31285f21b0e6SFrederic Bohe
31295f21b0e6SFrederic Bohe exit_group_info:
31305f21b0e6SFrederic Bohe /* If a meta_group_info table has been allocated, release it now */
3131caaf7a29STao Ma if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3132df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info;
3133df3da4eaSSuraj Jitindar Singh
3134df3da4eaSSuraj Jitindar Singh rcu_read_lock();
3135df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info);
3136df3da4eaSSuraj Jitindar Singh kfree(group_info[idx]);
3137df3da4eaSSuraj Jitindar Singh group_info[idx] = NULL;
3138df3da4eaSSuraj Jitindar Singh rcu_read_unlock();
3139caaf7a29STao Ma }
31405f21b0e6SFrederic Bohe return -ENOMEM;
31415f21b0e6SFrederic Bohe } /* ext4_mb_add_groupinfo */
31425f21b0e6SFrederic Bohe
3143c9de560dSAlex Tomas static int ext4_mb_init_backend(struct super_block *sb)
3144c9de560dSAlex Tomas {
31458df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb);
3146c9de560dSAlex Tomas ext4_group_t i;
3147c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
314828623c2fSTheodore Ts'o int err;
31495f21b0e6SFrederic Bohe struct ext4_group_desc *desc;
3150df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info;
3151fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep;
3152c9de560dSAlex Tomas
315328623c2fSTheodore Ts'o err = ext4_mb_alloc_groupinfo(sb, ngroups);
315428623c2fSTheodore Ts'o if (err)
315528623c2fSTheodore Ts'o return err;
31565f21b0e6SFrederic Bohe
3157c9de560dSAlex Tomas sbi->s_buddy_cache = new_inode(sb);
3158c9de560dSAlex Tomas if (sbi->s_buddy_cache == NULL) {
31599d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't get new inode");
3160c9de560dSAlex Tomas goto err_freesgi;
3161c9de560dSAlex Tomas }
316248e6061bSYu Jian /* To avoid potentially colliding with a valid on-disk inode number,
316348e6061bSYu Jian * use EXT4_BAD_INO for the buddy cache inode number. This inode is
316448e6061bSYu Jian * not in the inode hash, so it should never be found by iget(), but
316548e6061bSYu Jian * this will avoid confusion if it ever shows up during debugging.
*/
316648e6061bSYu Jian sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3167c9de560dSAlex Tomas EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
31688df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) {
31694b99faa2SKhazhismel Kumykov cond_resched();
3170c9de560dSAlex Tomas desc = ext4_get_group_desc(sb, i, NULL);
3171c9de560dSAlex Tomas if (desc == NULL) {
31729d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3173c9de560dSAlex Tomas goto err_freebuddy;
3174c9de560dSAlex Tomas }
31755f21b0e6SFrederic Bohe if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
31765f21b0e6SFrederic Bohe goto err_freebuddy;
3177c9de560dSAlex Tomas }
3178c9de560dSAlex Tomas
3179cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) {
3180f91436d5SSabyrzhan Tasbolatov /* a single flex group is supposed to be read by a single IO.
3181f91436d5SSabyrzhan Tasbolatov * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
3182f91436d5SSabyrzhan Tasbolatov * an unsigned integer, so the maximum shift is 32.
3183f91436d5SSabyrzhan Tasbolatov */
3184f91436d5SSabyrzhan Tasbolatov if (sbi->s_es->s_log_groups_per_flex >= 32) {
3185f91436d5SSabyrzhan Tasbolatov ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3186a8867f4eSPhillip Potter goto err_freebuddy;
3187f91436d5SSabyrzhan Tasbolatov }
3188f91436d5SSabyrzhan Tasbolatov sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
318982ef1370SChunguang Xu BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3190cfd73237SAlex Zhuravlev sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3191cfd73237SAlex Zhuravlev } else {
3192cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = 32;
3193cfd73237SAlex Zhuravlev }
3194cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3195cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3196cfd73237SAlex Zhuravlev /* how many real IOs to prefetch within a single allocation at cr=0;
3197cfd73237SAlex Zhuravlev * given cr=0 is a CPU-related optimization, we shouldn't try to
3198cfd73237SAlex Zhuravlev * load too many groups; at some point we should start to use what
3199cfd73237SAlex Zhuravlev * we've got in memory.
3200cfd73237SAlex Zhuravlev * with an average random access time 5ms, it'd take a second to get 3201cfd73237SAlex Zhuravlev * 200 groups (* N with flex_bg), so let's make this limit 4 3202cfd73237SAlex Zhuravlev */ 3203cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3204cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3205cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3206cfd73237SAlex Zhuravlev 3207c9de560dSAlex Tomas return 0; 3208c9de560dSAlex Tomas 3209c9de560dSAlex Tomas err_freebuddy: 3210fb1813f4SCurt Wohlgemuth cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3211f1fa3342SRoel Kluin while (i-- > 0) 3212fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 321328623c2fSTheodore Ts'o i = sbi->s_group_info_size; 3214df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3215df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3216f1fa3342SRoel Kluin while (i-- > 0) 3217df3da4eaSSuraj Jitindar Singh kfree(group_info[i]); 3218df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3219c9de560dSAlex Tomas iput(sbi->s_buddy_cache); 3220c9de560dSAlex Tomas err_freesgi: 3221df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3222df3da4eaSSuraj Jitindar Singh kvfree(rcu_dereference(sbi->s_group_info)); 3223df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3224c9de560dSAlex Tomas return -ENOMEM; 3225c9de560dSAlex Tomas } 3226c9de560dSAlex Tomas 32272892c15dSEric Sandeen static void ext4_groupinfo_destroy_slabs(void) 32282892c15dSEric Sandeen { 32292892c15dSEric Sandeen int i; 32302892c15dSEric Sandeen 32312892c15dSEric Sandeen for (i = 0; i < NR_GRPINFO_CACHES; i++) { 32322892c15dSEric Sandeen kmem_cache_destroy(ext4_groupinfo_caches[i]); 32332892c15dSEric Sandeen ext4_groupinfo_caches[i] = NULL; 32342892c15dSEric Sandeen } 32352892c15dSEric Sandeen } 32362892c15dSEric Sandeen 32372892c15dSEric Sandeen static int ext4_groupinfo_create_slab(size_t size) 32382892c15dSEric Sandeen { 32392892c15dSEric Sandeen static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 32402892c15dSEric Sandeen int slab_size; 32412892c15dSEric Sandeen int blocksize_bits = order_base_2(size); 32422892c15dSEric Sandeen int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 32432892c15dSEric Sandeen struct kmem_cache *cachep; 32442892c15dSEric Sandeen 32452892c15dSEric Sandeen if (cache_index >= NR_GRPINFO_CACHES) 32462892c15dSEric Sandeen return -EINVAL; 32472892c15dSEric Sandeen 32482892c15dSEric Sandeen if (unlikely(cache_index < 0)) 32492892c15dSEric Sandeen cache_index = 0; 32502892c15dSEric Sandeen 32512892c15dSEric Sandeen mutex_lock(&ext4_grpinfo_slab_create_mutex); 32522892c15dSEric Sandeen if (ext4_groupinfo_caches[cache_index]) { 32532892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex); 32542892c15dSEric Sandeen return 0; /* Already created */ 32552892c15dSEric Sandeen } 32562892c15dSEric Sandeen 32572892c15dSEric Sandeen slab_size = offsetof(struct ext4_group_info, 32582892c15dSEric Sandeen bb_counters[blocksize_bits + 2]); 32592892c15dSEric Sandeen 32602892c15dSEric Sandeen cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 32612892c15dSEric Sandeen slab_size, 0, SLAB_RECLAIM_ACCOUNT, 32622892c15dSEric Sandeen NULL); 32632892c15dSEric Sandeen 3264823ba01fSTao Ma ext4_groupinfo_caches[cache_index] = cachep; 3265823ba01fSTao Ma 32662892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex); 32672892c15dSEric Sandeen if (!cachep) { 
32689d8b9ec4STheodore Ts'o printk(KERN_EMERG 32699d8b9ec4STheodore Ts'o "EXT4-fs: no memory for groupinfo slab cache\n"); 32702892c15dSEric Sandeen return -ENOMEM; 32712892c15dSEric Sandeen } 32722892c15dSEric Sandeen 32732892c15dSEric Sandeen return 0; 32742892c15dSEric Sandeen } 32752892c15dSEric Sandeen 327655cdd0afSWang Jianchao static void ext4_discard_work(struct work_struct *work) 327755cdd0afSWang Jianchao { 327855cdd0afSWang Jianchao struct ext4_sb_info *sbi = container_of(work, 327955cdd0afSWang Jianchao struct ext4_sb_info, s_discard_work); 328055cdd0afSWang Jianchao struct super_block *sb = sbi->s_sb; 328155cdd0afSWang Jianchao struct ext4_free_data *fd, *nfd; 328255cdd0afSWang Jianchao struct ext4_buddy e4b; 328355cdd0afSWang Jianchao struct list_head discard_list; 328455cdd0afSWang Jianchao ext4_group_t grp, load_grp; 328555cdd0afSWang Jianchao int err = 0; 328655cdd0afSWang Jianchao 328755cdd0afSWang Jianchao INIT_LIST_HEAD(&discard_list); 328855cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 328955cdd0afSWang Jianchao list_splice_init(&sbi->s_discard_list, &discard_list); 329055cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 329155cdd0afSWang Jianchao 329255cdd0afSWang Jianchao load_grp = UINT_MAX; 329355cdd0afSWang Jianchao list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 329455cdd0afSWang Jianchao /* 32955036ab8dSWang Jianchao * If filesystem is umounting or no memory or suffering 32965036ab8dSWang Jianchao * from no space, give up the discard 329755cdd0afSWang Jianchao */ 32985036ab8dSWang Jianchao if ((sb->s_flags & SB_ACTIVE) && !err && 32995036ab8dSWang Jianchao !atomic_read(&sbi->s_retry_alloc_pending)) { 330055cdd0afSWang Jianchao grp = fd->efd_group; 330155cdd0afSWang Jianchao if (grp != load_grp) { 330255cdd0afSWang Jianchao if (load_grp != UINT_MAX) 330355cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 330455cdd0afSWang Jianchao 330555cdd0afSWang Jianchao err = ext4_mb_load_buddy(sb, grp, &e4b); 330655cdd0afSWang Jianchao if (err) { 330755cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 330855cdd0afSWang Jianchao load_grp = UINT_MAX; 330955cdd0afSWang Jianchao continue; 331055cdd0afSWang Jianchao } else { 331155cdd0afSWang Jianchao load_grp = grp; 331255cdd0afSWang Jianchao } 331355cdd0afSWang Jianchao } 331455cdd0afSWang Jianchao 331555cdd0afSWang Jianchao ext4_lock_group(sb, grp); 331655cdd0afSWang Jianchao ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 331755cdd0afSWang Jianchao fd->efd_start_cluster + fd->efd_count - 1, 1); 331855cdd0afSWang Jianchao ext4_unlock_group(sb, grp); 331955cdd0afSWang Jianchao } 332055cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 332155cdd0afSWang Jianchao } 332255cdd0afSWang Jianchao 332355cdd0afSWang Jianchao if (load_grp != UINT_MAX) 332455cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 332555cdd0afSWang Jianchao } 332655cdd0afSWang Jianchao 33279d99012fSAkira Fujita int ext4_mb_init(struct super_block *sb) 3328c9de560dSAlex Tomas { 3329c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 33306be2ded1SAneesh Kumar K.V unsigned i, j; 3331935244cdSNicolai Stange unsigned offset, offset_incr; 3332c9de560dSAlex Tomas unsigned max; 333374767c5aSShen Feng int ret; 3334c9de560dSAlex Tomas 33354b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3336c9de560dSAlex Tomas 3337c9de560dSAlex Tomas sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3338c9de560dSAlex Tomas if (sbi->s_mb_offsets == NULL) { 3339fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 
3340fb1813f4SCurt Wohlgemuth goto out; 3341c9de560dSAlex Tomas } 3342ff7ef329SYasunori Goto 33434b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3344c9de560dSAlex Tomas sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3345c9de560dSAlex Tomas if (sbi->s_mb_maxs == NULL) { 3346fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3347fb1813f4SCurt Wohlgemuth goto out; 3348fb1813f4SCurt Wohlgemuth } 3349fb1813f4SCurt Wohlgemuth 33502892c15dSEric Sandeen ret = ext4_groupinfo_create_slab(sb->s_blocksize); 33512892c15dSEric Sandeen if (ret < 0) 3352fb1813f4SCurt Wohlgemuth goto out; 3353c9de560dSAlex Tomas 3354c9de560dSAlex Tomas /* order 0 is regular bitmap */ 3355c9de560dSAlex Tomas sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3356c9de560dSAlex Tomas sbi->s_mb_offsets[0] = 0; 3357c9de560dSAlex Tomas 3358c9de560dSAlex Tomas i = 1; 3359c9de560dSAlex Tomas offset = 0; 3360935244cdSNicolai Stange offset_incr = 1 << (sb->s_blocksize_bits - 1); 3361c9de560dSAlex Tomas max = sb->s_blocksize << 2; 3362c9de560dSAlex Tomas do { 3363c9de560dSAlex Tomas sbi->s_mb_offsets[i] = offset; 3364c9de560dSAlex Tomas sbi->s_mb_maxs[i] = max; 3365935244cdSNicolai Stange offset += offset_incr; 3366935244cdSNicolai Stange offset_incr = offset_incr >> 1; 3367c9de560dSAlex Tomas max = max >> 1; 3368c9de560dSAlex Tomas i++; 33694b68f6dfSHarshad Shirwadkar } while (i < MB_NUM_ORDERS(sb)); 33704b68f6dfSHarshad Shirwadkar 337183e80a6eSJan Kara sbi->s_mb_avg_fragment_size = 337283e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 337383e80a6eSJan Kara GFP_KERNEL); 337483e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size) { 337583e80a6eSJan Kara ret = -ENOMEM; 337683e80a6eSJan Kara goto out; 337783e80a6eSJan Kara } 337883e80a6eSJan Kara sbi->s_mb_avg_fragment_size_locks = 337983e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 338083e80a6eSJan Kara GFP_KERNEL); 338183e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size_locks) { 338283e80a6eSJan Kara ret = -ENOMEM; 338383e80a6eSJan Kara goto out; 338483e80a6eSJan Kara } 338583e80a6eSJan Kara for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 338683e80a6eSJan Kara INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 338783e80a6eSJan Kara rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 338883e80a6eSJan Kara } 3389196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders = 3390196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3391196e402aSHarshad Shirwadkar GFP_KERNEL); 3392196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders) { 3393196e402aSHarshad Shirwadkar ret = -ENOMEM; 3394196e402aSHarshad Shirwadkar goto out; 3395196e402aSHarshad Shirwadkar } 3396196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders_locks = 3397196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3398196e402aSHarshad Shirwadkar GFP_KERNEL); 3399196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders_locks) { 3400196e402aSHarshad Shirwadkar ret = -ENOMEM; 3401196e402aSHarshad Shirwadkar goto out; 3402196e402aSHarshad Shirwadkar } 3403196e402aSHarshad Shirwadkar for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3404196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); 3405196e402aSHarshad Shirwadkar rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); 3406196e402aSHarshad Shirwadkar } 3407c9de560dSAlex Tomas 3408c9de560dSAlex Tomas spin_lock_init(&sbi->s_md_lock); 3409d08854f5STheodore Ts'o sbi->s_mb_free_pending = 0; 3410a0154344SDaeho Jeong 
INIT_LIST_HEAD(&sbi->s_freed_data_list);
341155cdd0afSWang Jianchao INIT_LIST_HEAD(&sbi->s_discard_list);
341255cdd0afSWang Jianchao INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
34135036ab8dSWang Jianchao atomic_set(&sbi->s_retry_alloc_pending, 0);
3414c9de560dSAlex Tomas
3415c9de560dSAlex Tomas sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3416c9de560dSAlex Tomas sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3417c9de560dSAlex Tomas sbi->s_mb_stats = MB_DEFAULT_STATS;
3418c9de560dSAlex Tomas sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3419c9de560dSAlex Tomas sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
342027bc446eSbrookxu sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
342127baebb8STheodore Ts'o /*
342227baebb8STheodore Ts'o  * The default group preallocation is 512, which for 4k block
342327baebb8STheodore Ts'o  * sizes translates to 2 megabytes. However for bigalloc file
342427baebb8STheodore Ts'o  * systems, this is probably too big (i.e., if the cluster size
342527baebb8STheodore Ts'o  * is 1 megabyte, then group preallocation size becomes half a
342627baebb8STheodore Ts'o  * gigabyte!). As a default, we will keep a two megabyte
342727baebb8STheodore Ts'o  * group prealloc size for cluster sizes up to 64k, and after
342827baebb8STheodore Ts'o  * that, we will force a minimum group preallocation size of
342927baebb8STheodore Ts'o  * 32 clusters. This translates to 8 megs when the cluster
343027baebb8STheodore Ts'o  * size is 256k, and 32 megs when the cluster size is 1 meg,
343127baebb8STheodore Ts'o  * which seems reasonable as a default.
343227baebb8STheodore Ts'o  */
343327baebb8STheodore Ts'o sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
343427baebb8STheodore Ts'o sbi->s_cluster_bits, 32);
3435d7a1fee1SDan Ehrenberg /*
3436d7a1fee1SDan Ehrenberg  * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3437d7a1fee1SDan Ehrenberg  * to the lowest multiple of s_stripe which is bigger than
3438d7a1fee1SDan Ehrenberg  * the s_mb_group_prealloc as determined above. We want
3439d7a1fee1SDan Ehrenberg  * the preallocation size to be an exact multiple of the
3440d7a1fee1SDan Ehrenberg  * RAID stripe size so that preallocations don't fragment
3441d7a1fee1SDan Ehrenberg  * the stripes.
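 * For example (hypothetical numbers): with 4k blocks, the default
 * 512-block group prealloc and s_stripe = 48, roundup() below yields
 * 528, the lowest multiple of 48 that is >= 512.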
3442d7a1fee1SDan Ehrenberg  */
3443d7a1fee1SDan Ehrenberg if (sbi->s_stripe > 1) {
3444d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc = roundup(
3445d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc, sbi->s_stripe);
3446d7a1fee1SDan Ehrenberg }
3447c9de560dSAlex Tomas
3448730c213cSEric Sandeen sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3449c9de560dSAlex Tomas if (sbi->s_locality_groups == NULL) {
3450fb1813f4SCurt Wohlgemuth ret = -ENOMEM;
3451029b10c5SAndrey Tsyvarev goto out;
3452c9de560dSAlex Tomas }
3453730c213cSEric Sandeen for_each_possible_cpu(i) {
3454c9de560dSAlex Tomas struct ext4_locality_group *lg;
3455730c213cSEric Sandeen lg = per_cpu_ptr(sbi->s_locality_groups, i);
3456c9de560dSAlex Tomas mutex_init(&lg->lg_mutex);
34576be2ded1SAneesh Kumar K.V for (j = 0; j < PREALLOC_TB_SIZE; j++)
34586be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3459c9de560dSAlex Tomas spin_lock_init(&lg->lg_prealloc_lock);
3460c9de560dSAlex Tomas }
3461c9de560dSAlex Tomas
346210f0d2a5SChristoph Hellwig if (bdev_nonrot(sb->s_bdev))
3463196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = 0;
3464196e402aSHarshad Shirwadkar else
3465196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
346679a77c5aSYu Jian /* init file for buddy data */
346779a77c5aSYu Jian ret = ext4_mb_init_backend(sb);
34687aa0baeaSTao Ma if (ret != 0)
34697aa0baeaSTao Ma goto out_free_locality_groups;
347079a77c5aSYu Jian
34717aa0baeaSTao Ma return 0;
34727aa0baeaSTao Ma
34737aa0baeaSTao Ma out_free_locality_groups:
34747aa0baeaSTao Ma free_percpu(sbi->s_locality_groups);
34757aa0baeaSTao Ma sbi->s_locality_groups = NULL;
3476fb1813f4SCurt Wohlgemuth out:
347783e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size);
347883e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks);
3479196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders);
3480196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks);
3481fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_offsets);
34827aa0baeaSTao Ma sbi->s_mb_offsets = NULL;
3483fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_maxs);
34847aa0baeaSTao Ma sbi->s_mb_maxs = NULL;
3485fb1813f4SCurt Wohlgemuth return ret;
3486c9de560dSAlex Tomas }
3487c9de560dSAlex Tomas
3488955ce5f5SAneesh Kumar K.V /* needs to be called with the ext4 group lock held */
3489d3df1453SRitesh Harjani static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3490c9de560dSAlex Tomas {
3491c9de560dSAlex Tomas struct ext4_prealloc_space *pa;
3492c9de560dSAlex Tomas struct list_head *cur, *tmp;
3493c9de560dSAlex Tomas int count = 0;
3494c9de560dSAlex Tomas
3495c9de560dSAlex Tomas list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3496c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3497c9de560dSAlex Tomas list_del(&pa->pa_group_list);
3498c9de560dSAlex Tomas count++;
3499688f05a0SAneesh Kumar K.V kmem_cache_free(ext4_pspace_cachep, pa);
3500c9de560dSAlex Tomas }
3501d3df1453SRitesh Harjani return count;
3502c9de560dSAlex Tomas }
3503c9de560dSAlex Tomas
3504c9de560dSAlex Tomas int ext4_mb_release(struct super_block *sb)
3505c9de560dSAlex Tomas {
35068df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb);
3507c9de560dSAlex Tomas ext4_group_t i;
3508c9de560dSAlex Tomas int num_meta_group_infos;
3509df3da4eaSSuraj Jitindar Singh struct ext4_group_info *grinfo, ***group_info;
3510c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
3511fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep =
get_groupinfo_cache(sb->s_blocksize_bits);
3512d3df1453SRitesh Harjani int count;
3513c9de560dSAlex Tomas
351455cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) {
351555cdd0afSWang Jianchao /*
351655cdd0afSWang Jianchao  * wait for the discard work to drain all of ext4_free_data
351755cdd0afSWang Jianchao  */
351855cdd0afSWang Jianchao flush_work(&sbi->s_discard_work);
351955cdd0afSWang Jianchao WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
352055cdd0afSWang Jianchao }
352155cdd0afSWang Jianchao
3522c9de560dSAlex Tomas if (sbi->s_group_info) {
35238df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) {
35244b99faa2SKhazhismel Kumykov cond_resched();
3525c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, i);
3526a3450215SRitesh Harjani mb_group_bb_bitmap_free(grinfo);
3527c9de560dSAlex Tomas ext4_lock_group(sb, i);
3528d3df1453SRitesh Harjani count = ext4_mb_cleanup_pa(grinfo);
3529d3df1453SRitesh Harjani if (count)
3530d3df1453SRitesh Harjani mb_debug(sb, "mballoc: %d PAs left\n",
3531d3df1453SRitesh Harjani count);
3532c9de560dSAlex Tomas ext4_unlock_group(sb, i);
3533fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, grinfo);
3534c9de560dSAlex Tomas }
35358df9675fSTheodore Ts'o num_meta_group_infos = (ngroups +
3536c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK(sb) - 1) >>
3537c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK_BITS(sb);
3538df3da4eaSSuraj Jitindar Singh rcu_read_lock();
3539df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info);
3540c9de560dSAlex Tomas for (i = 0; i < num_meta_group_infos; i++)
3541df3da4eaSSuraj Jitindar Singh kfree(group_info[i]);
3542df3da4eaSSuraj Jitindar Singh kvfree(group_info);
3543df3da4eaSSuraj Jitindar Singh rcu_read_unlock();
3544c9de560dSAlex Tomas }
354583e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size);
354683e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks);
3547196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders);
3548196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks);
3549c9de560dSAlex Tomas kfree(sbi->s_mb_offsets);
3550c9de560dSAlex Tomas kfree(sbi->s_mb_maxs);
3551c9de560dSAlex Tomas iput(sbi->s_buddy_cache);
3552c9de560dSAlex Tomas if (sbi->s_mb_stats) {
35539d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
35549d8b9ec4STheodore Ts'o "mballoc: %u blocks %u reqs (%u success)",
3555c9de560dSAlex Tomas atomic_read(&sbi->s_bal_allocated),
3556c9de560dSAlex Tomas atomic_read(&sbi->s_bal_reqs),
3557c9de560dSAlex Tomas atomic_read(&sbi->s_bal_success));
35589d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
3559a6c75eafSHarshad Shirwadkar "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
35609d8b9ec4STheodore Ts'o "%u 2^N hits, %u breaks, %u lost",
3561c9de560dSAlex Tomas atomic_read(&sbi->s_bal_ex_scanned),
3562a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_bal_groups_scanned),
3563c9de560dSAlex Tomas atomic_read(&sbi->s_bal_goals),
3564c9de560dSAlex Tomas atomic_read(&sbi->s_bal_2orders),
3565c9de560dSAlex Tomas atomic_read(&sbi->s_bal_breaks),
3566c9de560dSAlex Tomas atomic_read(&sbi->s_mb_lost_chunks));
35679d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
356867d25186SHarshad Shirwadkar "mballoc: %u generated and it took %llu",
356967d25186SHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated),
357067d25186SHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time));
35719d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
35729d8b9ec4STheodore Ts'o "mballoc: %u preallocated, %u discarded",
3573c9de560dSAlex Tomas atomic_read(&sbi->s_mb_preallocated),
3574c9de560dSAlex Tomas
atomic_read(&sbi->s_mb_discarded)); 3575c9de560dSAlex Tomas } 3576c9de560dSAlex Tomas 3577730c213cSEric Sandeen free_percpu(sbi->s_locality_groups); 3578c9de560dSAlex Tomas 3579c9de560dSAlex Tomas return 0; 3580c9de560dSAlex Tomas } 3581c9de560dSAlex Tomas 358277ca6cdfSLukas Czerner static inline int ext4_issue_discard(struct super_block *sb, 3583a0154344SDaeho Jeong ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3584a0154344SDaeho Jeong struct bio **biop) 35855c521830SJiaying Zhang { 35865c521830SJiaying Zhang ext4_fsblk_t discard_block; 35875c521830SJiaying Zhang 358884130193STheodore Ts'o discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 358984130193STheodore Ts'o ext4_group_first_block_no(sb, block_group)); 359084130193STheodore Ts'o count = EXT4_C2B(EXT4_SB(sb), count); 35915c521830SJiaying Zhang trace_ext4_discard_blocks(sb, 35925c521830SJiaying Zhang (unsigned long long) discard_block, count); 3593a0154344SDaeho Jeong if (biop) { 3594a0154344SDaeho Jeong return __blkdev_issue_discard(sb->s_bdev, 3595a0154344SDaeho Jeong (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3596a0154344SDaeho Jeong (sector_t)count << (sb->s_blocksize_bits - 9), 359744abff2cSChristoph Hellwig GFP_NOFS, biop); 3598a0154344SDaeho Jeong } else 359993259636SLukas Czerner return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 36005c521830SJiaying Zhang } 36015c521830SJiaying Zhang 3602a0154344SDaeho Jeong static void ext4_free_data_in_buddy(struct super_block *sb, 3603a0154344SDaeho Jeong struct ext4_free_data *entry) 3604c9de560dSAlex Tomas { 3605c9de560dSAlex Tomas struct ext4_buddy e4b; 3606c894058dSAneesh Kumar K.V struct ext4_group_info *db; 3607c7f2bafaSKemeng Shi int err, count = 0; 3608c9de560dSAlex Tomas 3609d3df1453SRitesh Harjani mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 361018aadd47SBobi Jam entry->efd_count, entry->efd_group, entry); 3611c9de560dSAlex Tomas 361218aadd47SBobi Jam err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3613c9de560dSAlex Tomas /* we expect to find existing buddy because it's pinned */ 3614c9de560dSAlex Tomas BUG_ON(err != 0); 3615c9de560dSAlex Tomas 3616d08854f5STheodore Ts'o spin_lock(&EXT4_SB(sb)->s_md_lock); 3617d08854f5STheodore Ts'o EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3618d08854f5STheodore Ts'o spin_unlock(&EXT4_SB(sb)->s_md_lock); 361918aadd47SBobi Jam 3620c894058dSAneesh Kumar K.V db = e4b.bd_info; 3621c9de560dSAlex Tomas /* there are blocks to put in buddy to make them really free */ 362218aadd47SBobi Jam count += entry->efd_count; 362318aadd47SBobi Jam ext4_lock_group(sb, entry->efd_group); 3624c894058dSAneesh Kumar K.V /* Take it out of per group rb tree */ 362518aadd47SBobi Jam rb_erase(&entry->efd_node, &(db->bb_free_root)); 362618aadd47SBobi Jam mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3627c9de560dSAlex Tomas 36283d56b8d2STao Ma /* 36293d56b8d2STao Ma * Clear the trimmed flag for the group so that the next 36303d56b8d2STao Ma * ext4_trim_fs can trim it. 36313d56b8d2STao Ma * If the volume is mounted with -o discard, online discard 36323d56b8d2STao Ma * is supported and the free blocks will be trimmed online. 
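 * When online discard is in effect, ext4_process_freed_data() below
 * queues the freed extents on s_discard_list and s_discard_work trims
 * them asynchronously.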
36333d56b8d2STao Ma */ 36343d56b8d2STao Ma if (!test_opt(sb, DISCARD)) 36353d56b8d2STao Ma EXT4_MB_GRP_CLEAR_TRIMMED(db); 36363d56b8d2STao Ma 3637c894058dSAneesh Kumar K.V if (!db->bb_free_root.rb_node) { 3638c894058dSAneesh Kumar K.V /* No more items in the per group rb tree 3639c894058dSAneesh Kumar K.V * balance refcounts from ext4_mb_free_metadata() 3640c894058dSAneesh Kumar K.V */ 364109cbfeafSKirill A. Shutemov put_page(e4b.bd_buddy_page); 364209cbfeafSKirill A. Shutemov put_page(e4b.bd_bitmap_page); 3643c894058dSAneesh Kumar K.V } 364418aadd47SBobi Jam ext4_unlock_group(sb, entry->efd_group); 3645e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 3646c9de560dSAlex Tomas 3647c7f2bafaSKemeng Shi mb_debug(sb, "freed %d blocks in 1 structures\n", count); 3648c9de560dSAlex Tomas } 3649c9de560dSAlex Tomas 3650a0154344SDaeho Jeong /* 3651a0154344SDaeho Jeong * This function is called by the jbd2 layer once the commit has finished, 3652a0154344SDaeho Jeong * so we know we can free the blocks that were released with that commit. 3653a0154344SDaeho Jeong */ 3654a0154344SDaeho Jeong void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3655a0154344SDaeho Jeong { 3656a0154344SDaeho Jeong struct ext4_sb_info *sbi = EXT4_SB(sb); 3657a0154344SDaeho Jeong struct ext4_free_data *entry, *tmp; 3658a0154344SDaeho Jeong struct list_head freed_data_list; 3659a0154344SDaeho Jeong struct list_head *cut_pos = NULL; 366055cdd0afSWang Jianchao bool wake; 3661a0154344SDaeho Jeong 3662a0154344SDaeho Jeong INIT_LIST_HEAD(&freed_data_list); 3663a0154344SDaeho Jeong 3664a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 3665a0154344SDaeho Jeong list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3666a0154344SDaeho Jeong if (entry->efd_tid != commit_tid) 3667a0154344SDaeho Jeong break; 3668a0154344SDaeho Jeong cut_pos = &entry->efd_list; 3669a0154344SDaeho Jeong } 3670a0154344SDaeho Jeong if (cut_pos) 3671a0154344SDaeho Jeong list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3672a0154344SDaeho Jeong cut_pos); 3673a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 3674a0154344SDaeho Jeong 367555cdd0afSWang Jianchao list_for_each_entry(entry, &freed_data_list, efd_list) 3676a0154344SDaeho Jeong ext4_free_data_in_buddy(sb, entry); 367755cdd0afSWang Jianchao 367855cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) { 367955cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 368055cdd0afSWang Jianchao wake = list_empty(&sbi->s_discard_list); 368155cdd0afSWang Jianchao list_splice_tail(&freed_data_list, &sbi->s_discard_list); 368255cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 368355cdd0afSWang Jianchao if (wake) 368455cdd0afSWang Jianchao queue_work(system_unbound_wq, &sbi->s_discard_work); 368555cdd0afSWang Jianchao } else { 368655cdd0afSWang Jianchao list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 368755cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, entry); 368855cdd0afSWang Jianchao } 3689a0154344SDaeho Jeong } 3690a0154344SDaeho Jeong 36915dabfc78STheodore Ts'o int __init ext4_init_mballoc(void) 3692c9de560dSAlex Tomas { 369316828088STheodore Ts'o ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 369416828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3695c9de560dSAlex Tomas if (ext4_pspace_cachep == NULL) 3696f283529aSRitesh Harjani goto out; 3697c9de560dSAlex Tomas 369816828088STheodore Ts'o ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 369916828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3700f283529aSRitesh Harjani if (ext4_ac_cachep == NULL) 
3701f283529aSRitesh Harjani goto out_pa_free; 3702c894058dSAneesh Kumar K.V 370318aadd47SBobi Jam ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 370416828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3705f283529aSRitesh Harjani if (ext4_free_data_cachep == NULL) 3706f283529aSRitesh Harjani goto out_ac_free; 3707f283529aSRitesh Harjani 3708c9de560dSAlex Tomas return 0; 3709f283529aSRitesh Harjani 3710f283529aSRitesh Harjani out_ac_free: 3711f283529aSRitesh Harjani kmem_cache_destroy(ext4_ac_cachep); 3712f283529aSRitesh Harjani out_pa_free: 3713f283529aSRitesh Harjani kmem_cache_destroy(ext4_pspace_cachep); 3714f283529aSRitesh Harjani out: 3715f283529aSRitesh Harjani return -ENOMEM; 3716c9de560dSAlex Tomas } 3717c9de560dSAlex Tomas 37185dabfc78STheodore Ts'o void ext4_exit_mballoc(void) 3719c9de560dSAlex Tomas { 37203e03f9caSJesper Dangaard Brouer /* 37213e03f9caSJesper Dangaard Brouer * Wait for completion of call_rcu()'s on ext4_pspace_cachep 37223e03f9caSJesper Dangaard Brouer * before destroying the slab cache. 37233e03f9caSJesper Dangaard Brouer */ 37243e03f9caSJesper Dangaard Brouer rcu_barrier(); 3725c9de560dSAlex Tomas kmem_cache_destroy(ext4_pspace_cachep); 3726256bdb49SEric Sandeen kmem_cache_destroy(ext4_ac_cachep); 372718aadd47SBobi Jam kmem_cache_destroy(ext4_free_data_cachep); 37282892c15dSEric Sandeen ext4_groupinfo_destroy_slabs(); 3729c9de560dSAlex Tomas } 3730c9de560dSAlex Tomas 3731c9de560dSAlex Tomas 3732c9de560dSAlex Tomas /* 373373b2c716SUwe Kleine-König * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3734c9de560dSAlex Tomas * Returns 0 if success or error code 3735c9de560dSAlex Tomas */ 37364ddfef7bSEric Sandeen static noinline_for_stack int 37374ddfef7bSEric Sandeen ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 373853accfa9STheodore Ts'o handle_t *handle, unsigned int reserv_clstrs) 3739c9de560dSAlex Tomas { 3740c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 3741c9de560dSAlex Tomas struct ext4_group_desc *gdp; 3742c9de560dSAlex Tomas struct buffer_head *gdp_bh; 3743c9de560dSAlex Tomas struct ext4_sb_info *sbi; 3744c9de560dSAlex Tomas struct super_block *sb; 3745c9de560dSAlex Tomas ext4_fsblk_t block; 3746519deca0SAneesh Kumar K.V int err, len; 3747c9de560dSAlex Tomas 3748c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3749c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_len <= 0); 3750c9de560dSAlex Tomas 3751c9de560dSAlex Tomas sb = ac->ac_sb; 3752c9de560dSAlex Tomas sbi = EXT4_SB(sb); 3753c9de560dSAlex Tomas 3754574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 37559008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 3756fb28f9ceSKemeng Shi return PTR_ERR(bitmap_bh); 37579008a58eSDarrick J. 
Wong } 3758c9de560dSAlex Tomas 37595d601255Sliang xie BUFFER_TRACE(bitmap_bh, "getting write access"); 3760188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 3761188c299eSJan Kara EXT4_JTR_NONE); 3762c9de560dSAlex Tomas if (err) 3763c9de560dSAlex Tomas goto out_err; 3764c9de560dSAlex Tomas 3765c9de560dSAlex Tomas err = -EIO; 3766c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3767c9de560dSAlex Tomas if (!gdp) 3768c9de560dSAlex Tomas goto out_err; 3769c9de560dSAlex Tomas 3770a9df9a49STheodore Ts'o ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3771021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, gdp)); 377203cddb80SAneesh Kumar K.V 37735d601255Sliang xie BUFFER_TRACE(gdp_bh, "get_write_access"); 3774188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 3775c9de560dSAlex Tomas if (err) 3776c9de560dSAlex Tomas goto out_err; 3777c9de560dSAlex Tomas 3778bda00de7SAkinobu Mita block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3779c9de560dSAlex Tomas 378053accfa9STheodore Ts'o len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3781ce9f24ccSJan Kara if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 378212062dddSEric Sandeen ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 37831084f252STheodore Ts'o "fs metadata", block, block+len); 3784519deca0SAneesh Kumar K.V /* File system mounted not to panic on error 3785554a5cccSVegard Nossum * Fix the bitmap and return EFSCORRUPTED 3786519deca0SAneesh Kumar K.V * We leak some of the blocks here. 3787519deca0SAneesh Kumar K.V */ 3788955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3789123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3790519deca0SAneesh Kumar K.V ac->ac_b_ex.fe_len); 3791955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 37920390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3793519deca0SAneesh Kumar K.V if (!err) 3794554a5cccSVegard Nossum err = -EFSCORRUPTED; 3795519deca0SAneesh Kumar K.V goto out_err; 3796c9de560dSAlex Tomas } 3797955ce5f5SAneesh Kumar K.V 3798955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3799c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 3800c9de560dSAlex Tomas { 3801c9de560dSAlex Tomas int i; 3802c9de560dSAlex Tomas for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 3803c9de560dSAlex Tomas BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 3804c9de560dSAlex Tomas bitmap_bh->b_data)); 3805c9de560dSAlex Tomas } 3806c9de560dSAlex Tomas } 3807c9de560dSAlex Tomas #endif 3808123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3809c3e94d1dSYongqiang Yang ac->ac_b_ex.fe_len); 38108844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 38118844618dSTheodore Ts'o (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3812c9de560dSAlex Tomas gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3813021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, 3814cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, 3815560671a0SAneesh Kumar K.V ac->ac_b_ex.fe_group, gdp)); 3816c9de560dSAlex Tomas } 3817021b65bbSTheodore Ts'o len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 3818021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, len); 38191df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 3820feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 3821955ce5f5SAneesh Kumar K.V 3822955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 382357042651STheodore Ts'o percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 3824d2a17637SMingming Cao /* 38256bc6e63fSAneesh Kumar K.V * Now reduce the dirty block count also. Should not go negative 3826d2a17637SMingming Cao */ 38276bc6e63fSAneesh Kumar K.V if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 38286bc6e63fSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 382957042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 383057042651STheodore Ts'o reserv_clstrs); 3831c9de560dSAlex Tomas 3832772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 3833772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, 3834772cb7c8SJose R. Santos ac->ac_b_ex.fe_group); 383590ba983fSTheodore Ts'o atomic64_sub(ac->ac_b_ex.fe_len, 38367c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 38377c990728SSuraj Jitindar Singh flex_group)->free_clusters); 3838772cb7c8SJose R. Santos } 3839772cb7c8SJose R. Santos 38400390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3841c9de560dSAlex Tomas if (err) 3842c9de560dSAlex Tomas goto out_err; 38430390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 3844c9de560dSAlex Tomas 3845c9de560dSAlex Tomas out_err: 384642a10addSAneesh Kumar K.V brelse(bitmap_bh); 3847c9de560dSAlex Tomas return err; 3848c9de560dSAlex Tomas } 3849c9de560dSAlex Tomas 3850c9de560dSAlex Tomas /* 38518016e29fSHarshad Shirwadkar * Idempotent helper for Ext4 fast commit replay path to set the state of 38528016e29fSHarshad Shirwadkar * blocks in bitmaps and update counters. 38538016e29fSHarshad Shirwadkar */ 38548016e29fSHarshad Shirwadkar void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 38558016e29fSHarshad Shirwadkar int len, int state) 38568016e29fSHarshad Shirwadkar { 38578016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh = NULL; 38588016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 38598016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 38608016e29fSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 38618016e29fSHarshad Shirwadkar ext4_group_t group; 38628016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 3863a5c0e2fdSRitesh Harjani int i, err; 38648016e29fSHarshad Shirwadkar int already; 3865bfdc502aSRitesh Harjani unsigned int clen, clen_changed, thisgrp_len; 38668016e29fSHarshad Shirwadkar 3867bfdc502aSRitesh Harjani while (len > 0) { 38688016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 3869bfdc502aSRitesh Harjani 3870bfdc502aSRitesh Harjani /* 3871bfdc502aSRitesh Harjani * Check to see if we are freeing blocks across a group 3872bfdc502aSRitesh Harjani * boundary. 3873bfdc502aSRitesh Harjani * In case of flex_bg, this can happen that (block, len) may 3874bfdc502aSRitesh Harjani * span across more than one group. In that case we need to 3875bfdc502aSRitesh Harjani * get the corresponding group metadata to work with. 3876bfdc502aSRitesh Harjani * For this we have goto again loop. 
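 * (Worked example, assuming 4k blocks, no bigalloc and 32768 blocks per
 * group: a request of block = 32760, len = 16 is processed as
 * thisgrp_len = 8 in the first group, then the remaining 8 in the next
 * iteration of this loop.)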
3877bfdc502aSRitesh Harjani */ 3878bfdc502aSRitesh Harjani thisgrp_len = min_t(unsigned int, (unsigned int)len, 3879bfdc502aSRitesh Harjani EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 3880bfdc502aSRitesh Harjani clen = EXT4_NUM_B2C(sbi, thisgrp_len); 3881bfdc502aSRitesh Harjani 38828c91c579SRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 38838c91c579SRitesh Harjani ext4_error(sb, "Marking blocks in system zone - " 38848c91c579SRitesh Harjani "Block = %llu, len = %u", 38858c91c579SRitesh Harjani block, thisgrp_len); 38868c91c579SRitesh Harjani bitmap_bh = NULL; 38878c91c579SRitesh Harjani break; 38888c91c579SRitesh Harjani } 38898c91c579SRitesh Harjani 38908016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 38918016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 38928016e29fSHarshad Shirwadkar err = PTR_ERR(bitmap_bh); 38938016e29fSHarshad Shirwadkar bitmap_bh = NULL; 3894bfdc502aSRitesh Harjani break; 38958016e29fSHarshad Shirwadkar } 38968016e29fSHarshad Shirwadkar 38978016e29fSHarshad Shirwadkar err = -EIO; 38988016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 38998016e29fSHarshad Shirwadkar if (!gdp) 3900bfdc502aSRitesh Harjani break; 39018016e29fSHarshad Shirwadkar 39028016e29fSHarshad Shirwadkar ext4_lock_group(sb, group); 39038016e29fSHarshad Shirwadkar already = 0; 39048016e29fSHarshad Shirwadkar for (i = 0; i < clen; i++) 3905bfdc502aSRitesh Harjani if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 3906bfdc502aSRitesh Harjani !state) 39078016e29fSHarshad Shirwadkar already++; 39088016e29fSHarshad Shirwadkar 3909a5c0e2fdSRitesh Harjani clen_changed = clen - already; 39108016e29fSHarshad Shirwadkar if (state) 3911123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, blkoff, clen); 39128016e29fSHarshad Shirwadkar else 3913bd8247eeSRitesh Harjani mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 39148016e29fSHarshad Shirwadkar if (ext4_has_group_desc_csum(sb) && 39158016e29fSHarshad Shirwadkar (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 39168016e29fSHarshad Shirwadkar gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 39178016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, 3918bfdc502aSRitesh Harjani ext4_free_clusters_after_init(sb, group, gdp)); 39198016e29fSHarshad Shirwadkar } 39208016e29fSHarshad Shirwadkar if (state) 3921a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 39228016e29fSHarshad Shirwadkar else 3923a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 39248016e29fSHarshad Shirwadkar 39258016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, clen); 39261df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 39278016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 39288016e29fSHarshad Shirwadkar 39298016e29fSHarshad Shirwadkar ext4_unlock_group(sb, group); 39308016e29fSHarshad Shirwadkar 39318016e29fSHarshad Shirwadkar if (sbi->s_log_groups_per_flex) { 39328016e29fSHarshad Shirwadkar ext4_group_t flex_group = ext4_flex_group(sbi, group); 3933a5c0e2fdSRitesh Harjani struct flex_groups *fg = sbi_array_rcu_deref(sbi, 3934a5c0e2fdSRitesh Harjani s_flex_groups, flex_group); 39358016e29fSHarshad Shirwadkar 3936a5c0e2fdSRitesh Harjani if (state) 3937a5c0e2fdSRitesh Harjani atomic64_sub(clen_changed, &fg->free_clusters); 3938a5c0e2fdSRitesh Harjani else 3939a5c0e2fdSRitesh Harjani atomic64_add(clen_changed, &fg->free_clusters); 3940bfdc502aSRitesh Harjani 
39418016e29fSHarshad Shirwadkar } 39428016e29fSHarshad Shirwadkar 39438016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 39448016e29fSHarshad Shirwadkar if (err) 3945bfdc502aSRitesh Harjani break; 39468016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 39478016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 39488016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 3949bfdc502aSRitesh Harjani if (err) 3950bfdc502aSRitesh Harjani break; 39518016e29fSHarshad Shirwadkar 3952bfdc502aSRitesh Harjani block += thisgrp_len; 3953bfdc502aSRitesh Harjani len -= thisgrp_len; 3954bfdc502aSRitesh Harjani brelse(bitmap_bh); 3955bfdc502aSRitesh Harjani BUG_ON(len < 0); 3956bfdc502aSRitesh Harjani } 3957bfdc502aSRitesh Harjani 3958bfdc502aSRitesh Harjani if (err) 39598016e29fSHarshad Shirwadkar brelse(bitmap_bh); 39608016e29fSHarshad Shirwadkar } 39618016e29fSHarshad Shirwadkar 39628016e29fSHarshad Shirwadkar /* 3963c9de560dSAlex Tomas * here we normalize request for locality group 3964d7a1fee1SDan Ehrenberg * Group request are normalized to s_mb_group_prealloc, which goes to 3965d7a1fee1SDan Ehrenberg * s_strip if we set the same via mount option. 3966d7a1fee1SDan Ehrenberg * s_mb_group_prealloc can be configured via 3967b713a5ecSTheodore Ts'o * /sys/fs/ext4/<partition>/mb_group_prealloc 3968c9de560dSAlex Tomas * 3969c9de560dSAlex Tomas * XXX: should we try to preallocate more than the group has now? 3970c9de560dSAlex Tomas */ 3971c9de560dSAlex Tomas static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 3972c9de560dSAlex Tomas { 3973c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 3974c9de560dSAlex Tomas struct ext4_locality_group *lg = ac->ac_lg; 3975c9de560dSAlex Tomas 3976c9de560dSAlex Tomas BUG_ON(lg == NULL); 3977c9de560dSAlex Tomas ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 3978d3df1453SRitesh Harjani mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 3979c9de560dSAlex Tomas } 3980c9de560dSAlex Tomas 39817692094aSOjaswin Mujoo static inline void 39827692094aSOjaswin Mujoo ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, 39837692094aSOjaswin Mujoo ext4_lblk_t start, ext4_lblk_t end) 39847692094aSOjaswin Mujoo { 39857692094aSOjaswin Mujoo struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 39867692094aSOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 39877692094aSOjaswin Mujoo struct ext4_prealloc_space *tmp_pa; 39887692094aSOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end; 39897692094aSOjaswin Mujoo 39907692094aSOjaswin Mujoo rcu_read_lock(); 39917692094aSOjaswin Mujoo list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) { 39927692094aSOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 39937692094aSOjaswin Mujoo if (tmp_pa->pa_deleted == 0) { 39947692094aSOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart; 39957692094aSOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 39967692094aSOjaswin Mujoo 39977692094aSOjaswin Mujoo BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start)); 39987692094aSOjaswin Mujoo } 39997692094aSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 40007692094aSOjaswin Mujoo } 40017692094aSOjaswin Mujoo rcu_read_unlock(); 40027692094aSOjaswin Mujoo } 40037692094aSOjaswin Mujoo 4004c9de560dSAlex Tomas /* 40050830344cSOjaswin Mujoo * Given an allocation context "ac" and a range "start", "end", check 40060830344cSOjaswin Mujoo * and adjust boundaries if the range overlaps with any of the existing 
40070830344cSOjaswin Mujoo  * preallocations stored in the corresponding inode of the allocation context.
40080830344cSOjaswin Mujoo  *
40090830344cSOjaswin Mujoo  * Parameters:
40100830344cSOjaswin Mujoo  * ac allocation context
40110830344cSOjaswin Mujoo  * start start of the new range
40120830344cSOjaswin Mujoo  * end end of the new range
40130830344cSOjaswin Mujoo  */
40140830344cSOjaswin Mujoo static inline void
40150830344cSOjaswin Mujoo ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
40160830344cSOjaswin Mujoo ext4_lblk_t *start, ext4_lblk_t *end)
40170830344cSOjaswin Mujoo {
40180830344cSOjaswin Mujoo struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
40190830344cSOjaswin Mujoo struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
40200830344cSOjaswin Mujoo struct ext4_prealloc_space *tmp_pa;
40210830344cSOjaswin Mujoo ext4_lblk_t new_start, new_end;
40220830344cSOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end;
40230830344cSOjaswin Mujoo
40240830344cSOjaswin Mujoo new_start = *start;
40250830344cSOjaswin Mujoo new_end = *end;
40260830344cSOjaswin Mujoo
40270830344cSOjaswin Mujoo /* check we don't cross already preallocated blocks */
40280830344cSOjaswin Mujoo rcu_read_lock();
40290830344cSOjaswin Mujoo list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
40300830344cSOjaswin Mujoo if (tmp_pa->pa_deleted)
40310830344cSOjaswin Mujoo continue;
40320830344cSOjaswin Mujoo spin_lock(&tmp_pa->pa_lock);
40330830344cSOjaswin Mujoo if (tmp_pa->pa_deleted) {
40340830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
40350830344cSOjaswin Mujoo continue;
40360830344cSOjaswin Mujoo }
40370830344cSOjaswin Mujoo
40380830344cSOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart;
40390830344cSOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
40400830344cSOjaswin Mujoo
40410830344cSOjaswin Mujoo /* PA must not overlap original request */
40420830344cSOjaswin Mujoo BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
40430830344cSOjaswin Mujoo ac->ac_o_ex.fe_logical < tmp_pa_start));
40440830344cSOjaswin Mujoo
40450830344cSOjaswin Mujoo /* skip PAs this normalized request doesn't overlap with */
40460830344cSOjaswin Mujoo if (tmp_pa_start >= new_end || tmp_pa_end <= new_start) {
40470830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
40480830344cSOjaswin Mujoo continue;
40490830344cSOjaswin Mujoo }
40500830344cSOjaswin Mujoo BUG_ON(tmp_pa_start <= new_start && tmp_pa_end >= new_end);
40510830344cSOjaswin Mujoo
40520830344cSOjaswin Mujoo /* adjust start or end to be adjacent to this pa */
40530830344cSOjaswin Mujoo if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
40540830344cSOjaswin Mujoo BUG_ON(tmp_pa_end < new_start);
40550830344cSOjaswin Mujoo new_start = tmp_pa_end;
40560830344cSOjaswin Mujoo } else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
40570830344cSOjaswin Mujoo BUG_ON(tmp_pa_start > new_end);
40580830344cSOjaswin Mujoo new_end = tmp_pa_start;
40590830344cSOjaswin Mujoo }
40600830344cSOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock);
40610830344cSOjaswin Mujoo }
40620830344cSOjaswin Mujoo rcu_read_unlock();
40630830344cSOjaswin Mujoo
40640830344cSOjaswin Mujoo /* XXX: extra loop to check we really don't overlap preallocations */
40650830344cSOjaswin Mujoo ext4_mb_pa_assert_overlap(ac, new_start, new_end);
40660830344cSOjaswin Mujoo
40670830344cSOjaswin Mujoo *start = new_start;
40680830344cSOjaswin Mujoo *end = new_end;
40690830344cSOjaswin Mujoo }
40700830344cSOjaswin Mujoo
40710830344cSOjaswin Mujoo /*
4072c9de560dSAlex Tomas  * Normalization means making the
request better in terms of 4073c9de560dSAlex Tomas * size and alignment 4074c9de560dSAlex Tomas */ 40754ddfef7bSEric Sandeen static noinline_for_stack void 40764ddfef7bSEric Sandeen ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4077c9de560dSAlex Tomas struct ext4_allocation_request *ar) 4078c9de560dSAlex Tomas { 407953accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4080b07ffe69SKemeng Shi struct ext4_super_block *es = sbi->s_es; 4081c9de560dSAlex Tomas int bsbits, max; 4082c9de560dSAlex Tomas ext4_lblk_t end; 40831592d2c5SCurt Wohlgemuth loff_t size, start_off; 40841592d2c5SCurt Wohlgemuth loff_t orig_size __maybe_unused; 40855a0790c2SAndi Kleen ext4_lblk_t start; 4086c9de560dSAlex Tomas 4087c9de560dSAlex Tomas /* do normalize only data requests, metadata requests 4088c9de560dSAlex Tomas do not need preallocation */ 4089c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4090c9de560dSAlex Tomas return; 4091c9de560dSAlex Tomas 4092c9de560dSAlex Tomas /* sometime caller may want exact blocks */ 4093c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4094c9de560dSAlex Tomas return; 4095c9de560dSAlex Tomas 4096c9de560dSAlex Tomas /* caller may indicate that preallocation isn't 4097c9de560dSAlex Tomas * required (it's a tail, for example) */ 4098c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4099c9de560dSAlex Tomas return; 4100c9de560dSAlex Tomas 4101c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4102c9de560dSAlex Tomas ext4_mb_normalize_group_request(ac); 4103c9de560dSAlex Tomas return ; 4104c9de560dSAlex Tomas } 4105c9de560dSAlex Tomas 4106c9de560dSAlex Tomas bsbits = ac->ac_sb->s_blocksize_bits; 4107c9de560dSAlex Tomas 4108c9de560dSAlex Tomas /* first, let's learn actual file size 4109c9de560dSAlex Tomas * given current request is allocated */ 411053accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4111c9de560dSAlex Tomas size = size << bsbits; 4112c9de560dSAlex Tomas if (size < i_size_read(ac->ac_inode)) 4113c9de560dSAlex Tomas size = i_size_read(ac->ac_inode); 41145a0790c2SAndi Kleen orig_size = size; 4115c9de560dSAlex Tomas 41161930479cSValerie Clement /* max size of free chunks */ 41171930479cSValerie Clement max = 2 << bsbits; 4118c9de560dSAlex Tomas 41191930479cSValerie Clement #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 41201930479cSValerie Clement (req <= (size) || max <= (chunk_size)) 4121c9de560dSAlex Tomas 4122c9de560dSAlex Tomas /* first, try to predict filesize */ 4123c9de560dSAlex Tomas /* XXX: should this table be tunable? 
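 * (Worked example of the table below: a predicted file size of 100K
 * falls into the "size <= 128 * 1024" bucket and is normalized to 128K,
 * while anything beyond the 8MB tier keeps its own length via the final
 * else branch.)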
*/ 4124c9de560dSAlex Tomas start_off = 0; 4125c9de560dSAlex Tomas if (size <= 16 * 1024) { 4126c9de560dSAlex Tomas size = 16 * 1024; 4127c9de560dSAlex Tomas } else if (size <= 32 * 1024) { 4128c9de560dSAlex Tomas size = 32 * 1024; 4129c9de560dSAlex Tomas } else if (size <= 64 * 1024) { 4130c9de560dSAlex Tomas size = 64 * 1024; 4131c9de560dSAlex Tomas } else if (size <= 128 * 1024) { 4132c9de560dSAlex Tomas size = 128 * 1024; 4133c9de560dSAlex Tomas } else if (size <= 256 * 1024) { 4134c9de560dSAlex Tomas size = 256 * 1024; 4135c9de560dSAlex Tomas } else if (size <= 512 * 1024) { 4136c9de560dSAlex Tomas size = 512 * 1024; 4137c9de560dSAlex Tomas } else if (size <= 1024 * 1024) { 4138c9de560dSAlex Tomas size = 1024 * 1024; 41391930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4140c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 41411930479cSValerie Clement (21 - bsbits)) << 21; 41421930479cSValerie Clement size = 2 * 1024 * 1024; 41431930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4144c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4145c9de560dSAlex Tomas (22 - bsbits)) << 22; 4146c9de560dSAlex Tomas size = 4 * 1024 * 1024; 4147c9de560dSAlex Tomas } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 41481930479cSValerie Clement (8<<20)>>bsbits, max, 8 * 1024)) { 4149c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4150c9de560dSAlex Tomas (23 - bsbits)) << 23; 4151c9de560dSAlex Tomas size = 8 * 1024 * 1024; 4152c9de560dSAlex Tomas } else { 4153c9de560dSAlex Tomas start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 415491a48aafSKemeng Shi size = (loff_t) EXT4_C2B(sbi, 4155b27b1535SXiaoguang Wang ac->ac_o_ex.fe_len) << bsbits; 4156c9de560dSAlex Tomas } 41575a0790c2SAndi Kleen size = size >> bsbits; 41585a0790c2SAndi Kleen start = start_off >> bsbits; 4159c9de560dSAlex Tomas 4160a08f789dSBaokun Li /* 4161a08f789dSBaokun Li * For tiny groups (smaller than 8MB) the chosen allocation 4162a08f789dSBaokun Li * alignment may be larger than group size. Make sure the 4163a08f789dSBaokun Li * alignment does not move allocation to a different group which 4164a08f789dSBaokun Li * makes mballoc fail assertions later. 4165a08f789dSBaokun Li */ 4166a08f789dSBaokun Li start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4167a08f789dSBaokun Li (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4168a08f789dSBaokun Li 4169c9de560dSAlex Tomas /* don't cover already allocated blocks in selected range */ 4170c9de560dSAlex Tomas if (ar->pleft && start <= ar->lleft) { 4171c9de560dSAlex Tomas size -= ar->lleft + 1 - start; 4172c9de560dSAlex Tomas start = ar->lleft + 1; 4173c9de560dSAlex Tomas } 4174c9de560dSAlex Tomas if (ar->pright && start + size - 1 >= ar->lright) 4175c9de560dSAlex Tomas size -= start + size - ar->lright; 4176c9de560dSAlex Tomas 4177cd648b8aSJan Kara /* 4178cd648b8aSJan Kara * Trim allocation request for filesystems with artificially small 4179cd648b8aSJan Kara * groups. 
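 * For instance (hypothetical geometry): with 4k blocks and groups of
 * only 256 blocks (mkfs -g 256), a normalized 512-block request is
 * clipped to 256 blocks here.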
4180cd648b8aSJan Kara  */
4181cd648b8aSJan Kara if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4182cd648b8aSJan Kara size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4183cd648b8aSJan Kara
4184c9de560dSAlex Tomas end = start + size;
4185c9de560dSAlex Tomas
41860830344cSOjaswin Mujoo ext4_mb_pa_adjust_overlap(ac, &start, &end);
4187c9de560dSAlex Tomas
4188c9de560dSAlex Tomas size = end - start;
4189c9de560dSAlex Tomas
4190cf4ff938SBaokun Li /*
4191cf4ff938SBaokun Li  * In this function "start" and "size" are normalized for better
4192cf4ff938SBaokun Li  * alignment and length such that we could preallocate more blocks.
4193cf4ff938SBaokun Li  * This normalization is done such that original request of
4194cf4ff938SBaokun Li  * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4195cf4ff938SBaokun Li  * "size" boundaries.
4196cf4ff938SBaokun Li  * (Note fe_len can be relaxed since the FS block allocation API does not
4197cf4ff938SBaokun Li  * provide a guarantee on the number of contiguous blocks allocated since that
4198cf4ff938SBaokun Li  * depends upon free space left, etc).
4199cf4ff938SBaokun Li  * In case of inode pa, later we use the allocated blocks
42001221b235SKemeng Shi  * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4201cf4ff938SBaokun Li  * range of goal/best blocks [start, size] to put it at the
4202cf4ff938SBaokun Li  * ac_o_ex.fe_logical extent of this inode.
4203cf4ff938SBaokun Li  * (See ext4_mb_use_inode_pa() for more details)
4204cf4ff938SBaokun Li  */
4205cf4ff938SBaokun Li if (start + size <= ac->ac_o_ex.fe_logical ||
4206c9de560dSAlex Tomas start > ac->ac_o_ex.fe_logical) {
42079d8b9ec4STheodore Ts'o ext4_msg(ac->ac_sb, KERN_ERR,
42089d8b9ec4STheodore Ts'o "start %lu, size %lu, fe_logical %lu",
4209c9de560dSAlex Tomas (unsigned long) start, (unsigned long) size,
4210c9de560dSAlex Tomas (unsigned long) ac->ac_o_ex.fe_logical);
4211dfe076c1SDmitry Monakhov BUG();
4212c9de560dSAlex Tomas }
4213b5b60778SMaurizio Lombardi BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4214c9de560dSAlex Tomas
4215c9de560dSAlex Tomas /* now prepare goal request */
4216c9de560dSAlex Tomas
4217c9de560dSAlex Tomas /* XXX: is it better to align blocks WRT to logical
4218c9de560dSAlex Tomas  * placement or satisfy big request as is */
4219c9de560dSAlex Tomas ac->ac_g_ex.fe_logical = start;
422053accfa9STheodore Ts'o ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4221c9de560dSAlex Tomas
4222c9de560dSAlex Tomas /* define goal start in order to merge */
4223b07ffe69SKemeng Shi if (ar->pright && (ar->lright == (start + size)) &&
4224b07ffe69SKemeng Shi ar->pright >= size &&
4225b07ffe69SKemeng Shi ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4226c9de560dSAlex Tomas /* merge to the right */
4227c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4228b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group,
4229b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start);
4230c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4231c9de560dSAlex Tomas }
4232b07ffe69SKemeng Shi if (ar->pleft && (ar->lleft + 1 == start) &&
4233b07ffe69SKemeng Shi ar->pleft + 1 < ext4_blocks_count(es)) {
4234c9de560dSAlex Tomas /* merge to the left */
4235c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4236b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group,
4237b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start);
4238c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4239c9de560dSAlex Tomas }
4240c9de560dSAlex Tomas
4241d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "goal:
%lld(was %lld) blocks at %u\n", size, 4242d3df1453SRitesh Harjani orig_size, start); 4243c9de560dSAlex Tomas } 4244c9de560dSAlex Tomas 4245c9de560dSAlex Tomas static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4246c9de560dSAlex Tomas { 4247c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4248c9de560dSAlex Tomas 4249a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4250c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_reqs); 4251c9de560dSAlex Tomas atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4252291dae47SCurt Wohlgemuth if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4253c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_success); 4254c9de560dSAlex Tomas atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4255a6c75eafSHarshad Shirwadkar atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4256c9de560dSAlex Tomas if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4257c9de560dSAlex Tomas ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4258c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_goals); 4259c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan) 4260c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_breaks); 4261c9de560dSAlex Tomas } 4262c9de560dSAlex Tomas 4263296c355cSTheodore Ts'o if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4264296c355cSTheodore Ts'o trace_ext4_mballoc_alloc(ac); 4265296c355cSTheodore Ts'o else 4266296c355cSTheodore Ts'o trace_ext4_mballoc_prealloc(ac); 4267c9de560dSAlex Tomas } 4268c9de560dSAlex Tomas 4269c9de560dSAlex Tomas /* 4270b844167eSCurt Wohlgemuth * Called on failure; free up any blocks from the inode PA for this 4271b844167eSCurt Wohlgemuth * context. We don't need this for MB_GROUP_PA because we only change 4272b844167eSCurt Wohlgemuth * pa_free in ext4_mb_release_context(), but on failure, we've already 4273b844167eSCurt Wohlgemuth * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4274b844167eSCurt Wohlgemuth */ 4275b844167eSCurt Wohlgemuth static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4276b844167eSCurt Wohlgemuth { 4277b844167eSCurt Wohlgemuth struct ext4_prealloc_space *pa = ac->ac_pa; 427886f0afd4STheodore Ts'o struct ext4_buddy e4b; 427986f0afd4STheodore Ts'o int err; 4280b844167eSCurt Wohlgemuth 428186f0afd4STheodore Ts'o if (pa == NULL) { 4282c99d1e6eSTheodore Ts'o if (ac->ac_f_ex.fe_len == 0) 4283c99d1e6eSTheodore Ts'o return; 428486f0afd4STheodore Ts'o err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 428519b8b035STheodore Ts'o if (WARN_RATELIMIT(err, 428619b8b035STheodore Ts'o "ext4: mb_load_buddy failed (%d)", err)) 428786f0afd4STheodore Ts'o /* 428886f0afd4STheodore Ts'o * This should never happen since we pin the 428986f0afd4STheodore Ts'o * pages in the ext4_allocation_context so 429086f0afd4STheodore Ts'o * ext4_mb_load_buddy() should never fail. 
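 * If it somehow does fail, the early return below deliberately skips
 * mb_free_blocks(), trading a potential leak of these blocks for not
 * touching buddy state that could not be loaded.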
429186f0afd4STheodore Ts'o */ 429286f0afd4STheodore Ts'o return; 429386f0afd4STheodore Ts'o ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 429486f0afd4STheodore Ts'o mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 429586f0afd4STheodore Ts'o ac->ac_f_ex.fe_len); 429686f0afd4STheodore Ts'o ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4297c99d1e6eSTheodore Ts'o ext4_mb_unload_buddy(&e4b); 429886f0afd4STheodore Ts'o return; 429986f0afd4STheodore Ts'o } 430036cb0f52SKemeng Shi if (pa->pa_type == MB_INODE_PA) { 430136cb0f52SKemeng Shi spin_lock(&pa->pa_lock); 4302400db9d3SZheng Liu pa->pa_free += ac->ac_b_ex.fe_len; 430336cb0f52SKemeng Shi spin_unlock(&pa->pa_lock); 430436cb0f52SKemeng Shi } 4305b844167eSCurt Wohlgemuth } 4306b844167eSCurt Wohlgemuth 4307b844167eSCurt Wohlgemuth /* 4308c9de560dSAlex Tomas * use blocks preallocated to inode 4309c9de560dSAlex Tomas */ 4310c9de560dSAlex Tomas static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4311c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4312c9de560dSAlex Tomas { 431353accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4314c9de560dSAlex Tomas ext4_fsblk_t start; 4315c9de560dSAlex Tomas ext4_fsblk_t end; 4316c9de560dSAlex Tomas int len; 4317c9de560dSAlex Tomas 4318c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4319c9de560dSAlex Tomas start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 432053accfa9STheodore Ts'o end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 432153accfa9STheodore Ts'o start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 432253accfa9STheodore Ts'o len = EXT4_NUM_B2C(sbi, end - start); 4323c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4324c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4325c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4326c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4327c9de560dSAlex Tomas ac->ac_pa = pa; 4328c9de560dSAlex Tomas 4329c9de560dSAlex Tomas BUG_ON(start < pa->pa_pstart); 433053accfa9STheodore Ts'o BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4331c9de560dSAlex Tomas BUG_ON(pa->pa_free < len); 4332*93cdf49fSOjaswin Mujoo BUG_ON(ac->ac_b_ex.fe_len <= 0); 4333c9de560dSAlex Tomas pa->pa_free -= len; 4334c9de560dSAlex Tomas 4335d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4336c9de560dSAlex Tomas } 4337c9de560dSAlex Tomas 4338c9de560dSAlex Tomas /* 4339c9de560dSAlex Tomas * use blocks preallocated to locality group 4340c9de560dSAlex Tomas */ 4341c9de560dSAlex Tomas static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4342c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4343c9de560dSAlex Tomas { 434403cddb80SAneesh Kumar K.V unsigned int len = ac->ac_o_ex.fe_len; 43456be2ded1SAneesh Kumar K.V 4346c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4347c9de560dSAlex Tomas &ac->ac_b_ex.fe_group, 4348c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4349c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4350c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4351c9de560dSAlex Tomas ac->ac_pa = pa; 4352c9de560dSAlex Tomas 43531221b235SKemeng Shi /* we don't correct pa_pstart or pa_len here to avoid 435426346ff6SAneesh Kumar K.V * possible race when the group is being loaded concurrently 4355c9de560dSAlex Tomas * instead we correct pa later, after blocks are marked 435626346ff6SAneesh Kumar K.V * in on-disk bitmap -- see ext4_mb_release_context() 435726346ff6SAneesh Kumar K.V * Other CPUs are 
prevented from allocating from this pa by lg_mutex 4358c9de560dSAlex Tomas */ 4359d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 43601afdc588SKemeng Shi pa->pa_lstart, len, pa); 4361c9de560dSAlex Tomas } 4362c9de560dSAlex Tomas 4363c9de560dSAlex Tomas /* 43645e745b04SAneesh Kumar K.V * Return the prealloc space that has minimal distance 43655e745b04SAneesh Kumar K.V * from the goal block. @cpa is the prealloc 43665e745b04SAneesh Kumar K.V * space with the currently known minimal distance 43675e745b04SAneesh Kumar K.V * from the goal block. 43685e745b04SAneesh Kumar K.V */ 43695e745b04SAneesh Kumar K.V static struct ext4_prealloc_space * 43705e745b04SAneesh Kumar K.V ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 43715e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa, 43725e745b04SAneesh Kumar K.V struct ext4_prealloc_space *cpa) 43735e745b04SAneesh Kumar K.V { 43745e745b04SAneesh Kumar K.V ext4_fsblk_t cur_distance, new_distance; 43755e745b04SAneesh Kumar K.V 43765e745b04SAneesh Kumar K.V if (cpa == NULL) { 43775e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 43785e745b04SAneesh Kumar K.V return pa; 43795e745b04SAneesh Kumar K.V } 438079211c8eSAndrew Morton cur_distance = abs(goal_block - cpa->pa_pstart); 438179211c8eSAndrew Morton new_distance = abs(goal_block - pa->pa_pstart); 43825e745b04SAneesh Kumar K.V 43835a54b2f1SColy Li if (cur_distance <= new_distance) 43845e745b04SAneesh Kumar K.V return cpa; 43855e745b04SAneesh Kumar K.V 43865e745b04SAneesh Kumar K.V /* drop the previous reference */ 43875e745b04SAneesh Kumar K.V atomic_dec(&cpa->pa_count); 43885e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 43895e745b04SAneesh Kumar K.V return pa; 43905e745b04SAneesh Kumar K.V } 43915e745b04SAneesh Kumar K.V 43925e745b04SAneesh Kumar K.V /* 4393c9de560dSAlex Tomas * search goal blocks in preallocated space 4394c9de560dSAlex Tomas */ 43954fca8f07SRitesh Harjani static noinline_for_stack bool 43964ddfef7bSEric Sandeen ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4397c9de560dSAlex Tomas { 439853accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 43996be2ded1SAneesh Kumar K.V int order, i; 4400c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4401c9de560dSAlex Tomas struct ext4_locality_group *lg; 4402bcf43499SOjaswin Mujoo struct ext4_prealloc_space *tmp_pa, *cpa = NULL; 4403bcf43499SOjaswin Mujoo ext4_lblk_t tmp_pa_start, tmp_pa_end; 44045e745b04SAneesh Kumar K.V ext4_fsblk_t goal_block; 4405c9de560dSAlex Tomas 4406c9de560dSAlex Tomas /* only data can be preallocated */ 4407c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 44084fca8f07SRitesh Harjani return false; 4409c9de560dSAlex Tomas 4410c9de560dSAlex Tomas /* first, try per-file preallocation */ 4411c9de560dSAlex Tomas rcu_read_lock(); 4412bcf43499SOjaswin Mujoo list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) { 4413c9de560dSAlex Tomas 4414c9de560dSAlex Tomas /* all fields in this condition don't change, 4415c9de560dSAlex Tomas * so we can skip locking for them */ 4416bcf43499SOjaswin Mujoo tmp_pa_start = tmp_pa->pa_lstart; 4417bcf43499SOjaswin Mujoo tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 4418bcf43499SOjaswin Mujoo 4419bcf43499SOjaswin Mujoo if (ac->ac_o_ex.fe_logical < tmp_pa_start || 4420bcf43499SOjaswin Mujoo ac->ac_o_ex.fe_logical >= tmp_pa_end) 4421c9de560dSAlex Tomas continue; 4422c9de560dSAlex Tomas 4423fb0a387dSEric Sandeen /* non-extent files can't have physical blocks past
2^32 */ 442412e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4425bcf43499SOjaswin Mujoo (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4426e86a7182SOjaswin Mujoo EXT4_MAX_BLOCK_FILE_PHYS)) { 4427e86a7182SOjaswin Mujoo /* 4428e86a7182SOjaswin Mujoo * Since PAs don't overlap, we won't find any 4429e86a7182SOjaswin Mujoo * other PA to satisfy this. 4430e86a7182SOjaswin Mujoo */ 4431e86a7182SOjaswin Mujoo break; 4432e86a7182SOjaswin Mujoo } 4433fb0a387dSEric Sandeen 4434c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4435bcf43499SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 4436bcf43499SOjaswin Mujoo if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) { 4437bcf43499SOjaswin Mujoo atomic_inc(&tmp_pa->pa_count); 4438bcf43499SOjaswin Mujoo ext4_mb_use_inode_pa(ac, tmp_pa); 4439bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 4440c9de560dSAlex Tomas ac->ac_criteria = 10; 4441c9de560dSAlex Tomas rcu_read_unlock(); 44424fca8f07SRitesh Harjani return true; 4443c9de560dSAlex Tomas } 4444bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 4445c9de560dSAlex Tomas } 4446c9de560dSAlex Tomas rcu_read_unlock(); 4447c9de560dSAlex Tomas 4448c9de560dSAlex Tomas /* can we use group allocation? */ 4449c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 44504fca8f07SRitesh Harjani return false; 4451c9de560dSAlex Tomas 4452c9de560dSAlex Tomas /* inode may have no locality group for some reason */ 4453c9de560dSAlex Tomas lg = ac->ac_lg; 4454c9de560dSAlex Tomas if (lg == NULL) 44554fca8f07SRitesh Harjani return false; 44566be2ded1SAneesh Kumar K.V order = fls(ac->ac_o_ex.fe_len) - 1; 44576be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 44586be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 44596be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 4460c9de560dSAlex Tomas 4461bda00de7SAkinobu Mita goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 44625e745b04SAneesh Kumar K.V /* 44635e745b04SAneesh Kumar K.V * search for the prealloc space that has the 44645e745b04SAneesh Kumar K.V * minimal distance from the goal block. 44655e745b04SAneesh Kumar K.V */ 44666be2ded1SAneesh Kumar K.V for (i = order; i < PREALLOC_TB_SIZE; i++) { 4467c9de560dSAlex Tomas rcu_read_lock(); 4468bcf43499SOjaswin Mujoo list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], 44696be2ded1SAneesh Kumar K.V pa_inode_list) { 4470bcf43499SOjaswin Mujoo spin_lock(&tmp_pa->pa_lock); 4471bcf43499SOjaswin Mujoo if (tmp_pa->pa_deleted == 0 && 4472bcf43499SOjaswin Mujoo tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { 44735e745b04SAneesh Kumar K.V 44745e745b04SAneesh Kumar K.V cpa = ext4_mb_check_group_pa(goal_block, 4475bcf43499SOjaswin Mujoo tmp_pa, cpa); 44765e745b04SAneesh Kumar K.V } 4477bcf43499SOjaswin Mujoo spin_unlock(&tmp_pa->pa_lock); 44785e745b04SAneesh Kumar K.V } 44795e745b04SAneesh Kumar K.V rcu_read_unlock(); 44805e745b04SAneesh Kumar K.V } 44815e745b04SAneesh Kumar K.V if (cpa) { 44825e745b04SAneesh Kumar K.V ext4_mb_use_group_pa(ac, cpa); 4483c9de560dSAlex Tomas ac->ac_criteria = 20; 44844fca8f07SRitesh Harjani return true; 4485c9de560dSAlex Tomas } 44864fca8f07SRitesh Harjani return false; 4487c9de560dSAlex Tomas } 4488c9de560dSAlex Tomas 4489c9de560dSAlex Tomas /* 44907a2fcbf7SAneesh Kumar K.V * the function goes through all blocks freed in the group 44917a2fcbf7SAneesh Kumar K.V * but not yet committed and marks them used in the in-core bitmap.
44927a2fcbf7SAneesh Kumar K.V * buddy must be generated from this bitmap 4493955ce5f5SAneesh Kumar K.V * Needs to be called with the ext4 group lock held 44947a2fcbf7SAneesh Kumar K.V */ 44957a2fcbf7SAneesh Kumar K.V static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 44967a2fcbf7SAneesh Kumar K.V ext4_group_t group) 44977a2fcbf7SAneesh Kumar K.V { 44987a2fcbf7SAneesh Kumar K.V struct rb_node *n; 44997a2fcbf7SAneesh Kumar K.V struct ext4_group_info *grp; 45007a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 45017a2fcbf7SAneesh Kumar K.V 45027a2fcbf7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 45037a2fcbf7SAneesh Kumar K.V n = rb_first(&(grp->bb_free_root)); 45047a2fcbf7SAneesh Kumar K.V 45057a2fcbf7SAneesh Kumar K.V while (n) { 450618aadd47SBobi Jam entry = rb_entry(n, struct ext4_free_data, efd_node); 4507123e3016SRitesh Harjani mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 45087a2fcbf7SAneesh Kumar K.V n = rb_next(n); 45097a2fcbf7SAneesh Kumar K.V } 45107a2fcbf7SAneesh Kumar K.V return; 45117a2fcbf7SAneesh Kumar K.V } 45127a2fcbf7SAneesh Kumar K.V 45137a2fcbf7SAneesh Kumar K.V /* 4514c9de560dSAlex Tomas * the function goes through all preallocations in this group and marks them 4515c9de560dSAlex Tomas * used in the in-core bitmap. buddy must be generated from this bitmap 4516955ce5f5SAneesh Kumar K.V * Needs to be called with the ext4 group lock held 4517c9de560dSAlex Tomas */ 4518089ceeccSEric Sandeen static noinline_for_stack 4519089ceeccSEric Sandeen void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4520c9de560dSAlex Tomas ext4_group_t group) 4521c9de560dSAlex Tomas { 4522c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4523c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4524c9de560dSAlex Tomas struct list_head *cur; 4525c9de560dSAlex Tomas ext4_group_t groupnr; 4526c9de560dSAlex Tomas ext4_grpblk_t start; 4527c9de560dSAlex Tomas int preallocated = 0; 4528c9de560dSAlex Tomas int len; 4529c9de560dSAlex Tomas 4530c9de560dSAlex Tomas /* all forms of preallocation discard first load the group, 4531c9de560dSAlex Tomas * so the only competing code is preallocation use.
4532c9de560dSAlex Tomas * we don't need any locking here 4533c9de560dSAlex Tomas * notice we do NOT ignore preallocations with pa_deleted 4534c9de560dSAlex Tomas * otherwise we could leave used blocks available for 4535c9de560dSAlex Tomas * allocation in buddy when concurrent ext4_mb_put_pa() 4536c9de560dSAlex Tomas * is dropping preallocation 4537c9de560dSAlex Tomas */ 4538c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 4539c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4540c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4541c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4542c9de560dSAlex Tomas &groupnr, &start); 4543c9de560dSAlex Tomas len = pa->pa_len; 4544c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4545c9de560dSAlex Tomas if (unlikely(len == 0)) 4546c9de560dSAlex Tomas continue; 4547c9de560dSAlex Tomas BUG_ON(groupnr != group); 4548123e3016SRitesh Harjani mb_set_bits(bitmap, start, len); 4549c9de560dSAlex Tomas preallocated += len; 4550c9de560dSAlex Tomas } 4551d3df1453SRitesh Harjani mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4552c9de560dSAlex Tomas } 4553c9de560dSAlex Tomas 455427bc446eSbrookxu static void ext4_mb_mark_pa_deleted(struct super_block *sb, 455527bc446eSbrookxu struct ext4_prealloc_space *pa) 455627bc446eSbrookxu { 455727bc446eSbrookxu struct ext4_inode_info *ei; 455827bc446eSbrookxu 455927bc446eSbrookxu if (pa->pa_deleted) { 456027bc446eSbrookxu ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 456127bc446eSbrookxu pa->pa_type, pa->pa_pstart, pa->pa_lstart, 456227bc446eSbrookxu pa->pa_len); 456327bc446eSbrookxu return; 456427bc446eSbrookxu } 456527bc446eSbrookxu 456627bc446eSbrookxu pa->pa_deleted = 1; 456727bc446eSbrookxu 456827bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 456927bc446eSbrookxu ei = EXT4_I(pa->pa_inode); 457027bc446eSbrookxu atomic_dec(&ei->i_prealloc_active); 457127bc446eSbrookxu } 457227bc446eSbrookxu } 457327bc446eSbrookxu 457482089725SOjaswin Mujoo static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 4575c9de560dSAlex Tomas { 457682089725SOjaswin Mujoo BUG_ON(!pa); 45774e8d2139SJunho Ryu BUG_ON(atomic_read(&pa->pa_count)); 45784e8d2139SJunho Ryu BUG_ON(pa->pa_deleted == 0); 4579c9de560dSAlex Tomas kmem_cache_free(ext4_pspace_cachep, pa); 4580c9de560dSAlex Tomas } 4581c9de560dSAlex Tomas 458282089725SOjaswin Mujoo static void ext4_mb_pa_callback(struct rcu_head *head) 458382089725SOjaswin Mujoo { 458482089725SOjaswin Mujoo struct ext4_prealloc_space *pa; 458582089725SOjaswin Mujoo 458682089725SOjaswin Mujoo pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 458782089725SOjaswin Mujoo ext4_mb_pa_free(pa); 458882089725SOjaswin Mujoo } 458982089725SOjaswin Mujoo 4590c9de560dSAlex Tomas /* 4591c9de560dSAlex Tomas * drops a reference to preallocated space descriptor 4592c9de560dSAlex Tomas * if this was the last reference and the space is consumed 4593c9de560dSAlex Tomas */ 4594c9de560dSAlex Tomas static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4595c9de560dSAlex Tomas struct super_block *sb, struct ext4_prealloc_space *pa) 4596c9de560dSAlex Tomas { 4597a9df9a49STheodore Ts'o ext4_group_t grp; 4598d33a1976SEric Sandeen ext4_fsblk_t grp_blk; 4599c9de560dSAlex Tomas 4600c9de560dSAlex Tomas /* in this short window concurrent discard can set pa_deleted */ 4601c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 46024e8d2139SJunho Ryu if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 
46034e8d2139SJunho Ryu spin_unlock(&pa->pa_lock); 46044e8d2139SJunho Ryu return; 46054e8d2139SJunho Ryu } 46064e8d2139SJunho Ryu 4607c9de560dSAlex Tomas if (pa->pa_deleted == 1) { 4608c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4609c9de560dSAlex Tomas return; 4610c9de560dSAlex Tomas } 4611c9de560dSAlex Tomas 461227bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4613c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4614c9de560dSAlex Tomas 4615d33a1976SEric Sandeen grp_blk = pa->pa_pstart; 4616cc0fb9adSAneesh Kumar K.V /* 4617cc0fb9adSAneesh Kumar K.V * If doing group-based preallocation, pa_pstart may be in the 4618cc0fb9adSAneesh Kumar K.V * next group when pa is used up 4619cc0fb9adSAneesh Kumar K.V */ 4620cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 4621d33a1976SEric Sandeen grp_blk--; 4622d33a1976SEric Sandeen 4623bd86298eSLukas Czerner grp = ext4_get_group_number(sb, grp_blk); 4624c9de560dSAlex Tomas 4625c9de560dSAlex Tomas /* 4626c9de560dSAlex Tomas * possible race: 4627c9de560dSAlex Tomas * 4628c9de560dSAlex Tomas * P1 (buddy init) P2 (regular allocation) 4629c9de560dSAlex Tomas * find block B in PA 4630c9de560dSAlex Tomas * copy on-disk bitmap to buddy 4631c9de560dSAlex Tomas * mark B in on-disk bitmap 4632c9de560dSAlex Tomas * drop PA from group 4633c9de560dSAlex Tomas * mark all PAs in buddy 4634c9de560dSAlex Tomas * 4635c9de560dSAlex Tomas * thus, P1 initializes buddy with B available. To prevent this 4636c9de560dSAlex Tomas * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 4637c9de560dSAlex Tomas * against that pair 4638c9de560dSAlex Tomas */ 4639c9de560dSAlex Tomas ext4_lock_group(sb, grp); 4640c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4641c9de560dSAlex Tomas ext4_unlock_group(sb, grp); 4642c9de560dSAlex Tomas 4643c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4644c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4645c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 4646c9de560dSAlex Tomas 4647c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4648c9de560dSAlex Tomas } 4649c9de560dSAlex Tomas 4650c9de560dSAlex Tomas /* 4651c9de560dSAlex Tomas * creates new preallocated space for given inode 4652c9de560dSAlex Tomas */ 465353f86b17SRitesh Harjani static noinline_for_stack void 46544ddfef7bSEric Sandeen ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 4655c9de560dSAlex Tomas { 4656c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 465753accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 4658c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4659c9de560dSAlex Tomas struct ext4_group_info *grp; 4660c9de560dSAlex Tomas struct ext4_inode_info *ei; 4661c9de560dSAlex Tomas 4662c9de560dSAlex Tomas /* preallocate only when found space is larger than requested */ 4663c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4664c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4665c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 466653f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4667c9de560dSAlex Tomas 466853f86b17SRitesh Harjani pa = ac->ac_pa; 4669c9de560dSAlex Tomas 4670c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 4671*93cdf49fSOjaswin Mujoo int new_bex_start; 4672*93cdf49fSOjaswin Mujoo int new_bex_end; 4673c9de560dSAlex Tomas 4674c9de560dSAlex Tomas /* we can't allocate as much as normalizer wants.
4675c9de560dSAlex Tomas * so, found space must get proper lstart 4676c9de560dSAlex Tomas * to cover original request */ 4677c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 4678c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4679c9de560dSAlex Tomas 4680*93cdf49fSOjaswin Mujoo /* 4681*93cdf49fSOjaswin Mujoo * Use the below logic for adjusting best extent as it keeps 4682*93cdf49fSOjaswin Mujoo * fragmentation in check while ensuring logical range of best 4683*93cdf49fSOjaswin Mujoo * extent doesn't overflow out of goal extent: 4684*93cdf49fSOjaswin Mujoo * 4685*93cdf49fSOjaswin Mujoo * 1. Check if best ex can be kept at end of goal and still 4686*93cdf49fSOjaswin Mujoo * cover original start 4687*93cdf49fSOjaswin Mujoo * 2. Else, check if best ex can be kept at start of goal and 4688*93cdf49fSOjaswin Mujoo * still cover original start 4689*93cdf49fSOjaswin Mujoo * 3. Else, keep the best ex at start of original request. 4690*93cdf49fSOjaswin Mujoo */ 4691*93cdf49fSOjaswin Mujoo new_bex_end = ac->ac_g_ex.fe_logical + 4692*93cdf49fSOjaswin Mujoo EXT4_C2B(sbi, ac->ac_g_ex.fe_len); 4693*93cdf49fSOjaswin Mujoo new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4694*93cdf49fSOjaswin Mujoo if (ac->ac_o_ex.fe_logical >= new_bex_start) 4695*93cdf49fSOjaswin Mujoo goto adjust_bex; 4696c9de560dSAlex Tomas 4697*93cdf49fSOjaswin Mujoo new_bex_start = ac->ac_g_ex.fe_logical; 4698*93cdf49fSOjaswin Mujoo new_bex_end = 4699*93cdf49fSOjaswin Mujoo new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4700*93cdf49fSOjaswin Mujoo if (ac->ac_o_ex.fe_logical < new_bex_end) 4701*93cdf49fSOjaswin Mujoo goto adjust_bex; 4702c9de560dSAlex Tomas 4703*93cdf49fSOjaswin Mujoo new_bex_start = ac->ac_o_ex.fe_logical; 4704*93cdf49fSOjaswin Mujoo new_bex_end = 4705*93cdf49fSOjaswin Mujoo new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4706c9de560dSAlex Tomas 4707*93cdf49fSOjaswin Mujoo adjust_bex: 4708*93cdf49fSOjaswin Mujoo ac->ac_b_ex.fe_logical = new_bex_start; 4709c9de560dSAlex Tomas 4710c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4711c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 4712*93cdf49fSOjaswin Mujoo BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical + 4713*93cdf49fSOjaswin Mujoo EXT4_C2B(sbi, ac->ac_g_ex.fe_len))); 4714c9de560dSAlex Tomas } 4715c9de560dSAlex Tomas 4716c9de560dSAlex Tomas pa->pa_lstart = ac->ac_b_ex.fe_logical; 4717c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4718c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4719c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4720c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 4721d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_inode_list); 4722d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4723c9de560dSAlex Tomas pa->pa_deleted = 0; 4724cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_INODE_PA; 4725c9de560dSAlex Tomas 4726d3df1453SRitesh Harjani mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4727d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 47289bffad1eSTheodore Ts'o trace_ext4_mb_new_inode_pa(ac, pa); 4729c9de560dSAlex Tomas 473053accfa9STheodore Ts'o atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4731abc075d4SKemeng Shi ext4_mb_use_inode_pa(ac, pa); 4732c9de560dSAlex Tomas 4733c9de560dSAlex Tomas ei = EXT4_I(ac->ac_inode); 4734c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4735c9de560dSAlex Tomas 4736c9de560dSAlex Tomas pa->pa_obj_lock = 
&ei->i_prealloc_lock; 4737c9de560dSAlex Tomas pa->pa_inode = ac->ac_inode; 4738c9de560dSAlex Tomas 4739c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4740c9de560dSAlex Tomas 4741c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4742c9de560dSAlex Tomas list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 4743c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 474427bc446eSbrookxu atomic_inc(&ei->i_prealloc_active); 4745c9de560dSAlex Tomas } 4746c9de560dSAlex Tomas 4747c9de560dSAlex Tomas /* 4748c9de560dSAlex Tomas * creates new preallocated space for locality group inodes belongs to 4749c9de560dSAlex Tomas */ 475053f86b17SRitesh Harjani static noinline_for_stack void 47514ddfef7bSEric Sandeen ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 4752c9de560dSAlex Tomas { 4753c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 4754c9de560dSAlex Tomas struct ext4_locality_group *lg; 4755c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4756c9de560dSAlex Tomas struct ext4_group_info *grp; 4757c9de560dSAlex Tomas 4758c9de560dSAlex Tomas /* preallocate only when found space is larger then requested */ 4759c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4760c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4761c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 476253f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4763c9de560dSAlex Tomas 476453f86b17SRitesh Harjani pa = ac->ac_pa; 4765c9de560dSAlex Tomas 4766c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4767c9de560dSAlex Tomas pa->pa_lstart = pa->pa_pstart; 4768c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4769c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4770c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 47716be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_inode_list); 4772d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4773c9de560dSAlex Tomas pa->pa_deleted = 0; 4774cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_GROUP_PA; 4775c9de560dSAlex Tomas 4776d3df1453SRitesh Harjani mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4777d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 47789bffad1eSTheodore Ts'o trace_ext4_mb_new_group_pa(ac, pa); 4779c9de560dSAlex Tomas 4780c9de560dSAlex Tomas ext4_mb_use_group_pa(ac, pa); 4781c9de560dSAlex Tomas atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 4782c9de560dSAlex Tomas 4783c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4784c9de560dSAlex Tomas lg = ac->ac_lg; 4785c9de560dSAlex Tomas BUG_ON(lg == NULL); 4786c9de560dSAlex Tomas 4787c9de560dSAlex Tomas pa->pa_obj_lock = &lg->lg_prealloc_lock; 4788c9de560dSAlex Tomas pa->pa_inode = NULL; 4789c9de560dSAlex Tomas 4790c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4791c9de560dSAlex Tomas 47926be2ded1SAneesh Kumar K.V /* 47936be2ded1SAneesh Kumar K.V * We will later add the new pa to the right bucket 47946be2ded1SAneesh Kumar K.V * after updating the pa_free in ext4_mb_release_context 47956be2ded1SAneesh Kumar K.V */ 4796c9de560dSAlex Tomas } 4797c9de560dSAlex Tomas 479853f86b17SRitesh Harjani static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 4799c9de560dSAlex Tomas { 4800c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 480153f86b17SRitesh Harjani ext4_mb_new_group_pa(ac); 4802c9de560dSAlex Tomas else 480353f86b17SRitesh Harjani ext4_mb_new_inode_pa(ac); 4804c9de560dSAlex Tomas } 4805c9de560dSAlex Tomas 
4806c9de560dSAlex Tomas /* 4807c9de560dSAlex Tomas * finds all unused blocks in on-disk bitmap, frees them in 4808c9de560dSAlex Tomas * in-core bitmap and buddy. 4809c9de560dSAlex Tomas * @pa must be unlinked from inode and group lists, so that 4810c9de560dSAlex Tomas * nobody else can find/use it. 4811c9de560dSAlex Tomas * the caller MUST hold group/inode locks. 4812c9de560dSAlex Tomas * TODO: optimize the case when there are no in-core structures yet 4813c9de560dSAlex Tomas */ 48144ddfef7bSEric Sandeen static noinline_for_stack int 48154ddfef7bSEric Sandeen ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 48163e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 4817c9de560dSAlex Tomas { 4818c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 4819c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 4820498e5f24STheodore Ts'o unsigned int end; 4821498e5f24STheodore Ts'o unsigned int next; 4822c9de560dSAlex Tomas ext4_group_t group; 4823c9de560dSAlex Tomas ext4_grpblk_t bit; 4824ba80b101STheodore Ts'o unsigned long long grp_blk_start; 4825c9de560dSAlex Tomas int free = 0; 4826c9de560dSAlex Tomas 4827c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 4828c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 482953accfa9STheodore Ts'o grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 4830c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4831c9de560dSAlex Tomas end = bit + pa->pa_len; 4832c9de560dSAlex Tomas 4833c9de560dSAlex Tomas while (bit < end) { 4834ffad0a44SAneesh Kumar K.V bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 4835c9de560dSAlex Tomas if (bit >= end) 4836c9de560dSAlex Tomas break; 4837ffad0a44SAneesh Kumar K.V next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 4838d3df1453SRitesh Harjani mb_debug(sb, "free preallocated %u/%u in group %u\n", 48395a0790c2SAndi Kleen (unsigned) ext4_group_first_block_no(sb, group) + bit, 48405a0790c2SAndi Kleen (unsigned) next - bit, (unsigned) group); 4841c9de560dSAlex Tomas free += next - bit; 4842c9de560dSAlex Tomas 48433e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 484453accfa9STheodore Ts'o trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 484553accfa9STheodore Ts'o EXT4_C2B(sbi, bit)), 4846a9c667f8SLukas Czerner next - bit); 4847c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 4848c9de560dSAlex Tomas bit = next + 1; 4849c9de560dSAlex Tomas } 4850c9de560dSAlex Tomas if (free != pa->pa_free) { 48519d8b9ec4STheodore Ts'o ext4_msg(e4b->bd_sb, KERN_CRIT, 485236bad423SRitesh Harjani "pa %p: logic %lu, phys. %lu, len %d", 4853c9de560dSAlex Tomas pa, (unsigned long) pa->pa_lstart, 4854c9de560dSAlex Tomas (unsigned long) pa->pa_pstart, 485536bad423SRitesh Harjani pa->pa_len); 4856e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 485726346ff6SAneesh Kumar K.V free, pa->pa_free); 4858e56eb659SAneesh Kumar K.V /* 4859e56eb659SAneesh Kumar K.V * pa is already deleted so we use the value obtained 4860e56eb659SAneesh Kumar K.V * from the bitmap and continue. 
4861e56eb659SAneesh Kumar K.V */ 4862c9de560dSAlex Tomas } 4863c9de560dSAlex Tomas atomic_add(free, &sbi->s_mb_discarded); 4864c9de560dSAlex Tomas 4865863c37fcSzhong jiang return 0; 4866c9de560dSAlex Tomas } 4867c9de560dSAlex Tomas 48684ddfef7bSEric Sandeen static noinline_for_stack int 48694ddfef7bSEric Sandeen ext4_mb_release_group_pa(struct ext4_buddy *e4b, 48703e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 4871c9de560dSAlex Tomas { 4872c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 4873c9de560dSAlex Tomas ext4_group_t group; 4874c9de560dSAlex Tomas ext4_grpblk_t bit; 4875c9de560dSAlex Tomas 487660e07cf5SYongqiang Yang trace_ext4_mb_release_group_pa(sb, pa); 4877c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 4878c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4879c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4880c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 4881c9de560dSAlex Tomas atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 48823e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 4883c9de560dSAlex Tomas 4884c9de560dSAlex Tomas return 0; 4885c9de560dSAlex Tomas } 4886c9de560dSAlex Tomas 4887c9de560dSAlex Tomas /* 4888c9de560dSAlex Tomas * releases all preallocations in given group 4889c9de560dSAlex Tomas * 4890c9de560dSAlex Tomas * first, we need to decide discard policy: 4891c9de560dSAlex Tomas * - when do we discard 4892c9de560dSAlex Tomas * 1) ENOSPC 4893c9de560dSAlex Tomas * - how many do we discard 4894c9de560dSAlex Tomas * 1) how many requested 4895c9de560dSAlex Tomas */ 48964ddfef7bSEric Sandeen static noinline_for_stack int 48974ddfef7bSEric Sandeen ext4_mb_discard_group_preallocations(struct super_block *sb, 48988c80fb31SChunguang Xu ext4_group_t group, int *busy) 4899c9de560dSAlex Tomas { 4900c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4901c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 4902c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 4903c9de560dSAlex Tomas struct list_head list; 4904c9de560dSAlex Tomas struct ext4_buddy e4b; 4905c9de560dSAlex Tomas int err; 49068c80fb31SChunguang Xu int free = 0; 4907c9de560dSAlex Tomas 4908d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for group %u\n", group); 4909c9de560dSAlex Tomas if (list_empty(&grp->bb_prealloc_list)) 4910bbc4ec77SRitesh Harjani goto out_dbg; 4911c9de560dSAlex Tomas 4912574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 49139008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 49149008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 491554d3adbcSTheodore Ts'o ext4_error_err(sb, -err, 491654d3adbcSTheodore Ts'o "Error %d reading block bitmap for %u", 49179008a58eSDarrick J. 
Wong err, group); 4918bbc4ec77SRitesh Harjani goto out_dbg; 4919c9de560dSAlex Tomas } 4920c9de560dSAlex Tomas 4921c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 4922ce89f46cSAneesh Kumar K.V if (err) { 49239651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 49249651e6b2SKonstantin Khlebnikov err, group); 4925ce89f46cSAneesh Kumar K.V put_bh(bitmap_bh); 4926bbc4ec77SRitesh Harjani goto out_dbg; 4927ce89f46cSAneesh Kumar K.V } 4928c9de560dSAlex Tomas 4929c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 4930c9de560dSAlex Tomas ext4_lock_group(sb, group); 4931c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, 4932c9de560dSAlex Tomas &grp->bb_prealloc_list, pa_group_list) { 4933c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4934c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 4935c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 49368c80fb31SChunguang Xu *busy = 1; 4937c9de560dSAlex Tomas continue; 4938c9de560dSAlex Tomas } 4939c9de560dSAlex Tomas if (pa->pa_deleted) { 4940c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4941c9de560dSAlex Tomas continue; 4942c9de560dSAlex Tomas } 4943c9de560dSAlex Tomas 4944c9de560dSAlex Tomas /* seems this one can be freed ... */ 494527bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4946c9de560dSAlex Tomas 494770022da8SYe Bin if (!free) 494870022da8SYe Bin this_cpu_inc(discard_pa_seq); 494970022da8SYe Bin 4950c9de560dSAlex Tomas /* we can trust pa_free ... */ 4951c9de560dSAlex Tomas free += pa->pa_free; 4952c9de560dSAlex Tomas 4953c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4954c9de560dSAlex Tomas 4955c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4956c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 4957c9de560dSAlex Tomas } 4958c9de560dSAlex Tomas 4959c9de560dSAlex Tomas /* now free all selected PAs */ 4960c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4961c9de560dSAlex Tomas 4962c9de560dSAlex Tomas /* remove from object (inode or locality group) */ 4963c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4964c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4965c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 4966c9de560dSAlex Tomas 4967cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 49683e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 4969c9de560dSAlex Tomas else 49703e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4971c9de560dSAlex Tomas 4972c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 4973c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4974c9de560dSAlex Tomas } 4975c9de560dSAlex Tomas 4976c9de560dSAlex Tomas ext4_unlock_group(sb, group); 4977e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 4978c9de560dSAlex Tomas put_bh(bitmap_bh); 4979bbc4ec77SRitesh Harjani out_dbg: 4980d3df1453SRitesh Harjani mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 49818c80fb31SChunguang Xu free, group, grp->bb_free); 49828c80fb31SChunguang Xu return free; 4983c9de560dSAlex Tomas } 4984c9de560dSAlex Tomas 4985c9de560dSAlex Tomas /* 4986c9de560dSAlex Tomas * releases all unused preallocated blocks for given inode 4987c9de560dSAlex Tomas * 4988c9de560dSAlex Tomas * It's important to discard preallocations under i_data_sem 4989c9de560dSAlex Tomas * We don't want another block to be served from the prealloc 4990c9de560dSAlex Tomas * space when we are discarding the inode prealloc space. 4991c9de560dSAlex Tomas * 4992c9de560dSAlex Tomas * FIXME!!
Make sure it is valid at all the call sites 4993c9de560dSAlex Tomas */ 499427bc446eSbrookxu void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 4995c9de560dSAlex Tomas { 4996c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(inode); 4997c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 4998c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 4999c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 5000c9de560dSAlex Tomas ext4_group_t group = 0; 5001c9de560dSAlex Tomas struct list_head list; 5002c9de560dSAlex Tomas struct ext4_buddy e4b; 5003c9de560dSAlex Tomas int err; 5004c9de560dSAlex Tomas 5005c2ea3fdeSTheodore Ts'o if (!S_ISREG(inode->i_mode)) { 5006c9de560dSAlex Tomas return; 5007c9de560dSAlex Tomas } 5008c9de560dSAlex Tomas 50098016e29fSHarshad Shirwadkar if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 50108016e29fSHarshad Shirwadkar return; 50118016e29fSHarshad Shirwadkar 5012d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for inode %lu\n", 5013d3df1453SRitesh Harjani inode->i_ino); 501427bc446eSbrookxu trace_ext4_discard_preallocations(inode, 501527bc446eSbrookxu atomic_read(&ei->i_prealloc_active), needed); 5016c9de560dSAlex Tomas 5017c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 5018c9de560dSAlex Tomas 501927bc446eSbrookxu if (needed == 0) 502027bc446eSbrookxu needed = UINT_MAX; 502127bc446eSbrookxu 5022c9de560dSAlex Tomas repeat: 5023c9de560dSAlex Tomas /* first, collect all pa's in the inode */ 5024c9de560dSAlex Tomas spin_lock(&ei->i_prealloc_lock); 502527bc446eSbrookxu while (!list_empty(&ei->i_prealloc_list) && needed) { 502627bc446eSbrookxu pa = list_entry(ei->i_prealloc_list.prev, 5027c9de560dSAlex Tomas struct ext4_prealloc_space, pa_inode_list); 5028c9de560dSAlex Tomas BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 5029c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5030c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 5031c9de560dSAlex Tomas /* this shouldn't happen often - nobody should 5032c9de560dSAlex Tomas * use preallocation while we're discarding it */ 5033c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5034c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 50359d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, 50369d8b9ec4STheodore Ts'o "uh-oh! used pa while discarding"); 5037c9de560dSAlex Tomas WARN_ON(1); 5038c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5039c9de560dSAlex Tomas goto repeat; 5040c9de560dSAlex Tomas 5041c9de560dSAlex Tomas } 5042c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 504327bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 5044c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5045c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 5046c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 504727bc446eSbrookxu needed--; 5048c9de560dSAlex Tomas continue; 5049c9de560dSAlex Tomas } 5050c9de560dSAlex Tomas 5051c9de560dSAlex Tomas /* someone is deleting pa right now */ 5052c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5053c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 5054c9de560dSAlex Tomas 5055c9de560dSAlex Tomas /* we have to wait here because pa_deleted 5056c9de560dSAlex Tomas * doesn't mean pa is already unlinked from 5057c9de560dSAlex Tomas * the list. 
as we might be called from 5058c9de560dSAlex Tomas * ->clear_inode() the inode will get freed 5059c9de560dSAlex Tomas * and concurrent thread which is unlinking 5060c9de560dSAlex Tomas * pa from inode's list may access already 5061c9de560dSAlex Tomas * freed memory, bad-bad-bad */ 5062c9de560dSAlex Tomas 5063c9de560dSAlex Tomas /* XXX: if this happens too often, we can 5064c9de560dSAlex Tomas * add a flag to force wait only in case 5065c9de560dSAlex Tomas * of ->clear_inode(), but not in case of 5066c9de560dSAlex Tomas * regular truncate */ 5067c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5068c9de560dSAlex Tomas goto repeat; 5069c9de560dSAlex Tomas } 5070c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 5071c9de560dSAlex Tomas 5072c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5073cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_INODE_PA); 5074bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 5075c9de560dSAlex Tomas 50769651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 50779651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5078ce89f46cSAneesh Kumar K.V if (err) { 507954d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 50809651e6b2SKonstantin Khlebnikov err, group); 5081ce89f46cSAneesh Kumar K.V continue; 5082ce89f46cSAneesh Kumar K.V } 5083c9de560dSAlex Tomas 5084574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 50859008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 50869008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 508754d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 50889008a58eSDarrick J. Wong err, group); 5089e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5090ce89f46cSAneesh Kumar K.V continue; 5091c9de560dSAlex Tomas } 5092c9de560dSAlex Tomas 5093c9de560dSAlex Tomas ext4_lock_group(sb, group); 5094c9de560dSAlex Tomas list_del(&pa->pa_group_list); 50953e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5096c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5097c9de560dSAlex Tomas 5098e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5099c9de560dSAlex Tomas put_bh(bitmap_bh); 5100c9de560dSAlex Tomas 5101c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 5102c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5103c9de560dSAlex Tomas } 5104c9de560dSAlex Tomas } 5105c9de560dSAlex Tomas 510653f86b17SRitesh Harjani static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 510753f86b17SRitesh Harjani { 510853f86b17SRitesh Harjani struct ext4_prealloc_space *pa; 510953f86b17SRitesh Harjani 511053f86b17SRitesh Harjani BUG_ON(ext4_pspace_cachep == NULL); 511153f86b17SRitesh Harjani pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 511253f86b17SRitesh Harjani if (!pa) 511353f86b17SRitesh Harjani return -ENOMEM; 511453f86b17SRitesh Harjani atomic_set(&pa->pa_count, 1); 511553f86b17SRitesh Harjani ac->ac_pa = pa; 511653f86b17SRitesh Harjani return 0; 511753f86b17SRitesh Harjani } 511853f86b17SRitesh Harjani 511982089725SOjaswin Mujoo static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 512053f86b17SRitesh Harjani { 512153f86b17SRitesh Harjani struct ext4_prealloc_space *pa = ac->ac_pa; 512253f86b17SRitesh Harjani 512353f86b17SRitesh Harjani BUG_ON(!pa); 512453f86b17SRitesh Harjani ac->ac_pa = NULL; 512553f86b17SRitesh Harjani WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 512682089725SOjaswin Mujoo /* 512782089725SOjaswin Mujoo * current 
function is only called due to an error or due to 512882089725SOjaswin Mujoo * len of found blocks < len of requested blocks hence the PA has not 512982089725SOjaswin Mujoo * been added to grp->bb_prealloc_list. So we don't need to lock it 513082089725SOjaswin Mujoo */ 513182089725SOjaswin Mujoo pa->pa_deleted = 1; 513282089725SOjaswin Mujoo ext4_mb_pa_free(pa); 513353f86b17SRitesh Harjani } 513453f86b17SRitesh Harjani 51356ba495e9STheodore Ts'o #ifdef CONFIG_EXT4_DEBUG 5136e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb) 5137c9de560dSAlex Tomas { 5138e68cf40cSRitesh Harjani ext4_group_t i, ngroups; 5139c9de560dSAlex Tomas 51409b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5141e3570639SEric Sandeen return; 5142e3570639SEric Sandeen 51438df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 5144d3df1453SRitesh Harjani mb_debug(sb, "groups: "); 51458df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 5146c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5147c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 5148c9de560dSAlex Tomas ext4_grpblk_t start; 5149c9de560dSAlex Tomas struct list_head *cur; 5150c9de560dSAlex Tomas ext4_lock_group(sb, i); 5151c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 5152c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, 5153c9de560dSAlex Tomas pa_group_list); 5154c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5155c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5156c9de560dSAlex Tomas NULL, &start); 5157c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5158d3df1453SRitesh Harjani mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5159d3df1453SRitesh Harjani pa->pa_len); 5160c9de560dSAlex Tomas } 516160bd63d1SSolofo Ramangalahy ext4_unlock_group(sb, i); 5162d3df1453SRitesh Harjani mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5163d3df1453SRitesh Harjani grp->bb_fragments); 5164c9de560dSAlex Tomas } 5165c9de560dSAlex Tomas } 5166e68cf40cSRitesh Harjani 5167e68cf40cSRitesh Harjani static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5168e68cf40cSRitesh Harjani { 5169e68cf40cSRitesh Harjani struct super_block *sb = ac->ac_sb; 5170e68cf40cSRitesh Harjani 51719b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5172e68cf40cSRitesh Harjani return; 5173e68cf40cSRitesh Harjani 5174d3df1453SRitesh Harjani mb_debug(sb, "Can't allocate:" 5175e68cf40cSRitesh Harjani " Allocation context details:"); 5176d3df1453SRitesh Harjani mb_debug(sb, "status %u flags 0x%x", 5177e68cf40cSRitesh Harjani ac->ac_status, ac->ac_flags); 5178d3df1453SRitesh Harjani mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5179e68cf40cSRitesh Harjani "goal %lu/%lu/%lu@%lu, " 5180e68cf40cSRitesh Harjani "best %lu/%lu/%lu@%lu cr %d", 5181e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_group, 5182e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_start, 5183e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_len, 5184e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_logical, 5185e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_group, 5186e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_start, 5187e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_len, 5188e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_logical, 5189e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_group, 5190e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_start, 5191e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_len, 
5192e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_logical, 5193e68cf40cSRitesh Harjani (int)ac->ac_criteria); 5194d3df1453SRitesh Harjani mb_debug(sb, "%u found", ac->ac_found); 5195e68cf40cSRitesh Harjani ext4_mb_show_pa(sb); 5196e68cf40cSRitesh Harjani } 5197c9de560dSAlex Tomas #else 5198e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb) 5199e68cf40cSRitesh Harjani { 5200e68cf40cSRitesh Harjani return; 5201e68cf40cSRitesh Harjani } 5202c9de560dSAlex Tomas static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5203c9de560dSAlex Tomas { 5204e68cf40cSRitesh Harjani ext4_mb_show_pa(ac->ac_sb); 5205c9de560dSAlex Tomas return; 5206c9de560dSAlex Tomas } 5207c9de560dSAlex Tomas #endif 5208c9de560dSAlex Tomas 5209c9de560dSAlex Tomas /* 5210c9de560dSAlex Tomas * We use locality group preallocation for small files. The size of the 5211c9de560dSAlex Tomas * file is determined by the current size or the resulting size after 5212c9de560dSAlex Tomas * allocation, whichever is larger 5213c9de560dSAlex Tomas * 5214b713a5ecSTheodore Ts'o * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5215c9de560dSAlex Tomas */ 5216c9de560dSAlex Tomas static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5217c9de560dSAlex Tomas { 5218c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5219c9de560dSAlex Tomas int bsbits = ac->ac_sb->s_blocksize_bits; 5220c9de560dSAlex Tomas loff_t size, isize; 5221a9f2a293SJan Kara bool inode_pa_eligible, group_pa_eligible; 5222c9de560dSAlex Tomas 5223c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5224c9de560dSAlex Tomas return; 5225c9de560dSAlex Tomas 52264ba74d00STheodore Ts'o if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 52274ba74d00STheodore Ts'o return; 52284ba74d00STheodore Ts'o 5229a9f2a293SJan Kara group_pa_eligible = sbi->s_mb_group_prealloc > 0; 5230a9f2a293SJan Kara inode_pa_eligible = true; 523153accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 523250797481STheodore Ts'o isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 523350797481STheodore Ts'o >> bsbits; 5234c9de560dSAlex Tomas 5235a9f2a293SJan Kara /* No point in using inode preallocation for closed files */ 523682dd124cSNikolay Borisov if ((size == isize) && !ext4_fs_is_busy(sbi) && 5237a9f2a293SJan Kara !inode_is_open_for_write(ac->ac_inode)) 5238a9f2a293SJan Kara inode_pa_eligible = false; 523950797481STheodore Ts'o 524071780577STheodore Ts'o size = max(size, isize); 5241a9f2a293SJan Kara /* Don't use group allocation for large files */ 5242a9f2a293SJan Kara if (size > sbi->s_mb_stream_request) 5243a9f2a293SJan Kara group_pa_eligible = false; 5244a9f2a293SJan Kara 5245a9f2a293SJan Kara if (!group_pa_eligible) { 5246a9f2a293SJan Kara if (inode_pa_eligible) 52474ba74d00STheodore Ts'o ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5248a9f2a293SJan Kara else 5249a9f2a293SJan Kara ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 5250c9de560dSAlex Tomas return; 52514ba74d00STheodore Ts'o } 5252c9de560dSAlex Tomas 5253c9de560dSAlex Tomas BUG_ON(ac->ac_lg != NULL); 5254c9de560dSAlex Tomas /* 5255c9de560dSAlex Tomas * locality group prealloc space is per-CPU. The reason for having 5256c9de560dSAlex Tomas * a per-CPU locality group is to reduce the contention between block 5257c9de560dSAlex Tomas * requests from multiple CPUs.
5258c9de560dSAlex Tomas */ 5259a0b6bc63SChristoph Lameter ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5260c9de560dSAlex Tomas 5261c9de560dSAlex Tomas /* we're going to use group allocation */ 5262c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5263c9de560dSAlex Tomas 5264c9de560dSAlex Tomas /* serialize all allocations in the group */ 5265c9de560dSAlex Tomas mutex_lock(&ac->ac_lg->lg_mutex); 5266c9de560dSAlex Tomas } 5267c9de560dSAlex Tomas 5268d73eff68SGuoqing Jiang static noinline_for_stack void 52694ddfef7bSEric Sandeen ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5270c9de560dSAlex Tomas struct ext4_allocation_request *ar) 5271c9de560dSAlex Tomas { 5272c9de560dSAlex Tomas struct super_block *sb = ar->inode->i_sb; 5273c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5274c9de560dSAlex Tomas struct ext4_super_block *es = sbi->s_es; 5275c9de560dSAlex Tomas ext4_group_t group; 5276498e5f24STheodore Ts'o unsigned int len; 5277498e5f24STheodore Ts'o ext4_fsblk_t goal; 5278c9de560dSAlex Tomas ext4_grpblk_t block; 5279c9de560dSAlex Tomas 5280c9de560dSAlex Tomas /* we can't allocate > group size */ 5281c9de560dSAlex Tomas len = ar->len; 5282c9de560dSAlex Tomas 5283c9de560dSAlex Tomas /* just a dirty hack to filter too big requests */ 528440ae3487STheodore Ts'o if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 528540ae3487STheodore Ts'o len = EXT4_CLUSTERS_PER_GROUP(sb); 5286c9de560dSAlex Tomas 5287c9de560dSAlex Tomas /* start searching from the goal */ 5288c9de560dSAlex Tomas goal = ar->goal; 5289c9de560dSAlex Tomas if (goal < le32_to_cpu(es->s_first_data_block) || 5290c9de560dSAlex Tomas goal >= ext4_blocks_count(es)) 5291c9de560dSAlex Tomas goal = le32_to_cpu(es->s_first_data_block); 5292c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, goal, &group, &block); 5293c9de560dSAlex Tomas 5294c9de560dSAlex Tomas /* set up allocation goals */ 5295f5a44db5STheodore Ts'o ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5296c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 5297c9de560dSAlex Tomas ac->ac_sb = sb; 5298c9de560dSAlex Tomas ac->ac_inode = ar->inode; 529953accfa9STheodore Ts'o ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5300c9de560dSAlex Tomas ac->ac_o_ex.fe_group = group; 5301c9de560dSAlex Tomas ac->ac_o_ex.fe_start = block; 5302c9de560dSAlex Tomas ac->ac_o_ex.fe_len = len; 530353accfa9STheodore Ts'o ac->ac_g_ex = ac->ac_o_ex; 5304c9de560dSAlex Tomas ac->ac_flags = ar->flags; 5305c9de560dSAlex Tomas 53063cb77bd2Sbrookxu /* we have to define context: we'll work with a file or 5307c9de560dSAlex Tomas * locality group. this is a policy, actually */ 5308c9de560dSAlex Tomas ext4_mb_group_or_file(ac); 5309c9de560dSAlex Tomas 5310d3df1453SRitesh Harjani mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5311c9de560dSAlex Tomas "left: %u/%u, right %u/%u to %swritable\n", 5312c9de560dSAlex Tomas (unsigned) ar->len, (unsigned) ar->logical, 5313c9de560dSAlex Tomas (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5314c9de560dSAlex Tomas (unsigned) ar->lleft, (unsigned) ar->pleft, 5315c9de560dSAlex Tomas (unsigned) ar->lright, (unsigned) ar->pright, 531682dd124cSNikolay Borisov inode_is_open_for_write(ar->inode) ? 
"" : "non-"); 5317c9de560dSAlex Tomas } 5318c9de560dSAlex Tomas 53196be2ded1SAneesh Kumar K.V static noinline_for_stack void 53206be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(struct super_block *sb, 53216be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg, 53226be2ded1SAneesh Kumar K.V int order, int total_entries) 53236be2ded1SAneesh Kumar K.V { 53246be2ded1SAneesh Kumar K.V ext4_group_t group = 0; 53256be2ded1SAneesh Kumar K.V struct ext4_buddy e4b; 53266be2ded1SAneesh Kumar K.V struct list_head discard_list; 53276be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa, *tmp; 53286be2ded1SAneesh Kumar K.V 5329d3df1453SRitesh Harjani mb_debug(sb, "discard locality group preallocation\n"); 53306be2ded1SAneesh Kumar K.V 53316be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&discard_list); 53326be2ded1SAneesh Kumar K.V 53336be2ded1SAneesh Kumar K.V spin_lock(&lg->lg_prealloc_lock); 53346be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 533592e9c58cSMadhuparna Bhowmik pa_inode_list, 533692e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 53376be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 53386be2ded1SAneesh Kumar K.V if (atomic_read(&pa->pa_count)) { 53396be2ded1SAneesh Kumar K.V /* 53406be2ded1SAneesh Kumar K.V * This is the pa that we just used 53416be2ded1SAneesh Kumar K.V * for block allocation. So don't 53426be2ded1SAneesh Kumar K.V * free that 53436be2ded1SAneesh Kumar K.V */ 53446be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 53456be2ded1SAneesh Kumar K.V continue; 53466be2ded1SAneesh Kumar K.V } 53476be2ded1SAneesh Kumar K.V if (pa->pa_deleted) { 53486be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 53496be2ded1SAneesh Kumar K.V continue; 53506be2ded1SAneesh Kumar K.V } 53516be2ded1SAneesh Kumar K.V /* only lg prealloc space */ 5352cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_GROUP_PA); 53536be2ded1SAneesh Kumar K.V 53546be2ded1SAneesh Kumar K.V /* seems this one can be freed ... */ 535527bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 53566be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 53576be2ded1SAneesh Kumar K.V 53586be2ded1SAneesh Kumar K.V list_del_rcu(&pa->pa_inode_list); 53596be2ded1SAneesh Kumar K.V list_add(&pa->u.pa_tmp_list, &discard_list); 53606be2ded1SAneesh Kumar K.V 53616be2ded1SAneesh Kumar K.V total_entries--; 53626be2ded1SAneesh Kumar K.V if (total_entries <= 5) { 53636be2ded1SAneesh Kumar K.V /* 53646be2ded1SAneesh Kumar K.V * we want to keep only 5 entries 53656be2ded1SAneesh Kumar K.V * allowing it to grow to 8. This 53666be2ded1SAneesh Kumar K.V * mak sure we don't call discard 53676be2ded1SAneesh Kumar K.V * soon for this list. 
53686be2ded1SAneesh Kumar K.V */ 53696be2ded1SAneesh Kumar K.V break; 53706be2ded1SAneesh Kumar K.V } 53716be2ded1SAneesh Kumar K.V } 53726be2ded1SAneesh Kumar K.V spin_unlock(&lg->lg_prealloc_lock); 53736be2ded1SAneesh Kumar K.V 53746be2ded1SAneesh Kumar K.V list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 53759651e6b2SKonstantin Khlebnikov int err; 53766be2ded1SAneesh Kumar K.V 5377bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 53789651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 53799651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 53809651e6b2SKonstantin Khlebnikov if (err) { 538154d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 53829651e6b2SKonstantin Khlebnikov err, group); 53836be2ded1SAneesh Kumar K.V continue; 53846be2ded1SAneesh Kumar K.V } 53856be2ded1SAneesh Kumar K.V ext4_lock_group(sb, group); 53866be2ded1SAneesh Kumar K.V list_del(&pa->pa_group_list); 53873e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 53886be2ded1SAneesh Kumar K.V ext4_unlock_group(sb, group); 53896be2ded1SAneesh Kumar K.V 5390e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 53916be2ded1SAneesh Kumar K.V list_del(&pa->u.pa_tmp_list); 53926be2ded1SAneesh Kumar K.V call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 53936be2ded1SAneesh Kumar K.V } 53946be2ded1SAneesh Kumar K.V } 53956be2ded1SAneesh Kumar K.V 53966be2ded1SAneesh Kumar K.V /* 53976be2ded1SAneesh Kumar K.V * We have incremented pa_count. So it cannot be freed at this 53986be2ded1SAneesh Kumar K.V * point. Also we hold lg_mutex. So no parallel allocation is 53996be2ded1SAneesh Kumar K.V * possible from this lg. That means pa_free cannot be updated. 54006be2ded1SAneesh Kumar K.V * 54016be2ded1SAneesh Kumar K.V * A parallel ext4_mb_discard_group_preallocations is possible, 54026be2ded1SAneesh Kumar K.V * which can cause the lg_prealloc_list to be updated.
54036be2ded1SAneesh Kumar K.V */ 54046be2ded1SAneesh Kumar K.V 54056be2ded1SAneesh Kumar K.V static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 54066be2ded1SAneesh Kumar K.V { 54076be2ded1SAneesh Kumar K.V int order, added = 0, lg_prealloc_count = 1; 54086be2ded1SAneesh Kumar K.V struct super_block *sb = ac->ac_sb; 54096be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg = ac->ac_lg; 54106be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 54116be2ded1SAneesh Kumar K.V 54126be2ded1SAneesh Kumar K.V order = fls(pa->pa_free) - 1; 54136be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 54146be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 54156be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 54166be2ded1SAneesh Kumar K.V /* Add the prealloc space to lg */ 5417f1167009SNiu Yawei spin_lock(&lg->lg_prealloc_lock); 54186be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 541992e9c58cSMadhuparna Bhowmik pa_inode_list, 542092e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 54216be2ded1SAneesh Kumar K.V spin_lock(&tmp_pa->pa_lock); 54226be2ded1SAneesh Kumar K.V if (tmp_pa->pa_deleted) { 5423e7c9e3e9STheodore Ts'o spin_unlock(&tmp_pa->pa_lock); 54246be2ded1SAneesh Kumar K.V continue; 54256be2ded1SAneesh Kumar K.V } 54266be2ded1SAneesh Kumar K.V if (!added && pa->pa_free < tmp_pa->pa_free) { 54276be2ded1SAneesh Kumar K.V /* Add to the tail of the previous entry */ 54286be2ded1SAneesh Kumar K.V list_add_tail_rcu(&pa->pa_inode_list, 54296be2ded1SAneesh Kumar K.V &tmp_pa->pa_inode_list); 54306be2ded1SAneesh Kumar K.V added = 1; 54316be2ded1SAneesh Kumar K.V /* 54326be2ded1SAneesh Kumar K.V * we want to count the total 54336be2ded1SAneesh Kumar K.V * number of entries in the list 54346be2ded1SAneesh Kumar K.V */ 54356be2ded1SAneesh Kumar K.V } 54366be2ded1SAneesh Kumar K.V spin_unlock(&tmp_pa->pa_lock); 54376be2ded1SAneesh Kumar K.V lg_prealloc_count++; 54386be2ded1SAneesh Kumar K.V } 54396be2ded1SAneesh Kumar K.V if (!added) 54406be2ded1SAneesh Kumar K.V list_add_tail_rcu(&pa->pa_inode_list, 54416be2ded1SAneesh Kumar K.V &lg->lg_prealloc_list[order]); 5442f1167009SNiu Yawei spin_unlock(&lg->lg_prealloc_lock); 54436be2ded1SAneesh Kumar K.V 54446be2ded1SAneesh Kumar K.V /* Now trim the list to be not more than 8 elements */ 54456be2ded1SAneesh Kumar K.V if (lg_prealloc_count > 8) { 54466be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(sb, lg, 54476be2ded1SAneesh Kumar K.V order, lg_prealloc_count); 54486be2ded1SAneesh Kumar K.V return; 54496be2ded1SAneesh Kumar K.V } 54506be2ded1SAneesh Kumar K.V return; 54516be2ded1SAneesh Kumar K.V } 54526be2ded1SAneesh Kumar K.V 5453c9de560dSAlex Tomas /* 545427bc446eSbrookxu * if the per-inode prealloc list is too long, trim some PAs 545527bc446eSbrookxu */ 545627bc446eSbrookxu static void ext4_mb_trim_inode_pa(struct inode *inode) 545727bc446eSbrookxu { 545827bc446eSbrookxu struct ext4_inode_info *ei = EXT4_I(inode); 545927bc446eSbrookxu struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 546027bc446eSbrookxu int count, delta; 546127bc446eSbrookxu 546227bc446eSbrookxu count = atomic_read(&ei->i_prealloc_active); 546327bc446eSbrookxu delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; 546427bc446eSbrookxu if (count > sbi->s_mb_max_inode_prealloc + delta) { 546527bc446eSbrookxu count -= sbi->s_mb_max_inode_prealloc; 546627bc446eSbrookxu ext4_discard_preallocations(inode, count); 546727bc446eSbrookxu } 546827bc446eSbrookxu }
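/*
 * Worked example for the trimming hysteresis above (the limit value is
 * illustrative, not necessarily the driver's default): with
 * s_mb_max_inode_prealloc = 32, delta = (32 >> 2) + 1 = 9, so trimming
 * only kicks in once the inode holds more than 32 + 9 = 41 active PAs,
 * and it then discards count - 32 of the least recently used ones. The
 * delta of slack keeps a list hovering right at the limit from being
 * trimmed on every single allocation.
 */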
546927bc446eSbrookxu 547027bc446eSbrookxu /* 5471c9de560dSAlex Tomas * release all resources we used in allocation 5472c9de560dSAlex Tomas */ 5473c9de560dSAlex Tomas static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5474c9de560dSAlex Tomas { 547527bc446eSbrookxu struct inode *inode = ac->ac_inode; 547627bc446eSbrookxu struct ext4_inode_info *ei = EXT4_I(inode); 547753accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 54786be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa = ac->ac_pa; 54796be2ded1SAneesh Kumar K.V if (pa) { 5480cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) { 5481c9de560dSAlex Tomas /* see comment in ext4_mb_use_group_pa() */ 54826be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 548353accfa9STheodore Ts'o pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 548453accfa9STheodore Ts'o pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 54856be2ded1SAneesh Kumar K.V pa->pa_free -= ac->ac_b_ex.fe_len; 54866be2ded1SAneesh Kumar K.V pa->pa_len -= ac->ac_b_ex.fe_len; 54876be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 548866d5e027Sbrookxu 54896be2ded1SAneesh Kumar K.V /* 54906be2ded1SAneesh Kumar K.V * We want to add the pa to the right bucket. 54916be2ded1SAneesh Kumar K.V * Remove it from the list and while adding 54926be2ded1SAneesh Kumar K.V * make sure the list to which we are adding 549344183d42SAmir Goldstein * doesn't grow big. 54946be2ded1SAneesh Kumar K.V */ 549566d5e027Sbrookxu if (likely(pa->pa_free)) { 54966be2ded1SAneesh Kumar K.V spin_lock(pa->pa_obj_lock); 54976be2ded1SAneesh Kumar K.V list_del_rcu(&pa->pa_inode_list); 54986be2ded1SAneesh Kumar K.V spin_unlock(pa->pa_obj_lock); 54996be2ded1SAneesh Kumar K.V ext4_mb_add_n_trim(ac); 5500c9de560dSAlex Tomas } 550166d5e027Sbrookxu } 550227bc446eSbrookxu 550327bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 550427bc446eSbrookxu /* 550527bc446eSbrookxu * treat the per-inode prealloc list as an LRU list, then try 550627bc446eSbrookxu * to trim the least recently used PA. 550727bc446eSbrookxu */ 550827bc446eSbrookxu spin_lock(pa->pa_obj_lock); 550927bc446eSbrookxu list_move(&pa->pa_inode_list, &ei->i_prealloc_list); 551027bc446eSbrookxu spin_unlock(pa->pa_obj_lock); 551127bc446eSbrookxu } 551227bc446eSbrookxu 55136be2ded1SAneesh Kumar K.V ext4_mb_put_pa(ac, ac->ac_sb, pa); 5514c9de560dSAlex Tomas } 5515c9de560dSAlex Tomas if (ac->ac_bitmap_page) 551609cbfeafSKirill A. Shutemov put_page(ac->ac_bitmap_page); 5517c9de560dSAlex Tomas if (ac->ac_buddy_page) 551809cbfeafSKirill A.
Shutemov put_page(ac->ac_buddy_page); 5519c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5520c9de560dSAlex Tomas mutex_unlock(&ac->ac_lg->lg_mutex); 5521c9de560dSAlex Tomas ext4_mb_collect_stats(ac); 552227bc446eSbrookxu ext4_mb_trim_inode_pa(inode); 5523c9de560dSAlex Tomas return 0; 5524c9de560dSAlex Tomas } 5525c9de560dSAlex Tomas 5526c9de560dSAlex Tomas static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5527c9de560dSAlex Tomas { 55288df9675fSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5529c9de560dSAlex Tomas int ret; 55308c80fb31SChunguang Xu int freed = 0, busy = 0; 55318c80fb31SChunguang Xu int retry = 0; 5532c9de560dSAlex Tomas 55339bffad1eSTheodore Ts'o trace_ext4_mb_discard_preallocations(sb, needed); 55348c80fb31SChunguang Xu 55358c80fb31SChunguang Xu if (needed == 0) 55368c80fb31SChunguang Xu needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 55378c80fb31SChunguang Xu repeat: 55388df9675fSTheodore Ts'o for (i = 0; i < ngroups && needed > 0; i++) { 55398c80fb31SChunguang Xu ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5540c9de560dSAlex Tomas freed += ret; 5541c9de560dSAlex Tomas needed -= ret; 55428c80fb31SChunguang Xu cond_resched(); 55438c80fb31SChunguang Xu } 55448c80fb31SChunguang Xu 55458c80fb31SChunguang Xu if (needed > 0 && busy && ++retry < 3) { 55468c80fb31SChunguang Xu busy = 0; 55478c80fb31SChunguang Xu goto repeat; 5548c9de560dSAlex Tomas } 5549c9de560dSAlex Tomas 5550c9de560dSAlex Tomas return freed; 5551c9de560dSAlex Tomas } 5552c9de560dSAlex Tomas 5553cf5e2ca6SRitesh Harjani static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 555407b5b8e1SRitesh Harjani struct ext4_allocation_context *ac, u64 *seq) 5555cf5e2ca6SRitesh Harjani { 5556cf5e2ca6SRitesh Harjani int freed; 555707b5b8e1SRitesh Harjani u64 seq_retry = 0; 555807b5b8e1SRitesh Harjani bool ret = false; 5559cf5e2ca6SRitesh Harjani 5560cf5e2ca6SRitesh Harjani freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 556107b5b8e1SRitesh Harjani if (freed) { 556207b5b8e1SRitesh Harjani ret = true; 556307b5b8e1SRitesh Harjani goto out_dbg; 556407b5b8e1SRitesh Harjani } 556507b5b8e1SRitesh Harjani seq_retry = ext4_get_discard_pa_seq_sum(); 556699377830SRitesh Harjani if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 556799377830SRitesh Harjani ac->ac_flags |= EXT4_MB_STRICT_CHECK; 556807b5b8e1SRitesh Harjani *seq = seq_retry; 556907b5b8e1SRitesh Harjani ret = true; 557007b5b8e1SRitesh Harjani } 557107b5b8e1SRitesh Harjani 557207b5b8e1SRitesh Harjani out_dbg: 557307b5b8e1SRitesh Harjani mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? 
"yes" : "no"); 557407b5b8e1SRitesh Harjani return ret; 5575cf5e2ca6SRitesh Harjani } 5576cf5e2ca6SRitesh Harjani 55778016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 55788016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp); 55798016e29fSHarshad Shirwadkar 5580c9de560dSAlex Tomas /* 5581c9de560dSAlex Tomas * Main entry point into mballoc to allocate blocks 5582c9de560dSAlex Tomas * it tries to use preallocation first, then falls back 5583c9de560dSAlex Tomas * to usual allocation 5584c9de560dSAlex Tomas */ 5585c9de560dSAlex Tomas ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 5586c9de560dSAlex Tomas struct ext4_allocation_request *ar, int *errp) 5587c9de560dSAlex Tomas { 5588256bdb49SEric Sandeen struct ext4_allocation_context *ac = NULL; 5589c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5590c9de560dSAlex Tomas struct super_block *sb; 5591c9de560dSAlex Tomas ext4_fsblk_t block = 0; 559260e58e0fSMingming Cao unsigned int inquota = 0; 559353accfa9STheodore Ts'o unsigned int reserv_clstrs = 0; 559480fa46d6STheodore Ts'o int retries = 0; 559507b5b8e1SRitesh Harjani u64 seq; 5596c9de560dSAlex Tomas 5597b10a44c3STheodore Ts'o might_sleep(); 5598c9de560dSAlex Tomas sb = ar->inode->i_sb; 5599c9de560dSAlex Tomas sbi = EXT4_SB(sb); 5600c9de560dSAlex Tomas 56019bffad1eSTheodore Ts'o trace_ext4_request_blocks(ar); 56028016e29fSHarshad Shirwadkar if (sbi->s_mount_state & EXT4_FC_REPLAY) 56038016e29fSHarshad Shirwadkar return ext4_mb_new_blocks_simple(handle, ar, errp); 5604ba80b101STheodore Ts'o 560545dc63e7SDmitry Monakhov /* Allow to use superuser reservation for quota file */ 560602749a4cSTahsin Erdogan if (ext4_is_quota_file(ar->inode)) 560745dc63e7SDmitry Monakhov ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 560845dc63e7SDmitry Monakhov 5609e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 561060e58e0fSMingming Cao /* Without delayed allocation we need to verify 561160e58e0fSMingming Cao * there is enough free blocks to do block allocation 561260e58e0fSMingming Cao * and verify allocation doesn't exceed the quota limits. 
5613d2a17637SMingming Cao */ 561455f020dbSAllison Henderson while (ar->len && 5615e7d5f315STheodore Ts'o ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 561655f020dbSAllison Henderson 5617030ba6bcSAneesh Kumar K.V /* let others free the space */ 5618bb8b20edSLukas Czerner cond_resched(); 5619030ba6bcSAneesh Kumar K.V ar->len = ar->len >> 1; 5620030ba6bcSAneesh Kumar K.V } 5621030ba6bcSAneesh Kumar K.V if (!ar->len) { 5622bbc4ec77SRitesh Harjani ext4_mb_show_pa(sb); 562307031431SMingming Cao *errp = -ENOSPC; 562407031431SMingming Cao return 0; 562507031431SMingming Cao } 562653accfa9STheodore Ts'o reserv_clstrs = ar->len; 562755f020dbSAllison Henderson if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 562853accfa9STheodore Ts'o dquot_alloc_block_nofail(ar->inode, 562953accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len)); 563055f020dbSAllison Henderson } else { 563155f020dbSAllison Henderson while (ar->len && 563253accfa9STheodore Ts'o dquot_alloc_block(ar->inode, 563353accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len))) { 563455f020dbSAllison Henderson 5635c9de560dSAlex Tomas ar->flags |= EXT4_MB_HINT_NOPREALLOC; 5636c9de560dSAlex Tomas ar->len--; 5637c9de560dSAlex Tomas } 563855f020dbSAllison Henderson } 563960e58e0fSMingming Cao inquota = ar->len; 5640c9de560dSAlex Tomas if (ar->len == 0) { 5641c9de560dSAlex Tomas *errp = -EDQUOT; 56426c7a120aSAditya Kali goto out; 5643c9de560dSAlex Tomas } 564460e58e0fSMingming Cao } 5645d2a17637SMingming Cao 564685556c9aSWei Yongjun ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 5647833576b3STheodore Ts'o if (!ac) { 5648363d4251SShen Feng ar->len = 0; 5649256bdb49SEric Sandeen *errp = -ENOMEM; 56506c7a120aSAditya Kali goto out; 5651256bdb49SEric Sandeen } 5652256bdb49SEric Sandeen 5653d73eff68SGuoqing Jiang ext4_mb_initialize_context(ac, ar); 5654c9de560dSAlex Tomas 5655256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 565681198536SRitesh Harjani seq = this_cpu_read(discard_pa_seq); 5657256bdb49SEric Sandeen if (!ext4_mb_use_preallocated(ac)) { 5658256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_ALLOC; 5659256bdb49SEric Sandeen ext4_mb_normalize_request(ac, ar); 566053f86b17SRitesh Harjani 566153f86b17SRitesh Harjani *errp = ext4_mb_pa_alloc(ac); 566253f86b17SRitesh Harjani if (*errp) 566353f86b17SRitesh Harjani goto errout; 5664c9de560dSAlex Tomas repeat: 5665c9de560dSAlex Tomas /* allocate space in core */ 56666c7a120aSAditya Kali *errp = ext4_mb_regular_allocator(ac); 566753f86b17SRitesh Harjani /* 566853f86b17SRitesh Harjani * The pa allocated above is added to grp->bb_prealloc_list only 566953f86b17SRitesh Harjani * when we were able to allocate some blocks, i.e. when 567053f86b17SRitesh Harjani * ac->ac_status == AC_STATUS_FOUND. 567153f86b17SRitesh Harjani * An error from above means ac->ac_status != AC_STATUS_FOUND, 567253f86b17SRitesh Harjani * so we have to free this pa here ourselves.
567353f86b17SRitesh Harjani */ 56742c00ef3eSAlexey Khoroshilov if (*errp) { 567582089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 56762c00ef3eSAlexey Khoroshilov ext4_discard_allocated_blocks(ac); 56772c00ef3eSAlexey Khoroshilov goto errout; 56782c00ef3eSAlexey Khoroshilov } 567953f86b17SRitesh Harjani if (ac->ac_status == AC_STATUS_FOUND && 568053f86b17SRitesh Harjani ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 568182089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 5682c9de560dSAlex Tomas } 5683256bdb49SEric Sandeen if (likely(ac->ac_status == AC_STATUS_FOUND)) { 568453accfa9STheodore Ts'o *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 5685554a5cccSVegard Nossum if (*errp) { 5686b844167eSCurt Wohlgemuth ext4_discard_allocated_blocks(ac); 56876d138cedSEric Sandeen goto errout; 56886d138cedSEric Sandeen } else { 5689256bdb49SEric Sandeen block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5690256bdb49SEric Sandeen ar->len = ac->ac_b_ex.fe_len; 5691519deca0SAneesh Kumar K.V } 5692c9de560dSAlex Tomas } else { 569380fa46d6STheodore Ts'o if (++retries < 3 && 569480fa46d6STheodore Ts'o ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 5695c9de560dSAlex Tomas goto repeat; 569653f86b17SRitesh Harjani /* 569753f86b17SRitesh Harjani * If block allocation fails, then the pa allocated above 569853f86b17SRitesh Harjani * needs to be freed here as well. 569953f86b17SRitesh Harjani */ 570082089725SOjaswin Mujoo ext4_mb_pa_put_free(ac); 5701c9de560dSAlex Tomas *errp = -ENOSPC; 57026c7a120aSAditya Kali } 57036c7a120aSAditya Kali 57046c7a120aSAditya Kali if (*errp) { 5705aaae558dSKemeng Shi errout: 5706256bdb49SEric Sandeen ac->ac_b_ex.fe_len = 0; 5707c9de560dSAlex Tomas ar->len = 0; 5708256bdb49SEric Sandeen ext4_mb_show_ac(ac); 5709c9de560dSAlex Tomas } 5710256bdb49SEric Sandeen ext4_mb_release_context(ac); 5711363d4251SShen Feng kmem_cache_free(ext4_ac_cachep, ac); 5712aaae558dSKemeng Shi out: 571360e58e0fSMingming Cao if (inquota && ar->len < inquota) 571453accfa9STheodore Ts'o dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 57150087d9fbSAneesh Kumar K.V if (!ar->len) { 5716e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 57170087d9fbSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 571857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 571953accfa9STheodore Ts'o reserv_clstrs); 57200087d9fbSAneesh Kumar K.V } 5721c9de560dSAlex Tomas 57229bffad1eSTheodore Ts'o trace_ext4_allocate_blocks(ar, (unsigned long long)block); 5723ba80b101STheodore Ts'o 5724c9de560dSAlex Tomas return block; 5725c9de560dSAlex Tomas } 5726c9de560dSAlex Tomas 5727c894058dSAneesh Kumar K.V /* 5728c894058dSAneesh Kumar K.V * We can merge two free data extents only if the physical blocks 5729c894058dSAneesh Kumar K.V * are contiguous, AND the extents were freed by the same transaction, 5730c894058dSAneesh Kumar K.V * AND the blocks are associated with the same group.
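 *
 * Illustrative example: if an existing entry covers clusters 100-149
 * of a group and a new entry freed in the same transaction covers
 * clusters 150-199, the two are merged into a single rb-tree node
 * spanning 100-199; the old node is then erased and freed.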
5731c894058dSAneesh Kumar K.V */ 5732a0154344SDaeho Jeong static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 5733a0154344SDaeho Jeong struct ext4_free_data *entry, 5734a0154344SDaeho Jeong struct ext4_free_data *new_entry, 5735a0154344SDaeho Jeong struct rb_root *entry_rb_root) 5736c894058dSAneesh Kumar K.V { 5737a0154344SDaeho Jeong if ((entry->efd_tid != new_entry->efd_tid) || 5738a0154344SDaeho Jeong (entry->efd_group != new_entry->efd_group)) 5739a0154344SDaeho Jeong return; 5740a0154344SDaeho Jeong if (entry->efd_start_cluster + entry->efd_count == 5741a0154344SDaeho Jeong new_entry->efd_start_cluster) { 5742a0154344SDaeho Jeong new_entry->efd_start_cluster = entry->efd_start_cluster; 5743a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5744a0154344SDaeho Jeong } else if (new_entry->efd_start_cluster + new_entry->efd_count == 5745a0154344SDaeho Jeong entry->efd_start_cluster) { 5746a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5747a0154344SDaeho Jeong } else 5748a0154344SDaeho Jeong return; 5749a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 5750a0154344SDaeho Jeong list_del(&entry->efd_list); 5751a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 5752a0154344SDaeho Jeong rb_erase(&entry->efd_node, entry_rb_root); 5753a0154344SDaeho Jeong kmem_cache_free(ext4_free_data_cachep, entry); 5754c894058dSAneesh Kumar K.V } 5755c894058dSAneesh Kumar K.V 575685b67ffbSKemeng Shi static noinline_for_stack void 57574ddfef7bSEric Sandeen ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 57587a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry) 5759c9de560dSAlex Tomas { 5760e29136f8STheodore Ts'o ext4_group_t group = e4b->bd_group; 576184130193STheodore Ts'o ext4_grpblk_t cluster; 5762d08854f5STheodore Ts'o ext4_grpblk_t clusters = new_entry->efd_count; 57637a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 5764c9de560dSAlex Tomas struct ext4_group_info *db = e4b->bd_info; 5765c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5766c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5767c894058dSAneesh Kumar K.V struct rb_node **n = &db->bb_free_root.rb_node, *node; 5768c894058dSAneesh Kumar K.V struct rb_node *parent = NULL, *new_node; 5769c894058dSAneesh Kumar K.V 57700390131bSFrank Mayhar BUG_ON(!ext4_handle_valid(handle)); 5771c9de560dSAlex Tomas BUG_ON(e4b->bd_bitmap_page == NULL); 5772c9de560dSAlex Tomas BUG_ON(e4b->bd_buddy_page == NULL); 5773c9de560dSAlex Tomas 577418aadd47SBobi Jam new_node = &new_entry->efd_node; 577518aadd47SBobi Jam cluster = new_entry->efd_start_cluster; 5776c9de560dSAlex Tomas 5777c894058dSAneesh Kumar K.V if (!*n) { 5778c894058dSAneesh Kumar K.V /* first free block extent. We need to 5779c894058dSAneesh Kumar K.V protect buddy cache from being freed, 5780c9de560dSAlex Tomas * otherwise we'll refresh it from 5781c9de560dSAlex Tomas * on-disk bitmap and lose not-yet-available 5782c9de560dSAlex Tomas * blocks */ 578309cbfeafSKirill A. Shutemov get_page(e4b->bd_buddy_page); 578409cbfeafSKirill A.
Shutemov get_page(e4b->bd_bitmap_page); 5785c894058dSAneesh Kumar K.V } 5786c894058dSAneesh Kumar K.V while (*n) { 5787c894058dSAneesh Kumar K.V parent = *n; 578818aadd47SBobi Jam entry = rb_entry(parent, struct ext4_free_data, efd_node); 578918aadd47SBobi Jam if (cluster < entry->efd_start_cluster) 5790c894058dSAneesh Kumar K.V n = &(*n)->rb_left; 579118aadd47SBobi Jam else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 5792c894058dSAneesh Kumar K.V n = &(*n)->rb_right; 5793c894058dSAneesh Kumar K.V else { 5794e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 579584130193STheodore Ts'o ext4_group_first_block_no(sb, group) + 579684130193STheodore Ts'o EXT4_C2B(sbi, cluster), 5797e29136f8STheodore Ts'o "Block already on to-be-freed list"); 5798cca41553SChunguang Xu kmem_cache_free(ext4_free_data_cachep, new_entry); 579985b67ffbSKemeng Shi return; 5800c9de560dSAlex Tomas } 5801c9de560dSAlex Tomas } 5802c9de560dSAlex Tomas 5803c894058dSAneesh Kumar K.V rb_link_node(new_node, parent, n); 5804c894058dSAneesh Kumar K.V rb_insert_color(new_node, &db->bb_free_root); 5805c894058dSAneesh Kumar K.V 5806c894058dSAneesh Kumar K.V /* Now try to see the extent can be merged to left and right */ 5807c894058dSAneesh Kumar K.V node = rb_prev(new_node); 5808c894058dSAneesh Kumar K.V if (node) { 580918aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5810a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5811a0154344SDaeho Jeong &(db->bb_free_root)); 5812c9de560dSAlex Tomas } 5813c894058dSAneesh Kumar K.V 5814c894058dSAneesh Kumar K.V node = rb_next(new_node); 5815c894058dSAneesh Kumar K.V if (node) { 581618aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5817a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5818a0154344SDaeho Jeong &(db->bb_free_root)); 5819c894058dSAneesh Kumar K.V } 5820a0154344SDaeho Jeong 5821d08854f5STheodore Ts'o spin_lock(&sbi->s_md_lock); 5822a0154344SDaeho Jeong list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 5823d08854f5STheodore Ts'o sbi->s_mb_free_pending += clusters; 5824d08854f5STheodore Ts'o spin_unlock(&sbi->s_md_lock); 5825c9de560dSAlex Tomas } 5826c9de560dSAlex Tomas 58278016e29fSHarshad Shirwadkar /* 58288016e29fSHarshad Shirwadkar * Simple allocator for Ext4 fast commit replay path. It searches for blocks 58298016e29fSHarshad Shirwadkar * linearly starting at the goal block and also excludes the blocks which 58308016e29fSHarshad Shirwadkar * are going to be in use after fast commit replay. 
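 *
 * Behaviour sketch (illustrative): a goal outside the valid range
 * falls back to s_first_data_block; the scan then walks the groups
 * with mb_find_next_zero_bit(), skipping any bit for which
 * ext4_fc_replay_check_excluded() reports the block as busy after
 * replay, and hands back exactly one block (ar->len = 1) or -ENOSPC.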
58318016e29fSHarshad Shirwadkar */ 58328016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 58338016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp) 58348016e29fSHarshad Shirwadkar { 58358016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 58368016e29fSHarshad Shirwadkar struct super_block *sb = ar->inode->i_sb; 58378016e29fSHarshad Shirwadkar ext4_group_t group; 58388016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 583931a074a0SXin Yin ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 584031a074a0SXin Yin ext4_grpblk_t i = 0; 58418016e29fSHarshad Shirwadkar ext4_fsblk_t goal, block; 58428016e29fSHarshad Shirwadkar struct ext4_super_block *es = EXT4_SB(sb)->s_es; 58438016e29fSHarshad Shirwadkar 58448016e29fSHarshad Shirwadkar goal = ar->goal; 58458016e29fSHarshad Shirwadkar if (goal < le32_to_cpu(es->s_first_data_block) || 58468016e29fSHarshad Shirwadkar goal >= ext4_blocks_count(es)) 58478016e29fSHarshad Shirwadkar goal = le32_to_cpu(es->s_first_data_block); 58488016e29fSHarshad Shirwadkar 58498016e29fSHarshad Shirwadkar ar->len = 0; 58508016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 58518016e29fSHarshad Shirwadkar for (; group < ext4_get_groups_count(sb); group++) { 58528016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 58538016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 58548016e29fSHarshad Shirwadkar *errp = PTR_ERR(bitmap_bh); 58558016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 58568016e29fSHarshad Shirwadkar return 0; 58578016e29fSHarshad Shirwadkar } 58588016e29fSHarshad Shirwadkar 585931a074a0SXin Yin while (1) { 586031a074a0SXin Yin i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 58618016e29fSHarshad Shirwadkar blkoff); 586231a074a0SXin Yin if (i >= max) 586331a074a0SXin Yin break; 58648016e29fSHarshad Shirwadkar if (ext4_fc_replay_check_excluded(sb, 586531a074a0SXin Yin ext4_group_first_block_no(sb, group) + i)) { 586631a074a0SXin Yin blkoff = i + 1; 586731a074a0SXin Yin } else 586831a074a0SXin Yin break; 586931a074a0SXin Yin } 587031a074a0SXin Yin brelse(bitmap_bh); 587131a074a0SXin Yin if (i < max) 58728016e29fSHarshad Shirwadkar break; 5873253cacb0SKemeng Shi 5874253cacb0SKemeng Shi blkoff = 0; 58758016e29fSHarshad Shirwadkar } 58768016e29fSHarshad Shirwadkar 587731a074a0SXin Yin if (group >= ext4_get_groups_count(sb) || i >= max) { 587831a074a0SXin Yin *errp = -ENOSPC; 58798016e29fSHarshad Shirwadkar return 0; 588031a074a0SXin Yin } 58818016e29fSHarshad Shirwadkar 58828016e29fSHarshad Shirwadkar block = ext4_group_first_block_no(sb, group) + i; 58838016e29fSHarshad Shirwadkar ext4_mb_mark_bb(sb, block, 1, 1); 58848016e29fSHarshad Shirwadkar ar->len = 1; 58858016e29fSHarshad Shirwadkar 58868016e29fSHarshad Shirwadkar return block; 58878016e29fSHarshad Shirwadkar } 58888016e29fSHarshad Shirwadkar 58898016e29fSHarshad Shirwadkar static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 58908016e29fSHarshad Shirwadkar unsigned long count) 58918016e29fSHarshad Shirwadkar { 58928016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 58938016e29fSHarshad Shirwadkar struct super_block *sb = inode->i_sb; 58948016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 58958016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 58968016e29fSHarshad Shirwadkar ext4_group_t group; 58978016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 58988016e29fSHarshad Shirwadkar int already_freed = 0, err, i; 58998016e29fSHarshad 
Shirwadkar 59008016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 59018016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 59028016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 59038016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 59048016e29fSHarshad Shirwadkar return; 59058016e29fSHarshad Shirwadkar } 59068016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 59078016e29fSHarshad Shirwadkar if (!gdp) 59081b5c9d34SKemeng Shi goto err_out; 59098016e29fSHarshad Shirwadkar 59108016e29fSHarshad Shirwadkar for (i = 0; i < count; i++) { 59118016e29fSHarshad Shirwadkar if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 59128016e29fSHarshad Shirwadkar already_freed++; 59138016e29fSHarshad Shirwadkar } 59148016e29fSHarshad Shirwadkar mb_clear_bits(bitmap_bh->b_data, blkoff, count); 59158016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 59168016e29fSHarshad Shirwadkar if (err) 59171b5c9d34SKemeng Shi goto err_out; 59188016e29fSHarshad Shirwadkar ext4_free_group_clusters_set( 59198016e29fSHarshad Shirwadkar sb, gdp, ext4_free_group_clusters(sb, gdp) + 59208016e29fSHarshad Shirwadkar count - already_freed); 59211df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 59228016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 59238016e29fSHarshad Shirwadkar ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 59248016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 59258016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 59261b5c9d34SKemeng Shi 59271b5c9d34SKemeng Shi err_out: 59288016e29fSHarshad Shirwadkar brelse(bitmap_bh); 59298016e29fSHarshad Shirwadkar } 59308016e29fSHarshad Shirwadkar 593144338711STheodore Ts'o /** 59328ac3939dSRitesh Harjani * ext4_mb_clear_bb() -- helper function for freeing blocks. 
59338ac3939dSRitesh Harjani * Used by ext4_free_blocks() 593444338711STheodore Ts'o * @handle: handle for this transaction 593544338711STheodore Ts'o * @inode: inode 5936c60990b3STheodore Ts'o * @block: starting physical block to be freed 5937c60990b3STheodore Ts'o * @count: number of blocks to be freed 59385def1360SYongqiang Yang * @flags: flags used by ext4_free_blocks 5939c9de560dSAlex Tomas */ 59408ac3939dSRitesh Harjani static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 59418ac3939dSRitesh Harjani ext4_fsblk_t block, unsigned long count, 59428ac3939dSRitesh Harjani int flags) 5943c9de560dSAlex Tomas { 594426346ff6SAneesh Kumar K.V struct buffer_head *bitmap_bh = NULL; 5945c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 5946c9de560dSAlex Tomas struct ext4_group_desc *gdp; 5947498e5f24STheodore Ts'o unsigned int overflow; 5948c9de560dSAlex Tomas ext4_grpblk_t bit; 5949c9de560dSAlex Tomas struct buffer_head *gd_bh; 5950c9de560dSAlex Tomas ext4_group_t block_group; 5951c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5952c9de560dSAlex Tomas struct ext4_buddy e4b; 595384130193STheodore Ts'o unsigned int count_clusters; 5954c9de560dSAlex Tomas int err = 0; 5955c9de560dSAlex Tomas int ret; 5956c9de560dSAlex Tomas 59578016e29fSHarshad Shirwadkar sbi = EXT4_SB(sb); 59588016e29fSHarshad Shirwadkar 59591e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 59601e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 59611e1c2b86SLukas Czerner ext4_error(sb, "Freeing blocks in system zone - " 59621e1c2b86SLukas Czerner "Block = %llu, count = %lu", block, count); 59631e1c2b86SLukas Czerner /* err = 0. ext4_std_error should be a no op */ 59641e1c2b86SLukas Czerner goto error_return; 59651e1c2b86SLukas Czerner } 59661e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 59671e1c2b86SLukas Czerner 5968c9de560dSAlex Tomas do_more: 5969c9de560dSAlex Tomas overflow = 0; 5970c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 5971c9de560dSAlex Tomas 5972163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 5973163a203dSDarrick J. Wong ext4_get_group_info(sb, block_group)))) 5974163a203dSDarrick J. Wong return; 5975163a203dSDarrick J. Wong 5976c9de560dSAlex Tomas /* 5977c9de560dSAlex Tomas * Check to see if we are freeing blocks across a group 5978c9de560dSAlex Tomas * boundary. 5979c9de560dSAlex Tomas */ 598084130193STheodore Ts'o if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 598184130193STheodore Ts'o overflow = EXT4_C2B(sbi, bit) + count - 598284130193STheodore Ts'o EXT4_BLOCKS_PER_GROUP(sb); 5983c9de560dSAlex Tomas count -= overflow; 59841e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 59851e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 5986c9de560dSAlex Tomas } 5987810da240SLukas Czerner count_clusters = EXT4_NUM_B2C(sbi, count); 5988574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, block_group); 59899008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 59909008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 59919008a58eSDarrick J. 
Wong bitmap_bh = NULL; 5992c9de560dSAlex Tomas goto error_return; 5993ce89f46cSAneesh Kumar K.V } 5994c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 5995ce89f46cSAneesh Kumar K.V if (!gdp) { 5996ce89f46cSAneesh Kumar K.V err = -EIO; 5997c9de560dSAlex Tomas goto error_return; 5998ce89f46cSAneesh Kumar K.V } 5999c9de560dSAlex Tomas 60001e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 60011e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 600212062dddSEric Sandeen ext4_error(sb, "Freeing blocks in system zone - " 60030610b6e9STheodore Ts'o "Block = %llu, count = %lu", block, count); 6004519deca0SAneesh Kumar K.V /* err = 0. ext4_std_error should be a no op */ 6005519deca0SAneesh Kumar K.V goto error_return; 6006c9de560dSAlex Tomas } 6007c9de560dSAlex Tomas 6008c9de560dSAlex Tomas BUFFER_TRACE(bitmap_bh, "getting write access"); 6009188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6010188c299eSJan Kara EXT4_JTR_NONE); 6011c9de560dSAlex Tomas if (err) 6012c9de560dSAlex Tomas goto error_return; 6013c9de560dSAlex Tomas 6014c9de560dSAlex Tomas /* 6015c9de560dSAlex Tomas * We are about to modify some metadata. Call the journal APIs 6016c9de560dSAlex Tomas * to unshare ->b_data if a currently-committing transaction is 6017c9de560dSAlex Tomas * using it 6018c9de560dSAlex Tomas */ 6019c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "get_write_access"); 6020188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6021c9de560dSAlex Tomas if (err) 6022c9de560dSAlex Tomas goto error_return; 6023c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 6024c9de560dSAlex Tomas { 6025c9de560dSAlex Tomas int i; 602684130193STheodore Ts'o for (i = 0; i < count_clusters; i++) 6027c9de560dSAlex Tomas BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 6028c9de560dSAlex Tomas } 6029c9de560dSAlex Tomas #endif 603084130193STheodore Ts'o trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6031c9de560dSAlex Tomas 6032adb7ef60SKonstantin Khlebnikov /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6033adb7ef60SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6034adb7ef60SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 6035920313a7SAneesh Kumar K.V if (err) 6036920313a7SAneesh Kumar K.V goto error_return; 6037e6362609STheodore Ts'o 6038f96c450dSDaeho Jeong /* 6039f96c450dSDaeho Jeong * We need to make sure we don't reuse the freed block until after the 6040f96c450dSDaeho Jeong * transaction is committed. We make an exception if the inode is to be 6041f96c450dSDaeho Jeong * written in writeback mode since writeback mode has weak data 6042f96c450dSDaeho Jeong * consistency guarantees. 6043f96c450dSDaeho Jeong */ 6044f96c450dSDaeho Jeong if (ext4_handle_valid(handle) && 6045f96c450dSDaeho Jeong ((flags & EXT4_FREE_BLOCKS_METADATA) || 6046f96c450dSDaeho Jeong !ext4_should_writeback_data(inode))) { 60477a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry; 60487a2fcbf7SAneesh Kumar K.V /* 60497444a072SMichal Hocko * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 60507444a072SMichal Hocko * to fail. 
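 * (By this point the bitmap and group descriptor buffers have already
 * been journalled for writing, so there is no clean way to back out of
 * the free; retrying a small slab allocation indefinitely is the
 * lesser evil.)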
60517a2fcbf7SAneesh Kumar K.V */ 60527444a072SMichal Hocko new_entry = kmem_cache_alloc(ext4_free_data_cachep, 60537444a072SMichal Hocko GFP_NOFS|__GFP_NOFAIL); 605418aadd47SBobi Jam new_entry->efd_start_cluster = bit; 605518aadd47SBobi Jam new_entry->efd_group = block_group; 605618aadd47SBobi Jam new_entry->efd_count = count_clusters; 605718aadd47SBobi Jam new_entry->efd_tid = handle->h_transaction->t_tid; 6058955ce5f5SAneesh Kumar K.V 60597a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, block_group); 606084130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 60617a2fcbf7SAneesh Kumar K.V ext4_mb_free_metadata(handle, &e4b, new_entry); 6062c9de560dSAlex Tomas } else { 60637a2fcbf7SAneesh Kumar K.V /* need to update group_info->bb_free and bitmap 60647a2fcbf7SAneesh Kumar K.V * with group lock held. generate_buddy look at 60657a2fcbf7SAneesh Kumar K.V * them with group lock_held 60667a2fcbf7SAneesh Kumar K.V */ 6067d71c1ae2SLukas Czerner if (test_opt(sb, DISCARD)) { 6068a0154344SDaeho Jeong err = ext4_issue_discard(sb, block_group, bit, count, 6069a0154344SDaeho Jeong NULL); 6070d71c1ae2SLukas Czerner if (err && err != -EOPNOTSUPP) 6071d71c1ae2SLukas Czerner ext4_msg(sb, KERN_WARNING, "discard request in" 6072a00b482bSRitesh Harjani " group:%u block:%d count:%lu failed" 6073d71c1ae2SLukas Czerner " with %d", block_group, bit, count, 6074d71c1ae2SLukas Czerner err); 60758f9ff189SLukas Czerner } else 60768f9ff189SLukas Czerner EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6077d71c1ae2SLukas Czerner 6078955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, block_group); 607984130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 608084130193STheodore Ts'o mb_free_blocks(inode, &e4b, bit, count_clusters); 6081c9de560dSAlex Tomas } 6082c9de560dSAlex Tomas 6083021b65bbSTheodore Ts'o ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6084021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, ret); 60851df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6086feb0ab32SDarrick J. Wong ext4_group_desc_csum_set(sb, block_group, gdp); 6087955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, block_group); 6088c9de560dSAlex Tomas 6089772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 6090772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 609190ba983fSTheodore Ts'o atomic64_add(count_clusters, 60927c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 60937c990728SSuraj Jitindar Singh flex_group)->free_clusters); 6094772cb7c8SJose R. Santos } 6095772cb7c8SJose R. 
Santos 60969fe67149SEric Whitney /* 60979fe67149SEric Whitney * on a bigalloc file system, defer the s_freeclusters_counter 60989fe67149SEric Whitney * update to the caller (ext4_remove_space and friends) so they 60999fe67149SEric Whitney * can determine if a cluster freed here should be rereserved 61009fe67149SEric Whitney */ 61019fe67149SEric Whitney if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 61027b415bf6SAditya Kali if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 61037b415bf6SAditya Kali dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 61049fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 61059fe67149SEric Whitney count_clusters); 61069fe67149SEric Whitney } 61077d734532SJan Kara 61087d734532SJan Kara ext4_mb_unload_buddy(&e4b); 61097b415bf6SAditya Kali 61107a2fcbf7SAneesh Kumar K.V /* We dirtied the bitmap block */ 61117a2fcbf7SAneesh Kumar K.V BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 61127a2fcbf7SAneesh Kumar K.V err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 61137a2fcbf7SAneesh Kumar K.V 6114c9de560dSAlex Tomas /* And the group descriptor block */ 6115c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 61160390131bSFrank Mayhar ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6117c9de560dSAlex Tomas if (!err) 6118c9de560dSAlex Tomas err = ret; 6119c9de560dSAlex Tomas 6120c9de560dSAlex Tomas if (overflow && !err) { 6121c9de560dSAlex Tomas block += count; 6122c9de560dSAlex Tomas count = overflow; 6123c9de560dSAlex Tomas put_bh(bitmap_bh); 61241e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 61251e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6126c9de560dSAlex Tomas goto do_more; 6127c9de560dSAlex Tomas } 6128c9de560dSAlex Tomas error_return: 6129c9de560dSAlex Tomas brelse(bitmap_bh); 6130c9de560dSAlex Tomas ext4_std_error(sb, err); 6131c9de560dSAlex Tomas return; 6132c9de560dSAlex Tomas } 61337360d173SLukas Czerner 61347360d173SLukas Czerner /** 61358ac3939dSRitesh Harjani * ext4_free_blocks() -- Free given blocks and update quota 61368ac3939dSRitesh Harjani * @handle: handle for this transaction 61378ac3939dSRitesh Harjani * @inode: inode 61388ac3939dSRitesh Harjani * @bh: optional buffer of the block to be freed 61398ac3939dSRitesh Harjani * @block: starting physical block to be freed 61408ac3939dSRitesh Harjani * @count: number of blocks to be freed 61418ac3939dSRitesh Harjani * @flags: flags used by ext4_free_blocks 61428ac3939dSRitesh Harjani */ 61438ac3939dSRitesh Harjani void ext4_free_blocks(handle_t *handle, struct inode *inode, 61448ac3939dSRitesh Harjani struct buffer_head *bh, ext4_fsblk_t block, 61458ac3939dSRitesh Harjani unsigned long count, int flags) 61468ac3939dSRitesh Harjani { 61478ac3939dSRitesh Harjani struct super_block *sb = inode->i_sb; 61488ac3939dSRitesh Harjani unsigned int overflow; 61498ac3939dSRitesh Harjani struct ext4_sb_info *sbi; 61508ac3939dSRitesh Harjani 61518ac3939dSRitesh Harjani sbi = EXT4_SB(sb); 61528ac3939dSRitesh Harjani 61538ac3939dSRitesh Harjani if (sbi->s_mount_state & EXT4_FC_REPLAY) { 61548ac3939dSRitesh Harjani ext4_free_blocks_simple(inode, block, count); 61558ac3939dSRitesh Harjani return; 61568ac3939dSRitesh Harjani } 61578ac3939dSRitesh Harjani 61588ac3939dSRitesh Harjani might_sleep(); 61598ac3939dSRitesh Harjani if (bh) { 61608ac3939dSRitesh Harjani if (block) 61618ac3939dSRitesh Harjani BUG_ON(block != bh->b_blocknr); 61628ac3939dSRitesh Harjani else 61638ac3939dSRitesh Harjani block = bh->b_blocknr; 
61648ac3939dSRitesh Harjani } 61658ac3939dSRitesh Harjani 61668ac3939dSRitesh Harjani if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 61678ac3939dSRitesh Harjani !ext4_inode_block_valid(inode, block, count)) { 61688ac3939dSRitesh Harjani ext4_error(sb, "Freeing blocks not in datazone - " 61698ac3939dSRitesh Harjani "block = %llu, count = %lu", block, count); 61708ac3939dSRitesh Harjani return; 61718ac3939dSRitesh Harjani } 61721e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 61738ac3939dSRitesh Harjani 61748ac3939dSRitesh Harjani ext4_debug("freeing block %llu\n", block); 61758ac3939dSRitesh Harjani trace_ext4_free_blocks(inode, block, count, flags); 61768ac3939dSRitesh Harjani 61778ac3939dSRitesh Harjani if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 61788ac3939dSRitesh Harjani BUG_ON(count > 1); 61798ac3939dSRitesh Harjani 61808ac3939dSRitesh Harjani ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 61818ac3939dSRitesh Harjani inode, bh, block); 61828ac3939dSRitesh Harjani } 61838ac3939dSRitesh Harjani 61848ac3939dSRitesh Harjani /* 61858ac3939dSRitesh Harjani * If the extent to be freed does not begin on a cluster 61868ac3939dSRitesh Harjani * boundary, we need to deal with partial clusters at the 61878ac3939dSRitesh Harjani * beginning and end of the extent. Normally we will free 61888ac3939dSRitesh Harjani * blocks at the beginning or the end unless we are explicitly 61898ac3939dSRitesh Harjani * requested to avoid doing so. 61908ac3939dSRitesh Harjani */ 61918ac3939dSRitesh Harjani overflow = EXT4_PBLK_COFF(sbi, block); 61928ac3939dSRitesh Harjani if (overflow) { 61938ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 61948ac3939dSRitesh Harjani overflow = sbi->s_cluster_ratio - overflow; 61958ac3939dSRitesh Harjani block += overflow; 61968ac3939dSRitesh Harjani if (count > overflow) 61978ac3939dSRitesh Harjani count -= overflow; 61988ac3939dSRitesh Harjani else 61998ac3939dSRitesh Harjani return; 62008ac3939dSRitesh Harjani } else { 62018ac3939dSRitesh Harjani block -= overflow; 62028ac3939dSRitesh Harjani count += overflow; 62038ac3939dSRitesh Harjani } 62041e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 62051e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 62068ac3939dSRitesh Harjani } 62078ac3939dSRitesh Harjani overflow = EXT4_LBLK_COFF(sbi, count); 62088ac3939dSRitesh Harjani if (overflow) { 62098ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 62108ac3939dSRitesh Harjani if (count > overflow) 62118ac3939dSRitesh Harjani count -= overflow; 62128ac3939dSRitesh Harjani else 62138ac3939dSRitesh Harjani return; 62148ac3939dSRitesh Harjani } else 62158ac3939dSRitesh Harjani count += sbi->s_cluster_ratio - overflow; 62161e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 62171e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 62188ac3939dSRitesh Harjani } 62198ac3939dSRitesh Harjani 62208ac3939dSRitesh Harjani if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 62218ac3939dSRitesh Harjani int i; 62228ac3939dSRitesh Harjani int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 62238ac3939dSRitesh Harjani 62248ac3939dSRitesh Harjani for (i = 0; i < count; i++) { 62258ac3939dSRitesh Harjani cond_resched(); 62268ac3939dSRitesh Harjani if (is_metadata) 62278ac3939dSRitesh Harjani bh = sb_find_get_block(inode->i_sb, block + i); 62288ac3939dSRitesh Harjani ext4_forget(handle, is_metadata, inode, bh, block + i); 62298ac3939dSRitesh Harjani } 62308ac3939dSRitesh 
Harjani } 62318ac3939dSRitesh Harjani 62328ac3939dSRitesh Harjani ext4_mb_clear_bb(handle, inode, block, count, flags); 62338ac3939dSRitesh Harjani return; 62348ac3939dSRitesh Harjani } 62358ac3939dSRitesh Harjani 62368ac3939dSRitesh Harjani /** 62370529155eSYongqiang Yang * ext4_group_add_blocks() -- Add given blocks to an existing group 62382846e820SAmir Goldstein * @handle: handle to this transaction 62392846e820SAmir Goldstein * @sb: super block 62404907cb7bSAnatol Pomozov * @block: start physical block to add to the block group 62412846e820SAmir Goldstein * @count: number of blocks to free 62422846e820SAmir Goldstein * 6243e73a347bSAmir Goldstein * This marks the blocks as free in the bitmap and buddy. 62442846e820SAmir Goldstein */ 6245cc7365dfSYongqiang Yang int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 62462846e820SAmir Goldstein ext4_fsblk_t block, unsigned long count) 62472846e820SAmir Goldstein { 62482846e820SAmir Goldstein struct buffer_head *bitmap_bh = NULL; 62492846e820SAmir Goldstein struct buffer_head *gd_bh; 62502846e820SAmir Goldstein ext4_group_t block_group; 62512846e820SAmir Goldstein ext4_grpblk_t bit; 62522846e820SAmir Goldstein unsigned int i; 62532846e820SAmir Goldstein struct ext4_group_desc *desc; 62542846e820SAmir Goldstein struct ext4_sb_info *sbi = EXT4_SB(sb); 6255e73a347bSAmir Goldstein struct ext4_buddy e4b; 6256d77147ffSharshads int err = 0, ret, free_clusters_count; 6257d77147ffSharshads ext4_grpblk_t clusters_freed; 6258d77147ffSharshads ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6259d77147ffSharshads ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6260d77147ffSharshads unsigned long cluster_count = last_cluster - first_cluster + 1; 62612846e820SAmir Goldstein 62622846e820SAmir Goldstein ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 62632846e820SAmir Goldstein 62644740b830SYongqiang Yang if (count == 0) 62654740b830SYongqiang Yang return 0; 62664740b830SYongqiang Yang 62672846e820SAmir Goldstein ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 62682846e820SAmir Goldstein /* 62692846e820SAmir Goldstein * Check to see if we are freeing blocks across a group 62702846e820SAmir Goldstein * boundary. 62712846e820SAmir Goldstein */ 6272d77147ffSharshads if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6273d77147ffSharshads ext4_warning(sb, "too many blocks added to group %u", 6274cc7365dfSYongqiang Yang block_group); 6275cc7365dfSYongqiang Yang err = -EINVAL; 62762846e820SAmir Goldstein goto error_return; 6277cc7365dfSYongqiang Yang } 62782cd05cc3STheodore Ts'o 62792846e820SAmir Goldstein bitmap_bh = ext4_read_block_bitmap(sb, block_group); 62809008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 62819008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 62829008a58eSDarrick J. 
Wong bitmap_bh = NULL; 62832846e820SAmir Goldstein goto error_return; 6284cc7365dfSYongqiang Yang } 6285cc7365dfSYongqiang Yang 62862846e820SAmir Goldstein desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6287cc7365dfSYongqiang Yang if (!desc) { 6288cc7365dfSYongqiang Yang err = -EIO; 62892846e820SAmir Goldstein goto error_return; 6290cc7365dfSYongqiang Yang } 62912846e820SAmir Goldstein 6292a00b482bSRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, count)) { 62932846e820SAmir Goldstein ext4_error(sb, "Adding blocks in system zones - " 62942846e820SAmir Goldstein "Block = %llu, count = %lu", 62952846e820SAmir Goldstein block, count); 6296cc7365dfSYongqiang Yang err = -EINVAL; 62972846e820SAmir Goldstein goto error_return; 62982846e820SAmir Goldstein } 62992846e820SAmir Goldstein 63002cd05cc3STheodore Ts'o BUFFER_TRACE(bitmap_bh, "getting write access"); 6301188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6302188c299eSJan Kara EXT4_JTR_NONE); 63032846e820SAmir Goldstein if (err) 63042846e820SAmir Goldstein goto error_return; 63052846e820SAmir Goldstein 63062846e820SAmir Goldstein /* 63072846e820SAmir Goldstein * We are about to modify some metadata. Call the journal APIs 63082846e820SAmir Goldstein * to unshare ->b_data if a currently-committing transaction is 63092846e820SAmir Goldstein * using it 63102846e820SAmir Goldstein */ 63112846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "get_write_access"); 6312188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 63132846e820SAmir Goldstein if (err) 63142846e820SAmir Goldstein goto error_return; 6315e73a347bSAmir Goldstein 6316d77147ffSharshads for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 63172846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "clear bit"); 6318e73a347bSAmir Goldstein if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 63192846e820SAmir Goldstein ext4_error(sb, "bit already cleared for block %llu", 63202846e820SAmir Goldstein (ext4_fsblk_t)(block + i)); 63212846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "bit already cleared"); 63222846e820SAmir Goldstein } else { 6323d77147ffSharshads clusters_freed++; 63242846e820SAmir Goldstein } 63252846e820SAmir Goldstein } 6326e73a347bSAmir Goldstein 6327e73a347bSAmir Goldstein err = ext4_mb_load_buddy(sb, block_group, &e4b); 6328e73a347bSAmir Goldstein if (err) 6329e73a347bSAmir Goldstein goto error_return; 6330e73a347bSAmir Goldstein 6331e73a347bSAmir Goldstein /* 6332e73a347bSAmir Goldstein * need to update group_info->bb_free and bitmap 6333e73a347bSAmir Goldstein * with group lock held. generate_buddy look at 6334e73a347bSAmir Goldstein * them with group lock_held 6335e73a347bSAmir Goldstein */ 63362846e820SAmir Goldstein ext4_lock_group(sb, block_group); 6337d77147ffSharshads mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6338d77147ffSharshads mb_free_blocks(NULL, &e4b, bit, cluster_count); 6339d77147ffSharshads free_clusters_count = clusters_freed + 6340d77147ffSharshads ext4_free_group_clusters(sb, desc); 6341d77147ffSharshads ext4_free_group_clusters_set(sb, desc, free_clusters_count); 63421df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, desc, bitmap_bh); 6343feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, block_group, desc); 63442846e820SAmir Goldstein ext4_unlock_group(sb, block_group); 634557042651STheodore Ts'o percpu_counter_add(&sbi->s_freeclusters_counter, 6346d77147ffSharshads clusters_freed); 63472846e820SAmir Goldstein 63482846e820SAmir Goldstein if (sbi->s_log_groups_per_flex) { 63492846e820SAmir Goldstein ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6350d77147ffSharshads atomic64_add(clusters_freed, 63517c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 63527c990728SSuraj Jitindar Singh flex_group)->free_clusters); 63532846e820SAmir Goldstein } 6354e73a347bSAmir Goldstein 6355e73a347bSAmir Goldstein ext4_mb_unload_buddy(&e4b); 63562846e820SAmir Goldstein 63572846e820SAmir Goldstein /* We dirtied the bitmap block */ 63582846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 63592846e820SAmir Goldstein err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 63602846e820SAmir Goldstein 63612846e820SAmir Goldstein /* And the group descriptor block */ 63622846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 63632846e820SAmir Goldstein ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 63642846e820SAmir Goldstein if (!err) 63652846e820SAmir Goldstein err = ret; 63662846e820SAmir Goldstein 63672846e820SAmir Goldstein error_return: 63682846e820SAmir Goldstein brelse(bitmap_bh); 63692846e820SAmir Goldstein ext4_std_error(sb, err); 6370cc7365dfSYongqiang Yang return err; 63712846e820SAmir Goldstein } 63722846e820SAmir Goldstein 63732846e820SAmir Goldstein /** 63747360d173SLukas Czerner * ext4_trim_extent -- function to TRIM one single free extent in the group 63757360d173SLukas Czerner * @sb: super block for the file system 63767360d173SLukas Czerner * @start: starting block of the free extent in the alloc. group 63777360d173SLukas Czerner * @count: number of blocks to TRIM 63787360d173SLukas Czerner * @e4b: ext4 buddy for the group 63797360d173SLukas Czerner * 63807360d173SLukas Czerner * Trim "count" blocks starting at "start" in the "group". To ensure that no 63817360d173SLukas Czerner * one will allocate those blocks, mark them as used in the buddy bitmap. This 63827360d173SLukas Czerner * must be called under the group lock. 63837360d173SLukas Czerner */ 6384bd2eea8dSWang Jianchao static int ext4_trim_extent(struct super_block *sb, 6385bd2eea8dSWang Jianchao int start, int count, struct ext4_buddy *e4b) 6386e2cbd587Sjon ernst __releases(bitlock) 6387e2cbd587Sjon ernst __acquires(bitlock) 63887360d173SLukas Czerner { 63897360d173SLukas Czerner struct ext4_free_extent ex; 6390bd2eea8dSWang Jianchao ext4_group_t group = e4b->bd_group; 6391d71c1ae2SLukas Czerner int ret = 0; 63927360d173SLukas Czerner 6393b3d4c2b1STao Ma trace_ext4_trim_extent(sb, group, start, count); 6394b3d4c2b1STao Ma 63957360d173SLukas Czerner assert_spin_locked(ext4_group_lock_ptr(sb, group)); 63967360d173SLukas Czerner 63977360d173SLukas Czerner ex.fe_start = start; 63987360d173SLukas Czerner ex.fe_group = group; 63997360d173SLukas Czerner ex.fe_len = count; 64007360d173SLukas Czerner 64017360d173SLukas Czerner /* 64027360d173SLukas Czerner * Mark blocks used, so no one can reuse them while 64037360d173SLukas Czerner * being trimmed.
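 * The group lock is dropped around the actual ext4_issue_discard()
 * call below, since the discard may sleep; the extent stays marked
 * in-use in the buddy until the lock is retaken and mb_free_blocks()
 * releases it again.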
64047360d173SLukas Czerner */ 64057360d173SLukas Czerner mb_mark_used(e4b, &ex); 64067360d173SLukas Czerner ext4_unlock_group(sb, group); 6407a0154344SDaeho Jeong ret = ext4_issue_discard(sb, group, start, count, NULL); 64087360d173SLukas Czerner ext4_lock_group(sb, group); 64097360d173SLukas Czerner mb_free_blocks(NULL, e4b, start, ex.fe_len); 6410d71c1ae2SLukas Czerner return ret; 64117360d173SLukas Czerner } 64127360d173SLukas Czerner 64136920b391SWang Jianchao static int ext4_try_to_trim_range(struct super_block *sb, 64146920b391SWang Jianchao struct ext4_buddy *e4b, ext4_grpblk_t start, 64156920b391SWang Jianchao ext4_grpblk_t max, ext4_grpblk_t minblocks) 6416a5fda113STheodore Ts'o __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6417a5fda113STheodore Ts'o __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 64186920b391SWang Jianchao { 64196920b391SWang Jianchao ext4_grpblk_t next, count, free_count; 64206920b391SWang Jianchao void *bitmap; 64216920b391SWang Jianchao 64226920b391SWang Jianchao bitmap = e4b->bd_bitmap; 64236920b391SWang Jianchao start = (e4b->bd_info->bb_first_free > start) ? 64246920b391SWang Jianchao e4b->bd_info->bb_first_free : start; 64256920b391SWang Jianchao count = 0; 64266920b391SWang Jianchao free_count = 0; 64276920b391SWang Jianchao 64286920b391SWang Jianchao while (start <= max) { 64296920b391SWang Jianchao start = mb_find_next_zero_bit(bitmap, max + 1, start); 64306920b391SWang Jianchao if (start > max) 64316920b391SWang Jianchao break; 64326920b391SWang Jianchao next = mb_find_next_bit(bitmap, max + 1, start); 64336920b391SWang Jianchao 64346920b391SWang Jianchao if ((next - start) >= minblocks) { 6435afcc4e32SLukas Bulwahn int ret = ext4_trim_extent(sb, start, next - start, e4b); 6436afcc4e32SLukas Bulwahn 64376920b391SWang Jianchao if (ret && ret != -EOPNOTSUPP) 64386920b391SWang Jianchao break; 64396920b391SWang Jianchao count += next - start; 64406920b391SWang Jianchao } 64416920b391SWang Jianchao free_count += next - start; 64426920b391SWang Jianchao start = next + 1; 64436920b391SWang Jianchao 64446920b391SWang Jianchao if (fatal_signal_pending(current)) { 64456920b391SWang Jianchao count = -ERESTARTSYS; 64466920b391SWang Jianchao break; 64476920b391SWang Jianchao } 64486920b391SWang Jianchao 64496920b391SWang Jianchao if (need_resched()) { 64506920b391SWang Jianchao ext4_unlock_group(sb, e4b->bd_group); 64516920b391SWang Jianchao cond_resched(); 64526920b391SWang Jianchao ext4_lock_group(sb, e4b->bd_group); 64536920b391SWang Jianchao } 64546920b391SWang Jianchao 64556920b391SWang Jianchao if ((e4b->bd_info->bb_free - free_count) < minblocks) 64566920b391SWang Jianchao break; 64576920b391SWang Jianchao } 64586920b391SWang Jianchao 64596920b391SWang Jianchao return count; 64606920b391SWang Jianchao } 64616920b391SWang Jianchao 64627360d173SLukas Czerner /** 64637360d173SLukas Czerner * ext4_trim_all_free -- function to trim all free space in alloc. group 64647360d173SLukas Czerner * @sb: super block for file system 646522612283STao Ma * @group: group to be trimmed 64667360d173SLukas Czerner * @start: first group block to examine 64677360d173SLukas Czerner * @max: last group block to examine 64687360d173SLukas Czerner * @minblocks: minimum extent block count 6469d63c00eaSDmitry Monakhov * @set_trimmed: set the trimmed flag if at least one block is trimmed 64707360d173SLukas Czerner * 64717360d173SLukas Czerner * ext4_trim_all_free walks through group's block bitmap searching for free 64727360d173SLukas Czerner * extents. 
When a free extent is found, mark it as used in the group buddy 64737360d173SLukas Czerner * bitmap. Then issue a TRIM command on this extent and free the extent in 6474b6f5558cSWang Jianchao * the group buddy bitmap. 64757360d173SLukas Czerner */ 64760b75a840SLukas Czerner static ext4_grpblk_t 647778944086SLukas Czerner ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 647778944086SLukas Czerner ext4_grpblk_t start, ext4_grpblk_t max, 6479d63c00eaSDmitry Monakhov ext4_grpblk_t minblocks, bool set_trimmed) 64807360d173SLukas Czerner { 648178944086SLukas Czerner struct ext4_buddy e4b; 64826920b391SWang Jianchao int ret; 64837360d173SLukas Czerner 6484b3d4c2b1STao Ma trace_ext4_trim_all_free(sb, group, start, max); 6485b3d4c2b1STao Ma 648678944086SLukas Czerner ret = ext4_mb_load_buddy(sb, group, &e4b); 648778944086SLukas Czerner if (ret) { 64889651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 64899651e6b2SKonstantin Khlebnikov ret, group); 649078944086SLukas Czerner return ret; 649178944086SLukas Czerner } 649228739eeaSLukas Czerner 649328739eeaSLukas Czerner ext4_lock_group(sb, group); 64943d56b8d2STao Ma 64956920b391SWang Jianchao if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || 64962327fb2eSLukas Czerner minblocks < EXT4_SB(sb)->s_last_trim_minblks) { 64976920b391SWang Jianchao ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); 6498d63c00eaSDmitry Monakhov if (ret >= 0 && set_trimmed) 64993d56b8d2STao Ma EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 65006920b391SWang Jianchao } else { 65016920b391SWang Jianchao ret = 0; 6502d71c1ae2SLukas Czerner } 65036920b391SWang Jianchao 65047360d173SLukas Czerner ext4_unlock_group(sb, group); 650578944086SLukas Czerner ext4_mb_unload_buddy(&e4b); 65067360d173SLukas Czerner 65077360d173SLukas Czerner ext4_debug("trimmed %d blocks in the group %d\n", 65086920b391SWang Jianchao ret, group); 65097360d173SLukas Czerner 6510d71c1ae2SLukas Czerner return ret; 65117360d173SLukas Czerner } 65127360d173SLukas Czerner 65137360d173SLukas Czerner /** 65147360d173SLukas Czerner * ext4_trim_fs() -- trim ioctl handle function 65157360d173SLukas Czerner * @sb: superblock for filesystem 65167360d173SLukas Czerner * @range: fstrim_range structure 65177360d173SLukas Czerner * 65187360d173SLukas Czerner * start: First Byte to trim 65197360d173SLukas Czerner * len: number of Bytes to trim from start 65207360d173SLukas Czerner * minlen: minimum extent length in Bytes 65217360d173SLukas Czerner * ext4_trim_fs goes through all allocation groups containing Bytes from 65227360d173SLukas Czerner * start to start+len. For each such group the ext4_trim_all_free function 65237360d173SLukas Czerner * is invoked to trim all free space.
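 *
 * Typical use (illustrative): the FITRIM ioctl hands this function a
 * struct fstrim_range such as { .start = 0, .len = ULLONG_MAX,
 * .minlen = 0 } to trim every free extent in the filesystem; on
 * return, range->len is rewritten with the number of bytes actually
 * trimmed.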
65247360d173SLukas Czerner */ 65257360d173SLukas Czerner int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 65267360d173SLukas Czerner { 65277b47ef52SChristoph Hellwig unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev); 652878944086SLukas Czerner struct ext4_group_info *grp; 6529913eed83SLukas Czerner ext4_group_t group, first_group, last_group; 65307137d7a4STheodore Ts'o ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 6531913eed83SLukas Czerner uint64_t start, end, minlen, trimmed = 0; 65320f0a25bfSJan Kara ext4_fsblk_t first_data_blk = 65330f0a25bfSJan Kara le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 6534913eed83SLukas Czerner ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 6535d63c00eaSDmitry Monakhov bool whole_group, eof = false; 65367360d173SLukas Czerner int ret = 0; 65377360d173SLukas Czerner 65387360d173SLukas Czerner start = range->start >> sb->s_blocksize_bits; 6539913eed83SLukas Czerner end = start + (range->len >> sb->s_blocksize_bits) - 1; 6540aaf7d73eSLukas Czerner minlen = EXT4_NUM_B2C(EXT4_SB(sb), 6541aaf7d73eSLukas Czerner range->minlen >> sb->s_blocksize_bits); 65427360d173SLukas Czerner 65435de35e8dSLukas Czerner if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 65445de35e8dSLukas Czerner start >= max_blks || 65455de35e8dSLukas Czerner range->len < sb->s_blocksize) 65467360d173SLukas Czerner return -EINVAL; 6547173b6e38SJan Kara /* No point in trying to trim less than the discard granularity */ 65487b47ef52SChristoph Hellwig if (range->minlen < discard_granularity) { 6549173b6e38SJan Kara minlen = EXT4_NUM_B2C(EXT4_SB(sb), 65507b47ef52SChristoph Hellwig discard_granularity >> sb->s_blocksize_bits); 6551173b6e38SJan Kara if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) 6552173b6e38SJan Kara goto out; 6553173b6e38SJan Kara } 6554d63c00eaSDmitry Monakhov if (end >= max_blks - 1) { 6555913eed83SLukas Czerner end = max_blks - 1; 6556d63c00eaSDmitry Monakhov eof = true; 6557d63c00eaSDmitry Monakhov } 6558913eed83SLukas Czerner if (end <= first_data_blk) 655922f10457STao Ma goto out; 6560913eed83SLukas Czerner if (start < first_data_blk) 65610f0a25bfSJan Kara start = first_data_blk; 65627360d173SLukas Czerner 6563913eed83SLukas Czerner /* Determine first and last group to examine based on start and end */ 65647360d173SLukas Czerner ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 65657137d7a4STheodore Ts'o &first_group, &first_cluster); 6566913eed83SLukas Czerner ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 65677137d7a4STheodore Ts'o &last_group, &last_cluster); 65687360d173SLukas Czerner 6569913eed83SLukas Czerner /* end now represents the last cluster to discard in this group */ 6570913eed83SLukas Czerner end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 6571d63c00eaSDmitry Monakhov whole_group = true; 65727360d173SLukas Czerner 65737360d173SLukas Czerner for (group = first_group; group <= last_group; group++) { 657478944086SLukas Czerner grp = ext4_get_group_info(sb, group); 657578944086SLukas Czerner /* We only do this if the grp has never been initialized */ 657678944086SLukas Czerner if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 6577adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, GFP_NOFS); 657878944086SLukas Czerner if (ret) 65797360d173SLukas Czerner break; 65807360d173SLukas Czerner } 65817360d173SLukas Czerner 65820ba08517STao Ma /* 6583913eed83SLukas Czerner * For all the groups except the last one, the last cluster will 6584913eed83SLukas Czerner * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need
/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	/* Clamp the range to the part of the group that can hold free bits. */
	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		/* Find the next run of free clusters: [start, next). */
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		/* Drop the group lock so the callback is free to sleep. */
		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
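/*
 * Illustrative sketch, not part of the mainline file: a minimal
 * ext4_mballoc_query_range_fn callback that tallies the free extents
 * reported for a group. The struct and function names are invented for
 * the example; the real consumer of this interface is the GETFSMAP code.
 * A caller would do something like:
 *
 *	struct ext4_example_free_stats stats = { 0, 0 };
 *	ret = ext4_mballoc_query_range(sb, group, 0,
 *			EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *			ext4_example_count_free, &stats);
 */
struct ext4_example_free_stats {
	unsigned long	extents;	/* number of free extents seen */
	ext4_grpblk_t	clusters;	/* total free clusters in them */
};

static int __maybe_unused
ext4_example_count_free(struct super_block *sb, ext4_group_t group,
			ext4_grpblk_t start, ext4_grpblk_t len, void *priv)
{
	struct ext4_example_free_stats *stats = priv;

	stats->extents++;
	stats->clusters += len;
	return 0;	/* a nonzero return would abort the walk */
}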