// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
				  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/*
 * Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned base_clusters, num_clusters;
	int block_cluster = -1, inode_cluster;
	int itbl_cluster_start = -1, itbl_cluster_end = -1;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t end = start + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	ext4_fsblk_t itbl_blk_start, itbl_blk_end;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	base_clusters = ext4_num_base_meta_clusters(sb, block_group);
	num_clusters = base_clusters;

	/*
	 * Account and record the inode table clusters if any of them
	 * fall in this block group; otherwise the inode table cluster
	 * range stays [-1, -1] and won't overlap with the block/inode
	 * bitmap clusters accounted below.
	 */
	itbl_blk_start = ext4_inode_table(sb, gdp);
	itbl_blk_end = itbl_blk_start + sbi->s_itb_per_group - 1;
	if (itbl_blk_start <= end && itbl_blk_end >= start) {
		itbl_blk_start = max(itbl_blk_start, start);
		itbl_blk_end = min(itbl_blk_end, end);

		itbl_cluster_start = EXT4_B2C(sbi, itbl_blk_start - start);
		itbl_cluster_end = EXT4_B2C(sbi, itbl_blk_end - start);

		num_clusters += itbl_cluster_end - itbl_cluster_start + 1;
		/* check if border cluster is overlapped */
		if (itbl_cluster_start == base_clusters - 1)
			num_clusters--;
	}

	/*
	 * For the allocation bitmaps, we first need to check to see
	 * whether the block is in the block group. If it is, then check
	 * to see if the cluster is already accounted for in the clusters
	 * used for the base metadata and the inode table. Normally all
	 * of these blocks are contiguous, so the special case handling
	 * shouldn't be necessary except for *very* unusual file system
	 * layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster >= base_clusters &&
		    (block_cluster < itbl_cluster_start ||
		     block_cluster > itbl_cluster_end))
			num_clusters++;
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		/*
		 * Additional check if the inode bitmap is in the just
		 * accounted block_cluster
		 */
		if (inode_cluster != block_cluster &&
		    inode_cluster >= base_clusters &&
		    (inode_cluster < itbl_cluster_start ||
		     inode_cluster > itbl_cluster_end))
			num_clusters++;
	}

	return num_clusters;
}

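/*
 * Return the number of clusters in @block_group. Every group spans
 * EXT4_BLOCKS_PER_GROUP(sb) blocks except possibly the last one, which
 * may be shorter.
 */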
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
				  struct buffer_head *bh,
				  ext4_group_t block_group,
				  struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;

	ASSERT(buffer_locked(bh));

	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT |
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	if ((bit_max >> 3) >= bh->b_size)
		return -EFSCORRUPTED;

	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * the blocksize * 8 (which is the size of the bitmap), set the
	 * rest of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	return 0;
}

/* Return the number of free blocks in a block group. It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps. A file system contains several
 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block. Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block. The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 *
 * Return: pointer to the group descriptor, or NULL on failure.
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh_p;

	KUNIT_STATIC_STUB_REDIRECT(ext4_get_group_desc,
				   sb, block_group, bh);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
	/*
	 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
	 * the pointer being dereferenced won't be dereferenced again. By
	 * looking at the usage in add_new_gdb() the value isn't modified,
	 * just the pointer, and so it remains valid.
	 */
	if (!bh_p) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)bh_p->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = bh_p;
	return desc;
}

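/*
 * The bits in the block bitmap past the end of the group (i.e. past
 * num_clusters_in_group()) are expected to be set as padding. Return
 * the offset of the first padding bit found clear, or 0 if the padding
 * is valid.
 */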
static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
						    ext4_group_t block_group,
						    struct buffer_head *bh)
{
	ext4_grpblk_t next_zero_bit;
	unsigned long bitmap_size = sb->s_blocksize * 8;
	unsigned int offset = num_clusters_in_group(sb, block_group);

	if (bitmap_size <= offset)
		return 0;

	next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);

	return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
}

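/*
 * Return the in-memory group info structure for @group, or NULL if the
 * group number is out of range.
 */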
struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
					    ext4_group_t group)
{
	struct ext4_group_info **grp_info;
	long indexv, indexh;

	if (unlikely(group >= EXT4_SB(sb)->s_groups_count))
		return NULL;
	indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
	indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
	grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
	return grp_info[indexh];
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (ext4_has_feature_flex_bg(sb)) {
		/* With FLEX_BG, the inode/block bitmaps and the inode
		 * table blocks may not be in the group at all, so the
		 * bitmap validation is skipped for those groups;
		 * verifying them would require also reading the block
		 * group where the bitmaps actually live.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) >= max_bit)
		return blk;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) + 1,
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) + 1)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

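/*
 * Validate the block bitmap in @bh against @desc: verify the bitmap
 * checksum and check that the bits covering the group's own metadata
 * blocks are set. On failure the group is marked corrupted and an
 * error is returned. The result is cached in the buffer's verified
 * flag under the group lock, so the checks run only once per read.
 */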
static int ext4_validate_block_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t blk;
	struct ext4_group_info *grp;

	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return 0;
	if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, desc, bh) ||
		     ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EFSCORRUPTED;
	}
	blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
			   block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EFSCORRUPTED;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 * @ignore_locked:	ignore locked buffers
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table blocks are set in the bitmap.
 *
 * Return buffer_head on success or an ERR_PTR in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
			      bool ignore_locked)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;
	int err;

	KUNIT_STATIC_STUB_REDIRECT(ext4_read_block_bitmap_nowait,
				   sb, block_group, ignore_locked);

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);
	bitmap_blk = ext4_block_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid block bitmap block %llu in "
			   "block_group %u", bitmap_blk, block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_warning(sb, "Cannot get buffer for block bitmap - "
			     "block_group = %u, block_bitmap = %llu",
			     block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}

	if (ignore_locked && buffer_locked(bh)) {
		/* buffer under IO already, return if called for prefetching */
		put_bh(bh);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Block bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		if (err) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Failed to init block bitmap for group "
				   "%u: %d", block_group, err);
			goto out;
		}
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
	ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO |
			    (ignore_locked ? REQ_RAHEAD : 0),
			    ext4_end_bitmap_read);
	return bh;
verify:
	err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

/* Returns 0 on success, -errno on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	KUNIT_STATIC_STUB_REDIRECT(ext4_wait_block_bitmap,
				   sb, block_group, bh);

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return -EFSCORRUPTED;
	wait_on_buffer(bh);
	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
	if (!buffer_uptodate(bh)) {
		ext4_error_err(sb, EIO, "Cannot read block bitmap - "
			       "block_group = %u, block_bitmap = %llu",
			       block_group, (unsigned long long) bh->b_blocknr);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EIO;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}

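/*
 * Read and validate the block bitmap for @block_group, waiting for any
 * read I/O to complete. Return the buffer_head on success or an ERR_PTR
 * on failure.
 */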
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;
	int err;

	bh = ext4_read_block_bitmap_nowait(sb, block_group, false);
	if (IS_ERR(bh))
		return bh;
	err = ext4_wait_block_bitmap(sb, block_group, bh);
	if (err) {
		put_bh(bh);
		return ERR_PTR(err);
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster
	 * ratio, so we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope. Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

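/*
 * Try to claim @nclusters for an upcoming allocation. If enough free
 * clusters are available, account for them in the dirty-clusters
 * counter and return 0; otherwise return -ENOSPC.
 */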
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc() - check if a block allocation should be retried
 * @sb:			superblock
 * @retries:		number of retry attempts made so far
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned while
 * attempting to allocate blocks. If there's an indication that a pending
 * journal transaction might free some space and allow another attempt to
 * succeed, this function will wait for the current or committing transaction
 * to complete and then return TRUE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (!sbi->s_journal)
		return 0;

	if (++(*retries) > 3) {
		percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
		return 0;
	}

	/*
	 * if there's no indication that blocks are about to be freed it's
	 * possible we just missed a transaction commit that did so
	 */
	smp_mb();
	if (sbi->s_mb_free_pending == 0) {
		if (test_opt(sb, DISCARD)) {
			atomic_inc(&sbi->s_retry_alloc_pending);
			flush_work(&sbi->s_discard_work);
			atomic_dec(&sbi->s_retry_alloc_pending);
		}
		return ext4_has_free_clusters(sbi, 1, 0);
	}

	/*
	 * it's possible we've just missed a transaction commit here,
	 * so ignore the returned status
	 */
	ext4_debug("%s: retrying operation after ENOSPC\n", sb->s_id);
	(void) jbd2_journal_force_commit_nested(sbi->s_journal);
	return 1;
}

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @flags:	allocation flags (EXT4_MB_*)
 * @count:	pointer to total number of clusters needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks. We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

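/*
 * Return 1 if @a is b^k for some integer k >= 1, and 0 otherwise.
 * ext4_bg_has_super() uses this to locate the sparse_super backup
 * superblocks, which live in groups numbered by powers of 3, 5 and 7.
 */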
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group. Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (ext4_has_feature_sparse_super2(sb)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

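/*
 * With META_BG, each metagroup of EXT4_DESC_PER_BLOCK(sb) block groups
 * stores its single descriptor block in the first group of the
 * metagroup, with backups in the second and the last group. Return 1
 * for those three groups and 0 for all others.
 */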
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

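/*
 * Return the number of group descriptor blocks that accompany the
 * superblock backup in @group under the traditional (non-META_BG)
 * layout: 0 if the group carries no superblock backup, otherwise the
 * size of the descriptor table (limited to the part preceding the
 * first meta block group when META_BG is enabled).
 */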
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (ext4_has_feature_meta_bg(sb))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group. In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata blocks at
 * the beginning of a block group, including the reserved gdt blocks.
 */
unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
				       ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!ext4_has_feature_meta_bg(sb) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb_nometa(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb_meta(sb, block_group);
	}
	return num;
}

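/*
 * Same as ext4_num_base_meta_blocks(), but expressed in cluster units.
 */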
static unsigned int ext4_num_base_meta_clusters(struct super_block *sb,
						ext4_group_t block_group)
{
	return EXT4_NUM_B2C(EXT4_SB(sb),
			    ext4_num_base_meta_blocks(sb, block_group));
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode:	inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files. Regular
		 * files will start at the second block group. This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size - 1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (task_pid_nr(current) % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (task_pid_nr(current) % 16) *
			((last_block - bg_start) / 16);
	return bg_start + colour;
}