/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"

#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation.  Each block is represented by two
 * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap);


/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	unsigned int buflen = rbm->bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
		       "new_state=%d\n", rbm->offset, cur_state, new_state);
		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
		       (unsigned long long)rbm->rgd->rd_addr,
		       rbm->bi->bi_start);
		printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
		       rbm->bi->bi_offset, rbm->bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && rbm->bi->bi_clone) {
		byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}

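/*
 * Illustrative note (not from the original sources): with two bits per
 * block, the byte 0xE4 packs four blocks in states 0, 1, 2 and 3 (the
 * lowest-order bit pair is block 0, so the states read 11 10 01 00 from
 * the top bit down). valid_change[] above is indexed as
 * new_state * 4 + cur_state; e.g. freeing a data block
 * (valid_change[0 * 4 + 1]) is a legal transition, while "freeing" an
 * already free block (valid_change[0 * 4 + 0]) is not, and gfs2_setbit()
 * above treats such a transition as filesystem corruption.
 */
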
/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
	const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
	const u8 *byte;
	unsigned int bit;

	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}

/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0, 2, 4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}

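/*
 * A worked example of the search trick above (illustrative only): to
 * find a GFS2_BLKST_FREE (00) entry in the byte 0xE4 (entries read
 * 11 10 01 00 from the top), XOR with search[0] = all ones gives 0x1B;
 * 0x1B & (0x1B >> 1) = 0x09; masking with 0x55 leaves 0x01, i.e. only
 * entry zero is free. __ffs64() of the result, divided by two, recovers
 * the entry number, which is exactly what gfs2_bitfit() below does.
 */
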
/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Returns: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	u32 spoint = (goal << 1) & ((8 * sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while (tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8 * (len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}

/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;
	u32 x;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bi = rbm->rgd->rd_bits;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first block */
	if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->bi += x;
	return 0;
}

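/*
 * Illustrative example of the conversion above (the numbers assume a
 * 4KiB block size and the on-disk sizes of struct gfs2_rgrp (128 bytes)
 * and struct gfs2_meta_header (24 bytes); they are not normative):
 * sd_blocks_per_bitmap is (4096 - 24) * GFS2_NBBY = 16288, while the
 * first bitmap only holds (4096 - 128) * GFS2_NBBY = 15872 entries,
 * because the resource group header is larger than a plain metadata
 * header. Padding rbm->offset by (128 - 24) * GFS2_NBBY = 416 makes all
 * bitmaps look the same size, so a single divide/modulo by
 * sd_blocks_per_bitmap picks the right bitmap and offset within it.
 */
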
/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u64 block;
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		block = gfs2_rbm_to_block(rbm);
		if (gfs2_rbm_from_block(rbm, block + 1))
			return true;
	}

	return false;
}

/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;

	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		start = rbm.bi->bi_bh->b_data;
		if (rbm.bi->bi_clone)
			start = rbm.bi->bi_clone;
		end = start + rbm.bi->bi_bh->b_size;
		start += rbm.bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		gfs2_rbm_from_block(&rbm, block + chunk_size);
		n_unaligned = 3;
		if (ptr)
			break;
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}

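/*
 * Sketch of the scan above: since GFS2_BLKST_FREE is 0, a byte holding
 * four free blocks is simply 0x00, so once the position is aligned to a
 * four-block (one byte) boundary the extent can be measured a byte at a
 * time with memchr_inv(), which returns the first byte that is not
 * entirely free. Only the (at most three) blocks on either side of the
 * aligned region need the slower per-block gfs2_testbit() walk.
 */
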
/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

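/*
 * Note on @exact above (informational, not from the original comments):
 * when the descent stops without finding a child to follow, @cur is
 * only the nearest rgrp, not necessarily one whose span
 * [rd_addr, rd_data0 + rd_data) contains @blk. A non-exact lookup
 * returns that nearest rgrp anyway (useful for range operations such as
 * fitrim); an exact lookup re-checks the bounds and returns NULL for a
 * block that falls outside every resource group.
 */
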
/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}

/**
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
 */
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *res;

	if (ip->i_res)
		return 0;

	res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
	if (!res)
		return -ENOMEM;

	RB_CLEAR_NODE(&res->rs_node);

	down_write(&ip->i_rw_mutex);
	if (ip->i_res)
		kmem_cache_free(gfs2_rsrv_cachep, res);
	else
		ip->i_res = res;
	up_write(&ip->i_rw_mutex);
	return 0;
}

static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, "  B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		/* return reserved blocks to the rgrp and the ip */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		rs->rs_free = 0;
		clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
		smp_mb__after_clear_bit();
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(ip, rs);
		spin_unlock(&rgd->rd_rsspin);
	}
}

/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 *
 */
void gfs2_rs_delete(struct gfs2_inode *ip)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_res) {
		gfs2_rs_deltree(ip, ip->i_res);
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
		ip->i_res = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(NULL, rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			spin_lock(&gl->gl_spin);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_spin);
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO "  ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO "  ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO "  ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

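/*
 * A worked layout example for compute_bitstructs() (the numbers assume
 * a 4KiB block size and the on-disk sizes of struct gfs2_rgrp (128
 * bytes) and struct gfs2_meta_header (24 bytes); illustrative only):
 * an rgrp with rd_bitbytes = 10000 needs three blocks. Bitmap 0 lives
 * in the header block and holds 4096 - 128 = 3968 bytes, bitmap 1 holds
 * 4096 - 24 = 4072 bytes, and bitmap 2 holds the remaining 1960 bytes.
 * The final consistency check confirms that (bi_start + bi_len) *
 * GFS2_NBBY of the last bitmap equals rd_data.
 */
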
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}

static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}

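/*
 * Background note (a summary, not part of the original comments): the
 * rl_* fields above live in the lock value block (LVB), a small buffer
 * that the DLM passes around with the rgrp glock. Keeping a copy of the
 * interesting gfs2_rgrp fields there lets a node mounted with the
 * rgrplvb option see rgrp state without first reading the rgrp header
 * from disk; gfs2_rgrp_lvb_valid() guards against the two copies
 * drifting apart. Both sides hold big-endian on-disk values, which is
 * why they are compared without byte-order conversion.
 */
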
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}

static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}


/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually release the bitmaps via gfs2_rgrp_go_unlock().
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	}
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	} else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)) {
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}

int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
}

/**
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}

}

int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	struct block_device *bdev = sb->s_bdev;
	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
					   bdev_logical_block_size(sb->s_bdev);
	u64 blk;
	sector_t start = 0;
	sector_t nr_sects = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		blk *= sects_per_blk; /* convert to sectors */
		while (diff) {
			if (diff & 1) {
				if (nr_sects == 0)
					goto start_new_extent;
				if ((start + nr_sects) != blk) {
					if (nr_sects >= minlen) {
						rv = blkdev_issue_discard(bdev,
							start, nr_sects,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_sects;
					}
					nr_sects = 0;
start_new_extent:
					start = blk;
				}
				nr_sects += sects_per_blk;
			}
			diff >>= 2;
			blk += sects_per_blk;
		}
	}
	if (nr_sects >= minlen) {
		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_sects;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}

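/*
 * Worked example for the diff logic above (values are illustrative):
 * for a two-bit entry b, (b | (b >> 1)) has its low bit set unless the
 * entry is 00 (free). So ~(*orig | (*orig >> 1)) & (*clone |
 * (*clone >> 1)), masked with 0x55, flags entries that are free in the
 * live bitmap (@bh) but still allocated in the clone, i.e. blocks freed
 * since the clone was taken. E.g. *orig = 0xE0 (entries 11 10 00 00)
 * and *clone = 0xE4 (11 10 01 00) give diff = 0x04: only entry 1 was
 * just freed. With @bh == NULL, every currently free entry becomes a
 * trim candidate instead (the fitrim case below).
 */
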
/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	u64 start, end, minlen;
	unsigned int x;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;

	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);

	if (end <= start ||
	    minlen > sdp->sd_max_rg_data ||
	    start > rgd_end->rd_data0 + rgd_end->rd_data)
		return -EINVAL;

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
						&amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << 9;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}

/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}

/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @requested: number of blocks required for this allocation
 *
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   unsigned requested)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	int ret;

	extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
	extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		rs_insert(ip);
	}
}

/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}

/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		nblock = block + extlen;
		if (extlen < minext)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block)
		return 0;
fail:
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}

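/*
 * Summary of the interplay above (informational, not from the original
 * comments): gfs2_rbm_find() below proposes a candidate extent,
 * gfs2_reservation_check_and_update() measures its free length, and
 * gfs2_next_unreserved_block() walks rightwards through the
 * rs_cmp()-ordered tree, hopping from the end of one overlapping
 * reservation to the next, until [block, block + length) no longer
 * overlaps anyone else's reservation (the caller's own reservation is
 * deliberately ignored).
 */
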
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: The requested extent length (0 for a single block)
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	u32 initial_offset;
	u32 offset;
	u8 *buffer;
	int index;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while (1) {
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bi = rbm->bi;
		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bi - initial_bi);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			index = 0;
			rbm->offset = 0;
			n += (rbm->bi - initial_bi);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		index = rbm->bi - rbm->rgd->rd_bits;
		index++;
		if (index == rbm->rgd->rd_length)
			index = 0;
res_covered_end_of_rgrp:
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	return -ENOSPC;
}

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not end up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */

static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_lkstats *st;
	s64 r_dcount, l_dcount;
	s64 r_srttb, l_srttb;
	s64 srttb_diff;
	s64 sqr_diff;
	s64 var;

	preempt_disable();
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	r_srttb = st->stats[GFS2_LKS_SRTTB];
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0))
		return false;

	srttb_diff = r_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return ((srttb_diff < 0) && (sqr_diff > var));
}

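/*
 * In other words (a restatement, not part of the original comment): the
 * rgrp is considered congested when this glock's smoothed RTT exceeds
 * the per-CPU average for all rgrp glocks (srttb_diff < 0), and
 * (r_srttb - l_srttb)^2 is larger than the summed variances scaled by
 * 2, 4 or 8 depending on how little data we have and how desperate
 * (@loops) the caller already is.
 */
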
/**
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 *
 * Returns: True if the rgrp glock has been used within the time limit
 */
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
				    u64 msecs)
{
	u64 tdiff;

	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
				      rs->rs_rbm.rgd->rd_gl->gl_dstamp));

	return tdiff > (msecs * 1000 * 1000);
}

static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 skip;

	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
}

static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
	struct gfs2_rgrpd *rgd = *pos;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	rgd = gfs2_rgrpd_get_next(rgd);
	if (rgd == NULL)
		rgd = gfs2_rgrpd_get_first(sdp);
	*pos = rgd;
	if (rgd != begin) /* If we didn't wrap */
		return true;
	return false;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
 * @aflags: the allocation flags (e.g. GFS2_AF_ORLOV)
 *
 * Returns: errno
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;
	u32 skip = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, requested))
		return -EINVAL;
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
		flags = 0; /* Yoda: Do or do not. There is no try */
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_used_recently(rs, 1000) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto next_rgrp;
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (unlikely(error))
				return error;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, requested);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
			ip->i_rgd = rs->rs_rbm.rgd;
			return 0;
		}

		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(ip, rs);
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL);
	}

	return -ENOSPC;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}

/**
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);

	return gfs2_testbit(&rbm);
}


/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to the new state to change the blocks' allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			      unsigned int *n)
{
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	u64 block;
	int ret;

	*n = 1;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	block++;
	while (*n < elen) {
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
			break;
		gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
		(*n)++;
		block++;
	}
}

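/*
 * Background note on bi_clone for the function below (a summary, not
 * from the original comments): when blocks are freed, rgblk_free()
 * flips their state only in the live bitmap and lazily keeps a clone
 * that still shows them as allocated. Allocation-side searches use the
 * clone where it exists, so freed blocks are not handed out again
 * before the journal has been flushed and the clone resynced; this is
 * what keeps freeing safe against reuse mid-transaction.
 */
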
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	while (blen--) {
		gfs2_rbm_from_block(&rbm, bstart);
		bstart++;
		if (!rbm.bi->bi_clone) {
			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			       rbm.bi->bi_len);
		}
		gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
		gfs2_setbit(&rbm, false, new_state);
	}

	return rbm.rgd;
}

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 * Returns: 0 on success
 */

int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
	return 0;
}
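/*
 * Example of the summary line emitted by gfs2_rgrp_dump() above, with
 * made-up values for illustration:
 *
 *   R: n:123456 f:01 b:65490/65490 i:12 r:32
 *
 * n: is the rgrp's address, f: its rd_flags, b: the free block count
 * against the free_clone count, i: the dinode count and r: the number
 * of reserved blocks.  One further line per reservation in the rbtree
 * follows, printed by dump_rs().
 */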
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty,
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
		}
		__rs_deltree(ip, rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
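/*
 * Worked example for gfs2_adjust_reservation() (illustrative): take a
 * reservation with rs_free == 10 whose rs_rbm matches @rbm, and an
 * allocation of len == 4 blocks.  Then rlen == min(10, 4) == 4, so
 * rs_free drops to 6, rd_reserved drops by 4, and rs_rbm advances to
 * the block just past the allocated extent.  Had the allocation
 * emptied the reservation, or started somewhere other than rs_rbm,
 * __rs_deltree() would remove the reservation instead.
 */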
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 goal;
	u64 block; /* block, within the file system scope */
	int error;

	if (gfs2_rs_active(ip->i_res))
		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;

	gfs2_rbm_from_block(&rbm, goal);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);

	if (error == -ENOSPC) {
		gfs2_rbm_from_block(&rbm, goal);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, 1);

	/*
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
	 */
	if (ndata)
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
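/*
 * Minimal caller sketch for gfs2_alloc_blocks() (illustrative, error
 * handling elided): request a four-block data extent.  On success "n"
 * holds the extent length actually granted (1 <= n <= 4) and "bn" the
 * first block number; @generation may be NULL here since it is only
 * written for dinode allocations.
 *
 *	u64 bn;
 *	unsigned int n = 4;
 *	int error;
 *
 *	error = gfs2_alloc_blocks(ip, &bn, &n, false, NULL);
 *	if (error)
 *		return error;
 */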
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}

/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode
 *
 */

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */

int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
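/*
 * Usage sketch for gfs2_check_blk_type() (illustrative): confirming
 * that a block we believe holds an unlinked dinode has not been reused
 * before attempting to reclaim it.
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_UNLINKED);
 *	if (error == -ESTALE)
 *		// the block was freed or reallocated; give up
 */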
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
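/*
 * Lifecycle sketch for the rlist helpers above (illustrative): a
 * caller collects the rgrps covering the blocks it intends to modify,
 * locks them all, does its work, then tears the list down.
 *
 *	struct gfs2_rgrp_list rlist;
 *
 *	memset(&rlist, 0, sizeof(rlist));
 *	gfs2_rlist_add(ip, &rlist, block);	// once per block of interest
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	// ... modify the resource groups ...
 *	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	gfs2_rlist_free(&rlist);
 */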