1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_ialloc.h" 17 #include "xfs_ialloc_btree.h" 18 #include "xfs_alloc.h" 19 #include "xfs_errortag.h" 20 #include "xfs_error.h" 21 #include "xfs_bmap.h" 22 #include "xfs_trans.h" 23 #include "xfs_buf_item.h" 24 #include "xfs_icreate_item.h" 25 #include "xfs_icache.h" 26 #include "xfs_trace.h" 27 #include "xfs_log.h" 28 #include "xfs_rmap.h" 29 #include "xfs_ag.h" 30 31 /* 32 * Lookup a record by ino in the btree given by cur. 33 */ 34 int /* error */ 35 xfs_inobt_lookup( 36 struct xfs_btree_cur *cur, /* btree cursor */ 37 xfs_agino_t ino, /* starting inode of chunk */ 38 xfs_lookup_t dir, /* <=, >=, == */ 39 int *stat) /* success/failure */ 40 { 41 cur->bc_rec.i.ir_startino = ino; 42 cur->bc_rec.i.ir_holemask = 0; 43 cur->bc_rec.i.ir_count = 0; 44 cur->bc_rec.i.ir_freecount = 0; 45 cur->bc_rec.i.ir_free = 0; 46 return xfs_btree_lookup(cur, dir, stat); 47 } 48 49 /* 50 * Update the record referred to by cur to the value given. 51 * This either works (return 0) or gets an EFSCORRUPTED error. 52 */ 53 STATIC int /* error */ 54 xfs_inobt_update( 55 struct xfs_btree_cur *cur, /* btree cursor */ 56 xfs_inobt_rec_incore_t *irec) /* btree record */ 57 { 58 union xfs_btree_rec rec; 59 60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); 61 if (xfs_has_sparseinodes(cur->bc_mp)) { 62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask); 63 rec.inobt.ir_u.sp.ir_count = irec->ir_count; 64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount; 65 } else { 66 /* ir_holemask/ir_count not supported on-disk */ 67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount); 68 } 69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free); 70 return xfs_btree_update(cur, &rec); 71 } 72 73 /* Convert on-disk btree record to incore inobt record. */ 74 void 75 xfs_inobt_btrec_to_irec( 76 struct xfs_mount *mp, 77 const union xfs_btree_rec *rec, 78 struct xfs_inobt_rec_incore *irec) 79 { 80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); 81 if (xfs_has_sparseinodes(mp)) { 82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask); 83 irec->ir_count = rec->inobt.ir_u.sp.ir_count; 84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount; 85 } else { 86 /* 87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded 88 * values for full inode chunks. 89 */ 90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL; 91 irec->ir_count = XFS_INODES_PER_CHUNK; 92 irec->ir_freecount = 93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount); 94 } 95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free); 96 } 97 98 /* Compute the freecount of an incore inode record. */ 99 uint8_t 100 xfs_inobt_rec_freecount( 101 const struct xfs_inobt_rec_incore *irec) 102 { 103 uint64_t realfree = irec->ir_free; 104 105 if (xfs_inobt_issparse(irec->ir_holemask)) 106 realfree &= xfs_inobt_irec_to_allocmask(irec); 107 return hweight64(realfree); 108 } 109 110 /* Simple checks for inode records. */ 111 xfs_failaddr_t 112 xfs_inobt_check_irec( 113 struct xfs_perag *pag, 114 const struct xfs_inobt_rec_incore *irec) 115 { 116 /* Record has to be properly aligned within the AG. 
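 * (Summary of the checks below: a record always spans XFS_INODES_PER_CHUNK
 * (64) inodes, so both ir_startino and ir_startino + 63 must be valid
 * aginos; ir_count must lie between XFS_INODES_PER_HOLEMASK_BIT (4) and 64;
 * and the freecount recomputed from ir_free/ir_holemask must agree with
 * ir_freecount.)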
*/ 117 if (!xfs_verify_agino(pag, irec->ir_startino)) 118 return __this_address; 119 if (!xfs_verify_agino(pag, 120 irec->ir_startino + XFS_INODES_PER_CHUNK - 1)) 121 return __this_address; 122 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT || 123 irec->ir_count > XFS_INODES_PER_CHUNK) 124 return __this_address; 125 if (irec->ir_freecount > XFS_INODES_PER_CHUNK) 126 return __this_address; 127 128 if (xfs_inobt_rec_freecount(irec) != irec->ir_freecount) 129 return __this_address; 130 131 return NULL; 132 } 133 134 static inline int 135 xfs_inobt_complain_bad_rec( 136 struct xfs_btree_cur *cur, 137 xfs_failaddr_t fa, 138 const struct xfs_inobt_rec_incore *irec) 139 { 140 struct xfs_mount *mp = cur->bc_mp; 141 142 xfs_warn(mp, 143 "%s Inode BTree record corruption in AG %d detected at %pS!", 144 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", 145 cur->bc_ag.pag->pag_agno, fa); 146 xfs_warn(mp, 147 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", 148 irec->ir_startino, irec->ir_count, irec->ir_freecount, 149 irec->ir_free, irec->ir_holemask); 150 return -EFSCORRUPTED; 151 } 152 153 /* 154 * Get the data from the pointed-to record. 155 */ 156 int 157 xfs_inobt_get_rec( 158 struct xfs_btree_cur *cur, 159 struct xfs_inobt_rec_incore *irec, 160 int *stat) 161 { 162 struct xfs_mount *mp = cur->bc_mp; 163 union xfs_btree_rec *rec; 164 xfs_failaddr_t fa; 165 int error; 166 167 error = xfs_btree_get_rec(cur, &rec, stat); 168 if (error || *stat == 0) 169 return error; 170 171 xfs_inobt_btrec_to_irec(mp, rec, irec); 172 fa = xfs_inobt_check_irec(cur->bc_ag.pag, irec); 173 if (fa) 174 return xfs_inobt_complain_bad_rec(cur, fa, irec); 175 176 return 0; 177 } 178 179 /* 180 * Insert a single inobt record. Cursor must already point to desired location. 181 */ 182 int 183 xfs_inobt_insert_rec( 184 struct xfs_btree_cur *cur, 185 uint16_t holemask, 186 uint8_t count, 187 int32_t freecount, 188 xfs_inofree_t free, 189 int *stat) 190 { 191 cur->bc_rec.i.ir_holemask = holemask; 192 cur->bc_rec.i.ir_count = count; 193 cur->bc_rec.i.ir_freecount = freecount; 194 cur->bc_rec.i.ir_free = free; 195 return xfs_btree_insert(cur, stat); 196 } 197 198 /* 199 * Insert records describing a newly allocated inode chunk into the inobt. 200 */ 201 STATIC int 202 xfs_inobt_insert( 203 struct xfs_perag *pag, 204 struct xfs_trans *tp, 205 struct xfs_buf *agbp, 206 xfs_agino_t newino, 207 xfs_agino_t newlen, 208 xfs_btnum_t btnum) 209 { 210 struct xfs_btree_cur *cur; 211 xfs_agino_t thisino; 212 int i; 213 int error; 214 215 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 216 217 for (thisino = newino; 218 thisino < newino + newlen; 219 thisino += XFS_INODES_PER_CHUNK) { 220 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); 221 if (error) { 222 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 223 return error; 224 } 225 ASSERT(i == 0); 226 227 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL, 228 XFS_INODES_PER_CHUNK, 229 XFS_INODES_PER_CHUNK, 230 XFS_INOBT_ALL_FREE, &i); 231 if (error) { 232 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 233 return error; 234 } 235 ASSERT(i == 1); 236 } 237 238 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 239 240 return 0; 241 } 242 243 /* 244 * Verify that the number of free inodes in the AGI is correct. 
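 * (On DEBUG builds this walks every record of a single-level inobt and
 * compares the summed ir_freecount against pag->pagi_freecount; on
 * non-DEBUG builds it compiles away to 0.)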
 */
#ifdef DEBUG
static int
xfs_check_agi_freecount(
	struct xfs_btree_cur	*cur)
{
	if (cur->bc_nlevels == 1) {
		xfs_inobt_rec_incore_t rec;
		int		freecount = 0;
		int		error;
		int		i;

		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
		if (error)
			return error;

		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				return error;

			if (i) {
				freecount += rec.ir_freecount;
				error = xfs_btree_increment(cur, 0, &i);
				if (error)
					return error;
			}
		} while (i == 1);

		if (!xfs_is_shutdown(cur->bc_mp))
			ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
	}
	return 0;
}
#else
#define xfs_check_agi_freecount(cur)	0
#endif

/*
 * Initialise a new set of inodes. When called without a transaction context
 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 * than logging them (which in a transaction context puts them into the AIL
 * for writeback rather than the xfsbufd queue).
 */
int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct list_head	*buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			nbufs;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;
	int			error;

	/*
	 * Loop over the new block(s), filling in the inodes. For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
	nbufs = length / M_IGEO(mp)->blocks_per_cluster;

	/*
	 * Figure out what version number to use in the inodes we create. If
	 * the superblock version has caught up to the one that supports the new
	 * inode format, then use the new inode version. Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
	 * For v3 inodes, we also need to write the inode number into the inode,
	 * so calculate the first inode number of the chunk here as
	 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inodes we log the entire buffer rather than just
	 * the inode cores.
	 */
	if (xfs_has_v3inodes(mp)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

		/*
		 * Log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
		 * need to log the physical changes to the inode buffers as log
		 * recovery will know what initialisation is actually needed.
		 * Hence we only need to log the buffers as "ordered" buffers so
		 * they track in the AIL as if they were physically logged.
		 */
		if (tp)
			xfs_icreate_log(tp, agno, agbno, icount,
					mp->m_sb.sb_inodesize, length, gen);
	} else
		version = 2;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
				(j * M_IGEO(mp)->blocks_per_cluster));
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
				mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
				XBF_UNMAPPED, &fbuf);
		if (error)
			return error;

		/* Initialize the inode buffers and log them appropriately. */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid,
					  &mp->m_sb.sb_meta_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else if (tp) {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
					  ioffset + XFS_DINODE_SIZE(mp) - 1);
			}
		}

		if (tp) {
			/*
			 * Mark the buffer as an inode allocation buffer so it
			 * sticks in AIL at the point of this allocation
			 * transaction. This ensures they are on disk before
			 * the tail of the log can be moved past this
			 * transaction (i.e. by preventing relogging from moving
			 * it forward in the log).
			 */
			xfs_trans_inode_alloc_buf(tp, fbuf);
			if (version == 3) {
				/*
				 * Mark the buffer as ordered so that it is
				 * not physically logged in the transaction but
				 * is still tracked in the AIL as part of the
				 * transaction and pins the log appropriately.
				 */
				xfs_trans_ordered_buf(tp, fbuf);
			}
		} else {
			fbuf->b_flags |= XBF_DONE;
			xfs_buf_delwri_queue(fbuf, buffer_list);
			xfs_buf_relse(fbuf);
		}
	}
	return 0;
}

/*
 * Align startino and allocmask for a recently allocated sparse chunk such that
 * they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * Background:
 *
 * When enabled, sparse inode support increases the inode alignment from cluster
 * size to inode chunk size. This means that the minimum range between two
 * non-adjacent inode records in the inobt is large enough for a full inode
 * record. This allows for cluster sized, cluster aligned block allocation
 * without needing to worry about whether the resulting inode record overlaps
 * with another record in the tree. Without this basic rule, we would have to
 * deal with the consequences of overlap by potentially undoing recent
 * allocations in the inode allocation codepath.
 *
 * Because of this alignment rule (which is enforced on mount), there are two
 * inobt possibilities for newly allocated sparse chunks. One is that the
 * aligned inode record for the chunk covers a range of inodes not already
 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 * other is that a record already exists at the aligned startino that considers
 * the newly allocated range as sparse. In the latter case, record content is
 * merged in hope that sparse inode chunks fill to full chunks over time.
440 */ 441 STATIC void 442 xfs_align_sparse_ino( 443 struct xfs_mount *mp, 444 xfs_agino_t *startino, 445 uint16_t *allocmask) 446 { 447 xfs_agblock_t agbno; 448 xfs_agblock_t mod; 449 int offset; 450 451 agbno = XFS_AGINO_TO_AGBNO(mp, *startino); 452 mod = agbno % mp->m_sb.sb_inoalignmt; 453 if (!mod) 454 return; 455 456 /* calculate the inode offset and align startino */ 457 offset = XFS_AGB_TO_AGINO(mp, mod); 458 *startino -= offset; 459 460 /* 461 * Since startino has been aligned down, left shift allocmask such that 462 * it continues to represent the same physical inodes relative to the 463 * new startino. 464 */ 465 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT; 466 } 467 468 /* 469 * Determine whether the source inode record can merge into the target. Both 470 * records must be sparse, the inode ranges must match and there must be no 471 * allocation overlap between the records. 472 */ 473 STATIC bool 474 __xfs_inobt_can_merge( 475 struct xfs_inobt_rec_incore *trec, /* tgt record */ 476 struct xfs_inobt_rec_incore *srec) /* src record */ 477 { 478 uint64_t talloc; 479 uint64_t salloc; 480 481 /* records must cover the same inode range */ 482 if (trec->ir_startino != srec->ir_startino) 483 return false; 484 485 /* both records must be sparse */ 486 if (!xfs_inobt_issparse(trec->ir_holemask) || 487 !xfs_inobt_issparse(srec->ir_holemask)) 488 return false; 489 490 /* both records must track some inodes */ 491 if (!trec->ir_count || !srec->ir_count) 492 return false; 493 494 /* can't exceed capacity of a full record */ 495 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK) 496 return false; 497 498 /* verify there is no allocation overlap */ 499 talloc = xfs_inobt_irec_to_allocmask(trec); 500 salloc = xfs_inobt_irec_to_allocmask(srec); 501 if (talloc & salloc) 502 return false; 503 504 return true; 505 } 506 507 /* 508 * Merge the source inode record into the target. The caller must call 509 * __xfs_inobt_can_merge() to ensure the merge is valid. 510 */ 511 STATIC void 512 __xfs_inobt_rec_merge( 513 struct xfs_inobt_rec_incore *trec, /* target */ 514 struct xfs_inobt_rec_incore *srec) /* src */ 515 { 516 ASSERT(trec->ir_startino == srec->ir_startino); 517 518 /* combine the counts */ 519 trec->ir_count += srec->ir_count; 520 trec->ir_freecount += srec->ir_freecount; 521 522 /* 523 * Merge the holemask and free mask. For both fields, 0 bits refer to 524 * allocated inodes. We combine the allocated ranges with bitwise AND. 525 */ 526 trec->ir_holemask &= srec->ir_holemask; 527 trec->ir_free &= srec->ir_free; 528 } 529 530 /* 531 * Insert a new sparse inode chunk into the associated inode btree. The inode 532 * record for the sparse chunk is pre-aligned to a startino that should match 533 * any pre-existing sparse inode record in the tree. This allows sparse chunks 534 * to fill over time. 535 * 536 * This function supports two modes of handling preexisting records depending on 537 * the merge flag. If merge is true, the provided record is merged with the 538 * existing record and updated in place. The merged record is returned in nrec. 539 * If merge is false, an existing record is replaced with the provided record. 540 * If no preexisting record exists, the provided record is always inserted. 541 * 542 * It is considered corruption if a merge is requested and not possible. Given 543 * the sparse inode alignment constraints, this should never happen. 
544 */ 545 STATIC int 546 xfs_inobt_insert_sprec( 547 struct xfs_perag *pag, 548 struct xfs_trans *tp, 549 struct xfs_buf *agbp, 550 int btnum, 551 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */ 552 bool merge) /* merge or replace */ 553 { 554 struct xfs_mount *mp = pag->pag_mount; 555 struct xfs_btree_cur *cur; 556 int error; 557 int i; 558 struct xfs_inobt_rec_incore rec; 559 560 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 561 562 /* the new record is pre-aligned so we know where to look */ 563 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i); 564 if (error) 565 goto error; 566 /* if nothing there, insert a new record and return */ 567 if (i == 0) { 568 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask, 569 nrec->ir_count, nrec->ir_freecount, 570 nrec->ir_free, &i); 571 if (error) 572 goto error; 573 if (XFS_IS_CORRUPT(mp, i != 1)) { 574 error = -EFSCORRUPTED; 575 goto error; 576 } 577 578 goto out; 579 } 580 581 /* 582 * A record exists at this startino. Merge or replace the record 583 * depending on what we've been asked to do. 584 */ 585 if (merge) { 586 error = xfs_inobt_get_rec(cur, &rec, &i); 587 if (error) 588 goto error; 589 if (XFS_IS_CORRUPT(mp, i != 1)) { 590 error = -EFSCORRUPTED; 591 goto error; 592 } 593 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) { 594 error = -EFSCORRUPTED; 595 goto error; 596 } 597 598 /* 599 * This should never fail. If we have coexisting records that 600 * cannot merge, something is seriously wrong. 601 */ 602 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) { 603 error = -EFSCORRUPTED; 604 goto error; 605 } 606 607 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino, 608 rec.ir_holemask, nrec->ir_startino, 609 nrec->ir_holemask); 610 611 /* merge to nrec to output the updated record */ 612 __xfs_inobt_rec_merge(nrec, &rec); 613 614 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino, 615 nrec->ir_holemask); 616 617 error = xfs_inobt_rec_check_count(mp, nrec); 618 if (error) 619 goto error; 620 } 621 622 error = xfs_inobt_update(cur, nrec); 623 if (error) 624 goto error; 625 626 out: 627 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 628 return 0; 629 error: 630 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 631 return error; 632 } 633 634 /* 635 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if 636 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so 637 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum 638 * inode count threshold, or the usual negative error code for other errors. 639 */ 640 STATIC int 641 xfs_ialloc_ag_alloc( 642 struct xfs_perag *pag, 643 struct xfs_trans *tp, 644 struct xfs_buf *agbp) 645 { 646 struct xfs_agi *agi; 647 struct xfs_alloc_arg args; 648 int error; 649 xfs_agino_t newino; /* new first inode's number */ 650 xfs_agino_t newlen; /* new number of inodes */ 651 int isaligned = 0; /* inode allocation at stripe */ 652 /* unit boundary */ 653 /* init. 
to full chunk */
	struct xfs_inobt_rec_incore rec;
	struct xfs_ino_geometry	*igeo = M_IGEO(tp->t_mountp);
	uint16_t		allocmask = (uint16_t) -1;
	int			do_sparse = 0;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = NULLFSBLOCK;
	args.oinfo = XFS_RMAP_OINFO_INODES;
	args.pag = pag;

#ifdef DEBUG
	/* randomly do sparse inode allocations */
	if (xfs_has_sparseinodes(tp->t_mountp) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks)
		do_sparse = get_random_u32_below(2);
#endif

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = igeo->ialloc_inos;
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
							igeo->maxicount)
		return -ENOSPC;
	args.minlen = args.maxlen = igeo->ialloc_blks;
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes. If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = agbp->b_addr;
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
		     igeo->ialloc_blks;
	if (do_sparse)
		goto sparse_alloc;
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1,
		 * however we need to take cluster alignment into account when
		 * fixing up the freelist. Use the minalignslop field to
		 * indicate that extra blocks might be required for alignment,
		 * but not to use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = igeo->cluster_align - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = igeo->inobt_maxlevels;
		error = xfs_alloc_vextent_exact_bno(&args,
				XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
						args.agbno));
		if (error)
			return error;

		/*
		 * This request might have dirtied the transaction if the AG can
		 * satisfy the request, but the exact block was not available.
		 * If the allocation did fail, subsequent requests will relax
		 * the exact agbno requirement and increase the alignment
		 * instead. It is critical that the total size of the request
		 * (len + alignment + slop) does not increase from this point
		 * on, so reset minalignslop to ensure it is not included in
		 * subsequent requests.
		 */
		args.minalignslop = 0;
	}

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (igeo->ialloc_align) {
			ASSERT(!xfs_has_noalign(args.mp));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = igeo->cluster_align;
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
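		 * (minleft keeps enough blocks free in the AG that a
		 * subsequent insert into the inode btree can split all the
		 * way to the root without running the transaction out of
		 * space.)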
757 */ 758 args.minleft = igeo->inobt_maxlevels; 759 error = xfs_alloc_vextent_near_bno(&args, 760 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 761 be32_to_cpu(agi->agi_root))); 762 if (error) 763 return error; 764 } 765 766 /* 767 * If stripe alignment is turned on, then try again with cluster 768 * alignment. 769 */ 770 if (isaligned && args.fsbno == NULLFSBLOCK) { 771 args.alignment = igeo->cluster_align; 772 error = xfs_alloc_vextent_near_bno(&args, 773 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 774 be32_to_cpu(agi->agi_root))); 775 if (error) 776 return error; 777 } 778 779 /* 780 * Finally, try a sparse allocation if the filesystem supports it and 781 * the sparse allocation length is smaller than a full chunk. 782 */ 783 if (xfs_has_sparseinodes(args.mp) && 784 igeo->ialloc_min_blks < igeo->ialloc_blks && 785 args.fsbno == NULLFSBLOCK) { 786 sparse_alloc: 787 args.alignment = args.mp->m_sb.sb_spino_align; 788 args.prod = 1; 789 790 args.minlen = igeo->ialloc_min_blks; 791 args.maxlen = args.minlen; 792 793 /* 794 * The inode record will be aligned to full chunk size. We must 795 * prevent sparse allocation from AG boundaries that result in 796 * invalid inode records, such as records that start at agbno 0 797 * or extend beyond the AG. 798 * 799 * Set min agbno to the first aligned, non-zero agbno and max to 800 * the last aligned agbno that is at least one full chunk from 801 * the end of the AG. 802 */ 803 args.min_agbno = args.mp->m_sb.sb_inoalignmt; 804 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, 805 args.mp->m_sb.sb_inoalignmt) - 806 igeo->ialloc_blks; 807 808 error = xfs_alloc_vextent_near_bno(&args, 809 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 810 be32_to_cpu(agi->agi_root))); 811 if (error) 812 return error; 813 814 newlen = XFS_AGB_TO_AGINO(args.mp, args.len); 815 ASSERT(newlen <= XFS_INODES_PER_CHUNK); 816 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1; 817 } 818 819 if (args.fsbno == NULLFSBLOCK) 820 return -EAGAIN; 821 822 ASSERT(args.len == args.minlen); 823 824 /* 825 * Stamp and write the inode buffers. 826 * 827 * Seed the new inode cluster with a random generation number. This 828 * prevents short-term reuse of generation numbers if a chunk is 829 * freed and then immediately reallocated. We use random numbers 830 * rather than a linear progression to prevent the next generation 831 * number from being easily guessable. 832 */ 833 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, 834 args.agbno, args.len, get_random_u32()); 835 836 if (error) 837 return error; 838 /* 839 * Convert the results. 840 */ 841 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno); 842 843 if (xfs_inobt_issparse(~allocmask)) { 844 /* 845 * We've allocated a sparse chunk. Align the startino and mask. 846 */ 847 xfs_align_sparse_ino(args.mp, &newino, &allocmask); 848 849 rec.ir_startino = newino; 850 rec.ir_holemask = ~allocmask; 851 rec.ir_count = newlen; 852 rec.ir_freecount = newlen; 853 rec.ir_free = XFS_INOBT_ALL_FREE; 854 855 /* 856 * Insert the sparse record into the inobt and allow for a merge 857 * if necessary. If a merge does occur, rec is updated to the 858 * merged record. 
859 */ 860 error = xfs_inobt_insert_sprec(pag, tp, agbp, 861 XFS_BTNUM_INO, &rec, true); 862 if (error == -EFSCORRUPTED) { 863 xfs_alert(args.mp, 864 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u", 865 XFS_AGINO_TO_INO(args.mp, pag->pag_agno, 866 rec.ir_startino), 867 rec.ir_holemask, rec.ir_count); 868 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE); 869 } 870 if (error) 871 return error; 872 873 /* 874 * We can't merge the part we've just allocated as for the inobt 875 * due to finobt semantics. The original record may or may not 876 * exist independent of whether physical inodes exist in this 877 * sparse chunk. 878 * 879 * We must update the finobt record based on the inobt record. 880 * rec contains the fully merged and up to date inobt record 881 * from the previous call. Set merge false to replace any 882 * existing record with this one. 883 */ 884 if (xfs_has_finobt(args.mp)) { 885 error = xfs_inobt_insert_sprec(pag, tp, agbp, 886 XFS_BTNUM_FINO, &rec, false); 887 if (error) 888 return error; 889 } 890 } else { 891 /* full chunk - insert new records to both btrees */ 892 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, 893 XFS_BTNUM_INO); 894 if (error) 895 return error; 896 897 if (xfs_has_finobt(args.mp)) { 898 error = xfs_inobt_insert(pag, tp, agbp, newino, 899 newlen, XFS_BTNUM_FINO); 900 if (error) 901 return error; 902 } 903 } 904 905 /* 906 * Update AGI counts and newino. 907 */ 908 be32_add_cpu(&agi->agi_count, newlen); 909 be32_add_cpu(&agi->agi_freecount, newlen); 910 pag->pagi_freecount += newlen; 911 pag->pagi_count += newlen; 912 agi->agi_newino = cpu_to_be32(newino); 913 914 /* 915 * Log allocation group header fields 916 */ 917 xfs_ialloc_log_agi(tp, agbp, 918 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); 919 /* 920 * Modify/log superblock values for inode count and inode free count. 921 */ 922 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); 923 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); 924 return 0; 925 } 926 927 /* 928 * Try to retrieve the next record to the left/right from the current one. 929 */ 930 STATIC int 931 xfs_ialloc_next_rec( 932 struct xfs_btree_cur *cur, 933 xfs_inobt_rec_incore_t *rec, 934 int *done, 935 int left) 936 { 937 int error; 938 int i; 939 940 if (left) 941 error = xfs_btree_decrement(cur, 0, &i); 942 else 943 error = xfs_btree_increment(cur, 0, &i); 944 945 if (error) 946 return error; 947 *done = !i; 948 if (i) { 949 error = xfs_inobt_get_rec(cur, rec, &i); 950 if (error) 951 return error; 952 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 953 return -EFSCORRUPTED; 954 } 955 956 return 0; 957 } 958 959 STATIC int 960 xfs_ialloc_get_rec( 961 struct xfs_btree_cur *cur, 962 xfs_agino_t agino, 963 xfs_inobt_rec_incore_t *rec, 964 int *done) 965 { 966 int error; 967 int i; 968 969 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); 970 if (error) 971 return error; 972 *done = !i; 973 if (i) { 974 error = xfs_inobt_get_rec(cur, rec, &i); 975 if (error) 976 return error; 977 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 978 return -EFSCORRUPTED; 979 } 980 981 return 0; 982 } 983 984 /* 985 * Return the offset of the first free inode in the record. If the inode chunk 986 * is sparsely allocated, we convert the record holemask to inode granularity 987 * and mask off the unallocated regions from the inode free mask. 
988 */ 989 STATIC int 990 xfs_inobt_first_free_inode( 991 struct xfs_inobt_rec_incore *rec) 992 { 993 xfs_inofree_t realfree; 994 995 /* if there are no holes, return the first available offset */ 996 if (!xfs_inobt_issparse(rec->ir_holemask)) 997 return xfs_lowbit64(rec->ir_free); 998 999 realfree = xfs_inobt_irec_to_allocmask(rec); 1000 realfree &= rec->ir_free; 1001 1002 return xfs_lowbit64(realfree); 1003 } 1004 1005 /* 1006 * Allocate an inode using the inobt-only algorithm. 1007 */ 1008 STATIC int 1009 xfs_dialloc_ag_inobt( 1010 struct xfs_perag *pag, 1011 struct xfs_trans *tp, 1012 struct xfs_buf *agbp, 1013 xfs_ino_t parent, 1014 xfs_ino_t *inop) 1015 { 1016 struct xfs_mount *mp = tp->t_mountp; 1017 struct xfs_agi *agi = agbp->b_addr; 1018 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1019 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1020 struct xfs_btree_cur *cur, *tcur; 1021 struct xfs_inobt_rec_incore rec, trec; 1022 xfs_ino_t ino; 1023 int error; 1024 int offset; 1025 int i, j; 1026 int searchdistance = 10; 1027 1028 ASSERT(xfs_perag_initialised_agi(pag)); 1029 ASSERT(xfs_perag_allows_inodes(pag)); 1030 ASSERT(pag->pagi_freecount > 0); 1031 1032 restart_pagno: 1033 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1034 /* 1035 * If pagino is 0 (this is the root inode allocation) use newino. 1036 * This must work because we've just allocated some. 1037 */ 1038 if (!pagino) 1039 pagino = be32_to_cpu(agi->agi_newino); 1040 1041 error = xfs_check_agi_freecount(cur); 1042 if (error) 1043 goto error0; 1044 1045 /* 1046 * If in the same AG as the parent, try to get near the parent. 1047 */ 1048 if (pagno == pag->pag_agno) { 1049 int doneleft; /* done, to the left */ 1050 int doneright; /* done, to the right */ 1051 1052 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 1053 if (error) 1054 goto error0; 1055 if (XFS_IS_CORRUPT(mp, i != 1)) { 1056 error = -EFSCORRUPTED; 1057 goto error0; 1058 } 1059 1060 error = xfs_inobt_get_rec(cur, &rec, &j); 1061 if (error) 1062 goto error0; 1063 if (XFS_IS_CORRUPT(mp, j != 1)) { 1064 error = -EFSCORRUPTED; 1065 goto error0; 1066 } 1067 1068 if (rec.ir_freecount > 0) { 1069 /* 1070 * Found a free inode in the same chunk 1071 * as the parent, done. 1072 */ 1073 goto alloc_inode; 1074 } 1075 1076 1077 /* 1078 * In the same AG as parent, but parent's chunk is full. 1079 */ 1080 1081 /* duplicate the cursor, search left & right simultaneously */ 1082 error = xfs_btree_dup_cursor(cur, &tcur); 1083 if (error) 1084 goto error0; 1085 1086 /* 1087 * Skip to last blocks looked up if same parent inode. 1088 */ 1089 if (pagino != NULLAGINO && 1090 pag->pagl_pagino == pagino && 1091 pag->pagl_leftrec != NULLAGINO && 1092 pag->pagl_rightrec != NULLAGINO) { 1093 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, 1094 &trec, &doneleft); 1095 if (error) 1096 goto error1; 1097 1098 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, 1099 &rec, &doneright); 1100 if (error) 1101 goto error1; 1102 } else { 1103 /* search left with tcur, back up 1 record */ 1104 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); 1105 if (error) 1106 goto error1; 1107 1108 /* search right with cur, go forward 1 record. */ 1109 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); 1110 if (error) 1111 goto error1; 1112 } 1113 1114 /* 1115 * Loop until we find an inode chunk with a free inode. 
1116 */ 1117 while (--searchdistance > 0 && (!doneleft || !doneright)) { 1118 int useleft; /* using left inode chunk this time */ 1119 1120 /* figure out the closer block if both are valid. */ 1121 if (!doneleft && !doneright) { 1122 useleft = pagino - 1123 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < 1124 rec.ir_startino - pagino; 1125 } else { 1126 useleft = !doneleft; 1127 } 1128 1129 /* free inodes to the left? */ 1130 if (useleft && trec.ir_freecount) { 1131 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1132 cur = tcur; 1133 1134 pag->pagl_leftrec = trec.ir_startino; 1135 pag->pagl_rightrec = rec.ir_startino; 1136 pag->pagl_pagino = pagino; 1137 rec = trec; 1138 goto alloc_inode; 1139 } 1140 1141 /* free inodes to the right? */ 1142 if (!useleft && rec.ir_freecount) { 1143 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1144 1145 pag->pagl_leftrec = trec.ir_startino; 1146 pag->pagl_rightrec = rec.ir_startino; 1147 pag->pagl_pagino = pagino; 1148 goto alloc_inode; 1149 } 1150 1151 /* get next record to check */ 1152 if (useleft) { 1153 error = xfs_ialloc_next_rec(tcur, &trec, 1154 &doneleft, 1); 1155 } else { 1156 error = xfs_ialloc_next_rec(cur, &rec, 1157 &doneright, 0); 1158 } 1159 if (error) 1160 goto error1; 1161 } 1162 1163 if (searchdistance <= 0) { 1164 /* 1165 * Not in range - save last search 1166 * location and allocate a new inode 1167 */ 1168 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1169 pag->pagl_leftrec = trec.ir_startino; 1170 pag->pagl_rightrec = rec.ir_startino; 1171 pag->pagl_pagino = pagino; 1172 1173 } else { 1174 /* 1175 * We've reached the end of the btree. because 1176 * we are only searching a small chunk of the 1177 * btree each search, there is obviously free 1178 * inodes closer to the parent inode than we 1179 * are now. restart the search again. 1180 */ 1181 pag->pagl_pagino = NULLAGINO; 1182 pag->pagl_leftrec = NULLAGINO; 1183 pag->pagl_rightrec = NULLAGINO; 1184 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1185 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1186 goto restart_pagno; 1187 } 1188 } 1189 1190 /* 1191 * In a different AG from the parent. 1192 * See if the most recently allocated block has any free. 1193 */ 1194 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1195 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1196 XFS_LOOKUP_EQ, &i); 1197 if (error) 1198 goto error0; 1199 1200 if (i == 1) { 1201 error = xfs_inobt_get_rec(cur, &rec, &j); 1202 if (error) 1203 goto error0; 1204 1205 if (j == 1 && rec.ir_freecount > 0) { 1206 /* 1207 * The last chunk allocated in the group 1208 * still has a free inode. 
1209 */ 1210 goto alloc_inode; 1211 } 1212 } 1213 } 1214 1215 /* 1216 * None left in the last group, search the whole AG 1217 */ 1218 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1219 if (error) 1220 goto error0; 1221 if (XFS_IS_CORRUPT(mp, i != 1)) { 1222 error = -EFSCORRUPTED; 1223 goto error0; 1224 } 1225 1226 for (;;) { 1227 error = xfs_inobt_get_rec(cur, &rec, &i); 1228 if (error) 1229 goto error0; 1230 if (XFS_IS_CORRUPT(mp, i != 1)) { 1231 error = -EFSCORRUPTED; 1232 goto error0; 1233 } 1234 if (rec.ir_freecount > 0) 1235 break; 1236 error = xfs_btree_increment(cur, 0, &i); 1237 if (error) 1238 goto error0; 1239 if (XFS_IS_CORRUPT(mp, i != 1)) { 1240 error = -EFSCORRUPTED; 1241 goto error0; 1242 } 1243 } 1244 1245 alloc_inode: 1246 offset = xfs_inobt_first_free_inode(&rec); 1247 ASSERT(offset >= 0); 1248 ASSERT(offset < XFS_INODES_PER_CHUNK); 1249 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1250 XFS_INODES_PER_CHUNK) == 0); 1251 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1252 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1253 rec.ir_freecount--; 1254 error = xfs_inobt_update(cur, &rec); 1255 if (error) 1256 goto error0; 1257 be32_add_cpu(&agi->agi_freecount, -1); 1258 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1259 pag->pagi_freecount--; 1260 1261 error = xfs_check_agi_freecount(cur); 1262 if (error) 1263 goto error0; 1264 1265 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1266 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1267 *inop = ino; 1268 return 0; 1269 error1: 1270 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 1271 error0: 1272 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1273 return error; 1274 } 1275 1276 /* 1277 * Use the free inode btree to allocate an inode based on distance from the 1278 * parent. Note that the provided cursor may be deleted and replaced. 1279 */ 1280 STATIC int 1281 xfs_dialloc_ag_finobt_near( 1282 xfs_agino_t pagino, 1283 struct xfs_btree_cur **ocur, 1284 struct xfs_inobt_rec_incore *rec) 1285 { 1286 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ 1287 struct xfs_btree_cur *rcur; /* right search cursor */ 1288 struct xfs_inobt_rec_incore rrec; 1289 int error; 1290 int i, j; 1291 1292 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); 1293 if (error) 1294 return error; 1295 1296 if (i == 1) { 1297 error = xfs_inobt_get_rec(lcur, rec, &i); 1298 if (error) 1299 return error; 1300 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) 1301 return -EFSCORRUPTED; 1302 1303 /* 1304 * See if we've landed in the parent inode record. The finobt 1305 * only tracks chunks with at least one free inode, so record 1306 * existence is enough. 1307 */ 1308 if (pagino >= rec->ir_startino && 1309 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) 1310 return 0; 1311 } 1312 1313 error = xfs_btree_dup_cursor(lcur, &rcur); 1314 if (error) 1315 return error; 1316 1317 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); 1318 if (error) 1319 goto error_rcur; 1320 if (j == 1) { 1321 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1322 if (error) 1323 goto error_rcur; 1324 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) { 1325 error = -EFSCORRUPTED; 1326 goto error_rcur; 1327 } 1328 } 1329 1330 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) { 1331 error = -EFSCORRUPTED; 1332 goto error_rcur; 1333 } 1334 if (i == 1 && j == 1) { 1335 /* 1336 * Both the left and right records are valid. Choose the closer 1337 * inode chunk to the target. 
1338 */ 1339 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > 1340 (rrec.ir_startino - pagino)) { 1341 *rec = rrec; 1342 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1343 *ocur = rcur; 1344 } else { 1345 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1346 } 1347 } else if (j == 1) { 1348 /* only the right record is valid */ 1349 *rec = rrec; 1350 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1351 *ocur = rcur; 1352 } else if (i == 1) { 1353 /* only the left record is valid */ 1354 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1355 } 1356 1357 return 0; 1358 1359 error_rcur: 1360 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); 1361 return error; 1362 } 1363 1364 /* 1365 * Use the free inode btree to find a free inode based on a newino hint. If 1366 * the hint is NULL, find the first free inode in the AG. 1367 */ 1368 STATIC int 1369 xfs_dialloc_ag_finobt_newino( 1370 struct xfs_agi *agi, 1371 struct xfs_btree_cur *cur, 1372 struct xfs_inobt_rec_incore *rec) 1373 { 1374 int error; 1375 int i; 1376 1377 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1378 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1379 XFS_LOOKUP_EQ, &i); 1380 if (error) 1381 return error; 1382 if (i == 1) { 1383 error = xfs_inobt_get_rec(cur, rec, &i); 1384 if (error) 1385 return error; 1386 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1387 return -EFSCORRUPTED; 1388 return 0; 1389 } 1390 } 1391 1392 /* 1393 * Find the first inode available in the AG. 1394 */ 1395 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1396 if (error) 1397 return error; 1398 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1399 return -EFSCORRUPTED; 1400 1401 error = xfs_inobt_get_rec(cur, rec, &i); 1402 if (error) 1403 return error; 1404 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1405 return -EFSCORRUPTED; 1406 1407 return 0; 1408 } 1409 1410 /* 1411 * Update the inobt based on a modification made to the finobt. Also ensure that 1412 * the records from both trees are equivalent post-modification. 1413 */ 1414 STATIC int 1415 xfs_dialloc_ag_update_inobt( 1416 struct xfs_btree_cur *cur, /* inobt cursor */ 1417 struct xfs_inobt_rec_incore *frec, /* finobt record */ 1418 int offset) /* inode offset */ 1419 { 1420 struct xfs_inobt_rec_incore rec; 1421 int error; 1422 int i; 1423 1424 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1425 if (error) 1426 return error; 1427 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1428 return -EFSCORRUPTED; 1429 1430 error = xfs_inobt_get_rec(cur, &rec, &i); 1431 if (error) 1432 return error; 1433 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1434 return -EFSCORRUPTED; 1435 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1436 XFS_INODES_PER_CHUNK) == 0); 1437 1438 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1439 rec.ir_freecount--; 1440 1441 if (XFS_IS_CORRUPT(cur->bc_mp, 1442 rec.ir_free != frec->ir_free || 1443 rec.ir_freecount != frec->ir_freecount)) 1444 return -EFSCORRUPTED; 1445 1446 return xfs_inobt_update(cur, &rec); 1447 } 1448 1449 /* 1450 * Allocate an inode using the free inode btree, if available. Otherwise, fall 1451 * back to the inobt search algorithm. 1452 * 1453 * The caller selected an AG for us, and made sure that free inodes are 1454 * available. 
1455 */ 1456 static int 1457 xfs_dialloc_ag( 1458 struct xfs_perag *pag, 1459 struct xfs_trans *tp, 1460 struct xfs_buf *agbp, 1461 xfs_ino_t parent, 1462 xfs_ino_t *inop) 1463 { 1464 struct xfs_mount *mp = tp->t_mountp; 1465 struct xfs_agi *agi = agbp->b_addr; 1466 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1467 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1468 struct xfs_btree_cur *cur; /* finobt cursor */ 1469 struct xfs_btree_cur *icur; /* inobt cursor */ 1470 struct xfs_inobt_rec_incore rec; 1471 xfs_ino_t ino; 1472 int error; 1473 int offset; 1474 int i; 1475 1476 if (!xfs_has_finobt(mp)) 1477 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop); 1478 1479 /* 1480 * If pagino is 0 (this is the root inode allocation) use newino. 1481 * This must work because we've just allocated some. 1482 */ 1483 if (!pagino) 1484 pagino = be32_to_cpu(agi->agi_newino); 1485 1486 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 1487 1488 error = xfs_check_agi_freecount(cur); 1489 if (error) 1490 goto error_cur; 1491 1492 /* 1493 * The search algorithm depends on whether we're in the same AG as the 1494 * parent. If so, find the closest available inode to the parent. If 1495 * not, consider the agi hint or find the first free inode in the AG. 1496 */ 1497 if (pag->pag_agno == pagno) 1498 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); 1499 else 1500 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); 1501 if (error) 1502 goto error_cur; 1503 1504 offset = xfs_inobt_first_free_inode(&rec); 1505 ASSERT(offset >= 0); 1506 ASSERT(offset < XFS_INODES_PER_CHUNK); 1507 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1508 XFS_INODES_PER_CHUNK) == 0); 1509 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1510 1511 /* 1512 * Modify or remove the finobt record. 1513 */ 1514 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1515 rec.ir_freecount--; 1516 if (rec.ir_freecount) 1517 error = xfs_inobt_update(cur, &rec); 1518 else 1519 error = xfs_btree_delete(cur, &i); 1520 if (error) 1521 goto error_cur; 1522 1523 /* 1524 * The finobt has now been updated appropriately. We haven't updated the 1525 * agi and superblock yet, so we can create an inobt cursor and validate 1526 * the original freecount. If all is well, make the equivalent update to 1527 * the inobt using the finobt record and offset information. 1528 */ 1529 icur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1530 1531 error = xfs_check_agi_freecount(icur); 1532 if (error) 1533 goto error_icur; 1534 1535 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset); 1536 if (error) 1537 goto error_icur; 1538 1539 /* 1540 * Both trees have now been updated. We must update the perag and 1541 * superblock before we can check the freecount for each btree. 
1542 */ 1543 be32_add_cpu(&agi->agi_freecount, -1); 1544 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1545 pag->pagi_freecount--; 1546 1547 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1548 1549 error = xfs_check_agi_freecount(icur); 1550 if (error) 1551 goto error_icur; 1552 error = xfs_check_agi_freecount(cur); 1553 if (error) 1554 goto error_icur; 1555 1556 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR); 1557 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1558 *inop = ino; 1559 return 0; 1560 1561 error_icur: 1562 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR); 1563 error_cur: 1564 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1565 return error; 1566 } 1567 1568 static int 1569 xfs_dialloc_roll( 1570 struct xfs_trans **tpp, 1571 struct xfs_buf *agibp) 1572 { 1573 struct xfs_trans *tp = *tpp; 1574 struct xfs_dquot_acct *dqinfo; 1575 int error; 1576 1577 /* 1578 * Hold to on to the agibp across the commit so no other allocation can 1579 * come in and take the free inodes we just allocated for our caller. 1580 */ 1581 xfs_trans_bhold(tp, agibp); 1582 1583 /* 1584 * We want the quota changes to be associated with the next transaction, 1585 * NOT this one. So, detach the dqinfo from this and attach it to the 1586 * next transaction. 1587 */ 1588 dqinfo = tp->t_dqinfo; 1589 tp->t_dqinfo = NULL; 1590 1591 error = xfs_trans_roll(&tp); 1592 1593 /* Re-attach the quota info that we detached from prev trx. */ 1594 tp->t_dqinfo = dqinfo; 1595 1596 /* 1597 * Join the buffer even on commit error so that the buffer is released 1598 * when the caller cancels the transaction and doesn't have to handle 1599 * this error case specially. 1600 */ 1601 xfs_trans_bjoin(tp, agibp); 1602 *tpp = tp; 1603 return error; 1604 } 1605 1606 static bool 1607 xfs_dialloc_good_ag( 1608 struct xfs_perag *pag, 1609 struct xfs_trans *tp, 1610 umode_t mode, 1611 int flags, 1612 bool ok_alloc) 1613 { 1614 struct xfs_mount *mp = tp->t_mountp; 1615 xfs_extlen_t ineed; 1616 xfs_extlen_t longest = 0; 1617 int needspace; 1618 int error; 1619 1620 if (!pag) 1621 return false; 1622 if (!xfs_perag_allows_inodes(pag)) 1623 return false; 1624 1625 if (!xfs_perag_initialised_agi(pag)) { 1626 error = xfs_ialloc_read_agi(pag, tp, NULL); 1627 if (error) 1628 return false; 1629 } 1630 1631 if (pag->pagi_freecount) 1632 return true; 1633 if (!ok_alloc) 1634 return false; 1635 1636 if (!xfs_perag_initialised_agf(pag)) { 1637 error = xfs_alloc_read_agf(pag, tp, flags, NULL); 1638 if (error) 1639 return false; 1640 } 1641 1642 /* 1643 * Check that there is enough free space for the file plus a chunk of 1644 * inodes if we need to allocate some. If this is the first pass across 1645 * the AGs, take into account the potential space needed for alignment 1646 * of inode chunks when checking the longest contiguous free space in 1647 * the AG - this prevents us from getting ENOSPC because we have free 1648 * space larger than ialloc_blks but alignment constraints prevent us 1649 * from using it. 1650 * 1651 * If we can't find an AG with space for full alignment slack to be 1652 * taken into account, we must be near ENOSPC in all AGs. Hence we 1653 * don't include alignment for the second pass and so if we fail 1654 * allocation due to alignment issues then it is most likely a real 1655 * ENOSPC condition. 1656 * 1657 * XXX(dgc): this calculation is now bogus thanks to the per-ag 1658 * reservations that xfs_alloc_fix_freelist() now does via 1659 * xfs_alloc_space_available(). 
When the AG fills up, pagf_freeblks will 1660 * be more than large enough for the check below to succeed, but 1661 * xfs_alloc_space_available() will fail because of the non-zero 1662 * metadata reservation and hence we won't actually be able to allocate 1663 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC 1664 * because of this. 1665 */ 1666 ineed = M_IGEO(mp)->ialloc_min_blks; 1667 if (flags && ineed > 1) 1668 ineed += M_IGEO(mp)->cluster_align; 1669 longest = pag->pagf_longest; 1670 if (!longest) 1671 longest = pag->pagf_flcount > 0; 1672 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); 1673 1674 if (pag->pagf_freeblks < needspace + ineed || longest < ineed) 1675 return false; 1676 return true; 1677 } 1678 1679 static int 1680 xfs_dialloc_try_ag( 1681 struct xfs_perag *pag, 1682 struct xfs_trans **tpp, 1683 xfs_ino_t parent, 1684 xfs_ino_t *new_ino, 1685 bool ok_alloc) 1686 { 1687 struct xfs_buf *agbp; 1688 xfs_ino_t ino; 1689 int error; 1690 1691 /* 1692 * Then read in the AGI buffer and recheck with the AGI buffer 1693 * lock held. 1694 */ 1695 error = xfs_ialloc_read_agi(pag, *tpp, &agbp); 1696 if (error) 1697 return error; 1698 1699 if (!pag->pagi_freecount) { 1700 if (!ok_alloc) { 1701 error = -EAGAIN; 1702 goto out_release; 1703 } 1704 1705 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp); 1706 if (error < 0) 1707 goto out_release; 1708 1709 /* 1710 * We successfully allocated space for an inode cluster in this 1711 * AG. Roll the transaction so that we can allocate one of the 1712 * new inodes. 1713 */ 1714 ASSERT(pag->pagi_freecount > 0); 1715 error = xfs_dialloc_roll(tpp, agbp); 1716 if (error) 1717 goto out_release; 1718 } 1719 1720 /* Allocate an inode in the found AG */ 1721 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino); 1722 if (!error) 1723 *new_ino = ino; 1724 return error; 1725 1726 out_release: 1727 xfs_trans_brelse(*tpp, agbp); 1728 return error; 1729 } 1730 1731 /* 1732 * Allocate an on-disk inode. 1733 * 1734 * Mode is used to tell whether the new inode is a directory and hence where to 1735 * locate it. The on-disk inode that is allocated will be returned in @new_ino 1736 * on success, otherwise an error will be set to indicate the failure (e.g. 1737 * -ENOSPC). 1738 */ 1739 int 1740 xfs_dialloc( 1741 struct xfs_trans **tpp, 1742 xfs_ino_t parent, 1743 umode_t mode, 1744 xfs_ino_t *new_ino) 1745 { 1746 struct xfs_mount *mp = (*tpp)->t_mountp; 1747 xfs_agnumber_t agno; 1748 int error = 0; 1749 xfs_agnumber_t start_agno; 1750 struct xfs_perag *pag; 1751 struct xfs_ino_geometry *igeo = M_IGEO(mp); 1752 bool ok_alloc = true; 1753 bool low_space = false; 1754 int flags; 1755 xfs_ino_t ino = NULLFSINO; 1756 1757 /* 1758 * Directories, symlinks, and regular files frequently allocate at least 1759 * one block, so factor that potential expansion when we examine whether 1760 * an AG has enough space for file creation. 1761 */ 1762 if (S_ISDIR(mode)) 1763 start_agno = (atomic_inc_return(&mp->m_agirotor) - 1) % 1764 mp->m_maxagi; 1765 else { 1766 start_agno = XFS_INO_TO_AGNO(mp, parent); 1767 if (start_agno >= mp->m_maxagi) 1768 start_agno = 0; 1769 } 1770 1771 /* 1772 * If we have already hit the ceiling of inode blocks then clear 1773 * ok_alloc so we scan all available agi structures for a free 1774 * inode. 1775 * 1776 * Read rough value of mp->m_icount by percpu_counter_read_positive, 1777 * which will sacrifice the preciseness but improve the performance. 
1778 */ 1779 if (igeo->maxicount && 1780 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos 1781 > igeo->maxicount) { 1782 ok_alloc = false; 1783 } 1784 1785 /* 1786 * If we are near to ENOSPC, we want to prefer allocation from AGs that 1787 * have free inodes in them rather than use up free space allocating new 1788 * inode chunks. Hence we turn off allocation for the first non-blocking 1789 * pass through the AGs if we are near ENOSPC to consume free inodes 1790 * that we can immediately allocate, but then we allow allocation on the 1791 * second pass if we fail to find an AG with free inodes in it. 1792 */ 1793 if (percpu_counter_read_positive(&mp->m_fdblocks) < 1794 mp->m_low_space[XFS_LOWSP_1_PCNT]) { 1795 ok_alloc = false; 1796 low_space = true; 1797 } 1798 1799 /* 1800 * Loop until we find an allocation group that either has free inodes 1801 * or in which we can allocate some inodes. Iterate through the 1802 * allocation groups upward, wrapping at the end. 1803 */ 1804 flags = XFS_ALLOC_FLAG_TRYLOCK; 1805 retry: 1806 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) { 1807 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) { 1808 error = xfs_dialloc_try_ag(pag, tpp, parent, 1809 &ino, ok_alloc); 1810 if (error != -EAGAIN) 1811 break; 1812 error = 0; 1813 } 1814 1815 if (xfs_is_shutdown(mp)) { 1816 error = -EFSCORRUPTED; 1817 break; 1818 } 1819 } 1820 if (pag) 1821 xfs_perag_rele(pag); 1822 if (error) 1823 return error; 1824 if (ino == NULLFSINO) { 1825 if (flags) { 1826 flags = 0; 1827 if (low_space) 1828 ok_alloc = true; 1829 goto retry; 1830 } 1831 return -ENOSPC; 1832 } 1833 *new_ino = ino; 1834 return 0; 1835 } 1836 1837 /* 1838 * Free the blocks of an inode chunk. We must consider that the inode chunk 1839 * might be sparse and only free the regions that are allocated as part of the 1840 * chunk. 1841 */ 1842 static int 1843 xfs_difree_inode_chunk( 1844 struct xfs_trans *tp, 1845 xfs_agnumber_t agno, 1846 struct xfs_inobt_rec_incore *rec) 1847 { 1848 struct xfs_mount *mp = tp->t_mountp; 1849 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, 1850 rec->ir_startino); 1851 int startidx, endidx; 1852 int nextbit; 1853 xfs_agblock_t agbno; 1854 int contigblk; 1855 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS); 1856 1857 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1858 /* not sparse, calculate extent info directly */ 1859 return xfs_free_extent_later(tp, 1860 XFS_AGB_TO_FSB(mp, agno, sagbno), 1861 M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES, 1862 XFS_AG_RESV_NONE, false); 1863 } 1864 1865 /* holemask is only 16-bits (fits in an unsigned long) */ 1866 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0])); 1867 holemask[0] = rec->ir_holemask; 1868 1869 /* 1870 * Find contiguous ranges of zeroes (i.e., allocated regions) in the 1871 * holemask and convert the start/end index of each range to an extent. 1872 * We start with the start and end index both pointing at the first 0 in 1873 * the mask. 1874 */ 1875 startidx = endidx = find_first_zero_bit(holemask, 1876 XFS_INOBT_HOLEMASK_BITS); 1877 nextbit = startidx + 1; 1878 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1879 int error; 1880 1881 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1882 nextbit); 1883 /* 1884 * If the next zero bit is contiguous, update the end index of 1885 * the current range and continue. 
1886 */ 1887 if (nextbit != XFS_INOBT_HOLEMASK_BITS && 1888 nextbit == endidx + 1) { 1889 endidx = nextbit; 1890 goto next; 1891 } 1892 1893 /* 1894 * nextbit is not contiguous with the current end index. Convert 1895 * the current start/end to an extent and add it to the free 1896 * list. 1897 */ 1898 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) / 1899 mp->m_sb.sb_inopblock; 1900 contigblk = ((endidx - startidx + 1) * 1901 XFS_INODES_PER_HOLEMASK_BIT) / 1902 mp->m_sb.sb_inopblock; 1903 1904 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1905 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1906 error = xfs_free_extent_later(tp, 1907 XFS_AGB_TO_FSB(mp, agno, agbno), contigblk, 1908 &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE, 1909 false); 1910 if (error) 1911 return error; 1912 1913 /* reset range to current bit and carry on... */ 1914 startidx = endidx = nextbit; 1915 1916 next: 1917 nextbit++; 1918 } 1919 return 0; 1920 } 1921 1922 STATIC int 1923 xfs_difree_inobt( 1924 struct xfs_perag *pag, 1925 struct xfs_trans *tp, 1926 struct xfs_buf *agbp, 1927 xfs_agino_t agino, 1928 struct xfs_icluster *xic, 1929 struct xfs_inobt_rec_incore *orec) 1930 { 1931 struct xfs_mount *mp = pag->pag_mount; 1932 struct xfs_agi *agi = agbp->b_addr; 1933 struct xfs_btree_cur *cur; 1934 struct xfs_inobt_rec_incore rec; 1935 int ilen; 1936 int error; 1937 int i; 1938 int off; 1939 1940 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 1941 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); 1942 1943 /* 1944 * Initialize the cursor. 1945 */ 1946 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1947 1948 error = xfs_check_agi_freecount(cur); 1949 if (error) 1950 goto error0; 1951 1952 /* 1953 * Look for the entry describing this inode. 1954 */ 1955 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1956 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", 1957 __func__, error); 1958 goto error0; 1959 } 1960 if (XFS_IS_CORRUPT(mp, i != 1)) { 1961 error = -EFSCORRUPTED; 1962 goto error0; 1963 } 1964 error = xfs_inobt_get_rec(cur, &rec, &i); 1965 if (error) { 1966 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1967 __func__, error); 1968 goto error0; 1969 } 1970 if (XFS_IS_CORRUPT(mp, i != 1)) { 1971 error = -EFSCORRUPTED; 1972 goto error0; 1973 } 1974 /* 1975 * Get the offset in the inode chunk. 1976 */ 1977 off = agino - rec.ir_startino; 1978 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); 1979 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off))); 1980 /* 1981 * Mark the inode free & increment the count. 1982 */ 1983 rec.ir_free |= XFS_INOBT_MASK(off); 1984 rec.ir_freecount++; 1985 1986 /* 1987 * When an inode chunk is free, it becomes eligible for removal. Don't 1988 * remove the chunk if the block size is large enough for multiple inode 1989 * chunks (that might not be free). 1990 */ 1991 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 1992 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1993 xic->deleted = true; 1994 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, 1995 rec.ir_startino); 1996 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1997 1998 /* 1999 * Remove the inode cluster from the AGI B+Tree, adjust the 2000 * AGI and Superblock inode counts, and mark the disk space 2001 * to be freed when the transaction is committed. 
2002 */ 2003 ilen = rec.ir_freecount; 2004 be32_add_cpu(&agi->agi_count, -ilen); 2005 be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); 2006 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); 2007 pag->pagi_freecount -= ilen - 1; 2008 pag->pagi_count -= ilen; 2009 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); 2010 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 2011 2012 if ((error = xfs_btree_delete(cur, &i))) { 2013 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", 2014 __func__, error); 2015 goto error0; 2016 } 2017 2018 error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 2019 if (error) 2020 goto error0; 2021 } else { 2022 xic->deleted = false; 2023 2024 error = xfs_inobt_update(cur, &rec); 2025 if (error) { 2026 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", 2027 __func__, error); 2028 goto error0; 2029 } 2030 2031 /* 2032 * Change the inode free counts and log the ag/sb changes. 2033 */ 2034 be32_add_cpu(&agi->agi_freecount, 1); 2035 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 2036 pag->pagi_freecount++; 2037 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); 2038 } 2039 2040 error = xfs_check_agi_freecount(cur); 2041 if (error) 2042 goto error0; 2043 2044 *orec = rec; 2045 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2046 return 0; 2047 2048 error0: 2049 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2050 return error; 2051 } 2052 2053 /* 2054 * Free an inode in the free inode btree. 2055 */ 2056 STATIC int 2057 xfs_difree_finobt( 2058 struct xfs_perag *pag, 2059 struct xfs_trans *tp, 2060 struct xfs_buf *agbp, 2061 xfs_agino_t agino, 2062 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ 2063 { 2064 struct xfs_mount *mp = pag->pag_mount; 2065 struct xfs_btree_cur *cur; 2066 struct xfs_inobt_rec_incore rec; 2067 int offset = agino - ibtrec->ir_startino; 2068 int error; 2069 int i; 2070 2071 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 2072 2073 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); 2074 if (error) 2075 goto error; 2076 if (i == 0) { 2077 /* 2078 * If the record does not exist in the finobt, we must have just 2079 * freed an inode in a previously fully allocated chunk. If not, 2080 * something is out of sync. 2081 */ 2082 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) { 2083 error = -EFSCORRUPTED; 2084 goto error; 2085 } 2086 2087 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask, 2088 ibtrec->ir_count, 2089 ibtrec->ir_freecount, 2090 ibtrec->ir_free, &i); 2091 if (error) 2092 goto error; 2093 ASSERT(i == 1); 2094 2095 goto out; 2096 } 2097 2098 /* 2099 * Read and update the existing record. We could just copy the ibtrec 2100 * across here, but that would defeat the purpose of having redundant 2101 * metadata. By making the modifications independently, we can catch 2102 * corruptions that we wouldn't see if we just copied from one record 2103 * to another. 2104 */ 2105 error = xfs_inobt_get_rec(cur, &rec, &i); 2106 if (error) 2107 goto error; 2108 if (XFS_IS_CORRUPT(mp, i != 1)) { 2109 error = -EFSCORRUPTED; 2110 goto error; 2111 } 2112 2113 rec.ir_free |= XFS_INOBT_MASK(offset); 2114 rec.ir_freecount++; 2115 2116 if (XFS_IS_CORRUPT(mp, 2117 rec.ir_free != ibtrec->ir_free || 2118 rec.ir_freecount != ibtrec->ir_freecount)) { 2119 error = -EFSCORRUPTED; 2120 goto error; 2121 } 2122 2123 /* 2124 * The content of inobt records should always match between the inobt 2125 * and finobt. 
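 * (The XFS_IS_CORRUPT check above enforces that invariant: the finobt copy is
 * recomputed independently and must come out identical to the inobt record
 * passed in, otherwise one of the two trees has gone stale.)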
The lifecycle of records in the finobt is different from 2126 * the inobt in that the finobt only tracks records with at least one 2127 * free inode. Hence, if all of the inodes are free and we aren't 2128 * keeping inode chunks permanently on disk, remove the record. 2129 * Otherwise, update the record with the new information. 2130 * 2131 * Note that we currently can't free chunks when the block size is large 2132 * enough for multiple chunks. Leave the finobt record to remain in sync 2133 * with the inobt. 2134 */ 2135 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 2136 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 2137 error = xfs_btree_delete(cur, &i); 2138 if (error) 2139 goto error; 2140 ASSERT(i == 1); 2141 } else { 2142 error = xfs_inobt_update(cur, &rec); 2143 if (error) 2144 goto error; 2145 } 2146 2147 out: 2148 error = xfs_check_agi_freecount(cur); 2149 if (error) 2150 goto error; 2151 2152 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2153 return 0; 2154 2155 error: 2156 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2157 return error; 2158 } 2159 2160 /* 2161 * Free disk inode. Carefully avoids touching the incore inode, all 2162 * manipulations incore are the caller's responsibility. 2163 * The on-disk inode is not changed by this operation, only the 2164 * btree (free inode mask) is changed. 2165 */ 2166 int 2167 xfs_difree( 2168 struct xfs_trans *tp, 2169 struct xfs_perag *pag, 2170 xfs_ino_t inode, 2171 struct xfs_icluster *xic) 2172 { 2173 /* REFERENCED */ 2174 xfs_agblock_t agbno; /* block number containing inode */ 2175 struct xfs_buf *agbp; /* buffer for allocation group header */ 2176 xfs_agino_t agino; /* allocation group inode number */ 2177 int error; /* error return value */ 2178 struct xfs_mount *mp = tp->t_mountp; 2179 struct xfs_inobt_rec_incore rec;/* btree record */ 2180 2181 /* 2182 * Break up inode number into its components. 2183 */ 2184 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) { 2185 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).", 2186 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno); 2187 ASSERT(0); 2188 return -EINVAL; 2189 } 2190 agino = XFS_INO_TO_AGINO(mp, inode); 2191 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2192 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", 2193 __func__, (unsigned long long)inode, 2194 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2195 ASSERT(0); 2196 return -EINVAL; 2197 } 2198 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2199 if (agbno >= mp->m_sb.sb_agblocks) { 2200 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 2201 __func__, agbno, mp->m_sb.sb_agblocks); 2202 ASSERT(0); 2203 return -EINVAL; 2204 } 2205 /* 2206 * Get the allocation group header. 2207 */ 2208 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2209 if (error) { 2210 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", 2211 __func__, error); 2212 return error; 2213 } 2214 2215 /* 2216 * Fix up the inode allocation btree. 2217 */ 2218 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec); 2219 if (error) 2220 goto error0; 2221 2222 /* 2223 * Fix up the free inode btree. 
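 * The record returned by xfs_difree_inobt() above already has the freed bit
 * set, so the finobt code can cross-check it against its own copy.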
2224 */ 2225 if (xfs_has_finobt(mp)) { 2226 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec); 2227 if (error) 2228 goto error0; 2229 } 2230 2231 return 0; 2232 2233 error0: 2234 return error; 2235 } 2236 2237 STATIC int 2238 xfs_imap_lookup( 2239 struct xfs_perag *pag, 2240 struct xfs_trans *tp, 2241 xfs_agino_t agino, 2242 xfs_agblock_t agbno, 2243 xfs_agblock_t *chunk_agbno, 2244 xfs_agblock_t *offset_agbno, 2245 int flags) 2246 { 2247 struct xfs_mount *mp = pag->pag_mount; 2248 struct xfs_inobt_rec_incore rec; 2249 struct xfs_btree_cur *cur; 2250 struct xfs_buf *agbp; 2251 int error; 2252 int i; 2253 2254 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2255 if (error) { 2256 xfs_alert(mp, 2257 "%s: xfs_ialloc_read_agi() returned error %d, agno %d", 2258 __func__, error, pag->pag_agno); 2259 return error; 2260 } 2261 2262 /* 2263 * Lookup the inode record for the given agino. If the record cannot be 2264 * found, then it's an invalid inode number and we should abort. Once 2265 * we have a record, we need to ensure it contains the inode number 2266 * we are looking up. 2267 */ 2268 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 2269 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); 2270 if (!error) { 2271 if (i) 2272 error = xfs_inobt_get_rec(cur, &rec, &i); 2273 if (!error && i == 0) 2274 error = -EINVAL; 2275 } 2276 2277 xfs_trans_brelse(tp, agbp); 2278 xfs_btree_del_cursor(cur, error); 2279 if (error) 2280 return error; 2281 2282 /* check that the returned record contains the required inode */ 2283 if (rec.ir_startino > agino || 2284 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino) 2285 return -EINVAL; 2286 2287 /* for untrusted inodes check it is allocated first */ 2288 if ((flags & XFS_IGET_UNTRUSTED) && 2289 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 2290 return -EINVAL; 2291 2292 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 2293 *offset_agbno = agbno - *chunk_agbno; 2294 return 0; 2295 } 2296 2297 /* 2298 * Return the location of the inode in imap, for mapping it into a buffer. 2299 */ 2300 int 2301 xfs_imap( 2302 struct xfs_perag *pag, 2303 struct xfs_trans *tp, 2304 xfs_ino_t ino, /* inode to locate */ 2305 struct xfs_imap *imap, /* location map structure */ 2306 uint flags) /* flags for inode btree lookup */ 2307 { 2308 struct xfs_mount *mp = pag->pag_mount; 2309 xfs_agblock_t agbno; /* block number of inode in the alloc group */ 2310 xfs_agino_t agino; /* inode number within alloc group */ 2311 xfs_agblock_t chunk_agbno; /* first block in inode chunk */ 2312 xfs_agblock_t cluster_agbno; /* first block in inode cluster */ 2313 int error; /* error code */ 2314 int offset; /* index of inode in its buffer */ 2315 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */ 2316 2317 ASSERT(ino != NULLFSINO); 2318 2319 /* 2320 * Split up the inode number into its parts. 2321 */ 2322 agino = XFS_INO_TO_AGINO(mp, ino); 2323 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2324 if (agbno >= mp->m_sb.sb_agblocks || 2325 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2326 error = -EINVAL; 2327 #ifdef DEBUG 2328 /* 2329 * Don't output diagnostic information for untrusted inodes 2330 * as they can be invalid without implying corruption. 
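 * Bulkstat and open-by-handle callers pass inode numbers straight from
 * userspace, so a bad value here is an expected lookup failure rather than a
 * sign of on-disk damage.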
2331 */ 2332 if (flags & XFS_IGET_UNTRUSTED) 2333 return error; 2334 if (agbno >= mp->m_sb.sb_agblocks) { 2335 xfs_alert(mp, 2336 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", 2337 __func__, (unsigned long long)agbno, 2338 (unsigned long)mp->m_sb.sb_agblocks); 2339 } 2340 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2341 xfs_alert(mp, 2342 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", 2343 __func__, ino, 2344 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2345 } 2346 xfs_stack_trace(); 2347 #endif /* DEBUG */ 2348 return error; 2349 } 2350 2351 /* 2352 * For bulkstat and handle lookups, we have an untrusted inode number 2353 * that we have to verify is valid. We cannot do this just by reading 2354 * the inode buffer as it may have been unlinked and removed leaving 2355 * inodes in stale state on disk. Hence we have to do a btree lookup 2356 * in all cases where an untrusted inode number is passed. 2357 */ 2358 if (flags & XFS_IGET_UNTRUSTED) { 2359 error = xfs_imap_lookup(pag, tp, agino, agbno, 2360 &chunk_agbno, &offset_agbno, flags); 2361 if (error) 2362 return error; 2363 goto out_map; 2364 } 2365 2366 /* 2367 * If the inode cluster size is the same as the blocksize or 2368 * smaller we get to the buffer by simple arithmetic. 2369 */ 2370 if (M_IGEO(mp)->blocks_per_cluster == 1) { 2371 offset = XFS_INO_TO_OFFSET(mp, ino); 2372 ASSERT(offset < mp->m_sb.sb_inopblock); 2373 2374 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno); 2375 imap->im_len = XFS_FSB_TO_BB(mp, 1); 2376 imap->im_boffset = (unsigned short)(offset << 2377 mp->m_sb.sb_inodelog); 2378 return 0; 2379 } 2380 2381 /* 2382 * If the inode chunks are aligned then use simple math to 2383 * find the location. Otherwise we have to do a btree 2384 * lookup to find the location. 2385 */ 2386 if (M_IGEO(mp)->inoalign_mask) { 2387 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask; 2388 chunk_agbno = agbno - offset_agbno; 2389 } else { 2390 error = xfs_imap_lookup(pag, tp, agino, agbno, 2391 &chunk_agbno, &offset_agbno, flags); 2392 if (error) 2393 return error; 2394 } 2395 2396 out_map: 2397 ASSERT(agbno >= chunk_agbno); 2398 cluster_agbno = chunk_agbno + 2399 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) * 2400 M_IGEO(mp)->blocks_per_cluster); 2401 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + 2402 XFS_INO_TO_OFFSET(mp, ino); 2403 2404 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno); 2405 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); 2406 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog); 2407 2408 /* 2409 * If the inode number maps to a block outside the bounds 2410 * of the file system then return an error rather than calling 2411 * read_buf and panicking when we get an error from the 2412 * driver. 2413 */ 2414 if ((imap->im_blkno + imap->im_len) > 2415 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 2416 xfs_alert(mp, 2417 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", 2418 __func__, (unsigned long long) imap->im_blkno, 2419 (unsigned long long) imap->im_len, 2420 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 2421 return -EINVAL; 2422 } 2423 return 0; 2424 } 2425 2426 /* 2427 * Log specified fields for the ag hdr (inode section). The growth of the agi 2428 * structure over time requires that we interpret the buffer as two logical 2429 * regions delineated by the end of the unlinked list. This is due to the size 2430 * of the hash table and its location in the middle of the agi.
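 * (agi_unlinked is an array of XFS_AGI_UNLINKED_BUCKETS list heads, 64 in
 * current formats, so logging across it would drag a few hundred extra bytes
 * into every transaction.)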
2431 * 2432 * For example, a request to log a field before agi_unlinked and a field after 2433 * agi_unlinked could cause us to log the entire hash table and use an excessive 2434 * amount of log space. To avoid this behavior, log the region up through 2435 * agi_unlinked in one call and the region after agi_unlinked through the end of 2436 * the structure in another. 2437 */ 2438 void 2439 xfs_ialloc_log_agi( 2440 struct xfs_trans *tp, 2441 struct xfs_buf *bp, 2442 uint32_t fields) 2443 { 2444 int first; /* first byte number */ 2445 int last; /* last byte number */ 2446 static const short offsets[] = { /* field starting offsets */ 2447 /* keep in sync with bit definitions */ 2448 offsetof(xfs_agi_t, agi_magicnum), 2449 offsetof(xfs_agi_t, agi_versionnum), 2450 offsetof(xfs_agi_t, agi_seqno), 2451 offsetof(xfs_agi_t, agi_length), 2452 offsetof(xfs_agi_t, agi_count), 2453 offsetof(xfs_agi_t, agi_root), 2454 offsetof(xfs_agi_t, agi_level), 2455 offsetof(xfs_agi_t, agi_freecount), 2456 offsetof(xfs_agi_t, agi_newino), 2457 offsetof(xfs_agi_t, agi_dirino), 2458 offsetof(xfs_agi_t, agi_unlinked), 2459 offsetof(xfs_agi_t, agi_free_root), 2460 offsetof(xfs_agi_t, agi_free_level), 2461 offsetof(xfs_agi_t, agi_iblocks), 2462 sizeof(xfs_agi_t) 2463 }; 2464 #ifdef DEBUG 2465 struct xfs_agi *agi = bp->b_addr; 2466 2467 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 2468 #endif 2469 2470 /* 2471 * Compute byte offsets for the first and last fields in the first 2472 * region and log the agi buffer. This only logs up through 2473 * agi_unlinked. 2474 */ 2475 if (fields & XFS_AGI_ALL_BITS_R1) { 2476 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1, 2477 &first, &last); 2478 xfs_trans_log_buf(tp, bp, first, last); 2479 } 2480 2481 /* 2482 * Mask off the bits in the first region and calculate the first and 2483 * last field offsets for any bits in the second region. 2484 */ 2485 fields &= ~XFS_AGI_ALL_BITS_R1; 2486 if (fields) { 2487 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2, 2488 &first, &last); 2489 xfs_trans_log_buf(tp, bp, first, last); 2490 } 2491 } 2492 2493 static xfs_failaddr_t 2494 xfs_agi_verify( 2495 struct xfs_buf *bp) 2496 { 2497 struct xfs_mount *mp = bp->b_mount; 2498 struct xfs_agi *agi = bp->b_addr; 2499 xfs_failaddr_t fa; 2500 uint32_t agi_seqno = be32_to_cpu(agi->agi_seqno); 2501 uint32_t agi_length = be32_to_cpu(agi->agi_length); 2502 int i; 2503 2504 if (xfs_has_crc(mp)) { 2505 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid)) 2506 return __this_address; 2507 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn))) 2508 return __this_address; 2509 } 2510 2511 /* 2512 * Validate the magic number of the agi block. 
2513 */ 2514 if (!xfs_verify_magic(bp, agi->agi_magicnum)) 2515 return __this_address; 2516 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) 2517 return __this_address; 2518 2519 fa = xfs_validate_ag_length(bp, agi_seqno, agi_length); 2520 if (fa) 2521 return fa; 2522 2523 if (be32_to_cpu(agi->agi_level) < 1 || 2524 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels) 2525 return __this_address; 2526 2527 if (xfs_has_finobt(mp) && 2528 (be32_to_cpu(agi->agi_free_level) < 1 || 2529 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels)) 2530 return __this_address; 2531 2532 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { 2533 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO)) 2534 continue; 2535 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i]))) 2536 return __this_address; 2537 } 2538 2539 return NULL; 2540 } 2541 2542 static void 2543 xfs_agi_read_verify( 2544 struct xfs_buf *bp) 2545 { 2546 struct xfs_mount *mp = bp->b_mount; 2547 xfs_failaddr_t fa; 2548 2549 if (xfs_has_crc(mp) && 2550 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) 2551 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 2552 else { 2553 fa = xfs_agi_verify(bp); 2554 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI)) 2555 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2556 } 2557 } 2558 2559 static void 2560 xfs_agi_write_verify( 2561 struct xfs_buf *bp) 2562 { 2563 struct xfs_mount *mp = bp->b_mount; 2564 struct xfs_buf_log_item *bip = bp->b_log_item; 2565 struct xfs_agi *agi = bp->b_addr; 2566 xfs_failaddr_t fa; 2567 2568 fa = xfs_agi_verify(bp); 2569 if (fa) { 2570 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2571 return; 2572 } 2573 2574 if (!xfs_has_crc(mp)) 2575 return; 2576 2577 if (bip) 2578 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); 2579 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); 2580 } 2581 2582 const struct xfs_buf_ops xfs_agi_buf_ops = { 2583 .name = "xfs_agi", 2584 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) }, 2585 .verify_read = xfs_agi_read_verify, 2586 .verify_write = xfs_agi_write_verify, 2587 .verify_struct = xfs_agi_verify, 2588 }; 2589 2590 /* 2591 * Read in the allocation group header (inode allocation section) 2592 */ 2593 int 2594 xfs_read_agi( 2595 struct xfs_perag *pag, 2596 struct xfs_trans *tp, 2597 struct xfs_buf **agibpp) 2598 { 2599 struct xfs_mount *mp = pag->pag_mount; 2600 int error; 2601 2602 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno); 2603 2604 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 2605 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)), 2606 XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops); 2607 if (error) 2608 return error; 2609 if (tp) 2610 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF); 2611 2612 xfs_buf_set_ref(*agibpp, XFS_AGI_REF); 2613 return 0; 2614 } 2615 2616 /* 2617 * Read in the agi and initialise the per-ag data. If the caller supplies a 2618 * @agibpp, return the locked AGI buffer to them, otherwise release it. 
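 * Callers that only need pagi_count/pagi_freecount initialised can pass a
 * NULL @agibpp and let the buffer be released here.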
2619 */ 2620 int 2621 xfs_ialloc_read_agi( 2622 struct xfs_perag *pag, 2623 struct xfs_trans *tp, 2624 struct xfs_buf **agibpp) 2625 { 2626 struct xfs_buf *agibp; 2627 struct xfs_agi *agi; 2628 int error; 2629 2630 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno); 2631 2632 error = xfs_read_agi(pag, tp, &agibp); 2633 if (error) 2634 return error; 2635 2636 agi = agibp->b_addr; 2637 if (!xfs_perag_initialised_agi(pag)) { 2638 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 2639 pag->pagi_count = be32_to_cpu(agi->agi_count); 2640 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate); 2641 } 2642 2643 /* 2644 * It's possible for these to be out of sync if 2645 * we are in the middle of a forced shutdown. 2646 */ 2647 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || 2648 xfs_is_shutdown(pag->pag_mount)); 2649 if (agibpp) 2650 *agibpp = agibp; 2651 else 2652 xfs_trans_brelse(tp, agibp); 2653 return 0; 2654 } 2655 2656 /* How many inodes are backed by inode clusters ondisk? */ 2657 STATIC int 2658 xfs_ialloc_count_ondisk( 2659 struct xfs_btree_cur *cur, 2660 xfs_agino_t low, 2661 xfs_agino_t high, 2662 unsigned int *allocated) 2663 { 2664 struct xfs_inobt_rec_incore irec; 2665 unsigned int ret = 0; 2666 int has_record; 2667 int error; 2668 2669 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record); 2670 if (error) 2671 return error; 2672 2673 while (has_record) { 2674 unsigned int i, hole_idx; 2675 2676 error = xfs_inobt_get_rec(cur, &irec, &has_record); 2677 if (error) 2678 return error; 2679 if (irec.ir_startino > high) 2680 break; 2681 2682 for (i = 0; i < XFS_INODES_PER_CHUNK; i++) { 2683 if (irec.ir_startino + i < low) 2684 continue; 2685 if (irec.ir_startino + i > high) 2686 break; 2687 2688 hole_idx = i / XFS_INODES_PER_HOLEMASK_BIT; 2689 if (!(irec.ir_holemask & (1U << hole_idx))) 2690 ret++; 2691 } 2692 2693 error = xfs_btree_increment(cur, 0, &has_record); 2694 if (error) 2695 return error; 2696 } 2697 2698 *allocated = ret; 2699 return 0; 2700 } 2701 2702 /* Is there an inode record covering a given extent? */ 2703 int 2704 xfs_ialloc_has_inodes_at_extent( 2705 struct xfs_btree_cur *cur, 2706 xfs_agblock_t bno, 2707 xfs_extlen_t len, 2708 enum xbtree_recpacking *outcome) 2709 { 2710 xfs_agino_t agino; 2711 xfs_agino_t last_agino; 2712 unsigned int allocated; 2713 int error; 2714 2715 agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno); 2716 last_agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1; 2717 2718 error = xfs_ialloc_count_ondisk(cur, agino, last_agino, &allocated); 2719 if (error) 2720 return error; 2721 2722 if (allocated == 0) 2723 *outcome = XBTREE_RECPACKING_EMPTY; 2724 else if (allocated == last_agino - agino + 1) 2725 *outcome = XBTREE_RECPACKING_FULL; 2726 else 2727 *outcome = XBTREE_RECPACKING_SPARSE; 2728 return 0; 2729 } 2730 2731 struct xfs_ialloc_count_inodes { 2732 xfs_agino_t count; 2733 xfs_agino_t freecount; 2734 }; 2735 2736 /* Record inode counts across all inobt records. 
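 * Each record is run through the incore conversion and sanity checks before
 * its ir_count and ir_freecount are added to the running totals.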
*/ 2737 STATIC int 2738 xfs_ialloc_count_inodes_rec( 2739 struct xfs_btree_cur *cur, 2740 const union xfs_btree_rec *rec, 2741 void *priv) 2742 { 2743 struct xfs_inobt_rec_incore irec; 2744 struct xfs_ialloc_count_inodes *ci = priv; 2745 xfs_failaddr_t fa; 2746 2747 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec); 2748 fa = xfs_inobt_check_irec(cur->bc_ag.pag, &irec); 2749 if (fa) 2750 return xfs_inobt_complain_bad_rec(cur, fa, &irec); 2751 2752 ci->count += irec.ir_count; 2753 ci->freecount += irec.ir_freecount; 2754 2755 return 0; 2756 } 2757 2758 /* Count allocated and free inodes under an inobt. */ 2759 int 2760 xfs_ialloc_count_inodes( 2761 struct xfs_btree_cur *cur, 2762 xfs_agino_t *count, 2763 xfs_agino_t *freecount) 2764 { 2765 struct xfs_ialloc_count_inodes ci = {0}; 2766 int error; 2767 2768 ASSERT(cur->bc_btnum == XFS_BTNUM_INO); 2769 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci); 2770 if (error) 2771 return error; 2772 2773 *count = ci.count; 2774 *freecount = ci.freecount; 2775 return 0; 2776 } 2777 2778 /* 2779 * Initialize inode-related geometry information. 2780 * 2781 * Compute the inode btree min and max levels and set maxicount. 2782 * 2783 * Set the inode cluster size. This may still be overridden by the file 2784 * system block size if it is larger than the chosen cluster size. 2785 * 2786 * For v5 filesystems, scale the cluster size with the inode size to keep a 2787 * constant ratio of inode per cluster buffer, but only if mkfs has set the 2788 * inode alignment value appropriately for larger cluster sizes. 2789 * 2790 * Then compute the inode cluster alignment information. 2791 */ 2792 void 2793 xfs_ialloc_setup_geometry( 2794 struct xfs_mount *mp) 2795 { 2796 struct xfs_sb *sbp = &mp->m_sb; 2797 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2798 uint64_t icount; 2799 uint inodes; 2800 2801 igeo->new_diflags2 = 0; 2802 if (xfs_has_bigtime(mp)) 2803 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME; 2804 if (xfs_has_large_extent_counts(mp)) 2805 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64; 2806 2807 /* Compute inode btree geometry. */ 2808 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 2809 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); 2810 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); 2811 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2; 2812 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2; 2813 2814 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK, 2815 sbp->sb_inopblock); 2816 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog; 2817 2818 if (sbp->sb_spino_align) 2819 igeo->ialloc_min_blks = sbp->sb_spino_align; 2820 else 2821 igeo->ialloc_min_blks = igeo->ialloc_blks; 2822 2823 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */ 2824 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG; 2825 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr, 2826 inodes); 2827 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk()); 2828 2829 /* 2830 * Set the maximum inode count for this filesystem, being careful not 2831 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular 2832 * users should never get here due to failing sb verification, but 2833 * certain users (xfs_db) need to be usable even with corrupt metadata. 2834 */ 2835 if (sbp->sb_imax_pct && igeo->ialloc_blks) { 2836 /* 2837 * Make sure the maximum inode count is a multiple 2838 * of the units we allocate inodes in. 
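 * For example (illustrative values, not part of the original comment): with
 * sb_dblocks = 1000, sb_imax_pct = 25 and ialloc_blks = 8, icount is scaled
 * to 250 blocks, rounded down to 31 chunks (248 blocks), and maxicount
 * becomes XFS_FSB_TO_INO(mp, 248), which is 3968 inodes at 16 inodes per
 * block.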
2839 */ 2840 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 2841 do_div(icount, 100); 2842 do_div(icount, igeo->ialloc_blks); 2843 igeo->maxicount = XFS_FSB_TO_INO(mp, 2844 icount * igeo->ialloc_blks); 2845 } else { 2846 igeo->maxicount = 0; 2847 } 2848 2849 /* 2850 * Compute the desired size of an inode cluster buffer size, which 2851 * starts at 8K and (on v5 filesystems) scales up with larger inode 2852 * sizes. 2853 * 2854 * Preserve the desired inode cluster size because the sparse inodes 2855 * feature uses that desired size (not the actual size) to compute the 2856 * sparse inode alignment. The mount code validates this value, so we 2857 * cannot change the behavior. 2858 */ 2859 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; 2860 if (xfs_has_v3inodes(mp)) { 2861 int new_size = igeo->inode_cluster_size_raw; 2862 2863 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 2864 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 2865 igeo->inode_cluster_size_raw = new_size; 2866 } 2867 2868 /* Calculate inode cluster ratios. */ 2869 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize) 2870 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp, 2871 igeo->inode_cluster_size_raw); 2872 else 2873 igeo->blocks_per_cluster = 1; 2874 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster); 2875 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster); 2876 2877 /* Calculate inode cluster alignment. */ 2878 if (xfs_has_align(mp) && 2879 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster) 2880 igeo->cluster_align = mp->m_sb.sb_inoalignmt; 2881 else 2882 igeo->cluster_align = 1; 2883 igeo->inoalign_mask = igeo->cluster_align - 1; 2884 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align); 2885 2886 /* 2887 * If we are using stripe alignment, check whether 2888 * the stripe unit is a multiple of the inode alignment 2889 */ 2890 if (mp->m_dalign && igeo->inoalign_mask && 2891 !(mp->m_dalign & igeo->inoalign_mask)) 2892 igeo->ialloc_align = mp->m_dalign; 2893 else 2894 igeo->ialloc_align = 0; 2895 } 2896 2897 /* Compute the location of the root directory inode that is laid out by mkfs. */ 2898 xfs_ino_t 2899 xfs_ialloc_calc_rootino( 2900 struct xfs_mount *mp, 2901 int sunit) 2902 { 2903 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2904 xfs_agblock_t first_bno; 2905 2906 /* 2907 * Pre-calculate the geometry of AG 0. We know what it looks like 2908 * because libxfs knows how to create allocation groups now. 2909 * 2910 * first_bno is the first block in which mkfs could possibly have 2911 * allocated the root directory inode, once we factor in the metadata 2912 * that mkfs formats before it. Namely, the four AG headers... 2913 */ 2914 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize); 2915 2916 /* ...the two free space btree roots... */ 2917 first_bno += 2; 2918 2919 /* ...the inode btree root... */ 2920 first_bno += 1; 2921 2922 /* ...the initial AGFL... */ 2923 first_bno += xfs_alloc_min_freelist(mp, NULL); 2924 2925 /* ...the free inode btree root... */ 2926 if (xfs_has_finobt(mp)) 2927 first_bno++; 2928 2929 /* ...the reverse mapping btree root... */ 2930 if (xfs_has_rmapbt(mp)) 2931 first_bno++; 2932 2933 /* ...the reference count btree... */ 2934 if (xfs_has_reflink(mp)) 2935 first_bno++; 2936 2937 /* 2938 * ...and the log, if it is allocated in the first allocation group. 
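 * In that case all sb_logblocks blocks of the internal log sit ahead of the
 * first possible inode chunk and are added to first_bno below.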
2939 * 2940 * This can happen with filesystems that only have a single 2941 * allocation group, or very odd geometries created by old mkfs 2942 * versions on very small filesystems. 2943 */ 2944 if (xfs_ag_contains_log(mp, 0)) 2945 first_bno += mp->m_sb.sb_logblocks; 2946 2947 /* 2948 * Now round first_bno up to whatever allocation alignment is given 2949 * by the filesystem or was passed in. 2950 */ 2951 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0) 2952 first_bno = roundup(first_bno, sunit); 2953 else if (xfs_has_align(mp) && 2954 mp->m_sb.sb_inoalignmt > 1) 2955 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); 2956 2957 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); 2958 } 2959 2960 /* 2961 * Ensure there are not sparse inode clusters that cross the new EOAG. 2962 * 2963 * This is a no-op for non-spinode filesystems since clusters are always fully 2964 * allocated and checking the bnobt suffices. However, a spinode filesystem 2965 * could have a record where the upper inodes are free blocks. If those blocks 2966 * were removed from the filesystem, the inode record would extend beyond EOAG, 2967 * which will be flagged as corruption. 2968 */ 2969 int 2970 xfs_ialloc_check_shrink( 2971 struct xfs_perag *pag, 2972 struct xfs_trans *tp, 2973 struct xfs_buf *agibp, 2974 xfs_agblock_t new_length) 2975 { 2976 struct xfs_inobt_rec_incore rec; 2977 struct xfs_btree_cur *cur; 2978 xfs_agino_t agino; 2979 int has; 2980 int error; 2981 2982 if (!xfs_has_sparseinodes(pag->pag_mount)) 2983 return 0; 2984 2985 cur = xfs_inobt_init_cursor(pag, tp, agibp, XFS_BTNUM_INO); 2986 2987 /* Look up the inobt record that would correspond to the new EOFS. */ 2988 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length); 2989 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); 2990 if (error || !has) 2991 goto out; 2992 2993 error = xfs_inobt_get_rec(cur, &rec, &has); 2994 if (error) 2995 goto out; 2996 2997 if (!has) { 2998 error = -EFSCORRUPTED; 2999 goto out; 3000 } 3001 3002 /* If the record covers inodes that would be beyond EOFS, bail out. */ 3003 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { 3004 error = -ENOSPC; 3005 goto out; 3006 } 3007 out: 3008 xfs_btree_del_cursor(cur, error); 3009 return error; 3010 } 3011